def DeletePath( path, always_delete_fully = False ):
    
    if HC.options[ 'delete_to_recycle_bin' ] == True and not always_delete_fully:
        
        HydrusPaths.RecyclePath( path )
        
    else:
        
        HydrusPaths.DeletePath( path )
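# A minimal standalone sketch of the same recycle-or-delete decision, for
# illustration only: the third-party send2trash library stands in for
# HydrusPaths.RecyclePath, and the recycle-bin option is passed in rather than
# read from HC.options. These names are assumptions, not hydrus API.

import os
import shutil

import send2trash

def delete_path_sketch( path, always_delete_fully = False, delete_to_recycle_bin = True ):
    
    if delete_to_recycle_bin and not always_delete_fully:
        
        send2trash.send2trash( path ) # move to the OS recycle bin
        
    elif os.path.isdir( path ):
        
        shutil.rmtree( path ) # permanent delete, directory tree
        
    else:
        
        os.remove( path ) # permanent delete, single file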
def ShutdownModel( self ) -> None:
    
    if self.db is not None:
        
        self.db.Shutdown()
        
        while not self.db.LoopIsFinished():
            
            self._PublishShutdownSubtext( 'waiting for db to finish up\u2026' )
            
            time.sleep( 0.1 )
            
    if self._fast_job_scheduler is not None:
        
        self._fast_job_scheduler.shutdown()
        
        self._fast_job_scheduler = None
        
    if self._slow_job_scheduler is not None:
        
        self._slow_job_scheduler.shutdown()
        
        self._slow_job_scheduler = None
        
    if hasattr( self, 'temp_dir' ):
        
        HydrusPaths.DeletePath( self.temp_dir )
        
    with self._call_to_thread_lock:
        
        for call_to_thread in self._call_to_threads:
            
            call_to_thread.shutdown()
            
        for long_running_call_to_thread in self._long_running_call_to_threads:
            
            long_running_call_to_thread.shutdown()
            
    HG.model_shutdown = True
    
    self._pubsub.Wake()
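# The shutdown above blocks until the db loop reports finished, publishing a
# status message while it waits. A minimal standalone sketch of that
# poll-and-report shape; the callables here are hypothetical stand-ins:

import time

def wait_until_finished( is_finished, report_status, poll_interval = 0.1 ):
    
    while not is_finished():
        
        report_status( 'waiting for db to finish up\u2026' )
        
        time.sleep( poll_interval )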
def MainLoop( self ):
    
    try:
        
        self._InitDBCursor() # have to reinitialise because the thread id has changed
        
        self._InitDiskCache()
        
        self._InitCaches()
        
    except:
        
        self._DisplayCatastrophicError( traceback.format_exc() )
        
        self._could_not_initialise = True
        
        return
        
    self._ready_to_serve_requests = True
    
    error_count = 0
    
    while not ( ( self._local_shutdown or HG.model_shutdown ) and self._jobs.empty() ):
        
        try:
            
            job = self._jobs.get( timeout = 1 )
            
            self._currently_doing_job = True
            self._current_job_name = job.ToString()
            
            self.publish_status_update()
            
            try:
                
                if HG.db_report_mode:
                    
                    summary = 'Running ' + job.ToString()
                    
                    HydrusData.ShowText( summary )
                    
                if HG.db_profile_mode:
                    
                    summary = 'Profiling ' + job.ToString()
                    
                    HydrusData.ShowText( summary )
                    
                    HydrusData.Profile( summary, 'self._ProcessJob( job )', globals(), locals() )
                    
                else:
                    
                    self._ProcessJob( job )
                    
                error_count = 0
                
            except:
                
                error_count += 1
                
                if error_count > 5:
                    
                    raise
                    
                self._jobs.put( job ) # couldn't lock db; put job back on queue
                
                time.sleep( 5 )
                
            self._currently_doing_job = False
            self._current_job_name = ''
            
            self.publish_status_update()
            
        except queue.Empty:
            
            if self._transaction_contains_writes and HydrusData.TimeHasPassed( self._transaction_started + self.TRANSACTION_COMMIT_TIME ):
                
                self._Commit()
                
                self._BeginImmediate()
                
                self._transaction_contains_writes = False
                
            if HydrusData.TimeHasPassed( self._connection_timestamp + CONNECTION_REFRESH_TIME ):
                
                self._InitDBCursor() # just to clear out the journal files
                
            if self._pause_and_disconnect:
                
                self._CloseDBCursor()
                
                while self._pause_and_disconnect:
                    
                    if self._local_shutdown or HG.model_shutdown:
                        
                        break
                        
                    time.sleep( 1 )
                    
                self._InitDBCursor()
                
    self._CleanUpCaches()
    
    self._CloseDBCursor()
    
    temp_path = os.path.join( self._db_dir, self._durable_temp_db_filename )
    
    HydrusPaths.DeletePath( temp_path )
    
    self._loop_finished = True
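# The loop above is a classic single-writer worker: block on the job queue with
# a short timeout, and use the queue.Empty idle path for periodic maintenance
# (commits, connection refresh, pause/disconnect). A minimal standalone sketch
# of that shape; the callables and names are hypothetical stand-ins:

import queue
import threading

def db_worker_loop( jobs: queue.Queue, shutdown: threading.Event, process_job, do_idle_maintenance ):
    
    # keep draining jobs even after shutdown is signalled, until the queue is empty
    while not ( shutdown.is_set() and jobs.empty() ):
        
        try:
            
            job = jobs.get( timeout = 1 )
            
            process_job( job )
            
        except queue.Empty:
            
            do_idle_maintenance() # idle moment: commit transactions, refresh connections, etc.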
def _DoExport( self ):
    
    query_hash_ids = HG.client_controller.Read( 'file_query_ids', self._file_search_context )
    
    media_results = []
    
    i = 0
    
    base = 256
    
    while i < len( query_hash_ids ):
        
        if HG.client_controller.new_options.GetBoolean( 'pause_export_folders_sync' ) or HydrusThreading.IsThreadShuttingDown():
            
            return
            
        if i == 0:
            
            ( last_i, i ) = ( 0, base )
            
        else:
            
            ( last_i, i ) = ( i, i + base )
            
        sub_query_hash_ids = query_hash_ids[ last_i : i ]
        
        more_media_results = HG.client_controller.Read( 'media_results_from_ids', sub_query_hash_ids )
        
        media_results.extend( more_media_results )
        
    media_results.sort( key = lambda mr: mr.GetHashId() )
    
    #
    
    terms = ParseExportPhrase( self._phrase )
    
    previous_paths = set()
    
    for ( root, dirnames, filenames ) in os.walk( self._path ):
        
        previous_paths.update( ( os.path.join( root, filename ) for filename in filenames ) )
        
    sync_paths = set()
    
    client_files_manager = HG.client_controller.client_files_manager
    
    num_copied = 0
    
    for media_result in media_results:
        
        if HG.client_controller.new_options.GetBoolean( 'pause_export_folders_sync' ) or HydrusThreading.IsThreadShuttingDown():
            
            return
            
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        size = media_result.GetSize()
        
        try:
            
            source_path = client_files_manager.GetFilePath( hash, mime )
            
        except HydrusExceptions.FileMissingException:
            
            raise Exception( 'A file to be exported, hash "{}", was missing! You should run file maintenance (under database->maintenance->files) to check the files for the export folder\'s search, and possibly all your files.'.format( hash.hex() ) )
            
        filename = GenerateExportFilename( self._path, media_result, terms )
        
        dest_path = os.path.normpath( os.path.join( self._path, filename ) )
        
        if not dest_path.startswith( self._path ):
            
            raise Exception( 'It seems a destination path for export folder "{}" was above the main export directory! The file was "{}" and its destination path was "{}".'.format( self._path, hash.hex(), dest_path ) )
            
        dest_path_dir = os.path.dirname( dest_path )
        
        HydrusPaths.MakeSureDirectoryExists( dest_path_dir )
        
        if dest_path not in sync_paths:
            
            copied = HydrusPaths.MirrorFile( source_path, dest_path )
            
            if copied:
                
                num_copied += 1
                
                HydrusPaths.TryToGiveFileNicePermissionBits( dest_path )
                
        sync_paths.add( dest_path )
        
    if num_copied > 0:
        
        HydrusData.Print( 'Export folder ' + self._name + ' exported ' + HydrusData.ToHumanInt( num_copied ) + ' files.' )
        
    if self._export_type == HC.EXPORT_FOLDER_TYPE_SYNCHRONISE:
        
        deletee_paths = previous_paths.difference( sync_paths )
        
        for deletee_path in deletee_paths:
            
            ClientPaths.DeletePath( deletee_path )
            
        deletee_dirs = set()
        
        for ( root, dirnames, filenames ) in os.walk( self._path, topdown = False ):
            
            if root == self._path:
                
                continue
                
            no_files = len( filenames ) == 0
            
            useful_dirnames = [ dirname for dirname in dirnames if os.path.join( root, dirname ) not in deletee_dirs ]
            
            no_useful_dirs = len( useful_dirnames ) == 0
            
            if no_useful_dirs and no_files:
                
                deletee_dirs.add( root )
                
        for deletee_dir in deletee_dirs:
            
            if os.path.exists( deletee_dir ):
                
                HydrusPaths.DeletePath( deletee_dir )
                
        if len( deletee_paths ) > 0:
            
            HydrusData.Print( 'Export folder {} deleted {} files and {} folders.'.format( self._name, HydrusData.ToHumanInt( len( deletee_paths ) ), HydrusData.ToHumanInt( len( deletee_dirs ) ) ) )
            
    if self._delete_from_client_after_export:
        
        local_file_service_keys = HG.client_controller.services_manager.GetServiceKeys( ( HC.LOCAL_FILE_DOMAIN, ) )
        
        service_keys_to_deletee_hashes = collections.defaultdict( list )
        
        delete_lock_for_archived_files = HG.client_controller.new_options.GetBoolean( 'delete_lock_for_archived_files' )
        
        for media_result in media_results:
            
            if delete_lock_for_archived_files and not media_result.GetInbox():
                
                continue
                
            hash = media_result.GetHash()
            
            deletee_service_keys = media_result.GetLocationsManager().GetCurrent().intersection( local_file_service_keys )
            
            for deletee_service_key in deletee_service_keys:
                
                service_keys_to_deletee_hashes[ deletee_service_key ].append( hash )
                
        reason = 'Deleted after export to Export Folder "{}".'.format( self._path )
        
        for ( service_key, deletee_hashes ) in service_keys_to_deletee_hashes.items():
            
            chunks_of_hashes = HydrusData.SplitListIntoChunks( deletee_hashes, 64 )
            
            for chunk_of_hashes in chunks_of_hashes:
                
                content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, chunk_of_hashes, reason = reason )
                
                HG.client_controller.WriteSynchronous( 'content_updates', { service_key : [ content_update ] } )
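# The synchronise branch above is a mirror-then-prune pattern: record every file
# already under the export directory, copy the wanted set in, then delete
# whatever was not re-synced. A minimal standalone sketch of the same shape
# using plain os/shutil; the names here are hypothetical stand-ins:

import os
import shutil

def synchronise_dir_sketch( sources_to_dests, export_dir ):
    
    # everything currently in the export directory
    previous_paths = set()
    
    for ( root, dirnames, filenames ) in os.walk( export_dir ):
        
        previous_paths.update( os.path.join( root, filename ) for filename in filenames )
        
    # mirror the wanted files in
    sync_paths = set()
    
    for ( source_path, dest_path ) in sources_to_dests.items():
        
        os.makedirs( os.path.dirname( dest_path ), exist_ok = True )
        
        if not os.path.exists( dest_path ):
            
            shutil.copy2( source_path, dest_path ) # copy with metadata
            
        sync_paths.add( dest_path )
        
    # prune anything the sync no longer wants
    for orphan_path in previous_paths.difference( sync_paths ):
        
        os.remove( orphan_path )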
def MainLoop(self):
    
    try:
        
        self._InitDBCursor() # have to reinitialise because the thread id has changed
        
        self._InitCaches()
        
    except:
        
        self._DisplayCatastrophicError(traceback.format_exc())
        
        self._could_not_initialise = True
        
        return
        
    self._ready_to_serve_requests = True
    
    error_count = 0
    
    while not ((self._local_shutdown or HG.model_shutdown) and self._jobs.empty()):
        
        try:
            
            job = self._jobs.get(timeout=1)
            
            self._currently_doing_job = True
            self._current_job_name = job.ToString()
            
            self.publish_status_update()
            
            try:
                
                if HG.db_report_mode:
                    
                    summary = 'Running ' + job.ToString()
                    
                    HydrusData.ShowText(summary)
                    
                if HG.db_profile_mode:
                    
                    summary = 'Profiling ' + job.ToString()
                    
                    HydrusData.Profile(summary, 'self._ProcessJob( job )', globals(), locals(), show_summary=True)
                    
                else:
                    
                    self._ProcessJob(job)
                    
                error_count = 0
                
            except:
                
                error_count += 1
                
                if error_count > 5:
                    
                    raise
                    
                self._jobs.put(job) # couldn't lock db; put job back on queue
                
                time.sleep(5)
                
            self._currently_doing_job = False
            self._current_job_name = ''
            
            self.publish_status_update()
            
        except queue.Empty:
            
            if self._cursor_transaction_wrapper.TimeToCommit():
                
                self._cursor_transaction_wrapper.CommitAndBegin()
                
            if self._pause_and_disconnect:
                
                self._CloseDBCursor()
                
                while self._pause_and_disconnect:
                    
                    if self._local_shutdown or HG.model_shutdown:
                        
                        break
                        
                    time.sleep(1)
                    
                self._InitDBCursor()
                
    self._CloseDBCursor()
    
    temp_path = os.path.join(self._db_dir, self._durable_temp_db_filename)
    
    HydrusPaths.DeletePath(temp_path)
    
    self._loop_finished = True
def TidyUp(self):
    
    time.sleep(2) # brief pause, presumably to let lingering workers release file handles before the delete
    
    HydrusPaths.DeletePath(self.db_dir)
def _DoExport( self ):
    
    query_hash_ids = HG.client_controller.Read( 'file_query_ids', self._file_search_context )
    
    media_results = []
    
    i = 0
    
    base = 256
    
    while i < len( query_hash_ids ):
        
        if HC.options[ 'pause_export_folders_sync' ] or HydrusThreading.IsThreadShuttingDown():
            
            return
            
        if i == 0:
            
            ( last_i, i ) = ( 0, base )
            
        else:
            
            ( last_i, i ) = ( i, i + base )
            
        sub_query_hash_ids = query_hash_ids[ last_i : i ]
        
        more_media_results = HG.client_controller.Read( 'media_results_from_ids', sub_query_hash_ids )
        
        media_results.extend( more_media_results )
        
    media_results.sort( key = lambda mr: mr.GetHashId() )
    
    #
    
    terms = ParseExportPhrase( self._phrase )
    
    previous_paths = set()
    
    for ( root, dirnames, filenames ) in os.walk( self._path ):
        
        previous_paths.update( ( os.path.join( root, filename ) for filename in filenames ) )
        
    sync_paths = set()
    
    client_files_manager = HG.client_controller.client_files_manager
    
    num_copied = 0
    
    for media_result in media_results:
        
        if HC.options[ 'pause_export_folders_sync' ] or HydrusThreading.IsThreadShuttingDown():
            
            return
            
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        size = media_result.GetSize()
        
        source_path = client_files_manager.GetFilePath( hash, mime )
        
        filename = GenerateExportFilename( self._path, media_result, terms )
        
        dest_path = os.path.normpath( os.path.join( self._path, filename ) )
        
        if not dest_path.startswith( self._path ):
            
            raise Exception( 'It seems a destination path for export folder "{}" was above the main export directory! The file was "{}" and its destination path was "{}".'.format( self._path, hash.hex(), dest_path ) )
            
        dest_path_dir = os.path.dirname( dest_path )
        
        HydrusPaths.MakeSureDirectoryExists( dest_path_dir )
        
        if dest_path not in sync_paths:
            
            copied = HydrusPaths.MirrorFile( source_path, dest_path )
            
            if copied:
                
                num_copied += 1
                
                HydrusPaths.MakeFileWritable( dest_path )
                
        sync_paths.add( dest_path )
        
    if num_copied > 0:
        
        HydrusData.Print( 'Export folder ' + self._name + ' exported ' + HydrusData.ToHumanInt( num_copied ) + ' files.' )
        
    if self._export_type == HC.EXPORT_FOLDER_TYPE_SYNCHRONISE:
        
        deletee_paths = previous_paths.difference( sync_paths )
        
        for deletee_path in deletee_paths:
            
            ClientPaths.DeletePath( deletee_path )
            
        deletee_dirs = set()
        
        for ( root, dirnames, filenames ) in os.walk( self._path, topdown = False ):
            
            if root == self._path:
                
                continue
                
            no_files = len( filenames ) == 0
            
            useful_dirnames = [ dirname for dirname in dirnames if os.path.join( root, dirname ) not in deletee_dirs ]
            
            no_useful_dirs = len( useful_dirnames ) == 0
            
            if no_useful_dirs and no_files:
                
                deletee_dirs.add( root )
                
        for deletee_dir in deletee_dirs:
            
            if os.path.exists( deletee_dir ):
                
                HydrusPaths.DeletePath( deletee_dir )
                
        if len( deletee_paths ) > 0:
            
            HydrusData.Print( 'Export folder {} deleted {} files and {} folders.'.format( self._name, HydrusData.ToHumanInt( len( deletee_paths ) ), HydrusData.ToHumanInt( len( deletee_dirs ) ) ) )
            
    if self._delete_from_client_after_export:
        
        deletee_hashes = { media_result.GetHash() for media_result in media_results }
        
        chunks_of_hashes = HydrusData.SplitListIntoChunks( deletee_hashes, 64 )
        
        reason = 'Deleted after export to Export Folder "{}".'.format( self._path )
        
        content_updates = [ HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, chunk_of_hashes, reason = reason ) for chunk_of_hashes in chunks_of_hashes ]
        
        for content_update in content_updates:
            
            HG.client_controller.WriteSynchronous( 'content_updates', { CC.LOCAL_FILE_SERVICE_KEY : [ content_update ] } )
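# The delete-after-export branch above batches hashes into chunks of 64 before
# issuing content updates, which keeps each synchronous db write small. A
# minimal standalone sketch of that chunking helper; the name is hypothetical,
# not the real HydrusData.SplitListIntoChunks:

def split_into_chunks_sketch( items, chunk_size ):
    
    items = list( items ) # accept any iterable, e.g. a set of hashes
    
    for i in range( 0, len( items ), chunk_size ):
        
        yield items[ i : i + chunk_size ]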
def __init__(self, controller, db_dir, db_name):
    
    if HydrusPaths.GetFreeSpace(db_dir) < 500 * 1048576: # 500MB
        
        raise Exception('Sorry, it looks like the db partition has less than 500MB free, please free up some space.')
        
    HydrusDBBase.DBBase.__init__(self)
    
    self._controller = controller
    self._db_dir = db_dir
    self._db_name = db_name
    
    self._modules = []
    
    HydrusDBBase.TemporaryIntegerTableNameCache()
    
    self._ssl_cert_filename = '{}.crt'.format(self._db_name)
    self._ssl_key_filename = '{}.key'.format(self._db_name)
    
    self._ssl_cert_path = os.path.join(self._db_dir, self._ssl_cert_filename)
    self._ssl_key_path = os.path.join(self._db_dir, self._ssl_key_filename)
    
    main_db_filename = db_name
    
    if not main_db_filename.endswith('.db'):
        
        main_db_filename += '.db'
        
    self._db_filenames = {}
    
    self._db_filenames['main'] = main_db_filename
    
    self._durable_temp_db_filename = db_name + '.temp.db'
    
    durable_temp_db_path = os.path.join(self._db_dir, self._durable_temp_db_filename)
    
    if os.path.exists(durable_temp_db_path):
        
        HydrusPaths.DeletePath(durable_temp_db_path)
        
        wal_lad = durable_temp_db_path + '-wal'
        
        if os.path.exists(wal_lad):
            
            HydrusPaths.DeletePath(wal_lad)
            
        shm_lad = durable_temp_db_path + '-shm'
        
        if os.path.exists(shm_lad):
            
            HydrusPaths.DeletePath(shm_lad)
            
        HydrusData.Print('Found and deleted the durable temporary database on boot. The last exit was probably not clean.')
        
    self._InitExternalDatabases()
    
    self._is_first_start = False
    self._is_db_updated = False
    self._local_shutdown = False
    self._pause_and_disconnect = False
    self._loop_finished = False
    self._ready_to_serve_requests = False
    self._could_not_initialise = False
    
    self._jobs = queue.Queue()
    
    self._currently_doing_job = False
    self._current_status = ''
    self._current_job_name = ''
    
    self._db = None
    self._is_connected = False
    
    self._cursor_transaction_wrapper = None
    
    if os.path.exists(os.path.join(self._db_dir, self._db_filenames['main'])):
        
        # open and close to clean up in case last session didn't close well
        
        self._InitDB()
        self._CloseDBConnection()
        
    self._InitDB()
    
    (version,) = self._Execute('SELECT version FROM version;').fetchone()
    
    if version > HC.SOFTWARE_VERSION:
        
        self._ReportOverupdatedDB(version)
        
    if version < (HC.SOFTWARE_VERSION - 15):
        
        self._ReportUnderupdatedDB(version)
        
    if version < HC.SOFTWARE_VERSION - 50:
        
        raise Exception('Your current database version of hydrus ' + str(version) + ' is too old for this software version ' + str(HC.SOFTWARE_VERSION) + ' to update. Please try updating with version ' + str(version + 45) + ' or earlier first.')
        
    self._RepairDB(version)
    
    while version < HC.SOFTWARE_VERSION:
        
        time.sleep(self.UPDATE_WAIT)
        
        try:
            
            self._cursor_transaction_wrapper.BeginImmediate()
            
        except Exception as e:
            
            raise HydrusExceptions.DBAccessException(str(e))
            
        try:
            
            self._UpdateDB(version)
            
            self._cursor_transaction_wrapper.Commit()
            
            self._is_db_updated = True
            
        except:
            
            e = Exception('Updating the ' + self._db_name + ' db to version ' + str(version + 1) + ' caused this error:' + os.linesep + traceback.format_exc())
            
            try:
                
                self._cursor_transaction_wrapper.Rollback()
                
            except Exception as rollback_e:
                
                HydrusData.Print('When the update failed, attempting to rollback the database failed.')
                
                HydrusData.PrintException(rollback_e)
                
            raise e
            
        (version,) = self._Execute('SELECT version FROM version;').fetchone()
        
    self._CloseDBConnection()
    
    self._controller.CallToThreadLongRunning(self.MainLoop)
    
    while not self._ready_to_serve_requests:
        
        time.sleep(0.1)
        
        if self._could_not_initialise:
            
            raise Exception('Could not initialise the db! Error written to the log!')
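# The version loop above applies one schema update at a time, committing after
# each success and rolling back on failure, so an interrupted update never
# leaves the db between versions. A minimal standalone sketch of that pattern
# with plain sqlite3; update_to_next_version is a hypothetical migration
# callable, and the version table mirrors the one queried above:

import sqlite3

def update_db_sketch(db_path, target_version, update_to_next_version):
    
    db = sqlite3.connect(db_path)
    
    try:
        
        (version,) = db.execute('SELECT version FROM version;').fetchone()
        
        while version < target_version:
            
            try:
                
                update_to_next_version(db, version + 1) # hypothetical migration step
                
                db.commit()
                
            except:
                
                db.rollback() # leave the db at the last good version
                
                raise
                
            (version,) = db.execute('SELECT version FROM version;').fetchone()
            
    finally:
        
        db.close()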