def _PresentMissingTablesWarningToUser( self, table_names ):
    """Warn the user, via log and blocking dialog, that this module's tables are missing.
    
    table_names: iterable of missing table names; sorted before display.
    Fix: corrected the user-facing typo 'mising' -> 'missing' in the no-recovery text.
    """
    table_names = sorted( table_names )
    
    HydrusData.DebugPrint( 'The "{}" database module is missing the following tables:'.format( self.name ) )
    HydrusData.DebugPrint( os.linesep.join( table_names ) )
    
    message = 'Your "{}" database module was missing {} tables. More information has been written to the log. This is a serious problem.'.format( self.name, len( table_names ) )
    message += os.linesep * 2
    message += 'If this is happening on the first boot after an update, it is likely a fault in the update code. If you updated many versions in one go, kill the hydrus process now and update in a smaller version increment.'
    message += os.linesep * 2
    message += 'If this is just a normal boot, you most likely encountered hard drive damage. You should check "install_dir/db/help my db is broke.txt" for background reading. Whatever happens next, you need to check that your hard drive is healthy.'
    message += os.linesep * 2
    
    # CAN_REPOPULATE_ALL_MISSING_DATA is a per-module class flag: can this module
    # rebuild its tables from other stored data, or is the data simply gone?
    if self.CAN_REPOPULATE_ALL_MISSING_DATA:
        
        recovery_info = 'This module stores copies of core data and believes it can recover everything that was lost by recomputing its cache. It may do that immediately after this dialog, or it may be delayed to a later stage of boot. Either way, the regeneration job may take some time. There may also still be miscounts or other missing/incorrect data when you boot. Please let Hydev know how you get on.'
        
    else:
        
        recovery_info = 'Unfortunately, this module manages core data and may not be able to regenerate what was lost. The missing tables can be remade, but they will likely be empty. If you have a good functional backup, you should probably kill the hydrus process now, check your drive, and ultimately rollback to that backup. If you have no backup and must continue, you will likely encounter more problems with systems related to this module. With luck it will be something small, like a suddenly empty file maintenance queue, or it could be severe, such as not being able to load any file. If you are severely damaged with no backup, Hydev will be able to help figure out what to do next. If your backup is very old and you would rather not rollback to it, Hydev may be able to figure out a way to recover some of the missing data from that and still save most of your current database.'
        
    
    message += 'If you proceed, the missing tables will be recreated. {}'.format( recovery_info )
    
    # blocks until the user acknowledges; the tables are recreated after this returns
    BlockingSafeShowMessage( message )
    
    HG.client_controller.frame_splash_status.SetText( 'recreating tables' )
def _PresentMissingIndicesWarningToUser( self, index_names ):
    """Log the missing index names and show a blocking notice before regeneration."""
    
    sorted_names = sorted( index_names )
    
    HydrusData.DebugPrint( 'The "{}" database module is missing the following indices:'.format( self.name ) )
    HydrusData.DebugPrint( os.linesep.join( sorted_names ) )
    
    # indices are fully recoverable, so this is informational rather than alarming
    message = 'Your "{}" database module was missing {} indices. More information has been written to the log. This may or may not be a big deal, and on its own it is completely recoverable. If you do not have further problems, hydev does not need to know about it. The indices will be regenerated once you proceed--it may take some time.'.format( self.name, len( sorted_names ) )
    
    BlockingSafeShowMessage( message )
    
    HG.client_controller.frame_splash_status.SetText( 'recreating indices' )
def qt_code(win: QW.QWidget, job_key: ClientThreading.JobKey):
    # Runs on the Qt thread: executes the captured func and stores its result
    # (or error) on job_key, which the calling thread is blocking on.
    try:
        
        if win is not None and not QP.isValid(win):
            
            raise HydrusExceptions.QtDeadWindowException('Parent Window was destroyed before Qt command was called!')
            
        
        job_key.SetVariable('result', func(*args, **kwargs))
        
    except (HydrusExceptions.QtDeadWindowException, HydrusExceptions.DBCredentialsException, HydrusExceptions.ShutdownException) as e:
        
        # expected teardown-time failures: record quietly, no log spam
        job_key.SetErrorException(e)
        
    except Exception as e:
        
        job_key.SetErrorException(e)
        
        HydrusData.Print('CallBlockingToQt just caught this error:')
        HydrusData.DebugPrint(traceback.format_exc())
        
    finally:
        
        # always unblock the waiting thread
        job_key.Finish()
def SetJSONSimple(self, name, value):
    """Store value as JSON under name in json_dict; a None value deletes the row."""
    
    if value is None:
        
        self._c.execute('DELETE FROM json_dict WHERE name = ?;', (name, ))
        
        return
        
    
    dump = json.dumps(value)
    dump_buffer = GenerateBigSQLiteDumpBuffer(dump)
    
    try:
        
        self._c.execute(
            'REPLACE INTO json_dict ( name, dump ) VALUES ( ?, ? );',
            (name, dump_buffer))
        
    except:
        
        # dump the payload for post-mortem, then let the error propagate
        HydrusData.DebugPrint(dump)
        
        HydrusData.ShowText(
            'Had a problem saving a JSON object. The dump has been printed to the log.'
        )
        
        raise
def CatchExceptionClient(etype, value, tb):
    """Top-level exception hook: publish an uncaught exception as a popup message.
    
    etype/value/tb: the standard sys.excepthook triple.
    Fixes: the popup title now uses getattr(etype, '__name__', etype), matching
    ShowExceptionTupleClient, instead of a try/except around etype.__name__;
    the JobKey is no longer constructed before the ShutdownException early-return.
    """
    try:
        
        trace = ''.join(traceback.format_tb(tb))
        
        pretty_value = str(value)
        
        if os.linesep in pretty_value:
            
            # first line becomes the popup headline; the rest joins the traceback
            (first_line, anything_else) = pretty_value.split(os.linesep, 1)
            
            trace = trace + os.linesep + anything_else
            
        else:
            
            first_line = pretty_value
            
        
        if etype == HydrusExceptions.ShutdownException:
            
            # we are closing down anyway; nothing useful to show
            return
            
        
        job_key = ClientThreading.JobKey()
        
        # fall back to the type object itself if __name__ is somehow missing,
        # consistent with ShowExceptionTupleClient
        job_key.SetVariable('popup_title', str(getattr(etype, '__name__', etype)))
        
        job_key.SetVariable('popup_text_1', first_line)
        job_key.SetVariable('popup_traceback', trace)
        
        text = job_key.ToString()
        
        HydrusData.Print('Uncaught exception:')
        HydrusData.DebugPrint(text)
        
        HG.client_controller.pub('message', job_key)
        
    except:
        
        # the hook itself failed; fall back to a raw text report
        text = 'Encountered an error I could not parse:'
        text += os.linesep
        text += str((etype, value, tb))
        
        try:
            text += traceback.format_exc()
        except:
            pass
        
        HydrusData.ShowText(text)
        
    
    # give the popup a moment before whatever raised continues/terminates
    time.sleep(1)
def _DisplayCatastrophicError( self, text ):
    """Write a serious-error banner plus the given text to the log."""
    
    banner = 'The db encountered a serious error! This is going to be written to the log as well, but here it is for a screenshot:'
    
    HydrusData.DebugPrint( banner + os.linesep * 2 + text )
def _TryEndModal(self, value):
    """Attempt to end this modal dialog with the given result code.
    
    value: a QDialog result code (Accepted/Rejected).
    Returns True once a close has been attempted, False if the close was
    vetoed or the dialog is not actually modal any more.
    """
    if not self.isModal( ): # in some rare cases (including spammy AutoHotkey, looks like), this can be fired before the dialog can clean itself up
        
        return False
        
    
    # either of these can veto the close (validity check / unsaved-changes prompt)
    if not self._TestValidityAndPresentVetoMessage(value):
        
        return False
        
    
    if not self._UserIsOKToClose(value):
        
        return False
        
    
    if value == QW.QDialog.Rejected:
        
        self.SetCancelled(True)
        
    elif value == QW.QDialog.Accepted:
        
        # remember geometry only on a successful OK
        self._SaveOKPosition()
        
    
    self._DoClose(value)
    
    self.CleanBeforeDestroy()
    
    try:
        
        self.done(value)
        
    except Exception as e:
        
        HydrusData.ShowText( 'This dialog seems to have been unable to close for some reason. I am printing the stack to the log. The dialog may have already closed, or may attempt to close now. Please inform hydrus dev of this situation. I recommend you restart the client if you can. If the UI is locked, you will have to kill it via task manager.' )
        
        HydrusData.PrintException(e)
        
        import traceback
        
        HydrusData.DebugPrint(''.join(traceback.format_stack()))
        
        # best-effort fallbacks: normal close, then forced destruction
        try:
            
            self.close()
            
        except:
            
            HydrusData.ShowText('The dialog would not close on command.')
            
        
        try:
            
            self.deleteLater()
            
        except:
            
            HydrusData.ShowText('The dialog would not destroy on command.')
            
        
    
    return True
def RecyclePath(path):
    """Send path to the OS recycle bin, falling back to permanent deletion on failure.
    
    Fix: the bare 'except:' also caught KeyboardInterrupt/SystemExit and then
    permanently deleted the file on its way out; narrowed to 'except Exception'
    so only real recycle errors trigger the fallback delete.
    """
    if HG.file_report_mode:
        
        HydrusData.ShowText('Recycling {}'.format(path))
        
        HydrusData.ShowText(''.join(traceback.format_stack()))
        
    
    if os.path.exists(path):
        
        MakeFileWritable(path)
        
        try:
            
            send2trash.send2trash(path)
            
        except Exception:
            
            HydrusData.Print('Trying to recycle ' + path + ' created this error:')
            
            HydrusData.DebugPrint(traceback.format_exc())
            
            HydrusData.Print('It has been fully deleted instead.')
            
            DeletePath(path)
def ShowExceptionTupleClient(etype, value, tb, do_wait=True):
    """Publish an exception triple as a popup message and write it to the log.
    
    do_wait: sleep one second afterwards so rapid-fire errors do not flood the UI.
    """
    if etype is None:
        
        etype = HydrusExceptions.UnknownException
        
    
    if value is None:
        
        value = 'Unknown error'
        
    
    if tb is None:
        
        trace = 'No error trace--here is the stack:' + os.linesep + ''.join(traceback.format_stack())
        
    else:
        
        trace = ''.join(traceback.format_exception(etype, value, tb))
        
    
    pretty_value = str(value)
    
    first_line = pretty_value
    
    if os.linesep in pretty_value:
        
        # headline gets the first line; the remainder is appended to the trace
        (first_line, remainder) = pretty_value.split(os.linesep, 1)
        
        trace = trace + os.linesep + remainder
        
    
    job_key = ClientThreading.JobKey()
    
    if etype == HydrusExceptions.ShutdownException:
        
        return
        
    
    job_key.SetStatusTitle(str(getattr(etype, '__name__', etype)))
    
    job_key.SetVariable('popup_text_1', first_line)
    job_key.SetTraceback(trace)
    
    HydrusData.Print('Exception:')
    HydrusData.DebugPrint(job_key.ToString())
    
    HG.client_controller.pub('message', job_key)
    
    if do_wait:
        
        time.sleep(1)
def SetHashedJSONDumps(self, hashes_to_objs):
    """Persist every serialisable object in hashes_to_objs that is not already stored.
    
    hashes_to_objs: mapping of content hash -> serialisable object.
    """
    for (hash, obj) in hashes_to_objs.items():
        
        # already on disk, nothing to do
        if self.HaveHashedJSONDump(hash):
            
            continue
            
        
        (dump_type, version, serialisable_info) = obj.GetSerialisableTuple()
        
        try:
            
            dump = json.dumps(serialisable_info)
            
        except Exception as e:
            
            HydrusData.ShowException(e)
            HydrusData.Print(obj)
            HydrusData.Print(serialisable_info)
            
            raise Exception(
                'Trying to json dump the hashed object ' + str(obj) +
                ' caused an error. Its serialisable info has been dumped to the log.'
            )
            
        
        MaintenanceTracker.instance().RegisterNewHashedSerialisable(len(dump))
        
        dump_buffer = GenerateBigSQLiteDumpBuffer(dump)
        
        try:
            
            self._c.execute(
                'INSERT INTO json_dumps_hashed ( hash, dump_type, version, dump ) VALUES ( ?, ?, ?, ? );',
                (sqlite3.Binary(hash), dump_type, version, dump_buffer))
            
        except:
            
            HydrusData.DebugPrint(dump)
            
            HydrusData.ShowText(
                'Had a problem saving a hashed JSON object. The dump has been printed to the log.'
            )
            
            # best-effort size report; never let this mask the original error
            try:
                
                HydrusData.Print('Dump had length {}!'.format(
                    HydrusData.ToHumanBytes(len(dump_buffer))))
                
            except:
                
                pass
                
            
            raise
def SetTitleText(self, text, clear_undertexts=True, print_to_log=True):
    """Set the title text, optionally clearing sub-texts and echoing to the log."""
    
    should_log = print_to_log and self._updater is not None
    
    if should_log:
        
        HydrusData.DebugPrint(text)
        
    
    with self._lock:
        
        self._title_text = text
        
        if clear_undertexts:
            
            self._status_text = ''
            self._status_subtext = ''
            
        
        self._NotifyUI()
def DeletePath(path):
    """Permanently delete a file or directory tree, reporting failures to the user."""
    
    if HG.file_report_mode:
        
        HydrusData.ShowText('Deleting {}'.format(path))
        
        HydrusData.ShowText(''.join(traceback.format_stack()))
        
    
    if not os.path.exists(path):
        
        return
        
    
    MakeFileWritable(path)
    
    try:
        
        if os.path.isdir(path):
            
            shutil.rmtree(path)
            
        else:
            
            os.remove(path)
            
        
    except Exception as e:
        
        if 'Error 32' in str(e):
            
            # file in use by another process
            HydrusData.DebugPrint(
                'Trying to delete ' + path + ' failed because it was in use by another process.')
            
        else:
            
            HydrusData.ShowText('Trying to delete ' + path + ' caused the following error:')
            
            HydrusData.ShowException(e)
def SetJSONDump(self, obj, force_timestamp=None):
    """Persist a serialisable object to the json_dumps / json_dumps_named tables.
    
    obj: a HydrusSerialisable object; named objects go to json_dumps_named
    (possibly with rolling backups), unnamed singletons to json_dumps.
    force_timestamp: when given, store under exactly this timestamp and skip
    the backup-rotation logic.
    """
    if isinstance(obj, HydrusSerialisable.SerialisableBaseNamed):
        
        (dump_type, dump_name, version, serialisable_info) = obj.GetSerialisableTuple()
        
        store_backups = False
        backup_depth = 1
        
        if dump_type == HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_CONTAINER:
            
            if not obj.HasAllPageData():
                
                raise Exception(
                    'A session with name "{}" was set to save, but it did not have all its page data!'
                    .format(dump_name))
                
            
            # page data is stored separately, keyed by hash
            hashes_to_page_data = obj.GetHashesToPageData()
            
            self.SetHashedJSONDumps(hashes_to_page_data)
            
            if force_timestamp is None:
                
                store_backups = True
                backup_depth = HG.client_controller.new_options.GetInteger(
                    'number_of_gui_session_backups')
                
            
        
        try:
            
            dump = json.dumps(serialisable_info)
            
        except Exception as e:
            
            HydrusData.ShowException(e)
            HydrusData.Print(obj)
            HydrusData.Print(serialisable_info)
            
            raise Exception(
                'Trying to json dump the object ' + str(obj) + ' with name ' + dump_name +
                ' caused an error. Its serialisable info has been dumped to the log.'
            )
            
        
        if force_timestamp is None:
            
            object_timestamp = HydrusData.GetNow()
            
            if store_backups:
                
                existing_timestamps = sorted(
                    self._STI(
                        self._c.execute(
                            'SELECT timestamp FROM json_dumps_named WHERE dump_type = ? AND dump_name = ?;',
                            (dump_type, dump_name))))
                
                if len(existing_timestamps) > 0:
                    
                    # the user has changed their system clock, so let's make sure the new timestamp is larger at least
                    largest_existing_timestamp = max(existing_timestamps)
                    
                    if largest_existing_timestamp > object_timestamp:
                        
                        object_timestamp = largest_existing_timestamp + 1
                        
                    
                
                deletee_timestamps = existing_timestamps[: -backup_depth]  # keep highest n values
                
                deletee_timestamps.append(
                    object_timestamp
                )  # if save gets spammed twice in one second, we'll overwrite
                
                self._c.executemany(
                    'DELETE FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? AND timestamp = ?;',
                    [(dump_type, dump_name, timestamp) for timestamp in deletee_timestamps])
                
            else:
                
                # no backups wanted: a named dump is unique per (type, name)
                self._c.execute(
                    'DELETE FROM json_dumps_named WHERE dump_type = ? AND dump_name = ?;',
                    (dump_type, dump_name))
                
            
        else:
            
            object_timestamp = force_timestamp
            
        
        dump_buffer = GenerateBigSQLiteDumpBuffer(dump)
        
        try:
            
            self._c.execute(
                'INSERT INTO json_dumps_named ( dump_type, dump_name, version, timestamp, dump ) VALUES ( ?, ?, ?, ?, ? );',
                (dump_type, dump_name, version, object_timestamp, dump_buffer))
            
        except:
            
            HydrusData.DebugPrint(dump)
            
            HydrusData.ShowText(
                'Had a problem saving a JSON object. The dump has been printed to the log.'
            )
            
            # best-effort size report; never mask the original error
            try:
                
                HydrusData.Print('Dump had length {}!'.format(
                    HydrusData.ToHumanBytes(len(dump_buffer))))
                
            except:
                
                pass
                
            
            raise
            
        
    else:
        
        (dump_type, version, serialisable_info) = obj.GetSerialisableTuple()
        
        # manager objects may delegate some of their state to per-session/per-tracker
        # container dumps; flush those first, and skip the main write if clean
        if dump_type == HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER:
            
            deletee_session_names = obj.GetDeleteeSessionNames()
            dirty_session_containers = obj.GetDirtySessionContainers()
            
            if len(deletee_session_names) > 0:
                
                for deletee_session_name in deletee_session_names:
                    
                    self.DeleteJSONDumpNamed(
                        HydrusSerialisable.
                        SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER_SESSION_CONTAINER,
                        dump_name=deletee_session_name)
                    
                
            
            if len(dirty_session_containers) > 0:
                
                for dirty_session_container in dirty_session_containers:
                    
                    self.SetJSONDump(dirty_session_container)
                    
                
            
            if not obj.IsDirty():
                
                return
                
            
        elif dump_type == HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER:
            
            deletee_tracker_names = obj.GetDeleteeTrackerNames()
            dirty_tracker_containers = obj.GetDirtyTrackerContainers()
            
            if len(deletee_tracker_names) > 0:
                
                for deletee_tracker_name in deletee_tracker_names:
                    
                    self.DeleteJSONDumpNamed(
                        HydrusSerialisable.
                        SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER_TRACKER_CONTAINER,
                        dump_name=deletee_tracker_name)
                    
                
            
            if len(dirty_tracker_containers) > 0:
                
                for dirty_tracker_container in dirty_tracker_containers:
                    
                    self.SetJSONDump(dirty_tracker_container)
                    
                
            
            if not obj.IsDirty():
                
                return
                
            
        
        try:
            
            dump = json.dumps(serialisable_info)
            
        except Exception as e:
            
            HydrusData.ShowException(e)
            HydrusData.Print(obj)
            HydrusData.Print(serialisable_info)
            
            raise Exception(
                'Trying to json dump the object ' + str(obj) +
                ' caused an error. Its serialisable info has been dumped to the log.'
            )
            
        
        # unnamed dumps are singletons per type
        self._c.execute('DELETE FROM json_dumps WHERE dump_type = ?;',
                        (dump_type, ))
        
        dump_buffer = GenerateBigSQLiteDumpBuffer(dump)
        
        try:
            
            self._c.execute(
                'INSERT INTO json_dumps ( dump_type, version, dump ) VALUES ( ?, ?, ? );',
                (dump_type, version, dump_buffer))
            
        except:
            
            HydrusData.DebugPrint(dump)
            
            HydrusData.ShowText(
                'Had a problem saving a JSON object. The dump has been printed to the log.'
            )
            
            raise
def _InitDBCursor( self ):
    """Open the main SQLite connection, attach external dbs, and configure per-db PRAGMAs.
    
    Raises HydrusExceptions.DBAccessException if the db cannot be read or the
    initial transaction cannot begin.
    """
    self._CloseDBCursor()
    
    db_path = os.path.join( self._db_dir, self._db_filenames[ 'main' ] )
    
    db_just_created = not os.path.exists( db_path )
    
    # isolation_level = None -> autocommit; transactions are driven manually via _BeginImmediate
    self._db = sqlite3.connect( db_path, isolation_level = None, detect_types = sqlite3.PARSE_DECLTYPES )
    
    self._connection_timestamp = HydrusData.GetNow()
    
    self._c = self._db.cursor()
    
    if HG.no_db_temp_files:
        
        self._c.execute( 'PRAGMA temp_store = 2;' ) # use memory for temp store exclusively
        
    
    self._c.execute( 'ATTACH ":memory:" AS mem;' )
    
    self._AttachExternalDatabases()
    
    # if this is set to 1, transactions are not immediately synced to the journal so multiple can be undone following a power-loss
    # if set to 2, all transactions are synced, so once a new one starts you know the last one is on disk
    # corruption cannot occur either way, but since we have multiple ATTACH dbs with diff journals, let's not mess around when power-cut during heavy file import or w/e
    synchronous = 2
    
    if HG.db_synchronous_override is not None:
        
        synchronous = HG.db_synchronous_override
        
    
    # durable_temp is not excluded here
    db_names = [ name for ( index, name, path ) in self._c.execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp' ) ]
    
    for db_name in db_names:
        
        # negative cache_size is measured in KiB, so roughly 10MB per attached db
        self._c.execute( 'PRAGMA {}.cache_size = -10000;'.format( db_name ) )
        
        if HG.db_memory_journaling:
            
            self._c.execute( 'PRAGMA {}.journal_mode = MEMORY;'.format( db_name ) )
            
        elif HG.no_wal:
            
            self._c.execute( 'PRAGMA {}.journal_mode = TRUNCATE;'.format( db_name ) )
            
        else:
            
            self._c.execute( 'PRAGMA {}.journal_mode = WAL;'.format( db_name ) )
            
        
        self._c.execute( 'PRAGMA {}.synchronous = {};'.format( db_name, synchronous ) )
        
        try:
            
            # sanity read: confirm the file is actually readable under this journal mode
            self._c.execute( 'SELECT * FROM {}.sqlite_master;'.format( db_name ) ).fetchone()
            
        except sqlite3.OperationalError as e:
            
            if HG.no_wal:
                
                message = 'The database failed to read any data. Please check your hard drive and perhaps \'help my db is broke.txt\' in the db directory. Full error information:'
                
            else:
                
                message = 'The database failed to read some data. You may need to run the program in no-wal mode using the --no_wal command parameter. Full error information:'
                
            
            message += os.linesep * 2
            message += str( e )
            
            HydrusData.DebugPrint( message )
            
            raise HydrusExceptions.DBAccessException( message )
            
        
    
    try:
        
        self._BeginImmediate()
        
    except Exception as e:
        
        raise HydrusExceptions.DBAccessException( str( e ) )
def GetFFMPEGInfoLines( path, count_frames_manually = False, only_first_second = False ):
    """Run ffmpeg against path and return its stderr info output as a list of lines.
    
    count_frames_manually: decode (scaled, audio-stripped) to a null muxer so frame
    counts appear in the output. only_first_second: limit work to the first second.
    Raises FileNotFoundError if ffmpeg is missing, DataMissing if it returns nothing.
    """
    # open the file in a pipe, provoke an error, read output
    
    cmd = [ FFMPEG_PATH, "-i", path ]
    
    if only_first_second:
        
        cmd.insert( 1, '-t' )
        cmd.insert( 2, '1' )
        
    
    if count_frames_manually:
        
        # added -an here to remove audio component, which was sometimes causing convert fails on single-frame music webms
        
        if HC.PLATFORM_WINDOWS:
            
            cmd += [ "-vf", "scale=-2:120", "-an", "-f", "null", "NUL" ]
            
        else:
            
            cmd += [ "-vf", "scale=-2:120", "-an", "-f", "null", "/dev/null" ]
            
        
    
    sbp_kwargs = HydrusData.GetSubprocessKWArgs()
    
    HydrusData.CheckProgramIsNotShuttingDown()
    
    try:
        
        process = subprocess.Popen( cmd, bufsize = 10**5, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, **sbp_kwargs )
        
    except FileNotFoundError as e:
        
        # tell the user once per boot, then keep raising quietly
        global FFMPEG_MISSING_ERROR_PUBBED
        
        if not FFMPEG_MISSING_ERROR_PUBBED:
            
            message = 'FFMPEG, which hydrus uses to parse and render video, was not found! This may be due to it not being available on your system, or hydrus being unable to find it.'
            
            message += os.linesep * 2
            
            if HC.PLATFORM_WINDOWS:
                
                message += 'You are on Windows, so there should be a copy of ffmpeg.exe in your install_dir/bin folder. If not, please check if your anti-virus has removed it and restore it through a new install.'
                
            else:
                
                message += 'If you are certain that FFMPEG is installed on your OS and accessible in your PATH, please let hydrus_dev know, as this problem is likely due to an environment problem. You may be able to solve this problem immediately by putting a static build of the ffmpeg executable in your install_dir/bin folder.'
                
            
            message += os.linesep * 2
            message += 'You can check your current FFMPEG status through help->about.'
            
            HydrusData.ShowText( message )
            
            FFMPEG_MISSING_ERROR_PUBBED = True
            
        
        raise FileNotFoundError( 'Cannot interact with video because FFMPEG not found--are you sure it is installed? Full error: ' + str( e ) )
        
    
    ( stdout, stderr ) = HydrusThreading.SubprocessCommunicate( process )
    
    # ffmpeg writes the file info we want to stderr
    data_bytes = stderr
    
    if len( data_bytes ) == 0:
        
        global FFMPEG_NO_CONTENT_ERROR_PUBBED
        
        if not FFMPEG_NO_CONTENT_ERROR_PUBBED:
            
            message = 'FFMPEG, which hydrus uses to parse and render video, did not return any data on a recent file metadata check! More debug info has been written to the log.'
            message += os.linesep * 2
            message += 'You can check this info again through help->about.'
            
            # user sees the short message; the log gets the full environment dump below
            HydrusData.ShowText( message )
            
            message += os.linesep * 2
            message += str( sbp_kwargs )
            message += os.linesep * 2
            message += str( os.environ )
            message += os.linesep * 2
            message += 'STDOUT Response: {}'.format( stdout )
            message += os.linesep * 2
            message += 'STDERR Response: {}'.format( stderr )
            
            HydrusData.DebugPrint( message )
            
            FFMPEG_NO_CONTENT_ERROR_PUBBED = True
            
        
        raise HydrusExceptions.DataMissing( 'Cannot interact with video because FFMPEG did not return any content.' )
        
    
    del process
    
    ( text, encoding ) = HydrusText.NonFailingUnicodeDecode( data_bytes, 'utf-8' )
    
    lines = text.splitlines()
    
    CheckFFMPEGError( lines )
    
    return lines
def boot():
    """Run the test suite inside a minimal Qt app and exit with a pass/fail code.
    
    Fix: 'controller' was referenced in the shutdown path and the final exit
    check even when an early failure (e.g. QApplication construction) meant it
    was never assigned, raising NameError and masking the real error / exit
    code. It is now initialized to None and guarded.
    """
    args = sys.argv[1:]
    
    # optional first arg restricts which tests run
    only_run = args[0] if len(args) > 0 else None
    
    controller = None
    
    try:
        
        threading.Thread(target=reactor.run, kwargs={'installSignalHandlers': 0}).start()
        
        QP.MonkeyPatchMissingMethods()
        
        app = QW.QApplication(sys.argv)
        
        app.call_after_catcher = QP.CallAfterEventCatcher(app)
        
        try:
            
            # we run the tests on the Qt thread atm
            # keep a window alive the whole time so the app doesn't finish its mainloop
            
            win = QW.QWidget(None)
            win.setWindowTitle('Running tests...')
            
            controller = TestController.Controller(win, only_run)
            
            def do_it():
                
                controller.Run(win)
                
            
            QP.CallAfter(do_it)
            
            app.exec_()
            
        except:
            
            HydrusData.DebugPrint(traceback.format_exc())
            
        finally:
            
            HG.started_shutdown = True
            HG.view_shutdown = True
            
            if controller is not None:
                
                controller.pubimmediate('wake_daemons')
                
            
            HG.model_shutdown = True
            
            if controller is not None:
                
                controller.pubimmediate('wake_daemons')
                
                controller.TidyUp()
                
            
        
    except:
        
        HydrusData.DebugPrint(traceback.format_exc())
        
    finally:
        
        reactor.callFromThread(reactor.stop)
        
        print('This was version ' + str(HC.SOFTWARE_VERSION))
        
        if sys.stdin.isatty():
            
            input('Press any key to exit.')
            
        
    
    # an early failure (controller never built) counts as unsuccessful
    if controller is not None and controller.was_successful:
        
        sys.exit(0)
        
    else:
        
        sys.exit(1)
def _InitDBConnection(self):
    """Connect to the main db, attach external dbs, load modules, and set PRAGMAs.
    
    Raises HydrusExceptions.DBAccessException on a read-only file, a failed
    connection, unreadable data, or a locked database.
    """
    self._CloseDBConnection()
    
    db_path = os.path.join(self._db_dir, self._db_filenames['main'])
    
    try:
        
        if os.path.exists(
                db_path) and not HydrusPaths.FileisWriteable(db_path):
            
            raise HydrusExceptions.DBAccessException(
                '"{}" seems to be read-only!'.format(db_path))
            
        
        # isolation_level=None -> autocommit; transactions handled by the wrapper below
        self._db = sqlite3.connect(db_path,
                                   isolation_level=None,
                                   detect_types=sqlite3.PARSE_DECLTYPES)
        
        c = self._db.cursor()
        
        self._SetCursor(c)
        
        self._is_connected = True
        
        self._cursor_transaction_wrapper = HydrusDBBase.DBCursorTransactionWrapper(
            self._c, HG.db_transaction_commit_period)
        
        if HG.no_db_temp_files:
            
            self._Execute('PRAGMA temp_store = 2;'
                          )  # use memory for temp store exclusively
            
        
        self._AttachExternalDatabases()
        
        self._LoadModules()
        
        self._Execute('ATTACH ":memory:" AS mem;')
        
    except HydrusExceptions.DBAccessException as e:
        
        # already a friendly error; pass it through untouched
        raise
        
    except Exception as e:
        
        raise HydrusExceptions.DBAccessException(
            'Could not connect to database! If the answer is not obvious to you, please let hydrus dev know. Error follows:'
            + os.linesep * 2 + str(e))
        
    
    HydrusDBBase.TemporaryIntegerTableNameCache.instance().Clear()
    
    # durable_temp is not excluded here
    db_names = [
        name for (index, name,
                  path) in self._Execute('PRAGMA database_list;')
        if name not in ('mem', 'temp')
    ]
    
    for db_name in db_names:
        
        # MB -> KB
        cache_size = HG.db_cache_size * 1024
        
        self._Execute('PRAGMA {}.cache_size = -{};'.format(
            db_name, cache_size))
        
        self._Execute('PRAGMA {}.journal_mode = {};'.format(
            db_name, HG.db_journal_mode))
        
        if HG.db_journal_mode in ('PERSIST', 'WAL'):
            
            # We tried 1GB here, but I have reports of larger ones that don't seem to truncate ever?
            # Not sure what that is about, but I guess the db sometimes doesn't want to (expensively?) recover pages from the journal and just appends more data
            # In any case, this pragma is not a 'don't allow it to grow larger than', it's a 'after commit, truncate back to this', so no need to make it so large
            # default is -1, which means no limit
            self._Execute('PRAGMA {}.journal_size_limit = {};'.format(
                db_name, HydrusDBBase.JOURNAL_SIZE_LIMIT))
            
        
        self._Execute('PRAGMA {}.synchronous = {};'.format(
            db_name, HG.db_synchronous))
        
        try:
            
            # sanity read: confirm the file is readable under this journal mode
            self._Execute('SELECT * FROM {}.sqlite_master;'.format(
                db_name)).fetchone()
            
        except sqlite3.OperationalError as e:
            
            message = 'The database seemed valid, but hydrus failed to read basic data from it. You may need to run the program in a different journal mode using --db_journal_mode. Full error information:'
            
            message += os.linesep * 2
            message += str(e)
            
            HydrusData.DebugPrint(message)
            
            raise HydrusExceptions.DBAccessException(message)
            
        
    
    try:
        
        self._cursor_transaction_wrapper.BeginImmediate()
        
    except Exception as e:
        
        if 'locked' in str(e):
            
            raise HydrusExceptions.DBAccessException(
                'Database appeared to be locked. Please ensure there is not another client already running on this database, and then try restarting the client.'
            )
            
        
        raise HydrusExceptions.DBAccessException(str(e))
try: from twisted.internet import reactor except: HG.twisted_is_broke = True except Exception as e: try: from hydrus.core import HydrusData HydrusData.DebugPrint( 'Critical boot error occurred! Details written to crash.log!') HydrusData.PrintException(e) except: pass error_trace = traceback.format_exc() print(error_trace) if 'db_dir' in locals() and os.path.exists(db_dir): emergency_dir = db_dir else:
def _PopulateHashIdsToHashesCache(self, hash_ids, exception_on_error=False):
    """Ensure every id in hash_ids is present in self._hash_ids_to_hashes_cache.
    
    exception_on_error: raise DataMissing on an orphan hash_id instead of
    substituting a recognisable placeholder hash.
    Fix: the placeholder was bytes.fromhex('aaaaaaaaaaaaaaaa') (8 bytes) +
    os.urandom(16), i.e. 24 bytes — not the 32-byte length of the real
    (presumably sha256) hashes stored here; the marker prefix is now 16 bytes
    so the placeholder is a full 32-byte value.
    """
    if len(self._hash_ids_to_hashes_cache) > 100000:
        
        # cache is getting big: prune it down to just the ids currently requested
        if not isinstance(hash_ids, set):
            
            hash_ids = set(hash_ids)
            
        
        self._hash_ids_to_hashes_cache = {
            hash_id: hash
            for (hash_id, hash) in self._hash_ids_to_hashes_cache.items()
            if hash_id in hash_ids
        }
        
    
    uncached_hash_ids = {
        hash_id
        for hash_id in hash_ids
        if hash_id not in self._hash_ids_to_hashes_cache
    }
    
    if len(uncached_hash_ids) > 0:
        
        pubbed_error = False
        
        if len(uncached_hash_ids) == 1:
            
            (uncached_hash_id, ) = uncached_hash_ids
            
            rows = self._Execute(
                'SELECT hash_id, hash FROM hashes WHERE hash_id = ?;',
                (uncached_hash_id, )).fetchall()
            
        else:
            
            with self._MakeTemporaryIntegerTable(
                    uncached_hash_ids, 'hash_id') as temp_table_name:
                
                # temp hash_ids to actual hashes
                rows = self._Execute(
                    'SELECT hash_id, hash FROM {} CROSS JOIN hashes USING ( hash_id );'
                    .format(temp_table_name)).fetchall()
                
            
        
        uncached_hash_ids_to_hashes = dict(rows)
        
        if len(uncached_hash_ids_to_hashes) < len(uncached_hash_ids):
            
            for hash_id in uncached_hash_ids:
                
                if hash_id not in uncached_hash_ids_to_hashes:
                    
                    if exception_on_error:
                        
                        raise HydrusExceptions.DataMissing(
                            'Did not find all entries for those hash ids!')
                        
                    
                    HydrusData.DebugPrint('Database hash error: hash_id ' +
                                          str(hash_id) + ' was missing!')
                    HydrusData.PrintException(
                        Exception('Missing file identifier stack trace.'))
                    
                    if not pubbed_error:
                        
                        HydrusData.ShowText(
                            'A file identifier was missing! This is a serious error that means your client database has an orphan file id! Think about contacting hydrus dev!'
                        )
                        
                        pubbed_error = True
                        
                    
                    # 16-byte 'aa' marker + 16 random bytes = a full 32-byte hash
                    hash = bytes.fromhex(
                        'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa') + os.urandom(16)
                    
                    uncached_hash_ids_to_hashes[hash_id] = hash
                    
                
            
        
        self._hash_ids_to_hashes_cache.update(uncached_hash_ids_to_hashes)
win = QW.QWidget(None) win.setWindowTitle('Running tests...') controller = TestController.Controller(win, only_run) def do_it(): controller.Run(win) QP.CallAfter(do_it) app.exec_() except: HydrusData.DebugPrint(traceback.format_exc()) finally: HG.view_shutdown = True controller.pubimmediate('wake_daemons') HG.model_shutdown = True controller.pubimmediate('wake_daemons') controller.TidyUp() except:
def _InitDBCursor(self):
    """Open the main db connection, load modules, attach externals, and set PRAGMAs.
    
    Raises HydrusExceptions.DBAccessException if connection or a basic read fails.
    """
    self._CloseDBCursor()
    
    db_path = os.path.join(self._db_dir, self._db_filenames['main'])
    
    try:
        
        # isolation_level=None -> autocommit; transactions handled by the wrapper below
        self._db = sqlite3.connect(db_path,
                                   isolation_level=None,
                                   detect_types=sqlite3.PARSE_DECLTYPES)
        
        self._c = self._db.cursor()
        
        self._cursor_transaction_wrapper = DBCursorTransactionWrapper(
            self._c, HG.db_transaction_commit_period)
        
        self._LoadModules()
        
        if HG.no_db_temp_files:
            
            self._c.execute('PRAGMA temp_store = 2;'
                            )  # use memory for temp store exclusively
            
        
        self._AttachExternalDatabases()
        
        self._c.execute('ATTACH ":memory:" AS mem;')
        
    except Exception as e:
        
        raise HydrusExceptions.DBAccessException(
            'Could not connect to database! This could be an issue related to WAL and network storage, or something else. If it is not obvious to you, please let hydrus dev know. Error follows:'
            + os.linesep * 2 + str(e))
        
    
    TemporaryIntegerTableNameCache.instance().Clear()
    
    # durable_temp is not excluded here
    db_names = [
        name
        for (index, name, path) in self._c.execute('PRAGMA database_list;')
        if name not in ('mem', 'temp')
    ]
    
    for db_name in db_names:
        
        # MB -> KB
        cache_size = HG.db_cache_size * 1024
        
        self._c.execute('PRAGMA {}.cache_size = -{};'.format(
            db_name, cache_size))
        
        self._c.execute('PRAGMA {}.journal_mode = {};'.format(
            db_name, HG.db_journal_mode))
        
        if HG.db_journal_mode in ('PERSIST', 'WAL'):
            
            # cap journal growth after commit
            self._c.execute('PRAGMA {}.journal_size_limit = {};'.format(
                db_name, 1024**3))  # 1GB for now
            
        
        self._c.execute('PRAGMA {}.synchronous = {};'.format(
            db_name, HG.db_synchronous))
        
        try:
            
            # sanity read: confirm the file is readable under this journal mode
            self._c.execute('SELECT * FROM {}.sqlite_master;'.format(
                db_name)).fetchone()
            
        except sqlite3.OperationalError as e:
            
            message = 'The database seemed valid, but hydrus failed to read basic data from it. You may need to run the program in a different journal mode using --db_journal_mode. Full error information:'
            
            message += os.linesep * 2
            message += str(e)
            
            HydrusData.DebugPrint(message)
            
            raise HydrusExceptions.DBAccessException(message)
            
        
    
    try:
        
        self._cursor_transaction_wrapper.BeginImmediate()
        
    except Exception as e:
        
        raise HydrusExceptions.DBAccessException(str(e))
def log_handler( loglevel, component, message ):
    # Library log callback: forward level/component-tagged messages to the hydrus log.
    HydrusData.DebugPrint( f'[{loglevel}] {component}: {message}' )