from hydrus.core import HydrusConstants as HC import random import unittest from hydrus.core import HydrusData from hydrus.core import HydrusNetworking from mock import patch now = HydrusData.GetNow() now_10 = now + 10 now_20 = now + 20 with patch.object(HydrusData, 'GetNow', return_value=now): HIGH_USAGE = HydrusNetworking.BandwidthTracker() for i in range(100): HIGH_USAGE.ReportRequestUsed() HIGH_USAGE.ReportDataUsed(random.randint(512, 1024)) LOW_USAGE = HydrusNetworking.BandwidthTracker() LOW_USAGE.ReportRequestUsed() LOW_USAGE.ReportDataUsed(1024) ZERO_USAGE = HydrusNetworking.BandwidthTracker() class TestBandwidthRules(unittest.TestCase):
def _ProcessJob(self, job):
    """Run a single database job inside the current transaction.
    
    Sets the public status string around the read/write, commits when the
    transaction has writes and the commit period has elapsed, fires any
    pubsubs the job queued, and hands the result back to synchronous
    callers. On error, rolls the transaction back and, if even the rollback
    fails, tears down and re-initialises the db cursor.
    """
    
    job_type = job.GetType()
    
    (action, args, kwargs) = job.GetCallableTuple()
    
    try:
        
        if job_type in ('read_write', 'write'):
            
            self._current_status = 'db write locked'
            
            # any write job dirties the transaction until the next commit
            self._transaction_contains_writes = True
            
        else:
            
            self._current_status = 'db read locked'
            
        
        self.publish_status_update()
        
        if job_type in ('read', 'read_write'):
            
            result = self._Read(action, *args, **kwargs)
            
        elif job_type in ('write',):
            
            # BUGFIX: this test was `job_type in ('write')` -- parentheses
            # around a lone string do not make a tuple, so it was a substring
            # test that would also match job types like 'w' or 'rite'. The
            # one-element tuple restores real membership testing.
            result = self._Write(action, *args, **kwargs)
            
        
        if self._transaction_contains_writes and HydrusData.TimeHasPassed(self._transaction_started + self.TRANSACTION_COMMIT_TIME):
            
            self._current_status = 'db committing'
            
            self.publish_status_update()
            
            self._Commit()
            
            self._BeginImmediate()
            
            self._transaction_contains_writes = False
            
        else:
            
            self._Save()
            
        
        # flush any pubsubs the job accumulated during the call
        for (topic, args, kwargs) in self._pubsubs:
            
            self._controller.pub(topic, *args, **kwargs)
            
        
        if job.IsSynchronous():
            
            # NOTE(review): `result` is only bound for read/read_write/write
            # job types -- an unexpected job_type would NameError here
            job.PutResult(result)
            
        
    except Exception as e:
        
        self._ManageDBError(job, e)
        
        try:
            
            self._Rollback()
            
        except Exception as rollback_e:
            
            HydrusData.Print('When the transaction failed, attempting to rollback the database failed. Please restart the client as soon as is convenient.')
            
            # the connection is in an unknown state--rebuild the cursor
            self._in_transaction = False
            
            self._CloseDBCursor()
            
            self._InitDBCursor()
            
            HydrusData.PrintException(rollback_e)
            
        
    finally:
        
        self._pubsubs = []
        
        self._current_status = ''
        
        self.publish_status_update()
def MainLoop(self):
    """Database thread main loop.
    
    Initialises the cursor and caches (this runs on a fresh thread, so the
    sqlite connection must be rebuilt here), then serves jobs from the
    queue until shutdown is signalled and the queue is empty. Idle periods
    (queue.Empty) are used for deferred commits, periodic connection
    refresh, and pause/disconnect handling.
    """
    try:
        self._InitDBCursor()  # have to reinitialise because the thread id has changed
        self._InitDiskCache()
        self._InitCaches()
    except:
        # a failure here is unrecoverable--report and bail out
        self._DisplayCatastrophicError(traceback.format_exc())
        self._could_not_initialise = True
        return
    self._ready_to_serve_requests = True
    error_count = 0
    while not ((self._local_shutdown or HG.model_shutdown) and self._jobs.empty()):
        try:
            job = self._jobs.get(timeout=1)
            self._currently_doing_job = True
            self._current_job_name = job.ToString()
            self.publish_status_update()
            try:
                if HG.db_report_mode:
                    summary = 'Running ' + job.ToString()
                    HydrusData.ShowText(summary)
                if HG.db_profile_mode:
                    summary = 'Profiling ' + job.ToString()
                    HydrusData.ShowText(summary)
                    HydrusData.Profile(summary, 'self._ProcessJob( job )', globals(), locals())
                else:
                    self._ProcessJob(job)
                error_count = 0
            except:
                # retry a handful of times before letting the error propagate
                error_count += 1
                if error_count > 5:
                    raise
                self._jobs.put(job)  # couldn't lock db; put job back on queue
                time.sleep(5)
            self._currently_doing_job = False
            self._current_job_name = ''
            self.publish_status_update()
        except queue.Empty:
            # idle housekeeping: commit a lingering write transaction
            if self._transaction_contains_writes and HydrusData.TimeHasPassed(self._transaction_started + self.TRANSACTION_COMMIT_TIME):
                self._Commit()
                self._BeginImmediate()
                self._transaction_contains_writes = False
            if HydrusData.TimeHasPassed(self._connection_timestamp + CONNECTION_REFRESH_TIME):  # just to clear out the journal files
                self._InitDBCursor()
            if self._pause_and_disconnect:
                # release the db entirely until unpaused or shut down
                self._CloseDBCursor()
                while self._pause_and_disconnect:
                    if self._local_shutdown or HG.model_shutdown:
                        break
                    time.sleep(1)
                self._InitDBCursor()
    self._CleanUpCaches()
    self._CloseDBCursor()
    temp_path = os.path.join(self._db_dir, self._durable_temp_db_filename)
    HydrusPaths.DeletePath(temp_path)
    self._loop_finished = True
def SetJSONDump( self, obj ):
    """Persist a serialisable object into the json_dumps/json_dumps_named tables.
    
    Named objects go to json_dumps_named keyed by ( dump_type, dump_name,
    timestamp ); gui sessions additionally keep a rolling set of timestamped
    backups. Unnamed objects overwrite the single row for their dump_type.
    Session and bandwidth managers first flush their dirty/deletee child
    containers, and skip the main write if the manager itself is not dirty.
    
    Raises on json serialisation failure or on sqlite insert failure, after
    dumping diagnostics to the log.
    """
    
    if isinstance( obj, HydrusSerialisable.SerialisableBaseNamed ):
        
        ( dump_type, dump_name, version, serialisable_info ) = obj.GetSerialisableTuple()
        
        try:
            
            dump = json.dumps( serialisable_info )
            
        except Exception as e:
            
            HydrusData.ShowException( e )
            HydrusData.Print( obj )
            HydrusData.Print( serialisable_info )
            
            raise Exception( 'Trying to json dump the object ' + str( obj ) + ' with name ' + dump_name + ' caused an error. Its serialisable info has been dumped to the log.' )
            
        
        store_backups = False
        
        if dump_type == HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION:
            
            store_backups = True
            # backup_depth is only defined when store_backups is True
            backup_depth = HG.client_controller.new_options.GetInteger( 'number_of_gui_session_backups' )
            
        
        object_timestamp = HydrusData.GetNow()
        
        if store_backups:
            
            existing_timestamps = sorted( self._STI( self._c.execute( 'SELECT timestamp FROM json_dumps_named WHERE dump_type = ? AND dump_name = ?;', ( dump_type, dump_name ) ) ) )
            
            if len( existing_timestamps ) > 0:
                
                # the user has changed their system clock, so let's make sure the new timestamp is larger at least
                
                largest_existing_timestamp = max( existing_timestamps )
                
                if largest_existing_timestamp > object_timestamp:
                    
                    object_timestamp = largest_existing_timestamp + 1
                    
                
            
            deletee_timestamps = existing_timestamps[ : - backup_depth ] # keep highest n values
            
            deletee_timestamps.append( object_timestamp ) # if save gets spammed twice in one second, we'll overwrite
            
            self._c.executemany( 'DELETE FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? AND timestamp = ?;', [ ( dump_type, dump_name, timestamp ) for timestamp in deletee_timestamps ] )
            
        else:
            
            # no backups for this type--clear all previous rows for this name
            self._c.execute( 'DELETE FROM json_dumps_named WHERE dump_type = ? AND dump_name = ?;', ( dump_type, dump_name ) )
            
        
        dump_buffer = GenerateBigSQLiteDumpBuffer( dump )
        
        try:
            
            self._c.execute( 'INSERT INTO json_dumps_named ( dump_type, dump_name, version, timestamp, dump ) VALUES ( ?, ?, ?, ?, ? );', ( dump_type, dump_name, version, object_timestamp, dump_buffer ) )
            
        except:
            
            if dump_type == HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION:
                
                HydrusData.ShowText( 'A gui session could not be saved! This could be because it is too large. If your session is big, please try to trim it down now, or you will lose changes!' )
                
            else:
                
                HydrusData.DebugPrint( dump )
                
                HydrusData.ShowText( 'Had a problem saving a JSON object. The dump has been printed to the log.' )
                
            
            try:
                
                HydrusData.Print( 'Dump was {}!'.format( HydrusData.ToHumanBytes( len( dump_buffer ) ) ) )
                
            except:
                
                # size reporting is best-effort only
                pass
                
            
            raise
            
        
    else:
        
        ( dump_type, version, serialisable_info ) = obj.GetSerialisableTuple()
        
        if dump_type == HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER:
            
            # flush child session containers before (maybe) saving the manager
            deletee_session_names = obj.GetDeleteeSessionNames()
            dirty_session_containers = obj.GetDirtySessionContainers()
            
            if len( deletee_session_names ) > 0:
                
                for deletee_session_name in deletee_session_names:
                    
                    self.DeleteJSONDumpNamed( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER_SESSION_CONTAINER, dump_name = deletee_session_name )
                    
                
            
            if len( dirty_session_containers ) > 0:
                
                for dirty_session_container in dirty_session_containers:
                    
                    self.SetJSONDump( dirty_session_container )
                    
                
            
            if not obj.IsDirty():
                
                return
                
            
        elif dump_type == HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER:
            
            # same pattern as the session manager, but for bandwidth trackers
            deletee_tracker_names = obj.GetDeleteeTrackerNames()
            dirty_tracker_containers = obj.GetDirtyTrackerContainers()
            
            if len( deletee_tracker_names ) > 0:
                
                for deletee_tracker_name in deletee_tracker_names:
                    
                    self.DeleteJSONDumpNamed( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER_TRACKER_CONTAINER, dump_name = deletee_tracker_name )
                    
                
            
            if len( dirty_tracker_containers ) > 0:
                
                for dirty_tracker_container in dirty_tracker_containers:
                    
                    self.SetJSONDump( dirty_tracker_container )
                    
                
            
            if not obj.IsDirty():
                
                return
                
            
        
        try:
            
            dump = json.dumps( serialisable_info )
            
        except Exception as e:
            
            HydrusData.ShowException( e )
            HydrusData.Print( obj )
            HydrusData.Print( serialisable_info )
            
            raise Exception( 'Trying to json dump the object ' + str( obj ) + ' caused an error. Its serialisable info has been dumped to the log.' )
            
        
        self._c.execute( 'DELETE FROM json_dumps WHERE dump_type = ?;', ( dump_type, ) )
        
        dump_buffer = GenerateBigSQLiteDumpBuffer( dump )
        
        try:
            
            self._c.execute( 'INSERT INTO json_dumps ( dump_type, version, dump ) VALUES ( ?, ?, ? );', ( dump_type, version, dump_buffer ) )
            
        except:
            
            HydrusData.DebugPrint( dump )
            
            HydrusData.ShowText( 'Had a problem saving a JSON object. The dump has been printed to the log.' )
            
            raise
def _GenerateDBJob(self, job_type, synchronous, action, *args, **kwargs):
    """Wrap a db action call in a JobDatabase for the worker loop."""
    
    db_job = HydrusData.JobDatabase(job_type, synchronous, action, *args, **kwargs)
    
    return db_job
def _SizeAndPositionAndShow(self):
    """Reposition this popup in the parent frame's bottom-right corner and
    show or hide it depending on whether there is anything to display and
    whether showing would misbehave (hidden gui, other virtual desktop).
    
    Any exception here is treated as fatal for the popup manager: it
    reports, cancels its update job, and destroys itself.
    """
    try:
        gui_frame = self.parentWidget()
        possibly_on_hidden_virtual_desktop = not ClientGUIFunctions.MouseIsOnMyDisplay(gui_frame)
        gui_is_hidden = not gui_frame.isVisible()
        # showing/hiding while the gui is hidden or off-display misbehaves in some WMs
        going_to_bug_out_at_hide_or_show = possibly_on_hidden_virtual_desktop or gui_is_hidden
        current_focus_tlw = QW.QApplication.activeWindow()
        # NOTE(review): self_is_active is computed but never read below
        self_is_active = current_focus_tlw == self
        main_gui_or_child_window_is_active = ClientGUIFunctions.TLWOrChildIsActive(gui_frame)
        num_messages_displayed = self._message_vbox.count()
        there_is_stuff_to_display = num_messages_displayed > 0
        if there_is_stuff_to_display:
            # anchor to the parent's bottom-right with a small margin
            parent_size = gui_frame.size()
            my_size = self.size()
            my_x = (parent_size.width() - my_size.width()) - 20
            my_y = (parent_size.height() - my_size.height()) - 25
            if gui_frame.isVisible():
                my_position = ClientGUIFunctions.ClientToScreen(gui_frame, QC.QPoint(my_x, my_y))
                if my_position != self.pos():
                    self.move(my_position)
            # Unhiding tends to raise the main gui tlw in some window managers, which is annoying if a media viewer window has focus
            show_is_not_annoying = main_gui_or_child_window_is_active or self._DisplayingError()
            ok_to_show = show_is_not_annoying and not going_to_bug_out_at_hide_or_show
            if ok_to_show:
                self.show()
        else:
            if not going_to_bug_out_at_hide_or_show:
                self.hide()
    except:
        # NOTE(review): if self.parentWidget() itself raised, gui_frame is
        # unbound here and the critical() call below would NameError
        text = 'The popup message manager experienced a fatal error and will now stop working! Please restart the client as soon as possible! If this keeps happening, please email the details and your client.log to the hydrus developer.'
        HydrusData.Print(text)
        HydrusData.Print(traceback.format_exc())
        QW.QMessageBox.critical(gui_frame, 'Error', text)
        self._update_job.Cancel()
        self.CleanBeforeDestroy()
        self.deleteLater()
# Canned network fixtures for the tests: a catch-all handler that fails loudly
# on unexpected traffic, mock domains/urls, and a handler that serves a 500.

BAD_RESPONSE = b'500, it done broke'

@all_requests
def catch_all(url, request):
    
    raise Exception('An unexpected request for ' + url + ' came through in testing.')

MOCK_DOMAIN = 'wew.lad'
MOCK_SUBDOMAIN = 'top.wew.lad'

MOCK_URL = 'https://wew.lad/folder/request&key1=value1&key2=value2'
MOCK_SUBURL = 'https://top.wew.lad/folder2/request&key1=value1&key2=value2'

MOCK_HYDRUS_SERVICE_KEY = HydrusData.GenerateKey()

MOCK_HYDRUS_ADDRESS = '123.45.67.89'
MOCK_HYDRUS_DOMAIN = '123.45.67.89:45871'
MOCK_HYDRUS_URL = 'https://123.45.67.89:45871/muh_hydrus_command'

@urlmatch(netloc='wew.lad')
def catch_wew_error(url, request):
    
    error_response = {'status_code': 500, 'reason': 'Internal Server Error', 'content': BAD_RESPONSE}
    
    return error_response
def do_it(directory, neighbouring_txt_tag_service_keys, delete_afterwards, export_symlinks, quit_afterwards):
    """Background worker for a manual file export.
    
    Copies (or symlinks) each file in `to_do` into `directory`, optionally
    writing a neighbouring .txt of tags, then optionally deletes the
    exported files from the client.
    
    NOTE(review): this is a closure -- to_do, num_to_do, export_tag_txts,
    client_files_manager, qt_update_label, qt_done and self are captured
    from the enclosing scope; confirm against the enclosing method.
    """
    pauser = HydrusData.BigJobPauser()
    for (index, (ordering_index, media)) in enumerate(to_do):
        try:
            # keep the gui label in step with progress
            QP.CallAfter(qt_update_label, HydrusData.ConvertValueRangeToPrettyString(index + 1, num_to_do))
            hash = media.GetHash()
            mime = media.GetMime()
            path = self._GetPath(media)
            path = os.path.normpath(path)
            # refuse to write outside the chosen export directory
            if not path.startswith(directory):
                raise Exception('It seems a destination path was above the main export directory! The file was "{}" and its destination path was "{}".'.format(hash.hex(), path))
            path_dir = os.path.dirname(path)
            HydrusPaths.MakeSureDirectoryExists(path_dir)
            if export_tag_txts:
                # write a sorted tag list next to the exported file
                tags_manager = media.GetTagsManager()
                tags = set()
                for service_key in neighbouring_txt_tag_service_keys:
                    current_tags = tags_manager.GetCurrent(service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS)
                    tags.update(current_tags)
                tags = list(tags)
                tags.sort()
                txt_path = path + '.txt'
                with open(txt_path, 'w', encoding='utf-8') as f:
                    f.write(os.linesep.join(tags))
            source_path = client_files_manager.GetFilePath(hash, mime, check_file_exists=False)
            if export_symlinks:
                os.symlink(source_path, path)
            else:
                HydrusPaths.MirrorFile(source_path, path)
                HydrusPaths.MakeFileWritable(path)
        except:
            # report the failure to the user and stop the whole export
            QP.CallAfter(QW.QMessageBox.information, self, 'Information', 'Encountered a problem while attempting to export file with index ' + str(ordering_index + 1) + ':' + os.linesep * 2 + traceback.format_exc())
            break
        pauser.Pause()
    if delete_afterwards:
        QP.CallAfter(qt_update_label, 'deleting')
        deletee_hashes = {media.GetHash() for (ordering_index, media) in to_do}
        # delete in batches of 64 to keep individual db jobs small
        chunks_of_hashes = HydrusData.SplitListIntoChunks(deletee_hashes, 64)
        reason = 'Deleted after manual export to "{}".'.format(directory)
        content_updates = [HydrusData.ContentUpdate(HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, chunk_of_hashes, reason=reason) for chunk_of_hashes in chunks_of_hashes]
        for content_update in content_updates:
            HG.client_controller.WriteSynchronous('content_updates', {CC.LOCAL_FILE_SERVICE_KEY: [content_update]})
    QP.CallAfter(qt_update_label, 'done!')
    time.sleep(1)
    QP.CallAfter(qt_update_label, 'export')
    QP.CallAfter(qt_done, quit_afterwards)
def run(self):
    """Worker thread main loop: pull (callable, args, kwargs) jobs off the
    queue and execute them until a ShutdownException is raised.
    
    Waits on self._event while the queue is empty, checking for thread
    shutdown every 10 seconds.
    """
    try:
        while True:
            while self._queue.empty():
                CheckIfThreadShuttingDown()
                self._event.wait(10.0)
                self._event.clear()
            CheckIfThreadShuttingDown()
            try:
                try:
                    # BUGFIX: this was self._queue.get( 1.0 ), which passes 1.0
                    # as Queue.get's positional `block` flag rather than as a
                    # timeout -- the call could block forever and the
                    # queue.Empty handler below was dead code
                    (callable, args, kwargs) = self._queue.get(timeout=1.0)
                except queue.Empty:
                    # https://github.com/hydrusnetwork/hydrus/issues/750
                    # this shouldn't happen, but...
                    # even if we assume we'll never get this, we don't want to make a business of hanging forever on things
                    continue
                self._DoPreCall()
                # expose the current job for inspection while it runs
                self._callable = (callable, args, kwargs)
                if HG.profile_mode:
                    summary = 'Profiling CallTo Job: {}'.format(callable)
                    # the profiled source string references the local name
                    # `callable`, so that name must not be changed
                    HydrusData.Profile(summary, 'callable( *args, **kwargs )', globals(), locals(), min_duration_ms=HG.callto_profile_min_job_time_ms)
                else:
                    callable(*args, **kwargs)
                self._callable = None
                # drop our reference to the job promptly
                del callable
            except HydrusExceptions.ShutdownException:
                return
            except Exception as e:
                HydrusData.Print(traceback.format_exc())
                HydrusData.ShowException(e)
            finally:
                self._currently_working = False
            # tiny yield so a busy queue cannot starve other threads
            time.sleep(0.00001)
    except HydrusExceptions.ShutdownException:
        return
def SetMedia( self, media, start_paused = False ):
    """Point the mpv player at a new media object (or None to clear it).
    
    For GIFs with a finite play count (when 'always_loop_gifs' is off),
    looks up how many times the file should loop. Loading starts paused and
    is unpaused at the end according to start_paused.
    """
    
    if media == self._media:
        
        # already showing this media--nothing to do
        return
        
    
    self._file_is_loaded = False
    self._disallow_seek_on_this_file = False
    
    self._media = media
    
    # 0 means loop forever; a positive count is only set for finite GIFs
    self._times_to_play_gif = 0
    
    if self._media is not None and self._media.GetMime() == HC.IMAGE_GIF and not HG.client_controller.new_options.GetBoolean( 'always_loop_gifs' ):
        
        hash = self._media.GetHash()
        path = HG.client_controller.client_files_manager.GetFilePath( hash, HC.IMAGE_GIF )
        
        self._times_to_play_gif = HydrusImageHandling.GetTimesToPlayGIF( path )
        
    
    self._current_seek_to_start_count = 0
    
    if self._media is None:
        
        self._player.pause = True
        
        if len( self._player.playlist ) > 0:
            
            try:
                
                self._player.command( 'playlist-remove', 'current' )
                
            except:
                
                pass # sometimes happens after an error--screw it
                
            
        
    else:
        
        hash = self._media.GetHash()
        mime = self._media.GetMime()
        
        client_files_manager = HG.client_controller.client_files_manager
        
        path = client_files_manager.GetFilePath( hash, mime )
        
        self._player.visibility = 'always'
        
        self._stop_for_slideshow = False
        
        # load paused so we can set volume/mute before playback starts
        self._player.pause = True
        
        try:
            
            self._player.loadfile( path )
            
        except Exception as e:
            
            HydrusData.ShowException( e )
            
        
        self._player.volume = self._GetCorrectCurrentVolume()
        self._player.mute = self._GetCorrectCurrentMute()
        self._player.pause = start_paused
def log_handler( loglevel, component, message ):
    """Forward a player log line into the hydrus debug log."""
    
    line = '[{}] {}: {}'.format( loglevel, component, message )
    
    HydrusData.DebugPrint( line )
def _DoExport(self):
    """Run one pass of this export folder.
    
    Fetches the folder's search results in batches, copies any new files
    into the export directory, and then, depending on configuration,
    deletes orphaned files/dirs (synchronise mode) and/or deletes the
    exported files from the client. Returns early without error if export
    folders are paused or the thread is shutting down.
    """
    query_hash_ids = HG.client_controller.Read('file_query_ids', self._file_search_context)
    media_results = []
    i = 0
    base = 256
    # pull media results in batches of 256 so we stay responsive to pause/shutdown
    while i < len(query_hash_ids):
        if HC.options['pause_export_folders_sync'] or HydrusThreading.IsThreadShuttingDown():
            return
        if i == 0:
            (last_i, i) = (0, base)
        else:
            (last_i, i) = (i, i + base)
        sub_query_hash_ids = query_hash_ids[last_i:i]
        more_media_results = HG.client_controller.Read('media_results_from_ids', sub_query_hash_ids)
        media_results.extend(more_media_results)
    media_results.sort(key=lambda mr: mr.GetHashId())
    #
    terms = ParseExportPhrase(self._phrase)
    # everything currently on disk under the export path, for orphan detection
    previous_paths = set()
    for (root, dirnames, filenames) in os.walk(self._path):
        previous_paths.update((os.path.join(root, filename) for filename in filenames))
    sync_paths = set()
    client_files_manager = HG.client_controller.client_files_manager
    num_copied = 0
    for media_result in media_results:
        if HC.options['pause_export_folders_sync'] or HydrusThreading.IsThreadShuttingDown():
            return
        hash = media_result.GetHash()
        mime = media_result.GetMime()
        size = media_result.GetSize()
        try:
            source_path = client_files_manager.GetFilePath(hash, mime)
        except HydrusExceptions.FileMissingException:
            # BUGFIX: the message contained a "{}" placeholder but .format was
            # never called, so the user saw a literal "{}" instead of the hash
            raise Exception('A file to be exported, hash "{}", was missing! You should run file maintenance (under database->maintenance->files) to check the files for the export folder\'s search, and possibly all your files.'.format(hash.hex()))
        filename = GenerateExportFilename(self._path, media_result, terms)
        dest_path = os.path.normpath(os.path.join(self._path, filename))
        # refuse to write outside the export directory
        if not dest_path.startswith(self._path):
            raise Exception('It seems a destination path for export folder "{}" was above the main export directory! The file was "{}" and its destination path was "{}".'.format(self._path, hash.hex(), dest_path))
        dest_path_dir = os.path.dirname(dest_path)
        HydrusPaths.MakeSureDirectoryExists(dest_path_dir)
        if dest_path not in sync_paths:
            copied = HydrusPaths.MirrorFile(source_path, dest_path)
            if copied:
                num_copied += 1
                HydrusPaths.MakeFileWritable(dest_path)
        sync_paths.add(dest_path)
    if num_copied > 0:
        HydrusData.Print('Export folder ' + self._name + ' exported ' + HydrusData.ToHumanInt(num_copied) + ' files.')
    if self._export_type == HC.EXPORT_FOLDER_TYPE_SYNCHRONISE:
        # delete anything on disk that this pass did not (re)export
        deletee_paths = previous_paths.difference(sync_paths)
        for deletee_path in deletee_paths:
            ClientPaths.DeletePath(deletee_path)
        # then clean out any directories that are now effectively empty
        deletee_dirs = set()
        for (root, dirnames, filenames) in os.walk(self._path, topdown=False):
            if root == self._path:
                continue
            no_files = len(filenames) == 0
            useful_dirnames = [dirname for dirname in dirnames if os.path.join(root, dirname) not in deletee_dirs]
            no_useful_dirs = len(useful_dirnames) == 0
            if no_useful_dirs and no_files:
                deletee_dirs.add(root)
        for deletee_dir in deletee_dirs:
            if os.path.exists(deletee_dir):
                HydrusPaths.DeletePath(deletee_dir)
        if len(deletee_paths) > 0:
            HydrusData.Print('Export folder {} deleted {} files and {} folders.'.format(self._name, HydrusData.ToHumanInt(len(deletee_paths)), HydrusData.ToHumanInt(len(deletee_dirs))))
    if self._delete_from_client_after_export:
        deletee_hashes = {media_result.GetHash() for media_result in media_results}
        # delete in batches of 64 to keep individual db jobs small
        chunks_of_hashes = HydrusData.SplitListIntoChunks(deletee_hashes, 64)
        reason = 'Deleted after export to Export Folder "{}".'.format(self._path)
        content_updates = [HydrusData.ContentUpdate(HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, chunk_of_hashes, reason=reason) for chunk_of_hashes in chunks_of_hashes]
        for content_update in content_updates:
            HG.client_controller.WriteSynchronous('content_updates', {CC.LOCAL_FILE_SERVICE_KEY: [content_update]})
def __init__(self, parent, existing_folders_to_names, foldername, name, file_search_context, synchronised, media_sort, media_collect):
    """Edit panel for a saved search entry.
    
    parent: Qt parent widget.
    existing_folders_to_names: mapping used elsewhere for duplicate checks.
    foldername/name: current folder (may be None) and entry name.
    file_search_context: the search to edit, shown in a tag autocomplete.
    synchronised: initial state for the autocomplete's sync toggle.
    media_sort/media_collect: optional saved sort/collect; when given, the
    corresponding 'save' checkboxes start checked.
    """
    ClientGUIScrolledPanels.EditPanel.__init__(self, parent)
    self._existing_folders_to_names = existing_folders_to_names
    # remembered so the caller can detect renames/moves
    self._original_folder_and_name = (foldername, name)
    self._foldername = QW.QLineEdit(self)
    self._name = QW.QLineEdit(self)
    self._media_sort = ClientGUIResultsSortCollect.MediaSortControl(self)
    self._media_collect = ClientGUIResultsSortCollect.MediaCollectControl(self, silent=True)
    page_key = HydrusData.GenerateKey()
    # local import -- presumably to dodge a circular import at module load; TODO confirm
    from hydrus.client.gui.search import ClientGUIACDropdown
    self._tag_autocomplete = ClientGUIACDropdown.AutoCompleteDropdownTagsRead(self, page_key, file_search_context, media_sort_widget=self._media_sort, media_collect_widget=self._media_collect, synchronised=synchronised, hide_favourites_edit_actions=True)
    self._include_media_sort = QW.QCheckBox(self)
    self._include_media_collect = QW.QCheckBox(self)
    width = ClientGUIFunctions.ConvertTextToPixelWidth(self._include_media_collect, 48)
    self._include_media_collect.setMinimumWidth(width)
    # enabling/disabling the sort/collect widgets follows the checkboxes
    self._include_media_sort.stateChanged.connect(self._UpdateWidgets)
    self._include_media_collect.stateChanged.connect(self._UpdateWidgets)
    #
    if foldername is not None:
        self._foldername.setText(foldername)
    self._name.setText(name)
    if media_sort is not None:
        self._include_media_sort.setChecked(True)
        self._media_sort.SetSort(media_sort)
    if media_collect is not None:
        self._include_media_collect.setChecked(True)
        self._media_collect.SetCollect(media_collect)
    #
    rows = []
    rows.append(('folder (blank for none): ', self._foldername))
    rows.append(('name: ', self._name))
    top_gridbox = ClientGUICommon.WrapInGrid(self, rows)
    rows = []
    rows.append(('save sort: ', self._include_media_sort))
    rows.append(('sort: ', self._media_sort))
    rows.append(('save collect: ', self._include_media_collect))
    rows.append(('collect: ', self._media_collect))
    bottom_gridbox = ClientGUICommon.WrapInGrid(self, rows)
    vbox = QP.VBoxLayout()
    QP.AddToLayout(vbox, top_gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR)
    QP.AddToLayout(vbox, self._tag_autocomplete, CC.FLAGS_EXPAND_PERPENDICULAR)
    QP.AddToLayout(vbox, bottom_gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR)
    self.widget().setLayout(vbox)
def test_bandwidth_tracker(self):
    """Exercise BandwidthTracker accounting across several time windows."""
    
    bandwidth_tracker = HydrusNetworking.BandwidthTracker()
    
    def check_usage(expected_triples):
        
        # each triple: (window seconds or None for all-time, data bytes, request count)
        for (window, expected_data, expected_requests) in expected_triples:
            
            self.assertEqual(bandwidth_tracker.GetUsage(HC.BANDWIDTH_TYPE_DATA, window), expected_data)
            self.assertEqual(bandwidth_tracker.GetUsage(HC.BANDWIDTH_TYPE_REQUESTS, window), expected_requests)
            
        
    
    self.assertEqual(bandwidth_tracker.GetCurrentMonthSummary(), 'used 0B in 0 requests this month')
    
    now = HydrusData.GetNow()
    
    with patch.object(HydrusData, 'GetNow', return_value=now):
        
        # a fresh tracker reports zero everywhere
        check_usage([(0, 0, 0), (1, 0, 0), (2, 0, 0), (6, 0, 0), (3600, 0, 0), (None, 0, 0)])
        
        #
        
        bandwidth_tracker.ReportDataUsed(1024)
        bandwidth_tracker.ReportRequestUsed()
        
        self.assertEqual(bandwidth_tracker.GetCurrentMonthSummary(), 'used 1.0KB in 1 requests this month')
        
        # the report shows up in every window of at least one second
        check_usage([(0, 0, 0), (1, 1024, 1), (2, 1024, 1), (6, 1024, 1), (3600, 1024, 1), (None, 1024, 1)])
        
    
    #
    
    five_secs_from_now = now + 5
    
    with patch.object(HydrusData, 'GetNow', return_value=five_secs_from_now):
        
        # five seconds later, the old report has aged out of the short windows
        check_usage([(0, 0, 0), (1, 0, 0), (2, 0, 0), (6, 1024, 1), (3600, 1024, 1), (None, 1024, 1)])
        
        #
        
        bandwidth_tracker.ReportDataUsed(32)
        bandwidth_tracker.ReportRequestUsed()
        bandwidth_tracker.ReportDataUsed(32)
        bandwidth_tracker.ReportRequestUsed()
        
        self.assertEqual(bandwidth_tracker.GetCurrentMonthSummary(), 'used 1.1KB in 3 requests this month')
        
        # new reports land in the short windows; the long windows see everything
        check_usage([(0, 0, 0), (1, 64, 2), (2, 64, 2), (6, 1088, 3), (3600, 1088, 3), (None, 1088, 3)])
def Delay(self, delay):
    """Push this job's next work time `delay` seconds into the future and
    notify the scheduler that the timetable changed."""
    
    next_time = HydrusData.GetNowFloat() + delay
    
    self._next_work_time = next_time
    
    self._scheduler.WorkTimesHaveChanged()
def test_dict_to_content_updates(self):
    """ConvertServiceKeysToTagsToServiceKeysToContentUpdates maps local tags
    to ADD mapping updates and remote tags to PEND mapping updates."""
    
    hash = HydrusData.GenerateKey()
    hashes = {hash}
    
    local_key = CC.DEFAULT_LOCAL_TAG_SERVICE_KEY
    remote_key = HG.test_controller.example_tag_repo_service_key
    
    def convert(service_keys_to_tags):
        
        return ClientData.ConvertServiceKeysToTagsToServiceKeysToContentUpdates({hash}, service_keys_to_tags)
        
    
    def mapping_update(action, tag):
        
        return HydrusData.ContentUpdate(HC.CONTENT_TYPE_MAPPINGS, action, (tag, hashes))
        
    
    # a single local tag becomes an ADD
    
    service_keys_to_tags = ClientTags.ServiceKeysToTags({local_key: {'a'}})
    
    expected = {local_key: [mapping_update(HC.CONTENT_UPDATE_ADD, 'a')]}
    
    self.assertEqual(convert(service_keys_to_tags), expected)
    
    # a single remote tag becomes a PEND
    
    service_keys_to_tags = ClientTags.ServiceKeysToTags({remote_key: {'c'}})
    
    expected = {remote_key: [mapping_update(HC.CONTENT_UPDATE_PEND, 'c')]}
    
    self.assertEqual(convert(service_keys_to_tags), expected)
    
    # multiple tags across both services at once
    
    service_keys_to_tags = ClientTags.ServiceKeysToTags({local_key: ['a', 'character:b'], remote_key: ['c', 'series:d']})
    
    expected = {}
    
    expected[local_key] = [mapping_update(HC.CONTENT_UPDATE_ADD, 'a'), mapping_update(HC.CONTENT_UPDATE_ADD, 'character:b')]
    expected[remote_key] = [mapping_update(HC.CONTENT_UPDATE_PEND, 'c'), mapping_update(HC.CONTENT_UPDATE_PEND, 'series:d')]
    
    # sanity check: ContentUpdate equality is by value
    self.assertEqual(HydrusData.ContentUpdate(HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_PEND, 'c'), HydrusData.ContentUpdate(HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_PEND, 'c'))
    
    self.assertEqual(convert(service_keys_to_tags), expected)
def UpdateMessage(self):
    """Sync every widget in this popup with the current state of its job key.
    
    Each optional element (title, texts, gauges, yes/no question, network
    job control, clipboard/files/callable buttons, traceback, pause/cancel
    buttons) is shown or hidden depending on whether the job key carries
    the matching variable. While the job is paused, most elements collapse
    down to a single 'paused' line.
    """
    title = self._job_key.GetStatusTitle()
    if title is not None:
        self._title.show()
        self._title.setText(title)
    else:
        self._title.hide()
    #
    paused = self._job_key.IsPaused()
    popup_text_1 = self._job_key.GetIfHasVariable('popup_text_1')
    if popup_text_1 is not None or paused:
        if paused:
            text = 'paused'
        else:
            text = popup_text_1
        self._text_1.show()
        self._text_1.setText(text)
    else:
        self._text_1.hide()
    #
    popup_gauge_1 = self._job_key.GetIfHasVariable('popup_gauge_1')
    if popup_gauge_1 is not None and not paused:
        (gauge_value, gauge_range) = popup_gauge_1
        self._gauge_1.SetRange(gauge_range)
        self._gauge_1.SetValue(gauge_value)
        self._gauge_1.show()
    else:
        self._gauge_1.hide()
    #
    popup_text_2 = self._job_key.GetIfHasVariable('popup_text_2')
    if popup_text_2 is not None and not paused:
        text = popup_text_2
        self._text_2.setText(self._ProcessText(text))
        self._text_2.show()
    else:
        self._text_2.hide()
    #
    popup_gauge_2 = self._job_key.GetIfHasVariable('popup_gauge_2')
    if popup_gauge_2 is not None and not paused:
        (gauge_value, gauge_range) = popup_gauge_2
        self._gauge_2.SetRange(gauge_range)
        self._gauge_2.SetValue(gauge_value)
        self._gauge_2.show()
    else:
        self._gauge_2.hide()
    #
    popup_yes_no_question = self._job_key.GetIfHasVariable('popup_yes_no_question')
    if popup_yes_no_question is not None and not paused:
        text = popup_yes_no_question
        self._text_yes_no.setText(self._ProcessText(text))
        self._text_yes_no.show()
        self._yes.show()
        self._no.show()
    else:
        self._text_yes_no.hide()
        self._yes.hide()
        self._no.hide()
    #
    network_job = self._job_key.GetNetworkJob()
    if network_job is None:
        self._network_job_ctrl.ClearNetworkJob()
        self._network_job_ctrl.hide()
    else:
        self._network_job_ctrl.SetNetworkJob(network_job)
        self._network_job_ctrl.show()
    #
    popup_clipboard = self._job_key.GetIfHasVariable('popup_clipboard')
    if popup_clipboard is not None:
        # NOTE(review): this reuses (and shadows) the `title` local from above
        (title, text) = popup_clipboard
        # only touch the widget text when it actually changed
        if self._copy_to_clipboard_button.text() != title:
            self._copy_to_clipboard_button.setText(title)
        self._copy_to_clipboard_button.show()
    else:
        self._copy_to_clipboard_button.hide()
    #
    result = self._job_key.GetIfHasVariable('popup_files')
    if result is not None:
        (popup_files, popup_files_name) = result
        hashes = popup_files
        text = popup_files_name + ' - show ' + HydrusData.ToHumanInt(len(hashes)) + ' files'
        if self._show_files_button.text() != text:
            self._show_files_button.setText(text)
        self._show_files_button.show()
    else:
        self._show_files_button.hide()
    #
    user_callable = self._job_key.GetUserCallable()
    if user_callable is None:
        self._user_callable_button.hide()
    else:
        self._user_callable_button.setText(user_callable.GetLabel())
        self._user_callable_button.show()
    #
    popup_traceback = self._job_key.GetTraceback()
    if popup_traceback is not None:
        self._copy_tb_button.show()
        self._show_tb_button.show()
        text = popup_traceback
        self._tb_text.setText(self._ProcessText(text))
        # do not show automatically--that is up to the show button
    else:
        self._copy_tb_button.hide()
        self._show_tb_button.hide()
        self._tb_text.hide()
    #
    if self._job_key.IsPausable():
        self._pause_button.show()
    else:
        self._pause_button.hide()
    if self._job_key.IsCancellable():
        self._cancel_button.show()
    else:
        self._cancel_button.hide()
    # Dirty hack to reduce unnecessary resizing
    if self.minimumWidth() < self.sizeHint().width():
        self.setMinimumWidth(self.sizeHint().width())
def _Update(self):
    """Refresh the status text, speed text, gauge, and cancel button from
    the current state of self._network_job.
    
    With no job (or one whose engine has not started), everything is
    cleared and cancel is disabled.
    """
    if self._network_job is None or self._network_job.NoEngineYet():
        self._left_text.clear()
        self._right_text.clear()
        self._gauge.SetRange(1)
        self._gauge.SetValue(0)
        can_cancel = False
    else:
        if self._network_job.IsDone():
            can_cancel = False
        else:
            can_cancel = True
        (status_text, current_speed, bytes_read, bytes_to_read) = self._network_job.GetStatus()
        self._left_text.setText(status_text)
        # first nonzero speed marks the download as started
        if not self._download_started and current_speed > 0:
            self._download_started = True
        speed_text = ''
        if self._download_started and not self._network_job.HasError():
            if bytes_read is not None:
                if bytes_to_read is not None and bytes_read != bytes_to_read:
                    # in-progress: show "x of y"
                    speed_text += HydrusData.ConvertValueRangeToBytes(bytes_read, bytes_to_read)
                else:
                    speed_text += HydrusData.ToHumanBytes(bytes_read)
            # NOTE(review): comparing speed against total size looks odd but
            # matches the intent in the comment -- when the whole file arrived
            # within one speed-sample window, the '/s' suffix is suppressed
            if current_speed != bytes_to_read:  # if it is a real quick download, just say its size
                speed_text += ' ' + HydrusData.ToHumanBytes(current_speed) + '/s'
        self._right_text.setText(speed_text)
        # widen the speed label only when needed, to avoid jitter
        right_width = ClientGUIFunctions.ConvertTextToPixelWidth(self._right_text, len(speed_text))
        right_min_width = right_width
        if right_min_width != self._last_right_min_width:
            self._last_right_min_width = right_min_width
            self._right_text.setMinimumWidth(right_min_width)
        self._gauge.SetRange(bytes_to_read)
        self._gauge.SetValue(bytes_read)
    if can_cancel:
        if not self._cancel_button.isEnabled():
            self._cancel_button.setEnabled(True)
    else:
        if self._cancel_button.isEnabled():
            self._cancel_button.setEnabled(False)
def GenerateNewAccessKey(self):
    """Discard the current access key and store a freshly generated one.

    The swap happens under self._lock so concurrent readers never see a
    half-updated key.
    """
    with self._lock:
        fresh_key = HydrusData.GenerateKey()
        self._access_key = fresh_key
def _DoPreCall(self):
    """Emit a debug notification before this daemon runs its job.

    Only fires when the global daemon-report debug mode is switched on.
    """
    if not HG.daemon_report_mode:
        return
    report = self._name + ' doing a job.'
    HydrusData.ShowText(report)
def test_can_start(self):
    """Exercise NetworkBandwidthManager.CanStartRequest under layered rules.

    Builds permissive/restrictive data- and request-count rules, then checks
    that a restriction applied at subdomain, domain, or global level blocks
    exactly the contexts at and below that level.
    """
    EMPTY_RULES = HydrusNetworking.BandwidthRules()
    PERMISSIVE_DATA_RULES = HydrusNetworking.BandwidthRules()
    PERMISSIVE_DATA_RULES.AddRule(HC.BANDWIDTH_TYPE_DATA, None, 1048576)
    PERMISSIVE_REQUEST_RULES = HydrusNetworking.BandwidthRules()
    PERMISSIVE_REQUEST_RULES.AddRule(HC.BANDWIDTH_TYPE_REQUESTS, None, 10000)
    # restrictive rules are small enough that the usage reported below exceeds them
    RESTRICTIVE_DATA_RULES = HydrusNetworking.BandwidthRules()
    RESTRICTIVE_DATA_RULES.AddRule(HC.BANDWIDTH_TYPE_DATA, None, 10)
    RESTRICTIVE_REQUEST_RULES = HydrusNetworking.BandwidthRules()
    RESTRICTIVE_REQUEST_RULES.AddRule(HC.BANDWIDTH_TYPE_REQUESTS, None, 1)
    DOMAIN_NETWORK_CONTEXT = ClientNetworkingContexts.NetworkContext(
        CC.NETWORK_CONTEXT_DOMAIN, MOCK_DOMAIN)
    SUBDOMAIN_NETWORK_CONTEXT = ClientNetworkingContexts.NetworkContext(
        CC.NETWORK_CONTEXT_DOMAIN, MOCK_SUBDOMAIN)
    # context lists as the manager expects them: most general first
    GLOBAL_NETWORK_CONTEXTS = [
        ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT
    ]
    DOMAIN_NETWORK_CONTEXTS = [
        ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
        DOMAIN_NETWORK_CONTEXT
    ]
    SUBDOMAIN_NETWORK_CONTEXTS = [
        ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
        DOMAIN_NETWORK_CONTEXT, SUBDOMAIN_NETWORK_CONTEXT
    ]
    #
    # pin 'now' an hour ahead so the usage reported below all lands in one window
    fast_forward = HydrusData.GetNow() + 3600
    with patch.object(HydrusData, 'GetNow', return_value=fast_forward):
        bm = ClientNetworkingBandwidth.NetworkBandwidthManager()
        # no rules at all: everything can start
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        #
        # report some usage; still no rules, so nothing is blocked
        bm.ReportRequestUsed(DOMAIN_NETWORK_CONTEXTS)
        bm.ReportDataUsed(DOMAIN_NETWORK_CONTEXTS, 50)
        bm.ReportRequestUsed(SUBDOMAIN_NETWORK_CONTEXTS)
        bm.ReportDataUsed(SUBDOMAIN_NETWORK_CONTEXTS, 25)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        #
        # empty and permissive rule sets never block
        bm.SetRules(ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
                    EMPTY_RULES)
        bm.SetRules(DOMAIN_NETWORK_CONTEXT, EMPTY_RULES)
        bm.SetRules(SUBDOMAIN_NETWORK_CONTEXT, EMPTY_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        bm.SetRules(ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
                    PERMISSIVE_DATA_RULES)
        bm.SetRules(DOMAIN_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES)
        bm.SetRules(SUBDOMAIN_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        bm.SetRules(ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
                    PERMISSIVE_REQUEST_RULES)
        bm.SetRules(DOMAIN_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES)
        bm.SetRules(SUBDOMAIN_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        #
        # a restriction on the subdomain blocks only the subdomain context list
        bm.SetRules(SUBDOMAIN_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        bm.SetRules(SUBDOMAIN_NETWORK_CONTEXT, RESTRICTIVE_REQUEST_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        bm.SetRules(SUBDOMAIN_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        #
        # a restriction on the domain blocks domain AND subdomain context lists
        bm.SetRules(DOMAIN_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        bm.SetRules(DOMAIN_NETWORK_CONTEXT, RESTRICTIVE_REQUEST_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        bm.SetRules(DOMAIN_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        #
        # a global restriction blocks every context list
        bm.SetRules(ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
                    RESTRICTIVE_DATA_RULES)
        self.assertFalse(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        bm.SetRules(ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
                    RESTRICTIVE_REQUEST_RULES)
        self.assertFalse(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        bm.SetRules(ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
                    PERMISSIVE_REQUEST_RULES)
        self.assertTrue(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertTrue(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
        #
        # a global restriction still blocks everything even when the domain's
        # own rules are emptied afterwards
        bm.SetRules(ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT,
                    RESTRICTIVE_DATA_RULES)
        bm.SetRules(DOMAIN_NETWORK_CONTEXT, RESTRICTIVE_REQUEST_RULES)
        bm.SetRules(DOMAIN_NETWORK_CONTEXT, EMPTY_RULES)
        self.assertFalse(bm.CanStartRequest(GLOBAL_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(DOMAIN_NETWORK_CONTEXTS))
        self.assertFalse(bm.CanStartRequest(SUBDOMAIN_NETWORK_CONTEXTS))
def GetCurrentJobSummary(self):
    """Return a short human-readable count of jobs currently waiting."""
    with self._waiting_lock:
        num_waiting = len(self._waiting)
        summary = HydrusData.ToHumanInt(num_waiting) + ' jobs'
    return summary
def __init__(self, controller, db_dir, db_name):
    """Open (and if necessary update) the hydrus database, then start its
    worker loop on a background thread.

    Args:
        controller: the owning app controller; used to spawn the MainLoop thread.
        db_dir: directory holding the db files.
        db_name: base name for the main db file ('.db' is appended if missing).

    Raises:
        Exception: if the partition has under 500MB free, the stored db
            version is more than 50 versions behind, an update step fails,
            or initialisation in the worker thread fails.
    """
    # refuse to start on a nearly-full partition
    if HydrusPaths.GetFreeSpace(db_dir) < 500 * 1048576:
        raise Exception(
            'Sorry, it looks like the db partition has less than 500MB, please free up some space.'
        )
    self._controller = controller
    self._db_dir = db_dir
    self._db_name = db_name
    # transaction bookkeeping used by _ProcessJob/MainLoop
    self._transaction_started = 0
    self._in_transaction = False
    self._transaction_contains_writes = False
    self._ssl_cert_filename = '{}.crt'.format(self._db_name)
    self._ssl_key_filename = '{}.key'.format(self._db_name)
    self._ssl_cert_path = os.path.join(self._db_dir, self._ssl_cert_filename)
    self._ssl_key_path = os.path.join(self._db_dir, self._ssl_key_filename)
    self._connection_timestamp = 0
    main_db_filename = db_name
    if not main_db_filename.endswith('.db'):
        main_db_filename += '.db'
    self._db_filenames = {}
    self._db_filenames['main'] = main_db_filename
    self._durable_temp_db_filename = db_name + '.temp.db'
    self._InitExternalDatabases()
    # sqlite gained much faster big WAL transactions in 3.11.0
    if distutils.version.LooseVersion(
            sqlite3.sqlite_version) < distutils.version.LooseVersion(
                '3.11.0'):
        self._fast_big_transaction_wal = False
    else:
        self._fast_big_transaction_wal = True
    self._is_first_start = False
    self._is_db_updated = False
    # shutdown / lifecycle flags read by MainLoop
    self._local_shutdown = False
    self._pause_and_disconnect = False
    self._loop_finished = False
    self._ready_to_serve_requests = False
    self._could_not_initialise = False
    self._jobs = queue.Queue()
    self._pubsubs = []
    self._currently_doing_job = False
    self._current_status = ''
    self._current_job_name = ''
    self._db = None
    self._c = None
    if os.path.exists(
            os.path.join(self._db_dir, self._db_filenames['main'])):
        # open and close to clean up in case last session didn't close well
        self._InitDB()
        self._CloseDBCursor()
    self._InitDB()
    self._RepairDB()
    (version, ) = self._c.execute('SELECT version FROM version;').fetchone()
    if version > HC.SOFTWARE_VERSION:
        self._ReportOverupdatedDB(version)
    if version < (HC.SOFTWARE_VERSION - 15):
        self._ReportUnderupdatedDB(version)
    # more than 50 versions behind: too old to update in one jump
    if version < HC.SOFTWARE_VERSION - 50:
        raise Exception(
            'Your current database version of hydrus ' + str(version) +
            ' is too old for this software version ' +
            str(HC.SOFTWARE_VERSION) +
            ' to update. Please try updating with version ' +
            str(version + 45) + ' or earlier first.')
    # apply one update step per loop iteration until we reach the current version
    while version < HC.SOFTWARE_VERSION:
        time.sleep(self.UPDATE_WAIT)
        try:
            self._BeginImmediate()
        except Exception as e:
            raise HydrusExceptions.DBAccessException(str(e))
        try:
            self._UpdateDB(version)
            self._Commit()
            self._is_db_updated = True
        except:
            e = Exception('Updating the ' + self._db_name +
                          ' db to version ' + str(version + 1) +
                          ' caused this error:' + os.linesep +
                          traceback.format_exc())
            try:
                self._Rollback()
            except Exception as rollback_e:
                HydrusData.Print(
                    'When the update failed, attempting to rollback the database failed.'
                )
                HydrusData.PrintException(rollback_e)
            raise e
        # re-read the version the update step just wrote
        (version, ) = self._c.execute(
            'SELECT version FROM version;').fetchone()
    # hand the connection over to the worker thread (it reopens its own cursor)
    self._CloseDBCursor()
    self._controller.CallToThreadLongRunning(self.MainLoop)
    # block until the worker loop is serving, surfacing its init failure if any
    while not self._ready_to_serve_requests:
        time.sleep(0.1)
        if self._could_not_initialise:
            raise Exception(
                'Could not initialise the db! Error written to the log!')
def __repr__(self):
    """Debug string: the job's class, its callable, and time until next run."""
    seconds_remaining = self._next_work_time - HydrusData.GetNowFloat()
    pretty_delta = HydrusData.TimeDeltaToPrettyTimeDelta(seconds_remaining)
    return repr(self.__class__) + ': ' + repr(
        self._work_callable) + ' next in ' + pretty_delta
def _InitDBCursor(self):
    """(Re)open the sqlite connection, attach the external dbs, and configure
    cache/journal/synchronous pragmas on every attached database.

    Raises:
        HydrusExceptions.DBAccessException: if any attached db cannot be read
            or the opening BEGIN IMMEDIATE fails.
    """
    self._CloseDBCursor()
    db_path = os.path.join(self._db_dir, self._db_filenames['main'])
    # NOTE(review): computed before connect() creates the file, but not read
    # in this method -- presumably consulted elsewhere; confirm.
    db_just_created = not os.path.exists(db_path)
    # isolation_level=None -> autocommit; transactions are managed manually
    self._db = sqlite3.connect(db_path,
                               isolation_level=None,
                               detect_types=sqlite3.PARSE_DECLTYPES)
    self._connection_timestamp = HydrusData.GetNow()
    self._c = self._db.cursor()
    if HG.no_db_temp_files:
        self._c.execute('PRAGMA temp_store = 2;'
                        )  # use memory for temp store exclusively
        self._c.execute('ATTACH ":memory:" AS mem;')
    self._AttachExternalDatabases()
    # if this is set to 1, transactions are not immediately synced to the journal so multiple can be undone following a power-loss
    # if set to 2, all transactions are synced, so once a new one starts you know the last one is on disk
    # corruption cannot occur either way, but since we have multiple ATTACH dbs with diff journals, let's not mess around when power-cut during heavy file import or w/e
    synchronous = 2
    if HG.db_synchronous_override is not None:
        synchronous = HG.db_synchronous_override
    # durable_temp is not excluded here
    db_names = [
        name for (index, name, path) in self._c.execute(
            'PRAGMA database_list;') if name not in ('mem', 'temp')
    ]
    for db_name in db_names:
        # negative cache_size is KiB, so -10000 ~= 10MB page cache per db
        self._c.execute('PRAGMA {}.cache_size = -10000;'.format(db_name))
        if HG.db_memory_journaling:
            self._c.execute(
                'PRAGMA {}.journal_mode = MEMORY;'.format(db_name))
        elif HG.no_wal:
            self._c.execute(
                'PRAGMA {}.journal_mode = TRUNCATE;'.format(db_name))
        else:
            self._c.execute(
                'PRAGMA {}.journal_mode = WAL;'.format(db_name))
        self._c.execute('PRAGMA {}.synchronous = {};'.format(
            db_name, synchronous))
        # smoke-test that the db is actually readable under the chosen journal mode
        try:
            self._c.execute('SELECT * FROM {}.sqlite_master;'.format(
                db_name)).fetchone()
        except sqlite3.OperationalError as e:
            if HG.no_wal:
                message = 'The database failed to read any data. Please check your hard drive and perhaps \'help my db is broke.txt\' in the db directory. Full error information:'
            else:
                message = 'The database failed to read some data. You may need to run the program in no-wal mode using the --no_wal command parameter. Full error information:'
            message += os.linesep * 2
            message += str(e)
            HydrusData.DebugPrint(message)
            raise HydrusExceptions.DBAccessException(message)
    # open the first transaction straight away
    try:
        self._BeginImmediate()
    except Exception as e:
        raise HydrusExceptions.DBAccessException(str(e))
def GetTimeDeltaUntilDue(self):
    """Return the float seconds remaining until this job's next work time."""
    due_time = self._next_work_time
    return HydrusData.GetTimeDeltaUntilTimeFloat(due_time)
def _ReportStatus(self, text):
    """Write a status line to the hydrus log."""
    HydrusData.Print(text)
def IsDue(self):
    """Return True when this job's scheduled work time has already passed."""
    scheduled = self._next_work_time
    return HydrusData.TimeHasPassedFloat(scheduled)
def _ImportURLs(self, urls):
    """Add the given URLs to the gallery seed log, prompting the user about
    duplicates and about whether new searches may spawn further pages.

    Args:
        urls: iterable of URL strings; duplicates within it are removed first.

    Returns None. Side effect: appends GallerySeeds to the gallery seed log
    (or nothing, if the user cancels either dialog).
    """
    gallery_seed_log = self._gallery_seed_log_get_callable()
    urls = HydrusData.DedupeList(urls)
    # urls not already present in the log
    filtered_urls = [
        url for url in urls if not gallery_seed_log.HasGalleryURL(url)
    ]
    urls_to_add = urls
    if len(filtered_urls) < len(urls):
        num_urls = len(urls)
        num_removed = num_urls - len(filtered_urls)
        message = 'Of the ' + HydrusData.ToHumanInt(
            num_urls
        ) + ' URLs you mean to add, ' + HydrusData.ToHumanInt(
            num_removed
        ) + ' are already in the gallery log. Would you like to only add new URLs or add everything (which will force a re-check of the duplicates)?'
        (result, was_cancelled) = ClientGUIDialogsQuick.GetYesNo(
            self,
            message,
            yes_label='only add new urls',
            no_label='add all urls, even duplicates',
            check_for_cancelled=True)
        if was_cancelled:
            return
        if result == QW.QDialog.Accepted:
            urls_to_add = filtered_urls
        # BUG FIX: previously a Rejected ('add all urls, even duplicates')
        # answer returned early, silently adding nothing despite the label's
        # promise. Now Rejected falls through with urls_to_add = urls, i.e.
        # everything including duplicates, matching the no_label.
    can_generate_more_pages = False
    if self._can_generate_more_pages:
        message = 'Would you like these urls to only check for new files, or would you like them to also generate subsequent gallery pages, like a regular search would?'
        (result, was_cancelled) = ClientGUIDialogsQuick.GetYesNo(
            self,
            message,
            yes_label='just check what I am adding',
            no_label='start a potential new search for every url added',
            check_for_cancelled=True)
        if was_cancelled:
            return
        # 'no' here means the stronger option: spawn follow-up gallery pages
        can_generate_more_pages = result == QW.QDialog.Rejected
    gallery_seeds = [
        ClientImportGallerySeeds.GallerySeed(
            url, can_generate_more_pages=can_generate_more_pages)
        for url in urls_to_add
    ]
    gallery_seed_log.AddGallerySeeds(gallery_seeds)
def GetFFMPEGVersion():
    """Run `ffmpeg -version` and parse the version token from its first
    output line.

    Returns the version string on success; otherwise a human-readable
    failure description ('no ffmpeg found...', 'unable to execute...', or
    'unknown' when the output could not be parsed, in which case a debug
    dump is written to the log).
    """
    version_cmd = [FFMPEG_PATH, '-version']
    try:
        sbp_kwargs = HydrusData.GetSubprocessKWArgs(text=True)
        proc = subprocess.Popen(version_cmd,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                **sbp_kwargs)
    except FileNotFoundError:
        return 'no ffmpeg found at path "{}"'.format(FFMPEG_PATH)
    except Exception as e:
        HydrusData.ShowException(e)
        return 'unable to execute ffmpeg at path "{}"'.format(FFMPEG_PATH)
    (stdout, stderr) = proc.communicate()
    del proc
    output_lines = stdout.splitlines()
    if len(output_lines) > 0:
        # typically 'ffmpeg version [VERSION] Copyright ...
        first_line = output_lines[0]
        if first_line.startswith('ffmpeg version '):
            remainder = first_line.replace('ffmpeg version ', '')
            if ' ' in remainder:
                return remainder.split(' ')[0]
    # parsing failed: alert the user briefly, then log the full debug dump
    message = 'FFMPEG was recently contacted to fetch version information. While FFMPEG could be found, the response could not be understood. Significant debug information has been printed to the log, which hydrus_dev would be interested in.'
    HydrusData.ShowText(message)
    message += os.linesep * 2
    message += str(sbp_kwargs)
    message += os.linesep * 2
    message += str(os.environ)
    message += os.linesep * 2
    message += 'STDOUT Response: {}'.format(stdout)
    message += os.linesep * 2
    message += 'STDERR Response: {}'.format(stderr)
    HydrusData.Print(message)
    global FFMPEG_NO_CONTENT_ERROR_PUBBED
    FFMPEG_NO_CONTENT_ERROR_PUBBED = True
    return 'unknown'