def DoSomeWork(self, source):
    """Pull one batch of tag pairs from source and write them to the tag service.

    Builds one ContentUpdate per tag pair and commits them synchronously.
    Returns a human-readable 'N rows/s' statement for the batch.
    """
    time_started_precise = HydrusData.GetNowPrecise()
    data = source.GetSomeData()
    # petitions and pends carry a reason; other actions do not
    if self._content_action in (HC.CONTENT_UPDATE_PETITION, HC.CONTENT_UPDATE_PEND):
        reason = 'Mass Migration Job'
    else:
        reason = None
    # (the original pre-initialized content_updates = [] here; it was dead code,
    # immediately overwritten by this comprehension)
    content_updates = [
        HydrusData.ContentUpdate(self._content_type, self._content_action, tag_pair, reason=reason)
        for tag_pair in data
    ]
    service_keys_to_content_updates = {self._tag_service_key: content_updates}
    self._controller.WriteSynchronous('content_updates', service_keys_to_content_updates)
    num_done = len(data)
    return GetBasicSpeedStatement(num_done, time_started_precise)
def EventMove(self, event):
    """Publish a top-level-window move event, throttled to one per 0.1s."""
    throttle_expired = HydrusData.TimeHasPassedFloat(self._last_move_pub + 0.1)
    if throttle_expired:
        HG.client_controller.pub('top_level_window_move_event')
        self._last_move_pub = HydrusData.GetNowPrecise()
    # always report the event as handled (previously event.ignore())
    return True
def __init__( self, *args, **kwargs ):
    """Extend the base Request with hydrus-specific bookkeeping attributes.

    All hydrus attributes start unset; they are filled in later as the
    request is parsed and authenticated.
    """
    Request.__init__( self, *args, **kwargs )
    # wall-clock start, used to report the request duration in finish()
    self.start_time = HydrusData.GetNowPrecise()
    # parsed request arguments, set during request parsing
    self.parsed_request_args = None
    # response context carrying the hydrus status code, set when the response is built
    self.hydrus_response_context = None
    # authenticated account — presumably set during auth; confirm against the request handlers
    self.hydrus_account = None
    # client api permissions object — presumably set during client api auth; confirm against the request handlers
    self.client_api_permissions = None
def DoSomeWork(self, source):
    """Pull one batch from source into our received-data list and report the speed."""
    batch_start = HydrusData.GetNowPrecise()
    batch = source.GetSomeData()
    self._data_received.extend(batch)
    return GetBasicSpeedStatement(len(batch), batch_start)
def GetBasicSpeedStatement(num_done, time_started_precise):
    """Return an 'N rows/s' string for num_done rows processed since time_started_precise.

    time_started_precise is a HydrusData.GetNowPrecise() timestamp taken when
    the work started.
    """
    if num_done == 0:
        rows_s = 0
    else:
        time_taken = HydrusData.GetNowPrecise() - time_started_precise
        if time_taken <= 0:
            # clock resolution can make two successive precise reads equal;
            # avoid ZeroDivisionError and just report the row count
            rows_s = num_done
        else:
            rows_s = int(num_done / time_taken)
    return '{} rows/s'.format(rows_s)
def _AddWatcher( self, watcher ):
    """Register a new watcher with this page and track its timestamps."""
    # new watchers do not publish their files to the page until told otherwise
    watcher.PublishToPage( False )
    watcher.Repage( self._page_key )
    self._watchers.append( watcher )
    self._last_time_watchers_changed = HydrusData.GetNowPrecise()
    watcher_key = watcher.GetWatcherKey()
    self._watcher_keys_to_watchers[ watcher_key ] = watcher
    # coarse (second-resolution) timestamp of when this watcher was added
    self._watcher_keys_to_added_timestamps[ watcher_key ] = HydrusData.GetNow()
def DoSomeWork(self, source):
    """Pull one batch of (hash, tags) rows and add them to the HTA as mappings."""
    start = HydrusData.GetNowPrecise()
    rows = source.GetSomeData()
    mappings_added = 0
    for (file_hash, tags) in rows:
        self._hta.AddMappings(file_hash, tags)
        mappings_added += len(tags)
    return GetBasicSpeedStatement(mappings_added, start)
def _RemoveWatcher(self, watcher_key):
    """Detach the given watcher from this page, if we are tracking it."""
    watcher = self._watcher_keys_to_watchers.get(watcher_key)
    if watcher is None:
        # unknown key - nothing to do
        return
    watcher.PublishToPage(False)
    watcher.Repage('dead page key')
    self._watchers.remove(watcher)
    self._last_time_watchers_changed = HydrusData.GetNowPrecise()
    del self._watcher_keys_to_watchers[watcher_key]
def __init__(self, url=None):
    """Manage a page's collection of watchers, optionally starting with one for url."""
    HydrusSerialisable.SerialisableBase.__init__(self)
    self._lock = threading.Lock()
    # placeholder until the owning page assigns a real key via Repage/SetPageKey - TODO confirm
    self._page_key = 'initialising page key'
    self._watchers = HydrusSerialisable.SerialisableList()
    self._highlighted_watcher_url = None
    # per-page defaults, copied from the client's global options
    self._checker_options = HG.client_controller.new_options.GetDefaultWatcherCheckerOptions()
    self._file_import_options = HG.client_controller.new_options.GetDefaultFileImportOptions('loud')
    self._tag_import_options = ClientImportOptions.TagImportOptions(is_default=True)
    # lookup tables keyed by each watcher's key
    self._watcher_keys_to_watchers = {}
    self._watcher_keys_to_added_timestamps = {}
    self._watcher_keys_to_already_in_timestamps = {}
    self._watchers_repeating_job = None
    # status cache starts dirty so the first status request regenerates it
    self._status_dirty = True
    self._status_cache = None
    self._status_cache_generation_time = 0
    #
    if url is not None:
        watcher = WatcherImport()
        watcher.SetURL(url)
        self._AddWatcher(watcher)
    self._last_time_watchers_changed = HydrusData.GetNowPrecise()
    # (value, range) last published to the page, plus throttle for the next publish check
    self._last_pubbed_value_range = (0, 0)
    self._next_pub_value_check_time = 0
def DoSomeWork(self, source):
    """Pull one batch of (hash, tags) rows and write them to the tag service as mapping updates."""
    start = HydrusData.GetNowPrecise()
    rows = source.GetSomeData()
    # flatten to (tag, hash) pairs, then regroup by tag
    pairs = [(tag, file_hash) for (file_hash, tags) in rows for tag in tags]
    num_done = len(pairs)
    tags_to_hashes = HydrusData.BuildKeyToListDict(pairs)
    # NOTE(review): only petitions get a reason here, but the tag-pair variant of
    # this job gives pends a reason too -- confirm which is intended
    reason = 'Mass Migration Job' if self._content_action == HC.CONTENT_UPDATE_PETITION else None
    content_updates = [
        HydrusData.ContentUpdate(HC.CONTENT_TYPE_MAPPINGS, self._content_action, (tag, hashes), reason=reason)
        for (tag, hashes) in tags_to_hashes.items()
    ]
    self._controller.WriteSynchronous('content_updates', {self._tag_service_key: content_updates})
    return GetBasicSpeedStatement(num_done, start)
def finish(self):
    """Complete the request and print a one-line access-log entry."""
    HydrusRequest.finish(self)
    host = self.getHost()
    # prefer the hydrus status, then twisted's code attribute, else assume OK
    if self.hydrus_response_context is not None:
        status_text = str(self.hydrus_response_context.GetStatusCode())
    elif hasattr(self, 'code'):
        status_text = str(self.code)
    else:
        status_text = '200'
    time_taken = HydrusData.GetNowPrecise() - self.start_time
    pretty_time = HydrusData.TimeDeltaToPrettyTimeDelta(time_taken)
    # self.method and self.path arrive as bytes from twisted - decode for display
    parts = (str(host.port), str(self.method, 'utf-8'), str(self.path, 'utf-8'), status_text, 'in', pretty_time)
    HydrusData.Print(' '.join(parts))
def AnalyzeDueTables(self, maintenance_mode=HC.MAINTENANCE_FORCED, stop_time=None, force_reanalyze=False):
    """Run SQLite ANALYZE on any tables that are due it, with a modal progress popup.

    Stops early when the maintenance window closes (ShouldStopThisWork) or the
    user cancels the job.  force_reanalyze makes every table due.
    """
    names_to_analyze = self.GetTableNamesDueAnalysis(force_reanalyze=force_reanalyze)
    if len(names_to_analyze) > 0:
        job_key = ClientThreading.JobKey(maintenance_mode=maintenance_mode, cancellable=True)
        try:
            job_key.SetStatusTitle('database maintenance - analyzing')
            HG.client_controller.pub('modal_message', job_key)
            # shuffle so repeatedly-interrupted runs make progress across all tables
            random.shuffle(names_to_analyze)
            for name in names_to_analyze:
                HG.client_controller.frame_splash_status.SetText('analyzing ' + name)
                job_key.SetVariable('popup_text_1', 'analyzing ' + name)
                # brief yield so the UI can update before the blocking ANALYZE
                time.sleep(0.02)
                started = HydrusData.GetNowPrecise()
                self.AnalyzeTable(name)
                time_took = HydrusData.GetNowPrecise() - started
                # only log the slow ones
                if time_took > 1:
                    HydrusData.Print('Analyzed ' + name + ' in ' + HydrusData.TimeDeltaToPrettyTimeDelta(time_took))
                p1 = HG.client_controller.ShouldStopThisWork(maintenance_mode, stop_time=stop_time)
                p2 = job_key.IsCancelled()
                if p1 or p2:
                    break
            self._Execute('ANALYZE sqlite_master;')  # this reloads the current stats into the query planner
            job_key.SetVariable('popup_text_1', 'done!')
            HydrusData.Print(job_key.ToString())
        finally:
            # always close out the popup, even on error or early exit
            job_key.Finish()
            job_key.Delete(10)
def THREADSearchPotentials(self):
    """Background thread: search similar-files records for potential duplicate pairs.

    Works in ~0.5s synchronous write packets until everything within the
    configured search distance has been searched, the job is cancelled, or the
    model shuts down.  Progress is shown via a cancellable popup job key.
    """
    try:
        search_distance = HG.client_controller.new_options.GetInteger('similar_files_duplicate_pairs_search_distance')
        with self._lock:
            if self._similar_files_maintenance_status is None:
                return
            searched_distances_to_count = self._similar_files_maintenance_status
            total_num_files = sum(searched_distances_to_count.values())
            # a file searched at distance >= our setting counts as done
            num_searched = sum((count for (value, count) in searched_distances_to_count.items() if value is not None and value >= search_distance))
            all_files_searched = num_searched >= total_num_files
            if all_files_searched:
                return  # no work to do
            num_searched_estimate = num_searched
        HG.client_controller.pub('new_similar_files_maintenance_numbers')
        job_key = ClientThreading.JobKey(cancellable=True)
        job_key.SetStatusTitle('searching for potential duplicates')
        HG.client_controller.pub('message', job_key)
        still_work_to_do = True
        while still_work_to_do:
            # re-read each pass in case the user changed the setting mid-search
            search_distance = HG.client_controller.new_options.GetInteger('similar_files_duplicate_pairs_search_distance')
            start_time = HydrusData.GetNowPrecise()
            (still_work_to_do, num_done) = HG.client_controller.WriteSynchronous('maintain_similar_files_search_for_potential_duplicates', search_distance, maintenance_mode=HC.MAINTENANCE_FORCED, job_key=job_key, work_time_float=0.5)
            time_it_took = HydrusData.GetNowPrecise() - start_time
            num_searched_estimate += num_done
            # estimate overshot the known total: refresh the real numbers from the db
            if num_searched_estimate > total_num_files:
                similar_files_maintenance_status = HG.client_controller.Read('similar_files_maintenance_status')
                if similar_files_maintenance_status is None:
                    break
                with self._lock:
                    self._similar_files_maintenance_status = similar_files_maintenance_status
                    searched_distances_to_count = self._similar_files_maintenance_status
                    total_num_files = max(num_searched_estimate, sum(searched_distances_to_count.values()))
            text = 'searching: {}'.format(HydrusData.ConvertValueRangeToPrettyString(num_searched_estimate, total_num_files))
            job_key.SetVariable('popup_text_1', text)
            job_key.SetVariable('popup_gauge_1', (num_searched_estimate, total_num_files))
            if job_key.IsCancelled() or HG.model_shutdown:
                break
            time.sleep(min(5, time_it_took))  # ideally 0.5s, but potentially longer
        job_key.Delete()
    finally:
        with self._lock:
            self._currently_doing_potentials_search = False
        self.RefreshMaintenanceNumbers()
        self.NotifyNewPotentialsSearchNumbers()
def ProcessRepositoryDefinitions(self, service_key: bytes, definition_hash: bytes, definition_iterator_dict, content_types, job_key, work_time):
    """Process one repository definition update, mapping remote service ids to local ids.

    Works in autothrottled chunks until work_time (seconds) expires or job_key
    is cancelled, returning the number of rows processed either way.  A fully
    consumed iterator is removed from definition_iterator_dict, so a resumed
    call skips straight to the remaining work; only when both iterators are
    done is the update marked processed.
    """
    # ignore content_types for now
    service_id = self.modules_services.GetServiceId(service_key)
    precise_time_to_stop = HydrusData.GetNowPrecise() + work_time
    (hash_id_map_table_name, tag_id_map_table_name) = GenerateRepositoryDefinitionTableNames(service_id)
    num_rows_processed = 0
    if 'service_hash_ids_to_hashes' in definition_iterator_dict:
        i = definition_iterator_dict['service_hash_ids_to_hashes']
        for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks(i, 50, precise_time_to_stop):
            inserts = []
            for (service_hash_id, hash) in chunk:
                hash_id = self.modules_hashes_local_cache.GetHashId(hash)
                inserts.append((service_hash_id, hash_id))
            self._ExecuteMany('REPLACE INTO {} ( service_hash_id, hash_id ) VALUES ( ?, ? );'.format(hash_id_map_table_name), inserts)
            num_rows_processed += len(inserts)
            # out of time or cancelled: return with the iterator still in the dict so we can resume
            if HydrusData.TimeHasPassedPrecise(precise_time_to_stop) or job_key.IsCancelled():
                return num_rows_processed
        del definition_iterator_dict['service_hash_ids_to_hashes']
    if 'service_tag_ids_to_tags' in definition_iterator_dict:
        i = definition_iterator_dict['service_tag_ids_to_tags']
        for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks(i, 50, precise_time_to_stop):
            inserts = []
            for (service_tag_id, tag) in chunk:
                try:
                    tag_id = self.modules_tags_local_cache.GetTagId(tag)
                except HydrusExceptions.TagSizeException:
                    # in future what we'll do here is assign this id to the 'do not show' table, so we know it exists, but it is knowingly filtered out
                    # _or something_. maybe a small 'invalid' table, so it isn't mixed up with potentially re-addable tags
                    tag_id = self.modules_tags_local_cache.GetTagId('invalid repository tag')
                inserts.append((service_tag_id, tag_id))
            self._ExecuteMany('REPLACE INTO {} ( service_tag_id, tag_id ) VALUES ( ?, ? );'.format(tag_id_map_table_name), inserts)
            num_rows_processed += len(inserts)
            # out of time or cancelled: return with the iterator still in the dict so we can resume
            if HydrusData.TimeHasPassedPrecise(precise_time_to_stop) or job_key.IsCancelled():
                return num_rows_processed
        del definition_iterator_dict['service_tag_ids_to_tags']
    self.SetUpdateProcessed(service_id, definition_hash, (HC.CONTENT_TYPE_DEFINITIONS, ))
    return num_rows_processed
def MainLoop(self):
    """Thumbnail waterfall worker loop.

    Renders queued thumbnails in small time-boxed batches, publishing results
    per page, then performs one delayed thumbnail regeneration per pass when
    there is no more urgent waterfall work.
    """
    while not HydrusThreading.IsThreadShuttingDown():
        time.sleep(0.00001)
        with self._lock:
            do_wait = len(self._waterfall_queue) == 0 and len(self._delayed_regeneration_queue) == 0
        if do_wait:
            self._waterfall_event.wait(1)
            self._waterfall_event.clear()
        start_time = HydrusData.GetNowPrecise()
        stop_time = start_time + 0.005  # a bit of a typical frame
        page_keys_to_rendered_medias = collections.defaultdict(list)
        num_done = 0
        max_at_once = 16
        # render until the frame budget is spent or we hit the per-pass cap
        while not HydrusData.TimeHasPassedPrecise(stop_time) and num_done <= max_at_once:
            with self._lock:
                if len(self._waterfall_queue) == 0:
                    break
                result = self._waterfall_queue.pop()
                if len(self._waterfall_queue) == 0:
                    self._waterfall_queue_empty_event.set()
                # keep the fast-membership mirror of the queue in sync
                self._waterfall_queue_quick.discard(result)
            (page_key, media) = result
            if media.GetDisplayMedia() is not None:
                self.GetThumbnail(media)
                page_keys_to_rendered_medias[page_key].append(media)
            num_done += 1
        if len(page_keys_to_rendered_medias) > 0:
            for (page_key, rendered_medias) in page_keys_to_rendered_medias.items():
                self._controller.pub('waterfall_thumbnails', page_key, rendered_medias)
            time.sleep(0.00001)
        # now we will do regen if appropriate
        with self._lock:
            # got more important work or no work to do
            if len(self._waterfall_queue) > 0 or len(self._delayed_regeneration_queue) == 0 or HG.client_controller.CurrentlyPubSubbing():
                continue
            media_result = self._delayed_regeneration_queue.pop()
            self._delayed_regeneration_queue_quick.discard(media_result)
        if HG.file_report_mode:
            hash = media_result.GetHash()
            HydrusData.ShowText('Thumbnail {} now regenerating from source.'.format(hash.hex()))
        try:
            self._controller.files_maintenance_manager.RunJobImmediately([media_result], ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL, pub_job_key=False)
        except HydrusExceptions.FileMissingException:
            # source file gone - nothing we can regenerate from
            pass
        except Exception as e:
            hash = media_result.GetHash()
            summary = 'The thumbnail for file {} was incorrect, but a later attempt to regenerate it or load the new file back failed.'.format(hash.hex())
            self._HandleThumbnailException(e, summary)
def MainLoop(self):
    """Tag display maintenance daemon loop.

    After an initial wait, repeatedly syncs tag display for one service at a
    time, adapting the wait between work packets to how long the work took.
    Wakes early when _wake_event is set; exits on client shutdown.
    """
    try:
        INIT_WAIT = 10
        self._wake_event.wait(INIT_WAIT)
        while not (HG.started_shutdown or self._shutdown):
            self._controller.WaitUntilViewFree()
            if self._WorkPermitted() and self._WorkToDo():
                try:
                    service_key = self._GetServiceKeyToWorkOn()
                except HydrusExceptions.NotFoundException:
                    # no service needs work right now - back off briefly
                    time.sleep(5)
                    continue
                work_time = self._GetWorkTime(service_key)
                start_time = HydrusData.GetNowPrecise()
                still_needs_work = self._controller.WriteSynchronous('sync_tag_display_maintenance', service_key, work_time)
                finish_time = HydrusData.GetNowPrecise()
                total_time_took = finish_time - start_time
                self._service_keys_to_needs_work[service_key] = still_needs_work
                # wait is scaled to how long the packet actually took
                wait_time = self._GetAfterWorkWaitTime(service_key, work_time, total_time_took)
                self._last_loop_work_time = work_time
            else:
                wait_time = 10
            self._wake_event.wait(wait_time)
            self._wake_event.clear()
            if self._new_data_event.is_set():
                # brief pause so a burst of new-data events coalesces into one reset
                time.sleep(1)
                self._last_last_new_data_event_time = self._last_new_data_event_time
                self._last_new_data_event_time = HydrusData.GetNow()
                # new data invalidates our cached per-service work state
                self._service_keys_to_needs_work = {}
                self._new_data_event.clear()
    finally:
        # signal waiters (e.g. shutdown) that this thread is done
        self._mainloop_finished = True
def FrameActivated(self):
    """Record the precise time at which the frame was last activated."""
    self._frame_activated_time = HydrusData.GetNowPrecise()