def _CheckShareAuthorised( self, share_key ):
    """Ensure this share may be served: enforce data usage, then reject expired shares."""
    
    self._CheckDataUsage()
    
    share_info = self._GetInfo( share_key )
    
    share_timeout = share_info[ 'timeout' ]
    
    # a share with a None timeout never expires
    if share_timeout is None:
        
        return
        
    
    if HydrusData.TimeHasPassed( share_timeout ):
        
        raise HydrusExceptions.NotFoundException( 'This share has expired.' )
        
    
def _OverrideBandwidthIfAppropriate( self ):
    """If configured to auto-override, force the network job past bandwidth rules once it is 5s old."""
    
    job = self._network_job
    
    # nothing to do until a job exists and has been handed to the engine
    if job is None or job.NoEngineYet():
        
        return
        
    
    old_enough = HydrusData.TimeHasPassed( job.GetCreationTime() + 5 )
    
    if self._auto_override_bandwidth_rules and old_enough:
        
        job.OverrideBandwidth()
        
    
def SetVariable(self, name, value):
    """Store a named variable on this job under the variable lock.
    
    Periodically sleeps a near-zero amount so other threads (presumably a UI
    polling these variables) get a chance to run — TODO confirm intent.
    """
    
    with self._variable_lock:
        self._variables[name] = value
    
    # NOTE(review): the sleep/schedule update is placed outside the lock here,
    # matching the sequential order of the statements — confirm against upstream.
    if HydrusData.TimeHasPassed(self._next_ui_update_pause):
        time.sleep(0.00001)
        self._next_ui_update_pause = HydrusData.GetNow( ) + self._ui_update_pause_period
def WaitIfNeeded(self):
    """Sleep in escalating scheduled steps, then honour pause/cancel state.
    
    Returns:
        (i_paused, should_quit): whether we paused at all, and whether the
        caller should abandon its work because the job was cancelled.
    """
    
    # three tiers of periodic sleep: tiny, medium, long, each on its own schedule
    if HydrusData.TimeHasPassed(self._next_yield_pause):
        time.sleep(0.1)
        self._next_yield_pause = HydrusData.GetNow() + self._yield_pause_period
    
    if HydrusData.TimeHasPassed(self._next_bigger_pause):
        time.sleep(1)
        self._next_bigger_pause = HydrusData.GetNow() + self._bigger_pause_period
    
    # BUGFIX: the original tested self._longer_pause_period (the period constant)
    # here instead of self._next_longer_pause (the next-fire timestamp), so this
    # tier fired on every call once the clock passed the small period value.
    # Test the timestamp, consistent with the two tiers above.
    if HydrusData.TimeHasPassed(self._next_longer_pause):
        time.sleep(10)
        self._next_longer_pause = HydrusData.GetNow( ) + self._longer_pause_period
    
    i_paused = False
    should_quit = False
    
    # spin while paused; a finished job breaks out so we do not hang forever
    while self.IsPaused():
        i_paused = True
        time.sleep(0.1)
        if self.IsDone():
            break
    
    if self.IsCancelled():
        should_quit = True
    
    return (i_paused, should_quit)
def CleanUp( self ):
    """Commit the archive's big job, optimise it after a long run, then close and release it."""
    
    archive = self._hta
    
    archive.CommitBigJob()
    
    # only pay for an optimise pass if this job ran for more than two minutes
    if HydrusData.TimeHasPassed( self._time_started + 120 ):
        
        archive.Optimise()
        
    
    archive.Close()
    
    self._hta = None
def _CleanAddedTimestamps(self):
    """Drop 'just added'/'already in' watcher timestamps older than ADDED_TIMESTAMP_DURATION.
    
    The original duplicated the prune loop for each dict; factored into one helper.
    """
    
    def _prune(timestamps_dict):
        # snapshot the keys so we can delete from the dict while iterating
        for key in list(timestamps_dict.keys()):
            if HydrusData.TimeHasPassed(timestamps_dict[key] + self.ADDED_TIMESTAMP_DURATION):
                del timestamps_dict[key]
    
    _prune(self._watcher_keys_to_added_timestamps)
    _prune(self._watcher_keys_to_already_in_timestamps)
def GetExternalIP():
    """Fetch the external IP as reported by the router via `upnpc -l`, caching it for 24h.
    
    Raises if upnpc errors, if its output cannot be parsed, or if the device
    reports 0.0.0.0. Returns the cached IP string otherwise.
    """
    
    # refresh the module-level cache at most once a day
    if HydrusData.TimeHasPassed(EXTERNAL_IP['time'] + (3600 * 24)):
        cmd = [upnpc_path, '-l']
        sbp_kwargs = HydrusData.GetSubprocessKWArgs(text=True)
        HydrusData.CheckProgramIsNotShuttingDown()
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **sbp_kwargs)
        HydrusData.WaitForProcessToFinish(p, 30)
        (stdout, stderr) = HydrusThreading.SubprocessCommunicate(p)
        if stderr is not None and len(stderr) > 0:
            raise Exception('Problem while trying to fetch External IP:' + os.linesep * 2 + str(stderr))
        else:
            try:
                lines = HydrusText.DeserialiseNewlinedTexts(stdout)
                # the 'ExternalIPAddress = ip' line sits immediately before this
                # port-mapping header in upnpc's output, hence lines[i - 1]
                i = lines.index( 'i protocol exPort->inAddr:inPort description remoteHost leaseTime' )
                # ExternalIPAddress = ip
                (gumpf, external_ip_address) = lines[i - 1].split(' = ')
            except ValueError:
                # .index or .split failed: output shape was not what we expected
                raise Exception('Could not parse external IP!')
            if external_ip_address == '0.0.0.0':
                raise Exception( 'Your UPnP device returned your external IP as 0.0.0.0! Try rebooting it, or overwrite it in options!' )
            EXTERNAL_IP['ip'] = external_ip_address
            EXTERNAL_IP['time'] = HydrusData.GetNow()
    return EXTERNAL_IP['ip']
def IsSyncDue( self ):
    """Return True if this subscription query should sync now."""
    
    if HG.subscription_report_mode:
        
        report = 'Query "' + self._query + '" IsSyncDue test. Paused/dead status is {}/{}, check time due is {}, and check_now is {}.'.format( self._paused, self.IsDead(), HydrusData.TimeHasPassed( self._next_check_time ), self._check_now )
        
        HydrusData.ShowText( report )
        
    
    # a paused or dead query never syncs
    if self._paused or self.IsDead():
        
        return False
        
    
    return self._check_now or HydrusData.TimeHasPassed( self._next_check_time )
def CheckCanDoNetworkWork(no_work_until: int, no_work_until_reason: str):
    """Veto network work while a no-work window is active or the engine is busy.
    
    Raises:
        HydrusExceptions.VetoException in either case.
    """
    
    # still inside the 'no work until' window?
    if not HydrusData.TimeHasPassed(no_work_until):
        expiry_text = HydrusData.ConvertTimestampToPrettyExpires(no_work_until)
        raise HydrusExceptions.VetoException('{}: {}'.format(expiry_text, no_work_until_reason))
    
    if HG.client_controller.network_engine.IsBusy():
        raise HydrusExceptions.VetoException('network engine is too busy!')
def IsSyncDue( self ):
    """Return True if this query log container's query should sync now."""
    
    if HG.subscription_report_mode:
        
        report = 'Query "' + self._query_text + '" IsSyncDue test. Paused/dead/container status is {}/{}/{}, check time due is {}, and check_now is {}.'.format( self._paused, self.IsDead(), self.IsLogContainerOK(), HydrusData.TimeHasPassed( self._next_check_time ), self._check_now )
        
        HydrusData.ShowText( report )
        
    
    # queries with no future work never sync
    if not self.IsExpectingToWorkInFuture():
        
        return False
        
    
    return self._check_now or HydrusData.TimeHasPassed( self._next_check_time )
def SleepCheck( self ) -> None:
    """Detect that the machine has been asleep and open a wake-up grace period.
    
    This fires periodically; if far longer than expected has elapsed since the
    previous firing, we assume the machine slept in between.
    """
    
    with self._sleep_lock:
        # more than 60s since the last check => we likely missed firings while asleep
        if HydrusData.TimeHasPassed( self.GetTimestamp( 'last_sleep_check' ) + 60 ):
            # it has been way too long since this method last fired, so we've prob been asleep
            self._just_woke_from_sleep = True
            self.ResetIdleTimer() # this will stop the background jobs from kicking in as soon as the grace period is over
            wake_delay_period = self._GetWakeDelayPeriod()
            self.SetTimestamp( 'now_awake', HydrusData.GetNow() + wake_delay_period ) # enough time for ethernet to get back online and all that
            self._ShowJustWokeToUser()
        elif self._just_woke_from_sleep and HydrusData.TimeHasPassed( self.GetTimestamp( 'now_awake' ) ):
            # the grace period has elapsed; we are fully awake again
            self._just_woke_from_sleep = False
        self.TouchTimestamp( 'last_sleep_check' )
def _CleanCache( self ):
    """Every five seconds, evict parsed soups/jsons not accessed within the last ten."""
    
    if not HydrusData.TimeHasPassed( self._next_clean_cache_time ):
        
        return
        
    
    for cache in ( self._html_to_soups, self._json_to_jsons ):
        
        # collect first, then delete, so we do not mutate the dict mid-iteration
        stale_keys = [ data for ( data, ( last_accessed, parsed_object ) ) in cache.items() if HydrusData.TimeHasPassed( last_accessed + 10 ) ]
        
        for stale_key in stale_keys:
            
            del cache[ stale_key ]
            
        
    
    self._next_clean_cache_time = HydrusData.GetNow() + 5
def CleanUpOldTempPaths():
    """Delete tracked temp files once they are over a minute stale.
    
    Entries whose deletion keeps failing are dropped from tracking after ten minutes.
    """
    
    with TEMP_PATH_LOCK:
        
        # snapshot so we can discard from the set while iterating
        for entry in list(IN_USE_TEMP_PATHS):
            
            (time_failed, temp_path) = entry
            
            if not HydrusData.TimeHasPassed(time_failed + 60):
                continue
            
            try:
                os.remove(temp_path)
                IN_USE_TEMP_PATHS.discard(entry)
            except OSError:
                # still cannot delete it; stop tracking after ten minutes
                if HydrusData.TimeHasPassed(time_failed + 600):
                    IN_USE_TEMP_PATHS.discard(entry)
def DoWork(self):
    """Run this export folder's export if it is due (or forced via run_now).
    
    Any exception pauses the folder and surfaces the error to the user; the
    last-checked time and run_now flag are always updated, and the folder is
    re-serialised to the db.
    """
    
    # due either on the regular schedule or an explicit 'run now'
    regular_run_due = self._run_regularly and HydrusData.TimeHasPassed( self._last_checked + self._period)
    good_to_go = (regular_run_due or self._run_now) and not self._paused
    if not good_to_go:
        return
    try:
        # validate the destination before exporting
        if self._path == '':
            raise Exception('No path set for the folder!')
        if not os.path.exists(self._path):
            raise Exception('The path, "{}", does not exist!'.format( self._path))
        if not os.path.isdir(self._path):
            raise Exception('The path, "{}", is not a directory!'.format( self._path))
        self._DoExport()
        # a clean run clears any previous error
        self._last_error = ''
    except Exception as e:
        # pause the folder so a broken config does not error on every cycle
        self._paused = True
        HydrusData.ShowText( 'The export folder "' + self._name + '" encountered an error! It has now been paused. Please check the folder\'s settings and maybe report to hydrus dev if the error is complicated! The error follows:' )
        HydrusData.ShowException(e)
        self._last_error = str(e)
    finally:
        # always bookkeep and persist, success or failure
        self._last_checked = HydrusData.GetNow()
        self._run_now = False
        HG.client_controller.WriteSynchronous('serialisable', self)
def SleepCheck( self ):
    """Detect that the machine has been asleep and flag a 15s wake-up grace period."""
    
    with self._sleep_lock:
        # only re-evaluate once any previous wake grace period has elapsed
        if HydrusData.TimeHasPassed( self._timestamps[ 'now_awake' ] ):
            last_sleep_check = self._timestamps[ 'last_sleep_check' ]
            if HydrusData.TimeHasPassed( last_sleep_check + 600 ):
                # it has been way too long since this method last fired, so we've prob been asleep
                self._just_woke_from_sleep = True
                self.ResetIdleTimer() # this will stop the background jobs from kicking in as soon as the grace period is over
                self._timestamps[ 'now_awake' ] = HydrusData.GetNow() + 15 # enough time for ethernet to get back online and all that
            else:
                self._just_woke_from_sleep = False
        self._timestamps[ 'last_sleep_check' ] = HydrusData.GetNow()
def _CleanSessionCookies(self, network_context, session):
    """Clear session cookies once the context's session timeout elapses; always drop expired cookies."""
    
    timeouts = self._network_contexts_to_session_timeouts
    
    # an unseen context starts timed-out, forcing an immediate session-cookie clear
    if network_context not in timeouts:
        timeouts[network_context] = 0
    
    if HydrusData.TimeHasPassed(timeouts[network_context]):
        session.cookies.clear_session_cookies()
        timeouts[network_context] = HydrusData.GetNow() + self.SESSION_TIMEOUT
    
    session.cookies.clear_expired_cookies()
def GetWatcherSimpleStatus( self, watcher ):
    """Return a short status string for a watcher, flagging recently added or duplicate ones."""
    
    with self._lock:
        
        watcher_key = watcher.GetWatcherKey()
        
        added = self._watcher_keys_to_added_timestamps
        already_in = self._watcher_keys_to_already_in_timestamps
        
        if watcher_key in added:
            
            if HydrusData.TimeHasPassed( added[ watcher_key ] + self.ADDED_TIMESTAMP_DURATION ):
                
                # the 'just added' flag has aged out; prune all the stale entries
                self._CleanAddedTimestamps()
                
            else:
                
                return 'just added'
                
            
        
        if watcher_key in already_in:
            
            if HydrusData.TimeHasPassed( already_in[ watcher_key ] + self.ADDED_TIMESTAMP_DURATION ):
                
                self._CleanAddedTimestamps()
                
            else:
                
                return 'already watching'
                
            
        
        return watcher.GetSimpleStatus()
def _ShutdownDaemons(self):
    """Cancel all daemon jobs and wait (up to 30s) for them to stop working.
    
    Reports status while waiting; the job dict is cleared regardless.
    """
    
    for job in self._daemon_jobs.values():
        job.Cancel()
    
    started = HydrusData.GetNow()
    
    # idiom fix: any(...) replaces the original 'True in (generator)' construction
    while any(job.CurrentlyWorking() for job in self._daemon_jobs.values()):
        self._ReportShutdownDaemonsStatus()
        time.sleep(0.1)
        # hard 30s cap so shutdown cannot hang on a stuck daemon
        if HydrusData.TimeHasPassed(started + 30):
            break
    
    self._daemon_jobs = {}
def GenerateLiveStatusText(text: str, paused: bool, no_work_until: int, no_work_until_reason: str) -> str:
    """Build the live status string, prioritising an active no-work window over pause state."""
    
    # an active no-work window overrides everything else
    if not HydrusData.TimeHasPassed(no_work_until):
        expiry = HydrusData.ConvertTimestampToPrettyExpires(no_work_until)
        return '{}: {}'.format(expiry, no_work_until_reason)
    
    if paused and text != 'paused':
        text = 'pausing' if text == '' else 'pausing - {}'.format(text)
    
    return text
def RenderPageToFile(path, temp_path, page_index):
    """Render one page of the swf at `path` to `temp_path` using swfrender.
    
    Raises:
        Exception if rendering takes longer than 60 seconds.
    """
    
    cmd = [SWFRENDER_PATH, path, '-o', temp_path, '-p', str(page_index)]
    
    timeout = HydrusData.GetNow() + 60
    
    sbp_kwargs = HydrusData.GetSubprocessKWArgs()
    
    p = subprocess.Popen(cmd, **sbp_kwargs)
    
    while p.poll() is None:
        if HydrusData.TimeHasPassed(timeout):
            p.terminate()
            # BUGFIX: reap the terminated child before raising; the original
            # raised immediately, leaving the process unwaited-for (a zombie)
            p.communicate()
            raise Exception('Could not render the swf page within 60 seconds!')
        time.sleep(0.5)
    
    p.communicate()
def ShouldStopThisWork(self, maintenance_mode, stop_time=None):
    """Decide whether in-progress maintenance work should stop.
    
    Stops when idle-mode work loses idleness, shutdown-mode work is disabled,
    or the optional stop time has passed.
    """
    
    # flattened guard clauses; the mode checks are mutually exclusive
    if maintenance_mode == HC.MAINTENANCE_IDLE and not self.CurrentlyIdle():
        return True
    
    if maintenance_mode == HC.MAINTENANCE_SHUTDOWN and not HG.do_idle_shutdown_work:
        return True
    
    if stop_time is not None and HydrusData.TimeHasPassed(stop_time):
        return True
    
    return False
def _DoAWait(self, wait_time, event_can_wake=True):
    """Wait roughly `wait_time` seconds in one-second steps.
    
    When event_can_wake is True, a set event wakes us immediately (clearing it);
    thread-shutdown is checked after every step.
    """
    
    deadline = HydrusData.GetNow() + wait_time
    
    while not HydrusData.TimeHasPassed(deadline):
        
        if event_can_wake:
            
            # wait() returns True when the event was set
            if self._event.wait(1.0):
                self._event.clear()
                return
            
        else:
            
            time.sleep(1.0)
            
        
        CheckIfThreadShuttingDown()
def MaintainCache( self ):
    """Evict items from the front of the FIFO until the oldest entry is within the timeout."""
    
    with self._lock:
        
        while len( self._keys_fifo ) > 0:
            
            # the fifo is insertion-ordered, so the first entry is the oldest
            ( oldest_key, last_access_time ) = next( iter( self._keys_fifo.items() ) )
            
            if not HydrusData.TimeHasPassed( last_access_time + self._timeout ):
                
                # oldest entry is still fresh, so everything behind it is too
                break
                
            
            self._DeleteItem()
            
        
    
def TryToConsumeAGalleryToken(self, second_level_domain, query_type):
    """Attempt to claim a gallery-page token for this domain.
    
    Returns:
        (True, 0) when the token is consumed, or (False, next_timestamp) when
        the caller must wait until next_timestamp.
    
    Raises:
        NotImplementedError for an unrecognised query_type.
    """
    
    with self._lock:
        
        # pick the per-type timestamp dict and configured delay
        if query_type == 'download page':
            timestamps_dict = self._last_pages_gallery_query_timestamps
            delay = HG.client_controller.new_options.GetInteger('gallery_page_wait_period_pages')
        elif query_type == 'subscription':
            timestamps_dict = self._last_subscriptions_gallery_query_timestamps
            delay = HG.client_controller.new_options.GetInteger('gallery_page_wait_period_subscriptions')
        elif query_type == 'watcher':
            timestamps_dict = self._last_watchers_query_timestamps
            delay = HG.client_controller.new_options.GetInteger('watcher_page_wait_period')
        else:
            raise NotImplementedError('Unknown query type')
        
        next_timestamp = timestamps_dict[second_level_domain] + delay
        
        if not HydrusData.TimeHasPassed(next_timestamp):
            return (False, next_timestamp)
        
        # token granted; restart this domain's clock
        timestamps_dict[second_level_domain] = HydrusData.GetNow()
        
        return (True, 0)
def GetAccessKey( self, session_key ):
    """Return the access key for a session, refreshing the session's expiry.
    
    Raises:
        HydrusExceptions.DataMissing for an unknown session key.
        HydrusExceptions.SessionException for an expired session (which is removed).
    """
    
    with self._lock:
        
        if session_key not in self._session_keys_to_access_keys_and_expirys:
            
            raise HydrusExceptions.DataMissing( 'Did not find an entry for that session key!' )
            
        
        ( access_key, session_expiry ) = self._session_keys_to_access_keys_and_expirys[ session_key ]
        
        if HydrusData.TimeHasPassed( session_expiry ):
            
            # BUGFIX: the original deleted by session_expiry (a timestamp), which
            # would KeyError — the mapping is keyed by session_key
            del self._session_keys_to_access_keys_and_expirys[ session_key ]
            
            raise HydrusExceptions.SessionException( 'That session key has expired!' )
            
        
        # touch the session: extend its expiry from now
        self._session_keys_to_access_keys_and_expirys[ session_key ] = ( access_key, HydrusData.GetNow() + SESSION_EXPIRY )
        
        return access_key
def REPEATINGWorkOnWatchers( self ):
    """Periodic housekeeping for this multi-watcher page.
    
    Cancels its own repeating job when the page should stop working, marks the
    page status dirty when any watcher's file seed cache has updated, and
    periodically publishes a page-name refresh when the value range changes.
    """
    
    with self._lock:
        if ClientImporting.PageImporterShouldStopWorking( self._page_key ):
            # the page is going away; stop this repeating job permanently
            self._watchers_repeating_job.Cancel()
            return
        if not self._status_dirty: # if we think we are clean
            for watcher in self._watchers:
                file_seed_cache = watcher.GetFileSeedCache()
                if file_seed_cache.GetStatus().GetGenerationTime() > self._status_cache.GetGenerationTime(): # has there has been an update?
                    self._SetDirty()
                    # one dirty watcher is enough; no need to scan the rest
                    break
        # throttle the value-range publish check to once per five seconds
        if HydrusData.TimeHasPassed( self._next_pub_value_check_time ):
            self._next_pub_value_check_time = HydrusData.GetNow() + 5
            current_value_range = self.GetValueRange()
            # only pub when something actually changed, to avoid UI churn
            if current_value_range != self._last_pubbed_value_range:
                self._last_pubbed_value_range = current_value_range
                HG.client_controller.pub( 'refresh_page_name', self._page_key )
def GetSimpleStatus( self ):
    """Return a short summary of this watcher's current state."""
    
    with self._lock:
        
        # terminal checker states first
        if self._checking_status == ClientImporting.CHECKER_STATUS_404:
            
            return '404'
            
        
        if self._checking_status == ClientImporting.CHECKER_STATUS_DEAD:
            
            return 'DEAD'
            
        
        # an active no-work window takes precedence over 'working'
        if not HydrusData.TimeHasPassed( self._no_work_until ):
            
            return self._no_work_until_reason + ' - ' + 'next check ' + ClientData.TimestampToPrettyTimeDelta( self._next_check_time )
            
        
        if self._watcher_status != '' or self._file_status != '':
            
            return 'working'
            
        
        return ''
def GetAccount( self, service_key, session_key ):
    """Look up the account for a service session, expiring stale sessions.
    
    Raises:
        HydrusExceptions.SessionException when the session is missing or expired.
    """
    
    with self._lock:
        
        session_keys_to_sessions = self._service_keys_to_session_keys_to_sessions[ service_key ]
        
        if session_key in session_keys_to_sessions:
            
            ( account_key, expires ) = session_keys_to_sessions[ session_key ]
            
            if HydrusData.TimeHasPassed( expires ):
                
                # expired: remove it and fall through to the not-found error
                del session_keys_to_sessions[ session_key ]
                
            else:
                
                account = self._service_keys_to_account_keys_to_accounts[ service_key ][ account_key ]
                
                return account
                
            
        
        raise HydrusExceptions.SessionException( 'Did not find that session! Try again!' )
def CheckCanVacuumData(db_path, page_size, page_count, freelist_count, stop_time=None):
    """Raise if a vacuum of this database cannot finish in time or disk space is insufficient."""
    
    # effective db size excludes freelist pages
    db_size = (page_count - freelist_count) * page_size
    
    if stop_time is not None:
        
        approx_vacuum_duration = GetApproxVacuumDuration(db_size)
        
        # latest moment we could start and still finish before stop_time
        latest_start = stop_time - approx_vacuum_duration
        
        if HydrusData.TimeHasPassed(latest_start):
            raise Exception('I believe you need about ' + HydrusData.TimeDeltaToPrettyTimeDelta(approx_vacuum_duration) + ' to vacuum, but there is not enough time allotted.')
    
    db_dir = os.path.dirname(db_path)
    
    HydrusDBBase.CheckHasSpaceForDBTransaction(db_dir, db_size)
def DoWork(self):
    """Run this import folder: check the directory if due and import any pending files.
    
    Any exception pauses the folder and notifies the user. The folder is
    re-serialised whenever it checked, imported, or errored.
    """
    
    if HG.view_shutdown:
        return
    if HC.options['pause_import_folders_sync'] or self._paused:
        return
    checked_folder = False
    did_import_file_work = False
    error_occured = False
    # hard one-hour budget for the whole run
    stop_time = HydrusData.GetNow() + 3600
    job_key = ClientThreading.JobKey(pausable=False, cancellable=True, stop_time=stop_time)
    try:
        if not os.path.exists(self._path) or not os.path.isdir(self._path):
            raise Exception( 'Path "' + self._path + '" does not seem to exist, or is not a directory.')
        # the popup is only pubbed lazily, once there is actual work to show
        pubbed_job_key = False
        job_key.SetVariable('popup_title', 'import folder - ' + self._name)
        due_by_check_now = self._check_now
        due_by_period = self._check_regularly and HydrusData.TimeHasPassed( self._last_checked + self._period)
        if due_by_check_now or due_by_period:
            if not pubbed_job_key and self._show_working_popup:
                HG.client_controller.pub('message', job_key)
                pubbed_job_key = True
            self._CheckFolder(job_key)
            checked_folder = True
        # import any file seeds still waiting, whether or not we checked this run
        file_seed = self._file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN)
        if file_seed is not None:
            if not pubbed_job_key and self._show_working_popup:
                HG.client_controller.pub('message', job_key)
                pubbed_job_key = True
            did_import_file_work = self._ImportFiles(job_key)
    except Exception as e:
        error_occured = True
        # pause so a broken folder does not error on every cycle
        self._paused = True
        HydrusData.ShowText( 'The import folder "' + self._name + '" encountered an exception! It has been paused!')
        HydrusData.ShowException(e)
    if checked_folder or did_import_file_work or error_occured:
        HG.client_controller.WriteSynchronous('serialisable', self)
    job_key.Delete()