def __init__(self, paused=False):
    Thread.__init__(self)
    logging.debug("Initializing downloader/decoder")

    # Used for scheduled pausing
    self.paused = paused

    # Used for throttling bandwidth and scheduling bandwidth changes
    cfg.bandwidth_perc.callback(self.speed_set)
    cfg.bandwidth_max.callback(self.speed_set)
    self.speed_set()

    # Used for reducing speed
    self.delayed = False

    # Used to see if we can add a slowdown to the Downloader-loop
    self.can_be_slowed = None
    self.can_be_slowed_timer = 0

    self.postproc = False

    self.shutdown = False

    # A user might change server parms again before server restart is ready.
    # Keep a counter to prevent multiple restarts
    self.__restart = 0

    self.force_disconnect = False

    self.read_fds = {}
    self.write_fds = {}

    self.servers = []
    self._timers = {}

    for server in config.get_servers():
        self.init_server(None, server)

    self.decoder = Decoder(self.servers)
    Downloader.do = self
def __init__(self, paused=False):
    Thread.__init__(self)
    logging.debug("Initializing downloader/decoder")

    # Used for scheduled pausing
    self.paused = paused

    # Used for throttling bandwidth and scheduling bandwidth changes
    cfg.bandwidth_perc.callback(self.speed_set)
    cfg.bandwidth_max.callback(self.speed_set)
    self.speed_set()

    # Used for reducing speed
    self.delayed = False

    # Used to see if we can add a slowdown to the Downloader-loop
    self.can_be_slowed = None
    self.can_be_slowed_timer = 0

    self.postproc = False

    self.shutdown = False

    # A user might change server parms again before server restart is ready.
    # Keep a counter to prevent multiple restarts
    self.__restart = 0

    self.force_disconnect = False

    self.read_fds = {}
    self.write_fds = {}

    self.servers = []
    self.server_dict = {}  # For faster lookups, but is not updated later!
    self.server_nr = 0
    self._timers = {}

    for server in config.get_servers():
        self.init_server(None, server)

    self.decoder_queue = Queue.Queue()

    # Initialize decoders, only 1 for non-SABYenc
    self.decoder_workers = []
    nr_decoders = 2 if sabnzbd.decoder.SABYENC_ENABLED else 1
    for i in range(nr_decoders):
        self.decoder_workers.append(Decoder(self.servers, self.decoder_queue))

    Downloader.do = self
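# Illustrative sketch, not SABnzbd code: the __init__ variant above feeds one shared
# decoder_queue to one or two Decoder threads. The same worker-pool pattern, reduced to
# plain Python 2, looks roughly like this; DecoderWorker and the sentinel handling are
# assumptions for the sketch only.
import Queue
import threading

class DecoderWorker(threading.Thread):
    def __init__(self, work_queue):
        threading.Thread.__init__(self)
        self.work_queue = work_queue

    def run(self):
        while True:
            article = self.work_queue.get()
            if article is None:
                # Sentinel value: stop this worker
                break
            # ... decode the article here ...
            self.work_queue.task_done()

decoder_queue = Queue.Queue()
workers = [DecoderWorker(decoder_queue) for _ in range(2)]
for worker in workers:
    worker.start()
for worker in workers:
    decoder_queue.put(None)  # Shut the pool down again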
def __init__(self, paused=False):
    Thread.__init__(self)
    logging.debug("Initializing downloader/decoder")

    # Used for scheduled pausing
    self.paused = paused

    # Used for throttling bandwidth and scheduling bandwidth changes
    self.bandwidth_limit = cfg.bandwidth_limit()
    cfg.bandwidth_limit.callback(self.speed_set)

    t = time.time()
    self.log_time = t
    self.end_of_day = tomorrow(t)      # Time that current day will end
    self.end_of_week = next_week(t)    # Time that current week will end
    self.end_of_month = next_month(t)  # Time that current month will end
    self.ext_ip = ""

    # Used for reducing speed
    self.delayed = False

    self.postproc = False

    self.shutdown = False

    # A user might change server parms again before server restart is ready.
    # Keep a counter to prevent multiple restarts
    self.__restart = 0

    self.force_disconnect = False

    self.read_fds = {}
    self.write_fds = {}

    self.servers = []
    self._timers = {}

    for server in config.get_servers():
        self.init_server(None, server)

    self.decoder = Decoder(self.servers)
    Downloader.do = self
def initialize(pause_downloader=False, clean_up=False, evalSched=False, repair=0):
    global __INITIALIZED__, __SHUTTING_DOWN__, LOGFILE, WEBLOGFILE, LOGHANDLER, GUIHANDLER, \
        AMBI_LOCALHOST, WAITEXIT, DAEMON, MY_NAME, MY_FULLNAME, NEW_VERSION, DIR_HOME, \
        DIR_APPDATA, DIR_LCLDATA, DIR_PROG, DIR_INTERFACES, DARWIN, RESTART_REQ

    if __INITIALIZED__:
        return False

    __SHUTTING_DOWN__ = False

    # Set global database connection for Web-UI threads
    cherrypy.engine.subscribe("start_thread", get_db_connection)

    # Paused?
    pause_downloader = pause_downloader or cfg.start_paused()

    # Clean-up, if requested
    if clean_up:
        # New admin folder
        filesystem.remove_all(cfg.admin_dir.get_path(), "*.sab")

    # Optionally wait for "incomplete" to become online
    if cfg.wait_for_dfolder():
        wait_for_download_folder()
    else:
        cfg.download_dir.set(cfg.download_dir(), create=True)
    cfg.download_dir.set_create(True)

    # Set access rights for "incomplete" base folder
    filesystem.set_permissions(cfg.download_dir.get_path(), recursive=False)

    # If dirscan_dir cannot be created, set a proper value anyway.
    # Maybe it's a network path that's temporarily missing.
    path = cfg.dirscan_dir.get_path()
    if not os.path.exists(path):
        filesystem.create_real_path(cfg.dirscan_dir.ident(), "", path, False)

    # Set callbacks for Config items
    cfg.cache_limit.callback(new_limit)
    cfg.cherryhost.callback(guard_restart)
    cfg.cherryport.callback(guard_restart)
    cfg.web_dir.callback(guard_restart)
    cfg.web_color.callback(guard_restart)
    cfg.username.callback(guard_restart)
    cfg.password.callback(guard_restart)
    cfg.log_dir.callback(guard_restart)
    cfg.https_port.callback(guard_restart)
    cfg.https_cert.callback(guard_restart)
    cfg.https_key.callback(guard_restart)
    cfg.enable_https.callback(guard_restart)
    cfg.top_only.callback(guard_top_only)
    cfg.pause_on_post_processing.callback(guard_pause_on_pp)
    cfg.quota_size.callback(guard_quota_size)
    cfg.quota_day.callback(guard_quota_dp)
    cfg.quota_period.callback(guard_quota_dp)
    cfg.language.callback(guard_language)
    cfg.enable_https_verification.callback(guard_https_ver)
    guard_https_ver()

    # Set cache limit
    if not cfg.cache_limit() or (cfg.cache_limit() in ("200M", "450M") and (sabnzbd.WIN32 or sabnzbd.DARWIN)):
        cfg.cache_limit.set(misc.get_cache_limit())
    ArticleCache.do.new_limit(cfg.cache_limit.get_int())

    check_incomplete_vs_complete()

    # Set language files
    lang.set_locale_info("SABnzbd", DIR_LANGUAGE)
    lang.set_language(cfg.language())
    sabnzbd.api.clear_trans_cache()

    sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)

    # One time conversion "speedlimit" in schedules.
    if not cfg.sched_converted():
        schedules = cfg.schedules()
        newsched = []
        for sched in schedules:
            if "speedlimit" in sched:
                newsched.append(re.sub(r"(speedlimit \d+)$", r"\1K", sched))
            else:
                newsched.append(sched)
        cfg.schedules.set(newsched)
        cfg.sched_converted.set(1)

    # Second time schedule conversion
    if cfg.sched_converted() != 2:
        cfg.schedules.set(["%s %s" % (1, schedule) for schedule in cfg.schedules()])
        cfg.sched_converted.set(2)
        config.save_config()

    # Convert auto-sort
    if cfg.auto_sort() == "0":
        cfg.auto_sort.set("")
    elif cfg.auto_sort() == "1":
        cfg.auto_sort.set("avg_age asc")

    # Add hostname to the whitelist
    if not cfg.host_whitelist():
        cfg.host_whitelist.set(socket.gethostname())

    # Do repair if requested
    if check_repair_request():
        repair = 2
        pause_downloader = True

    # Initialize threads
    rss.init()

    paused = BPSMeter.do.read()

    NzbQueue()

    Downloader(pause_downloader or paused)

    Decoder()

    Assembler()

    PostProcessor()

    NzbQueue.do.read_queue(repair)

    DirScanner()

    Rating()

    URLGrabber()

    scheduler.init()

    if evalSched:
        scheduler.analyse(pause_downloader)

    logging.info("All processes started")
    RESTART_REQ = False
    __INITIALIZED__ = True
    return True
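# To make the one-time "speedlimit" schedule conversion above concrete, here is a tiny
# standalone demo of the regex. The schedule string itself is made up for illustration;
# only the trailing "speedlimit <number>" part matters to the substitution, which appends
# "K" so the old KB/s value later parses as an explicit unit.
import re

old = "0 18 1234567 speedlimit 400"
new = re.sub(r"(speedlimit \d+)$", r"\1K", old)
print(new)  # -> "0 18 1234567 speedlimit 400K"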
class Downloader(Thread):
    """ Singleton Downloader Thread """

    do = None

    def __init__(self, paused=False):
        Thread.__init__(self)
        logging.debug("Initializing downloader/decoder")

        # Used for scheduled pausing
        self.paused = paused

        # Used for throttling bandwidth and scheduling bandwidth changes
        self.bandwidth_limit = cfg.bandwidth_limit()
        cfg.bandwidth_limit.callback(self.speed_set)

        # Used for reducing speed
        self.delayed = False

        self.postproc = False

        self.shutdown = False

        # A user might change server parms again before server restart is ready.
        # Keep a counter to prevent multiple restarts
        self.__restart = 0

        self.force_disconnect = False

        self.read_fds = {}
        self.write_fds = {}

        self.servers = []
        self._timers = {}

        for server in config.get_servers():
            self.init_server(None, server)

        self.decoder = Decoder(self.servers)
        Downloader.do = self

    def init_server(self, oldserver, newserver):
        """ Setup or re-setup single server
            When oldserver is defined and in use, delay startup.
            Return True when newserver is primary
            Note that the server names are "host:port" strings!
        """
        primary = False
        create = False

        servers = config.get_servers()
        if newserver in servers:
            srv = servers[newserver]
            enabled = srv.enable()
            host = srv.host()
            port = srv.port()
            timeout = srv.timeout()
            threads = srv.connections()
            fillserver = srv.fillserver()
            primary = enabled and (not fillserver) and (threads > 0)
            ssl = srv.ssl() and sabnzbd.newswrapper.HAVE_SSL
            username = srv.username()
            password = srv.password()
            optional = srv.optional()
            retention = float(srv.retention() * 24 * 3600)  # days ==> seconds
            create = True

        if oldserver:
            for n in xrange(len(self.servers)):
                if self.servers[n].id == oldserver:
                    # Server exists, do re-init later
                    create = False
                    self.servers[n].newid = newserver
                    self.servers[n].restart = True
                    self.__restart += 1
                    break

        if create and enabled and host and port and threads:
            self.servers.append(Server(newserver, host, port, timeout, threads, fillserver, ssl,
                                       username, password, optional, retention))

        return primary

    @synchronized_CV
    def set_paused_state(self, state):
        """ Set Downloader to specified paused state """
        self.paused = state

    @synchronized_CV
    def resume(self):
        logging.info("Resuming")
        self.paused = False

    @synchronized_CV
    def pause(self, save=True):
        """ Pause the downloader, optionally saving admin """
        if not self.paused:
            self.paused = True
            logging.info("Pausing")
            growler.send_notification("SABnzbd", T('Paused'), 'download')
            if self.is_paused():
                BPSMeter.do.reset()
            if cfg.autodisconnect():
                self.disconnect()
            if save:
                sabnzbd.save_state()

    @synchronized_CV
    def delay(self):
        logging.debug("Delaying")
        self.delayed = True

    @synchronized_CV
    def undelay(self):
        logging.debug("Undelaying")
        self.delayed = False

    @synchronized_CV
    def wait_for_postproc(self):
        logging.info("Waiting for post-processing to finish")
        self.postproc = True

    @synchronized_CV
    def resume_from_postproc(self):
        logging.info("Post-processing finished, resuming download")
        self.postproc = False

    def disconnect(self):
        self.force_disconnect = True

    @synchronized_CV
    def limit_speed(self, value):
        self.bandwidth_limit = int(value)
        logging.info("Bandwidth limit set to %s", value)

    def get_limit(self):
        return self.bandwidth_limit

    def speed_set(self):
        self.bandwidth_limit = cfg.bandwidth_limit()

    def is_paused(self):
        from sabnzbd.nzbqueue import NzbQueue
        if not self.paused:
            return False
        else:
            if NzbQueue.do.has_forced_items():
                return False
            else:
                return True

    def active_primaries(self):
        """ Check if any primary server is defined and active """
        for server in self.servers:
            if server.active and not server.fillserver:
                return True
        return False

    def maybe_block_server(self, server):
        from sabnzbd.nzbqueue import NzbQueue
        if server.optional and server.active and (server.bad_cons / server.threads) > 3:
            # Optional and active server had too many problems,
            # disable it now and send a re-enable plan to the scheduler
            server.bad_cons = 0
            server.active = False
            server.errormsg = T('Server %s will be ignored for %s minutes') % ('', _PENALTY_TIMEOUT)
            logging.warning(Ta('Server %s will be ignored for %s minutes'), server.id, _PENALTY_TIMEOUT)
            self.plan_server(server.id, _PENALTY_TIMEOUT)

            # Remove all connections to server
            for nw in server.idle_threads + server.busy_threads:
                self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, quit=False)
            # Make sure server address resolution is refreshed
            server.info = None

            NzbQueue.do.reset_all_try_lists()

    def run(self):
        from sabnzbd.nzbqueue import NzbQueue
        self.decoder.start()

        # Kick BPS-Meter to check quota
        BPSMeter.do.update()

        while 1:
            for server in self.servers:
                assert isinstance(server, Server)
                for nw in server.busy_threads[:]:
                    if (nw.nntp and nw.nntp.error_msg) or (nw.timeout and time.time() > nw.timeout):
                        if nw.nntp and nw.nntp.error_msg:
                            self.__reset_nw(nw, "", warn=False)
                        else:
                            self.__reset_nw(nw, "timed out")
                        server.bad_cons += 1
                        self.maybe_block_server(server)

                if server.restart:
                    if not server.busy_threads:
                        newid = server.newid
                        server.stop(self.read_fds, self.write_fds)
                        self.servers.remove(server)
                        if newid:
                            self.init_server(None, newid)
                        self.__restart -= 1
                        NzbQueue.do.reset_all_try_lists()
                        # Have to leave this loop, because we removed element
                        break
                    else:
                        # Restart pending, don't add new articles
                        continue

                assert isinstance(server, Server)
                if not server.idle_threads or server.restart or self.is_paused() or self.shutdown or \
                   self.delayed or self.postproc:
                    continue

                if not (server.active and NzbQueue.do.has_articles_for(server)):
                    continue

                for nw in server.idle_threads[:]:
                    assert isinstance(nw, NewsWrapper)
                    if nw.timeout:
                        if time.time() < nw.timeout:
                            continue
                        else:
                            nw.timeout = None

                    if not server.active:
                        break

                    if server.info is None:
                        self.maybe_block_server(server)
                        request_server_info(server)
                        break

                    article = NzbQueue.do.get_article(server)

                    if not article:
                        break

                    if server.retention and article.nzf.nzo.avg_stamp < time.time() - server.retention:
                        # Article too old for the server, treat as missing
                        if sabnzbd.LOG_ALL:
                            logging.debug('Article %s too old for %s:%s', article.article, server.host, server.port)
                        self.decoder.decode(article, None)
                        break

                    server.idle_threads.remove(nw)
                    server.busy_threads.append(nw)

                    nw.article = article

                    if nw.connected:
                        self.__request_article(nw)
                    else:
                        try:
                            logging.info("%s@%s:%s: Initiating connection", nw.thrdnum, server.host, server.port)
                            nw.init_connect(self.write_fds)
                        except:
                            logging.error(Ta('Failed to initialize %s@%s:%s'), nw.thrdnum, server.host, server.port)
                            logging.info("Traceback: ", exc_info=True)
                            self.__reset_nw(nw, "failed to initialize")

            # Exit-point
            if self.shutdown:
                empty = True
                for server in self.servers:
                    if server.busy_threads:
                        empty = False
                        break

                if empty:
                    self.decoder.stop()
                    self.decoder.join()

                    for server in self.servers:
                        server.stop(self.read_fds, self.write_fds)

                    logging.info("Shutting down")
                    break

            if self.force_disconnect:
                for server in self.servers:
                    for nw in server.idle_threads + server.busy_threads:
                        quit = nw.connected and server.active
                        self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, quit=quit)
                    # Make sure server address resolution is refreshed
                    server.info = None

                self.force_disconnect = False

            # => Select
            readkeys = self.read_fds.keys()
            writekeys = self.write_fds.keys()

            if readkeys or writekeys:
                read, write, error = select.select(readkeys, writekeys, (), 1.0)
            else:
                read, write, error = ([], [], [])

                BPSMeter.do.reset()

                time.sleep(1.0)

                CV.acquire()
                while (NzbQueue.do.is_empty() or self.is_paused() or self.delayed or self.postproc) and not \
                       self.shutdown and not self.__restart:
                    CV.wait()
                CV.release()

                self.force_disconnect = False

            for selected in write:
                nw = self.write_fds[selected]

                fileno = nw.nntp.sock.fileno()

                if fileno not in self.read_fds:
                    self.read_fds[fileno] = nw

                if fileno in self.write_fds:
                    self.write_fds.pop(fileno)

            if not read:
                BPSMeter.do.update()
                continue

            for selected in read:
                nw = self.read_fds[selected]
                article = nw.article
                server = nw.server

                if article:
                    nzo = article.nzf.nzo

                try:
                    bytes, done, skip = nw.recv_chunk()
                except:
                    bytes, done, skip = (0, False, False)

                if skip:
                    BPSMeter.do.update()
                    continue

                if bytes < 1:
                    self.__reset_nw(nw, "server closed connection", warn=False, wait=False)
                    continue

                else:
                    if self.bandwidth_limit:
                        bps = BPSMeter.do.get_bps()
                        bps += bytes
                        limit = self.bandwidth_limit * 1024
                        if bps > limit:
                            while BPSMeter.do.get_bps() > limit:
                                time.sleep(0.05)
                                BPSMeter.do.update()
                    BPSMeter.do.update(server.id, bytes)

                    if nzo:
                        nzo.bytes_downloaded += bytes
                        nzo.update_avg_kbs(BPSMeter.do.get_bps())

                if len(nw.lines) == 1:
                    code = nw.lines[0][:3]
                    if not nw.connected or code == '480':
                        done = False

                        try:
                            nw.finish_connect(code)
                            if sabnzbd.LOG_ALL:
                                logging.debug("%s@%s:%s last message -> %s", nw.thrdnum, nw.server.host,
                                              nw.server.port, nw.lines[0])
                            nw.lines = []
                            nw.data = ''
                        except NNTPPermanentError, error:
                            # Handle login problems
                            block = False
                            penalty = 0
                            msg = error.response
                            ecode = msg[:3]
                            display_msg = ' [%s]' % msg
                            logging.debug('Server login problem: %s, %s', ecode, msg)
                            if ((ecode in ('502', '400')) and clues_too_many(msg)) or \
                               (ecode == '481' and clues_too_many(msg)):
                                # Too many connections: remove this thread and reduce thread-setting for server
                                # Plan to go back to the full number after a penalty timeout
                                if server.active:
                                    server.errormsg = Ta('Too many connections to server %s:%s') % ('', display_msg)
                                    logging.error(Ta('Too many connections to server %s:%s'), server.host, server.port)
                                self.__reset_nw(nw, None, warn=False, destroy=True, quit=True)
                                self.plan_server(server.id, _PENALTY_TOOMANY)
                                server.threads -= 1
                            elif ecode in ('502', '481') and clues_too_many_ip(msg):
                                # Account sharing?
                                if server.active:
                                    server.errormsg = Ta('Probable account sharing') + display_msg
                                    name = ' (%s:%s)' % (server.host, server.port)
                                    logging.error(Ta('Probable account sharing') + name)
                                penalty = _PENALTY_SHARE
                            elif ecode in ('481', '482', '381') or (ecode == '502' and clues_login(msg)):
                                # Cannot login, block this server
                                if server.active:
                                    server.errormsg = Ta('Failed login for server %s') % display_msg
                                    logging.error(Ta('Failed login for server %s'), '%s:%s' % (server.host, server.port))
                                penalty = _PENALTY_PERM
                                block = True
                            elif ecode == '502':
                                # Cannot connect (other reasons), block this server
                                if server.active:
                                    server.errormsg = Ta('Cannot connect to server %s [%s]') % ('', display_msg)
                                    logging.warning(Ta('Cannot connect to server %s [%s]'),
                                                    '%s:%s' % (server.host, server.port), msg)
                                if clues_pay(msg):
                                    penalty = _PENALTY_PERM
                                else:
                                    penalty = _PENALTY_502
                                block = True
                            else:
                                # Unknown error, just keep trying
                                if server.active:
                                    server.errormsg = Ta('Cannot connect to server %s [%s]') % ('', display_msg)
                                    logging.error(Ta('Cannot connect to server %s [%s]'),
                                                  '%s:%s' % (server.host, server.port), msg)
                                penalty = _PENALTY_UNKNOWN
                            if block or (penalty and server.optional):
                                if server.active:
                                    server.active = False
                                    if (not server.optional) and cfg.no_penalties():
                                        penalty = _PENALTY_SHORT
                                    if penalty and (block or server.optional):
                                        logging.info('Server %s ignored for %s minutes', server.id, penalty)
                                        self.plan_server(server.id, penalty)
                                    NzbQueue.do.reset_all_try_lists()
                            self.__reset_nw(nw, None, warn=False, quit=True)
                            continue
                        except:
                            logging.error(Ta('Connecting %s@%s:%s failed, message=%s'),
                                          nw.thrdnum, nw.server.host, nw.server.port, nw.lines[0])
                            # No reset-warning needed, above logging is sufficient
                            self.__reset_nw(nw, None, warn=False)

                        if nw.connected:
                            logging.info("Connecting %s@%s:%s finished", nw.thrdnum, nw.server.host, nw.server.port)
                            self.__request_article(nw)

                    elif code == '223':
                        done = True
                        logging.debug('Article <%s> is present', article.article)
                        self.decoder.decode(article, nw.lines)
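# Illustrative sketch only, not SABnzbd code: the block-and-retry policy used by
# maybe_block_server()/plan_server() above, reduced to plain Python. A threading.Timer
# stands in for the real scheduler, and the function names are assumptions for this sketch.
import threading

_PENALTY_TIMEOUT = 10  # minutes

def plan_reenable(reactivate, minutes):
    # Stand-in for the scheduler: call `reactivate` once the penalty window expires.
    threading.Timer(minutes * 60, reactivate).start()

def maybe_block(server):
    # Block an optional server once it averages more than 3 bad connections per
    # configured thread, then plan a re-enable after the penalty timeout.
    if server.optional and server.active and (server.bad_cons / server.threads) > 3:
        server.bad_cons = 0
        server.active = False
        plan_reenable(lambda: setattr(server, "active", True), _PENALTY_TIMEOUT)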
class Downloader(Thread):
    """ Singleton Downloader Thread """

    do = None

    def __init__(self, paused=False):
        Thread.__init__(self)
        logging.debug("Initializing downloader/decoder")

        # Used for scheduled pausing
        self.paused = paused

        # Used for throttling bandwidth and scheduling bandwidth changes
        cfg.bandwidth_perc.callback(self.speed_set)
        cfg.bandwidth_max.callback(self.speed_set)
        self.speed_set()

        # Used for reducing speed
        self.delayed = False

        # Used to see if we can add a slowdown to the Downloader-loop
        self.can_be_slowed = None
        self.can_be_slowed_timer = 0

        self.postproc = False

        self.shutdown = False

        # A user might change server parms again before server restart is ready.
        # Keep a counter to prevent multiple restarts
        self.__restart = 0

        self.force_disconnect = False

        self.read_fds = {}
        self.write_fds = {}

        self.servers = []
        self._timers = {}

        for server in config.get_servers():
            self.init_server(None, server)

        self.decoder = Decoder(self.servers)
        Downloader.do = self

    def init_server(self, oldserver, newserver):
        """ Setup or re-setup single server
            When oldserver is defined and in use, delay startup.
            Note that the server names are "host:port" strings!
        """
        create = False

        servers = config.get_servers()
        if newserver in servers:
            srv = servers[newserver]
            enabled = srv.enable()
            displayname = srv.displayname()
            host = srv.host()
            port = srv.port()
            timeout = srv.timeout()
            threads = srv.connections()
            priority = srv.priority()
            ssl = srv.ssl() and sabnzbd.HAVE_SSL
            ssl_verify = srv.ssl_verify()
            username = srv.username()
            password = srv.password()
            optional = srv.optional()
            categories = srv.categories()
            retention = float(srv.retention() * 24 * 3600)  # days ==> seconds
            send_group = srv.send_group()
            create = True

        if oldserver:
            for n in xrange(len(self.servers)):
                if self.servers[n].id == oldserver:
                    # Server exists, do re-init later
                    create = False
                    self.servers[n].newid = newserver
                    self.servers[n].restart = True
                    self.__restart += 1
                    break

        if create and enabled and host and port and threads:
            self.servers.append(Server(newserver, displayname, host, port, timeout, threads, priority, ssl,
                                       ssl_verify, send_group, username, password, optional, retention,
                                       categories=categories))
        return

    @synchronized_CV
    def set_paused_state(self, state):
        """ Set downloader to specified paused state """
        self.paused = state

    @synchronized_CV
    def resume(self):
        # Do not notify when SABnzbd is still starting
        if self.paused and sabnzbd.WEB_DIR:
            logging.info("Resuming")
            notifier.send_notification("SABnzbd", T('Resuming'), 'download')
        self.paused = False

    @synchronized_CV
    def pause(self, save=True):
        """ Pause the downloader, optionally saving admin """
        if not self.paused:
            self.paused = True
            self.can_be_slowed = None
            logging.info("Pausing")
            notifier.send_notification("SABnzbd", T('Paused'), 'download')
            if self.is_paused():
                BPSMeter.do.reset()
            if cfg.autodisconnect():
                self.disconnect()
            if save:
                ArticleCache.do.flush_articles()

    @synchronized_CV
    def delay(self):
        logging.debug("Delaying")
        self.delayed = True

    @synchronized_CV
    def undelay(self):
        logging.debug("Undelaying")
        self.delayed = False

    @synchronized_CV
    def wait_for_postproc(self):
        logging.info("Waiting for post-processing to finish")
        self.postproc = True

    @synchronized_CV
    def resume_from_postproc(self):
        logging.info("Post-processing finished, resuming download")
        self.postproc = False

    def disconnect(self):
        self.force_disconnect = True

    @synchronized_CV
    def limit_speed(self, value):
        """ Set the actual download speed in Bytes/sec
            When 'value' ends with a '%' sign or is within 1-100, it is interpreted as a
            percentage of the maximum bandwidth
            When no '%' is found, it is interpreted as an absolute speed (including KMGT notation).
        """
        if value:
            mx = cfg.bandwidth_max.get_int()
            if '%' in str(value) or (from_units(value) > 0 and from_units(value) < 101):
                limit = value.strip(' %')
                self.bandwidth_perc = from_units(limit)
                if mx:
                    self.bandwidth_limit = mx * self.bandwidth_perc / 100
                else:
                    logging.warning(T('You must set a maximum bandwidth before you can set a bandwidth limit'))
            else:
                self.bandwidth_limit = from_units(value)
                if mx:
                    self.bandwidth_perc = self.bandwidth_limit / mx * 100
                else:
                    self.bandwidth_perc = 100
        else:
            self.speed_set()
        logging.info("Speed limit set to %s B/s", self.bandwidth_limit)
        self.can_be_slowed = None

    def get_limit(self):
        return self.bandwidth_perc

    def get_limit_abs(self):
        return self.bandwidth_limit

    def speed_set(self):
        limit = cfg.bandwidth_max.get_int()
        perc = cfg.bandwidth_perc()
        if limit and perc:
            self.bandwidth_perc = perc
            self.bandwidth_limit = limit * perc / 100
        else:
            self.bandwidth_perc = 0
            self.bandwidth_limit = 0

    def is_paused(self):
        if not self.paused:
            return False
        else:
            if sabnzbd.nzbqueue.NzbQueue.do.has_forced_items():
                return False
            else:
                return True

    def highest_server(self, me):
        """ Return True when this server has the highest priority of the active ones
            0 is the highest priority
        """
        for server in self.servers:
            if server is not me and server.active and server.priority < me.priority:
                return False
        return True

    def nzo_servers(self, nzo):
        return filter(nzo.server_in_try_list, self.servers)

    def maybe_block_server(self, server):
        if server.optional and server.active and (server.bad_cons / server.threads) > 3:
            # Optional and active server had too many problems,
            # disable it now and send a re-enable plan to the scheduler
            server.bad_cons = 0
            server.active = False
            server.errormsg = T('Server %s will be ignored for %s minutes') % ('', _PENALTY_TIMEOUT)
            logging.warning(T('Server %s will be ignored for %s minutes'), server.id, _PENALTY_TIMEOUT)
            self.plan_server(server.id, _PENALTY_TIMEOUT)

            # Remove all connections to server
            for nw in server.idle_threads + server.busy_threads:
                self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, quit=False)
            # Make sure server address resolution is refreshed
            server.info = None

            sabnzbd.nzbqueue.NzbQueue.do.reset_all_try_lists()

    def run(self):
        # First check IPv6 connectivity
        sabnzbd.EXTERNAL_IPV6 = sabnzbd.test_ipv6()
        logging.debug('External IPv6 test result: %s', sabnzbd.EXTERNAL_IPV6)

        # Then have to check the quality of SSL verification
        if sabnzbd.HAVE_SSL_CONTEXT:
            try:
                import ssl
                ctx = ssl.create_default_context()
                base_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                ssl_sock = ctx.wrap_socket(base_sock, server_hostname=cfg.selftest_host())
                ssl_sock.settimeout(2.0)
                ssl_sock.connect((cfg.selftest_host(), 443))
                ssl_sock.close()
            except:
                # Seems something is still wrong
                sabnzbd.set_https_verification(0)
                sabnzbd.HAVE_SSL_CONTEXT = False
        logging.debug('SSL verification test: %s', sabnzbd.HAVE_SSL_CONTEXT)

        # Start decoder
        self.decoder.start()

        # Kick BPS-Meter to check quota
        BPSMeter.do.update()

        while 1:
            for server in self.servers:
                if 0:
                    assert isinstance(server, Server)  # Assert only for debug purposes
                for nw in server.busy_threads[:]:
                    if (nw.nntp and nw.nntp.error_msg) or (nw.timeout and time.time() > nw.timeout):
                        if nw.nntp and nw.nntp.error_msg:
                            self.__reset_nw(nw, "", warn=False)
                        else:
                            self.__reset_nw(nw, "timed out")
                        server.bad_cons += 1
                        self.maybe_block_server(server)

                if server.restart:
                    if not server.busy_threads:
                        newid = server.newid
                        server.stop(self.read_fds, self.write_fds)
                        self.servers.remove(server)
                        if newid:
                            self.init_server(None, newid)
                        self.__restart -= 1
                        sabnzbd.nzbqueue.NzbQueue.do.reset_all_try_lists()
                        # Have to leave this loop, because we removed element
                        break
                    else:
                        # Restart pending, don't add new articles
                        continue

                if 0:
                    assert isinstance(server, Server)  # Assert only for debug purposes
                if not server.idle_threads or server.restart or self.is_paused() or self.shutdown or \
                   self.delayed or self.postproc:
                    continue

                if not (server.active and sabnzbd.nzbqueue.NzbQueue.do.has_articles_for(server)):
                    continue

                for nw in server.idle_threads[:]:
                    if 0:
                        assert isinstance(nw, NewsWrapper)  # Assert only for debug purposes
                    if nw.timeout:
                        if time.time() < nw.timeout:
                            continue
                        else:
                            nw.timeout = None

                    if not server.active:
                        break

                    if server.info is None:
                        self.maybe_block_server(server)
                        request_server_info(server)
                        break

                    article = sabnzbd.nzbqueue.NzbQueue.do.get_article(server, self.servers)

                    if not article:
                        break

                    if server.retention and article.nzf.nzo.avg_stamp < time.time() - server.retention:
                        # Article too old for the server, treat as missing
                        if sabnzbd.LOG_ALL:
                            logging.debug('Article %s too old for %s', article.article, server.id)
                        self.decoder.decode(article, None)
                        break

                    server.idle_threads.remove(nw)
                    server.busy_threads.append(nw)

                    nw.article = article

                    if nw.connected:
                        self.__request_article(nw)
                    else:
                        try:
                            logging.info("%s@%s: Initiating connection", nw.thrdnum, server.id)
                            nw.init_connect(self.write_fds)
                        except:
                            logging.error(T('Failed to initialize %s@%s with reason: %s'),
                                          nw.thrdnum, server.id, sys.exc_info()[1])
                            self.__reset_nw(nw, "failed to initialize")

            # Exit-point
            if self.shutdown:
                empty = True
                for server in self.servers:
                    if server.busy_threads:
                        empty = False
                        break

                if empty:
                    self.decoder.stop()
                    self.decoder.join()

                    for server in self.servers:
                        server.stop(self.read_fds, self.write_fds)

                    logging.info("Shutting down")
                    break

            if self.force_disconnect:
                for server in self.servers:
                    for nw in server.idle_threads + server.busy_threads:
                        quit = nw.connected and server.active
                        self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, quit=quit)
                    # Make sure server address resolution is refreshed
                    server.info = None

                self.force_disconnect = False

            # => Select
            readkeys = self.read_fds.keys()
            writekeys = self.write_fds.keys()

            if readkeys or writekeys:
                read, write, error = select.select(readkeys, writekeys, (), 1.0)

                # Why check so often when so few things happened?
                if self.can_be_slowed and len(readkeys) >= 8 and len(read) <= 2:
                    time.sleep(0.01)

                # Need to initialize the check during first 20 seconds
                if self.can_be_slowed is None or self.can_be_slowed_timer:
                    # Wait for stable speed to start testing
                    if not self.can_be_slowed_timer and BPSMeter.do.get_stable_speed(timespan=10):
                        self.can_be_slowed_timer = time.time()

                    # Check 10 seconds after enabling slowdown
                    if self.can_be_slowed_timer and time.time() > self.can_be_slowed_timer + 10:
                        # Now let's check if it was stable in the last 10 seconds
                        self.can_be_slowed = (BPSMeter.do.get_stable_speed(timespan=10) > 0)
                        self.can_be_slowed_timer = 0
                        logging.debug('Downloader-slowdown: %r', self.can_be_slowed)
            else:
                read, write, error = ([], [], [])

                BPSMeter.do.reset()

                time.sleep(1.0)

                CV.acquire()
                while (sabnzbd.nzbqueue.NzbQueue.do.is_empty() or self.is_paused() or self.delayed or
                       self.postproc) and not self.shutdown and not self.__restart:
                    CV.wait()
                CV.release()

                self.force_disconnect = False

            for selected in write:
                nw = self.write_fds[selected]

                fileno = nw.nntp.sock.fileno()

                if fileno not in self.read_fds:
                    self.read_fds[fileno] = nw

                if fileno in self.write_fds:
                    self.write_fds.pop(fileno)

            if not read:
                BPSMeter.do.update()
                continue

            for selected in read:
                nw = self.read_fds[selected]
                article = nw.article
                server = nw.server

                if article:
                    nzo = article.nzf.nzo

                try:
                    bytes, done, skip = nw.recv_chunk()
                except:
                    bytes, done, skip = (0, False, False)

                if skip:
                    BPSMeter.do.update()
                    continue

                if bytes < 1:
                    self.__reset_nw(nw, "server closed connection", warn=False, wait=False)
                    continue

                else:
                    if self.bandwidth_limit:
                        bps = BPSMeter.do.get_bps()
                        bps += bytes
                        limit = self.bandwidth_limit
                        if bps > limit:
                            while BPSMeter.do.get_bps() > limit:
                                time.sleep(0.05)
                                BPSMeter.do.update()
                    BPSMeter.do.update(server.id, bytes)

                    if nzo:
                        nzo.update_download_stats(BPSMeter.do.get_bps(), server.id, bytes)

                if len(nw.lines) == 1:
                    code = nw.lines[0][:3]
                    if not nw.connected or code == '480':
                        done = False

                        try:
                            nw.finish_connect(code)
                            if sabnzbd.LOG_ALL:
                                logging.debug("%s@%s last message -> %s", nw.thrdnum, nw.server.id, nw.lines[0])
                            nw.lines = []
                            nw.data = ''
                        except NNTPPermanentError, error:
                            # Handle login problems
                            block = False
                            penalty = 0
                            msg = error.response
                            ecode = msg[:3]
                            display_msg = ' [%s]' % msg
                            logging.debug('Server login problem: %s, %s', ecode, msg)
                            if ecode in ('502', '400', '481', '482') and clues_too_many(msg):
                                # Too many connections: remove this thread and reduce thread-setting for server
                                # Plan to go back to the full number after a penalty timeout
                                if server.active:
                                    errormsg = T('Too many connections to server %s') % display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T('Too many connections to server %s'), server.id)
                                self.__reset_nw(nw, None, warn=False, destroy=True, quit=True)
                                self.plan_server(server.id, _PENALTY_TOOMANY)
                                server.threads -= 1
                            elif ecode in ('502', '481', '482') and clues_too_many_ip(msg):
                                # Account sharing?
                                if server.active:
                                    errormsg = T('Probable account sharing') + display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        name = ' (%s)' % server.id
                                        logging.warning(T('Probable account sharing') + name)
                                penalty = _PENALTY_SHARE
                            elif ecode in ('481', '482', '381') or (ecode == '502' and clues_login(msg)):
                                # Cannot login, block this server
                                if server.active:
                                    errormsg = T('Failed login for server %s') % display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.error(T('Failed login for server %s'), server.id)
                                penalty = _PENALTY_PERM
                                block = True
                            elif ecode == '502':
                                # Cannot connect (other reasons), block this server
                                if server.active:
                                    errormsg = T('Cannot connect to server %s [%s]') % ('', display_msg)
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T('Cannot connect to server %s [%s]'), server.id, msg)
                                if clues_pay(msg):
                                    penalty = _PENALTY_PERM
                                else:
                                    penalty = _PENALTY_502
                                block = True
                            elif ecode == '400':
                                # Temp connection problem?
                                if server.active:
                                    logging.debug('Unspecified error 400 from server %s', server.id)
                                penalty = _PENALTY_VERYSHORT
                                block = True
                            else:
                                # Unknown error, just keep trying
                                if server.active:
                                    errormsg = T('Cannot connect to server %s [%s]') % ('', display_msg)
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T('Cannot connect to server %s [%s]'), server.id, msg)
                                penalty = _PENALTY_UNKNOWN
                            if block or (penalty and server.optional):
                                if server.active:
                                    server.active = False
                                    if penalty and (block or server.optional):
                                        self.plan_server(server.id, penalty)
                                    sabnzbd.nzbqueue.NzbQueue.do.reset_all_try_lists()
                            self.__reset_nw(nw, None, warn=False, quit=True)
                            continue
                        except:
                            logging.error(T('Connecting %s@%s failed, message=%s'),
                                          nw.thrdnum, nw.server.id, nw.lines[0])
                            # No reset-warning needed, above logging is sufficient
                            self.__reset_nw(nw, None, warn=False)

                        if nw.connected:
                            logging.info("Connecting %s@%s finished", nw.thrdnum, nw.server.id)
                            self.__request_article(nw)

                    elif code == '223':
                        done = True
                        logging.debug('Article <%s> is present', article.article)
                        self.decoder.decode(article, nw.lines)
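# Simplified sketch of the bandwidth throttle used inside run() above: after a chunk is
# received, the reader stalls in 50 ms steps until the measured rate falls back under the
# configured limit. `get_bps` and `update` are stand-ins for the BPSMeter calls; the
# function name is an assumption for this sketch.
import time

def throttle(get_bps, update, chunk_size, limit_bps):
    # Only throttle when a limit is set and this chunk would push us over it
    if limit_bps and get_bps() + chunk_size > limit_bps:
        while get_bps() > limit_bps:
            time.sleep(0.05)
            update()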
class Downloader(Thread):
    """ Singleton Downloader Thread """

    do = None

    def __init__(self, paused=False):
        Thread.__init__(self)
        logging.debug("Initializing downloader/decoder")

        # Used for scheduled pausing
        self.paused = paused

        # Used for throttling bandwidth and scheduling bandwidth changes
        cfg.bandwidth_perc.callback(self.speed_set)
        cfg.bandwidth_max.callback(self.speed_set)
        self.speed_set()

        # Used for reducing speed
        self.delayed = False

        self.postproc = False

        self.shutdown = False

        # A user might change server parms again before server restart is ready.
        # Keep a counter to prevent multiple restarts
        self.__restart = 0

        self.force_disconnect = False

        self.read_fds = {}
        self.write_fds = {}

        self.servers = []
        self._timers = {}

        for server in config.get_servers():
            self.init_server(None, server)

        self.decoder = Decoder(self.servers)
        Downloader.do = self

    def init_server(self, oldserver, newserver):
        """ Setup or re-setup single server
            When oldserver is defined and in use, delay startup.
            Note that the server names are "host:port" strings!
        """
        create = False

        servers = config.get_servers()
        if newserver in servers:
            srv = servers[newserver]
            enabled = srv.enable()
            displayname = srv.displayname()
            host = srv.host()
            port = srv.port()
            timeout = srv.timeout()
            threads = srv.connections()
            priority = srv.priority()
            ssl = srv.ssl() and sabnzbd.newswrapper.HAVE_SSL
            ssl_type = srv.ssl_type()
            username = srv.username()
            password = srv.password()
            optional = srv.optional()
            categories = srv.categories()
            retention = float(srv.retention() * 24 * 3600)  # days ==> seconds
            send_group = srv.send_group()
            create = True

        if oldserver:
            for n in xrange(len(self.servers)):
                if self.servers[n].id == oldserver:
                    # Server exists, do re-init later
                    create = False
                    self.servers[n].newid = newserver
                    self.servers[n].restart = True
                    self.__restart += 1
                    break

        if create and enabled and host and port and threads:
            self.servers.append(Server(newserver, displayname, host, port, timeout, threads, priority, ssl,
                                       ssl_type, send_group, username, password, optional, retention,
                                       categories=categories))
        return

    @synchronized_CV
    def set_paused_state(self, state):
        """ Set downloader to specified paused state """
        self.paused = state

    @synchronized_CV
    def resume(self):
        logging.info("Resuming")
        if self.paused:
            growler.send_notification("SABnzbd", T("Resuming"), "download")
        self.paused = False

    @synchronized_CV
    def pause(self, save=True):
        """ Pause the downloader, optionally saving admin """
        if not self.paused:
            self.paused = True
            logging.info("Pausing")
            growler.send_notification("SABnzbd", T("Paused"), "download")
            if self.is_paused():
                BPSMeter.do.reset()
            if cfg.autodisconnect():
                self.disconnect()
            if save:
                sabnzbd.save_state()

    @synchronized_CV
    def delay(self):
        logging.debug("Delaying")
        self.delayed = True

    @synchronized_CV
    def undelay(self):
        logging.debug("Undelaying")
        self.delayed = False

    @synchronized_CV
    def wait_for_postproc(self):
        logging.info("Waiting for post-processing to finish")
        self.postproc = True

    @synchronized_CV
    def resume_from_postproc(self):
        logging.info("Post-processing finished, resuming download")
        self.postproc = False

    def disconnect(self):
        self.force_disconnect = True

    @synchronized_CV
    def limit_speed(self, value):
        """ Set the actual download speed in Bytes/sec
            When 'value' ends with a '%' sign or is within 1-100, it is interpreted as a
            percentage of the maximum bandwidth
            When no '%' is found, it is interpreted as an absolute speed (including KMGT notation).
        """
        if value:
            mx = cfg.bandwidth_max.get_int()
            if "%" in str(value) or (from_units(value) > 0 and from_units(value) < 101):
                limit = value.strip(" %")
                self.bandwidth_perc = from_units(limit)
                if mx:
                    self.bandwidth_limit = mx * self.bandwidth_perc / 100
                else:
                    logging.warning(T("You must set a maximum bandwidth before you can set a bandwidth limit"))
            else:
                self.bandwidth_limit = from_units(value)
                if mx:
                    self.bandwidth_perc = self.bandwidth_limit / mx * 100
                else:
                    self.bandwidth_perc = 100
        else:
            self.speed_set()
        logging.info("Speed limit set to %s B/s", self.bandwidth_limit)

    def get_limit(self):
        return self.bandwidth_perc

    def get_limit_abs(self):
        return self.bandwidth_limit

    def speed_set(self):
        limit = cfg.bandwidth_max.get_int()
        perc = cfg.bandwidth_perc()
        if limit and perc:
            self.bandwidth_perc = perc
            self.bandwidth_limit = limit * perc / 100
        else:
            self.bandwidth_perc = 0
            self.bandwidth_limit = 0

    def is_paused(self):
        from sabnzbd.nzbqueue import NzbQueue
        if not self.paused:
            return False
        else:
            if NzbQueue.do.has_forced_items():
                return False
            else:
                return True

    def highest_server(self, me):
        """ Return True when this server has the highest priority of the active ones
            0 is the highest priority
        """
        for server in self.servers:
            if server is not me and server.active and server.priority < me.priority:
                return False
        return True

    def nzo_servers(self, nzo):
        return filter(nzo.server_in_try_list, self.servers)

    def maybe_block_server(self, server):
        from sabnzbd.nzbqueue import NzbQueue
        if server.optional and server.active and (server.bad_cons / server.threads) > 3:
            # Optional and active server had too many problems,
            # disable it now and send a re-enable plan to the scheduler
            server.bad_cons = 0
            server.active = False
            server.errormsg = T("Server %s will be ignored for %s minutes") % ("", _PENALTY_TIMEOUT)
            logging.warning(T("Server %s will be ignored for %s minutes"), server.id, _PENALTY_TIMEOUT)
            self.plan_server(server.id, _PENALTY_TIMEOUT)

            # Remove all connections to server
            for nw in server.idle_threads + server.busy_threads:
                self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, quit=False)
            # Make sure server address resolution is refreshed
            server.info = None

            NzbQueue.do.reset_all_try_lists()

    def run(self):
        from sabnzbd.nzbqueue import NzbQueue
        self.decoder.start()

        # Kick BPS-Meter to check quota
        BPSMeter.do.update()

        while 1:
            for server in self.servers:
                assert isinstance(server, Server)
                for nw in server.busy_threads[:]:
                    if (nw.nntp and nw.nntp.error_msg) or (nw.timeout and time.time() > nw.timeout):
                        if nw.nntp and nw.nntp.error_msg:
                            self.__reset_nw(nw, "", warn=False)
                        else:
                            self.__reset_nw(nw, "timed out")
                        server.bad_cons += 1
                        self.maybe_block_server(server)

                if server.restart:
                    if not server.busy_threads:
                        newid = server.newid
                        server.stop(self.read_fds, self.write_fds)
                        self.servers.remove(server)
                        if newid:
                            self.init_server(None, newid)
                        self.__restart -= 1
                        NzbQueue.do.reset_all_try_lists()
                        # Have to leave this loop, because we removed element
                        break
                    else:
                        # Restart pending, don't add new articles
                        continue

                assert isinstance(server, Server)
                if (not server.idle_threads or server.restart or self.is_paused() or self.shutdown
                        or self.delayed or self.postproc):
                    continue

                if not (server.active and NzbQueue.do.has_articles_for(server)):
                    continue

                for nw in server.idle_threads[:]:
                    assert isinstance(nw, NewsWrapper)
                    if nw.timeout:
                        if time.time() < nw.timeout:
                            continue
                        else:
                            nw.timeout = None

                    if not server.active:
                        break

                    if server.info is None:
                        self.maybe_block_server(server)
                        request_server_info(server)
                        break

                    article = NzbQueue.do.get_article(server, self.servers)

                    if not article:
                        break

                    if server.retention and article.nzf.nzo.avg_stamp < time.time() - server.retention:
                        # Article too old for the server, treat as missing
                        if sabnzbd.LOG_ALL:
                            logging.debug("Article %s too old for %s", article.article, server.id)
                        self.decoder.decode(article, None)
                        break

                    server.idle_threads.remove(nw)
                    server.busy_threads.append(nw)

                    nw.article = article

                    if nw.connected:
                        self.__request_article(nw)
                    else:
                        try:
                            logging.info("%s@%s: Initiating connection", nw.thrdnum, server.id)
                            nw.init_connect(self.write_fds)
                        except:
                            logging.error(T("Failed to initialize %s@%s with reason: %s"),
                                          nw.thrdnum, server.id, sys.exc_info()[1])
                            self.__reset_nw(nw, "failed to initialize")

            # Exit-point
            if self.shutdown:
                empty = True
                for server in self.servers:
                    if server.busy_threads:
                        empty = False
                        break

                if empty:
                    self.decoder.stop()
                    self.decoder.join()

                    for server in self.servers:
                        server.stop(self.read_fds, self.write_fds)

                    logging.info("Shutting down")
                    break

            if self.force_disconnect:
                for server in self.servers:
                    for nw in server.idle_threads + server.busy_threads:
                        quit = nw.connected and server.active
                        self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, quit=quit)
                    # Make sure server address resolution is refreshed
                    server.info = None

                self.force_disconnect = False

            # => Select
            readkeys = self.read_fds.keys()
            writekeys = self.write_fds.keys()

            if readkeys or writekeys:
                read, write, error = select.select(readkeys, writekeys, (), 1.0)
            else:
                read, write, error = ([], [], [])

                BPSMeter.do.reset()

                time.sleep(1.0)

                CV.acquire()
                while ((NzbQueue.do.is_empty() or self.is_paused() or self.delayed or self.postproc)
                       and not self.shutdown and not self.__restart):
                    CV.wait()
                CV.release()

                self.force_disconnect = False

            for selected in write:
                nw = self.write_fds[selected]

                fileno = nw.nntp.sock.fileno()

                if fileno not in self.read_fds:
                    self.read_fds[fileno] = nw

                if fileno in self.write_fds:
                    self.write_fds.pop(fileno)

            if not read:
                BPSMeter.do.update()
                continue

            for selected in read:
                nw = self.read_fds[selected]
                article = nw.article
                server = nw.server

                if article:
                    nzo = article.nzf.nzo

                try:
                    bytes, done, skip = nw.recv_chunk()
                except:
                    bytes, done, skip = (0, False, False)

                if skip:
                    BPSMeter.do.update()
                    continue

                if bytes < 1:
                    self.__reset_nw(nw, "server closed connection", warn=False, wait=False)
                    continue

                else:
                    if self.bandwidth_limit:
                        bps = BPSMeter.do.get_bps()
                        bps += bytes
                        limit = self.bandwidth_limit
                        if bps > limit:
                            while BPSMeter.do.get_bps() > limit:
                                time.sleep(0.05)
                                BPSMeter.do.update()
                    BPSMeter.do.update(server.id, bytes)

                    if nzo:
                        if server.id in nzo.servercount:
                            nzo.servercount[server.id] += bytes
                        else:
                            nzo.servercount[server.id] = bytes
                        nzo.bytes_downloaded += bytes
                        nzo.update_avg_kbs(BPSMeter.do.get_bps())

                if len(nw.lines) == 1:
                    code = nw.lines[0][:3]
                    if not nw.connected or code == "480":
                        done = False

                        try:
                            nw.finish_connect(code)
                            if sabnzbd.LOG_ALL:
                                logging.debug("%s@%s last message -> %s", nw.thrdnum, nw.server.id, nw.lines[0])
                            nw.lines = []
                            nw.data = ""
                        except NNTPPermanentError, error:
                            # Handle login problems
                            block = False
                            penalty = 0
                            msg = error.response
                            ecode = msg[:3]
                            display_msg = " [%s]" % msg
                            logging.debug("Server login problem: %s, %s", ecode, msg)
                            if ecode in ("502", "481", "400") and clues_too_many(msg):
                                # Too many connections: remove this thread and reduce thread-setting for server
                                # Plan to go back to the full number after a penalty timeout
                                if server.active:
                                    server.errormsg = T("Too many connections to server %s") % display_msg
                                    logging.error(T("Too many connections to server %s"), server.id)
                                self.__reset_nw(nw, None, warn=False, destroy=True, quit=True)
                                self.plan_server(server.id, _PENALTY_TOOMANY)
                                server.threads -= 1
                            elif ecode in ("502", "481") and clues_too_many_ip(msg):
                                # Account sharing?
                                if server.active:
                                    server.errormsg = T("Probable account sharing") + display_msg
                                    name = " (%s)" % server.id
                                    logging.error(T("Probable account sharing") + name)
                                penalty = _PENALTY_SHARE
                            elif ecode in ("481", "482", "381") or (ecode == "502" and clues_login(msg)):
                                # Cannot login, block this server
                                if server.active:
                                    server.errormsg = T("Failed login for server %s") % display_msg
                                    logging.error(T("Failed login for server %s"), server.id)
                                penalty = _PENALTY_PERM
                                block = True
                            elif ecode == "502":
                                # Cannot connect (other reasons), block this server
                                if server.active:
                                    server.errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
                                    logging.warning(T("Cannot connect to server %s [%s]"), server.id, msg)
                                if clues_pay(msg):
                                    penalty = _PENALTY_PERM
                                else:
                                    penalty = _PENALTY_502
                                block = True
                            elif ecode == "400":
                                # Temp connection problem?
                                if server.active:
                                    logging.debug("Unspecified error 400 from server %s", server.id)
                                penalty = _PENALTY_VERYSHORT
                                block = True
                            else:
                                # Unknown error, just keep trying
                                if server.active:
                                    server.errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
                                    logging.error(T("Cannot connect to server %s [%s]"), server.id, msg)
                                penalty = _PENALTY_UNKNOWN
                            if block or (penalty and server.optional):
                                if server.active:
                                    server.active = False
                                    if (not server.optional) and cfg.no_penalties():
                                        penalty = _PENALTY_SHORT
                                    if penalty and (block or server.optional):
                                        logging.info("Server %s ignored for %s minutes", server.id, penalty)
                                        self.plan_server(server.id, penalty)
                                    NzbQueue.do.reset_all_try_lists()
                            self.__reset_nw(nw, None, warn=False, quit=True)
                            continue
                        except:
                            logging.error(T("Connecting %s@%s failed, message=%s"),
                                          nw.thrdnum, nw.server.id, nw.lines[0])
                            # No reset-warning needed, above logging is sufficient
                            self.__reset_nw(nw, None, warn=False)

                        if nw.connected:
                            logging.info("Connecting %s@%s finished", nw.thrdnum, nw.server.id)
                            self.__request_article(nw)

                    elif code == "223":
                        done = True
                        logging.debug("Article <%s> is present", article.article)
                        self.decoder.decode(article, nw.lines)
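# Hedged sketch of the limit_speed() interpretation rules above, pulled out as a pure
# function. `from_units` is assumed to behave roughly like SABnzbd's helper (e.g. "4M" ->
# a byte count); a tiny stand-in is included so the sketch runs on its own, and both
# function names here are illustrative only.
def from_units(value):
    multipliers = {"K": 1024, "M": 1024 ** 2, "G": 1024 ** 3, "T": 1024 ** 4}
    value = str(value).strip().upper()
    if value and value[-1] in multipliers:
        return float(value[:-1]) * multipliers[value[-1]]
    try:
        return float(value)
    except ValueError:
        return 0.0

def interpret_limit(value, max_bps):
    """ Return (percentage, absolute bytes/sec) for a speed-limit setting. """
    if "%" in str(value) or 0 < from_units(value) < 101:
        # Percentage of the configured maximum bandwidth
        perc = from_units(str(value).strip(" %"))
        return perc, max_bps * perc / 100
    # Absolute value, possibly with a K/M/G/T suffix
    absolute = from_units(value)
    return (absolute / max_bps * 100 if max_bps else 100), absolute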