def run(self):
    """ Process the queue (including waits and retries) """
    from sabnzbd.nzbqueue import NzbQueue

    def sleeper(delay):
        for n in range(delay):
            if not self.shutdown:
                time.sleep(1.05)

    self.shutdown = False
    msgid = None
    while not self.shutdown:
        if not msgid:
            (msgid, nzo) = self.queue.get()
            if self.shutdown or not msgid:
                break
        logging.debug("Popping msgid %s", msgid)

        filename, data, newzbin_cat, nzo_info = _grabnzb(msgid)
        if filename and data:
            filename = name_fixer(filename)

            pp = nzo.pp
            script = nzo.script
            cat = nzo.cat
            if cat == '*' or not cat:
                cat = cat_convert(newzbin_cat)
            priority = nzo.priority
            nzbname = nzo.custom_name

            cat, pp, script, priority = cat_to_opts(cat, pp, script, priority)

            try:
                sabnzbd.nzbqueue.insert_future_nzo(nzo, filename, msgid, data, pp=pp, script=script,
                                                   cat=cat, priority=priority, nzbname=nzbname,
                                                   nzo_info=nzo_info)
            except:
                logging.error(Ta('Failed to update newzbin job %s'), msgid)
                logging.info("Traceback: ", exc_info=True)
                NzbQueue.do.remove(nzo.nzo_id, False)
            msgid = None
        else:
            if filename:
                sleeper(int(filename))
            else:
                # Fatal error, give up on this one
                bad_fetch(nzo, msgid, msg=nzo_info, retry=True)
                msgid = None

        osx.sendGrowlMsg(T('NZB added to queue'), filename, osx.NOTIFICATION['download'])

        # Keep some distance between the grabs
        sleeper(5)

    logging.debug('Stopping MSGIDGrabber')
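# Hedged usage sketch (not part of the original module): the grabber above is a
# worker thread fed (msgid, nzo) pairs through self.queue, and the loop exits when
# it pops a falsy msgid while self.shutdown is set. A hypothetical stop() that
# unblocks the blocking queue.get() could look like this:
#
#     def stop(self):
#         self.shutdown = True
#         self.queue.put((None, None))   # sentinel wakes the worker so it can exit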
def run(self):
    """ Process the queue (including waits and retries) """
    from sabnzbd.nzbqueue import NzbQueue
    self.shutdown = False
    while not self.shutdown:
        time.sleep(5)
        (msgid, nzo) = self.queue.get()
        if self.shutdown or not msgid:
            break
        if nzo.wait and nzo.wait > time.time():
            self.grab(msgid, nzo)
            continue
        logging.debug("Popping msgid %s", msgid)

        filename, data, newzbin_cat, nzo_info = _grabnzb(msgid)
        if filename and data:
            filename = name_fixer(filename)

            pp = nzo.pp
            script = nzo.script
            cat = nzo.cat
            if cat == '*' or not cat:
                cat = cat_convert(newzbin_cat)
            priority = nzo.priority
            nzbname = nzo.custom_name

            cat, pp, script, priority = cat_to_opts(cat, pp, script, priority)

            try:
                sabnzbd.nzbqueue.insert_future_nzo(nzo, filename, msgid, data, pp=pp, script=script,
                                                   cat=cat, priority=priority, nzbname=nzbname,
                                                   nzo_info=nzo_info)
                nzo.url = format_source_url(str(msgid))
            except:
                logging.error(Ta('Failed to update newzbin job %s'), msgid)
                logging.info("Traceback: ", exc_info=True)
                NzbQueue.do.remove(nzo.nzo_id, False)
                msgid = None
        else:
            if filename:
                self.grab(msgid, nzo, float(filename))
            else:
                # Fatal error, give up on this one
                bad_fetch(nzo, msgid, msg=nzo_info, retry=True)
            msgid = None

        if msgid:
            growler.send_notification(T('NZB added to queue'), filename, 'download')

    logging.debug('Stopping MSGIDGrabber')
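# Minimal sketch of the grab() helper assumed by the loop above. The name and the
# optional-delay signature are inferred from the calls self.grab(msgid, nzo) and
# self.grab(msgid, nzo, float(filename)); they are not taken from the original source.
#
#     def grab(self, msgid, nzo, delay=None):
#         if delay:
#             nzo.wait = time.time() + delay   # earliest time the next attempt may run
#         self.queue.put((msgid, nzo))         # requeue; loop defers while nzo.wait is ahead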
def run(self):
    logging.info('URLGrabber starting up')
    self.shutdown = False

    while not self.shutdown:
        # Don't pound the website!
        time.sleep(5.0)

        (url, future_nzo) = self.queue.get()
        if not url:
            # stop signal, go test self.shutdown
            continue
        if future_nzo and future_nzo.wait and future_nzo.wait > time.time():
            # Requeue when too early and still active
            self.add(url, future_nzo)
            continue
        url = url.replace(' ', '')

        try:
            del_bookmark = not future_nzo
            if future_nzo:
                # If nzo entry deleted, give up
                try:
                    deleted = future_nzo.deleted
                except AttributeError:
                    deleted = True
                if deleted:
                    logging.debug('Dropping URL %s, job entry missing', url)
                    continue

            # Add nzbmatrix credentials if needed
            url, matrix_id = _matrix_url(url)

            # _grab_url cannot reside in a function, because the tempfile
            # would not survive the end of the function
            if del_bookmark:
                logging.info('Removing nzbmatrix bookmark %s', matrix_id)
            else:
                logging.info('Grabbing URL %s', url)
            opener = urllib.FancyURLopener({})
            opener.prompt_user_passwd = None
            opener.addheaders = []
            opener.addheader('User-Agent', 'SABnzbd+/%s' % sabnzbd.version.__version__)
            if not [True for item in _BAD_GZ_HOSTS if item in url]:
                opener.addheader('Accept-encoding', 'gzip')
            filename = None
            category = None
            length = 0
            nzo_info = {}
            try:
                fn, header = opener.retrieve(url)
            except:
                fn = None

            if fn:
                for tup in header.items():
                    try:
                        item = tup[0].lower()
                        value = tup[1].strip()
                    except:
                        continue
                    if item in ('category_id', 'x-dnzb-category'):
                        category = value
                    elif item in ('x-dnzb-moreinfo',):
                        nzo_info['more_info'] = value
                    elif item in ('x-dnzb-name',):
                        filename = value
                        if not filename.endswith('.nzb'):
                            filename += '.nzb'
                    elif item in ('content-length',):
                        length = misc.int_conv(value)

                    if not filename:
                        for item in tup:
                            if "filename=" in item:
                                filename = item[item.index("filename=") + 9:].strip(';').strip('"')

            if matrix_id:
                fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                if not fn:
                    if retry:
                        logging.info(msg)
                        logging.debug('Retry nzbmatrix item %s after waiting %s sec', matrix_id, wait)
                        self.add(url, future_nzo, wait)
                    else:
                        logging.error(msg)
                        misc.bad_fetch(future_nzo, clean_matrix_url(url), msg, retry=True)
                    continue
                category = _MATRIX_MAP.get(category, category)

                if del_bookmark:
                    # No retries of nzbmatrix bookmark removals
                    continue
            else:
                fn, msg, retry, wait = _analyse_others(fn, url)
                if not fn:
                    if retry:
                        logging.info('Retry URL %s', url)
                        self.add(url, future_nzo, wait)
                    else:
                        misc.bad_fetch(future_nzo, url, msg, retry=True)
                    continue

            if not filename:
                filename = os.path.basename(url) + '.nzb'
            # Sanitize and trim name, preserving the extension
            filename, ext = os.path.splitext(filename)
            filename = misc.sanitize_foldername(filename)
            filename += '.' + misc.sanitize_foldername(ext)

            pp = future_nzo.pp
            script = future_nzo.script
            cat = future_nzo.cat
            if (cat is None or cat == '*') and category:
                cat = misc.cat_convert(category)
            priority = future_nzo.priority
            nzbname = future_nzo.custom_name

            # Check if nzb file
            if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                res, nzo_ids = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat,
                                                            priority=priority, nzbname=nzbname,
                                                            nzo_info=nzo_info, url=future_nzo.url)
                if res == 0:
                    NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                else:
                    if res == -2:
                        logging.info('Incomplete NZB, retry after 5 min %s', url)
                        when = 300
                    else:
                        logging.info('Unknown error fetching NZB, retry after 2 min %s', url)
                        when = 120
                    self.add(url, future_nzo, when)
            # Check if a supported archive
            else:
                if dirscanner.ProcessArchiveFile(filename, fn, pp, script, cat, priority=priority,
                                                 url=future_nzo.url)[0] == 0:
                    NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                else:
                    # Not a supported filetype, not an nzb (text/html etc.)
                    try:
                        os.remove(fn)
                    except:
                        pass
                    logging.info('Unknown filetype when fetching NZB, retry after 30s %s', url)
                    self.add(url, future_nzo, 30)
        except:
            logging.error('URLGRABBER CRASHED', exc_info=True)
            logging.debug("URLGRABBER Traceback: ", exc_info=True)
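# Illustration of the filename fallback in the header scan above, using assumed
# sample data: when no X-DNZB-Name header is present, the code slices 9 characters
# (the length of "filename=") past that marker in a Content-Disposition value and
# strips the surrounding quoting.
header_value = 'attachment; filename="some.post.nzb"'      # hypothetical header value
name = header_value[header_value.index("filename=") + 9:].strip(';').strip('"')
assert name == 'some.post.nzb'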
def run(self):
    logging.info('URLGrabber starting up')
    self.shutdown = False

    while not self.shutdown:
        # Don't pound the website!
        time.sleep(5.0)

        (url, future_nzo, retry_count) = self.queue.get()
        if not url:
            continue
        url = url.replace(' ', '')

        try:
            del_bookmark = not future_nzo
            if future_nzo:
                # If nzo entry deleted, give up
                try:
                    deleted = future_nzo.deleted
                except:
                    deleted = True
                if deleted:
                    logging.debug('Dropping URL %s, job entry missing', url)
                    continue

            # Add nzbmatrix credentials if needed
            url, matrix_id = _matrix_url(url)

            # When still waiting for nzbmatrix wait period, requeue
            if matrix_id and self.matrix_wait > time.time():
                self.queue.put((url, future_nzo, retry_count))
                continue

            # _grab_url cannot reside in a function, because the tempfile
            # would not survive the end of the function
            if del_bookmark:
                logging.info('Removing nzbmatrix bookmark %s', matrix_id)
            else:
                logging.info('Grabbing URL %s', url)
            opener = urllib.FancyURLopener({})
            opener.prompt_user_passwd = None
            opener.addheaders = []
            opener.addheader('User-Agent', 'SABnzbd+/%s' % sabnzbd.version.__version__)
            opener.addheader('Accept-encoding', 'gzip')
            filename = None
            category = None
            length = 0
            nzo_info = {}
            try:
                fn, header = opener.retrieve(url)
            except:
                fn = None

            if fn:
                for tup in header.items():
                    try:
                        item = tup[0].lower()
                        value = tup[1].strip()
                    except:
                        continue
                    if item in ('category_id', 'x-dnzb-category'):
                        category = value
                    elif item in ('x-dnzb-moreinfo',):
                        nzo_info['more_info'] = value
                    elif item in ('x-dnzb-name',):
                        filename = value
                        if not filename.endswith('.nzb'):
                            filename += '.nzb'
                    elif item in ('content-length',):
                        length = misc.int_conv(value)

                    if not filename:
                        for item in tup:
                            if "filename=" in item:
                                filename = item[item.index("filename=") + 9:].strip(';').strip('"')

            if matrix_id:
                fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                if retry and wait > 0:
                    self.matrix_wait = time.time() + wait
                    logging.debug('Retry URL %s after waiting', url)
                    self.queue.put((url, future_nzo, retry_count))
                    continue
                category = _MATRIX_MAP.get(category, category)
            else:
                msg = ''
                retry = True

            # Check if the filepath is specified, if not, check if a retry is allowed.
            if not fn:
                retry_count -= 1
                if retry_count > 0 and retry:
                    logging.info('Retry URL %s', url)
                    self.queue.put((url, future_nzo, retry_count))
                elif not del_bookmark:
                    misc.bad_fetch(future_nzo, url, msg, retry=True)
                continue

            if del_bookmark:
                continue

            if not filename:
                filename = os.path.basename(url) + '.nzb'
            # Sanitize and trim name, preserving the extension
            filename, ext = os.path.splitext(filename)
            filename = misc.sanitize_foldername(filename)
            filename += '.' + misc.sanitize_foldername(ext)

            pp = future_nzo.pp
            script = future_nzo.script
            cat = future_nzo.cat
            if (cat is None or cat == '*') and category:
                cat = misc.cat_convert(category)
            priority = future_nzo.priority
            nzbname = future_nzo.custom_name

            # Check if nzb file
            if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                res = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat,
                                                   priority=priority, nzbname=nzbname,
                                                   nzo_info=nzo_info, url=future_nzo.url)
                if res == 0:
                    NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                elif res == -2:
                    retry_count -= 1
                    if retry_count > 0:
                        logging.info('Incomplete NZB, retry %s', url)
                        self.queue.put((url, future_nzo, retry_count))
                    else:
                        misc.bad_fetch(future_nzo, url, retry=True, content=True)
                else:
                    misc.bad_fetch(future_nzo, url, retry=True, content=True)
            # Check if a supported archive
            else:
                if dirscanner.ProcessArchiveFile(filename, fn, pp, script, cat, priority=priority,
                                                 url=future_nzo.url) == 0:
                    NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                else:
                    # Not a supported filetype, not an nzb (text/html etc.)
                    try:
                        os.remove(fn)
                    except:
                        pass
                    misc.bad_fetch(future_nzo, url, retry=True, content=True)
        except:
            logging.error('URLGRABBER CRASHED', exc_info=True)
            logging.debug("URLGRABBER Traceback: ", exc_info=True)
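# Sketch of how jobs would enter this variant's queue (assumed wrapper; the loop
# above only shows that (url, future_nzo, retry_count) tuples are consumed and that
# retry_count is decremented until it reaches zero). _URL_RETRIES is a hypothetical
# module constant for the initial retry budget, not a name from the original source.
#
#     _URL_RETRIES = 10
#
#     def add(self, url, future_nzo, retry_count=_URL_RETRIES):
#         self.queue.put((url, future_nzo, retry_count))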
def run(self):
    logging.info('URLGrabber starting up')
    self.shutdown = False

    while not self.shutdown:
        # Don't pound the website!
        time.sleep(5.0)

        (url, future_nzo) = self.queue.get()
        if not url:
            # stop signal, go test self.shutdown
            continue
        if future_nzo and future_nzo.wait and future_nzo.wait > time.time():
            # Requeue when too early and still active
            self.add(url, future_nzo)
            continue
        url = url.replace(' ', '')

        try:
            del_bookmark = not future_nzo
            if future_nzo:
                # If nzo entry deleted, give up
                try:
                    deleted = future_nzo.deleted
                except AttributeError:
                    deleted = True
                if deleted:
                    logging.debug('Dropping URL %s, job entry missing', url)
                    continue

            # Add nzbmatrix credentials if needed
            url, matrix_id = _matrix_url(url)

            # _grab_url cannot reside in a function, because the tempfile
            # would not survive the end of the function
            if del_bookmark:
                logging.info('Removing nzbmatrix bookmark %s', matrix_id)
            else:
                logging.info('Grabbing URL %s', url)
            if '.nzbsrus.' in url:
                opener = urllib.URLopener({})
            else:
                opener = urllib.FancyURLopener({})
            opener.prompt_user_passwd = None
            opener.addheaders = []
            opener.addheader('User-Agent', 'SABnzbd+/%s' % sabnzbd.version.__version__)
            if not [True for item in _BAD_GZ_HOSTS if item in url]:
                opener.addheader('Accept-encoding', 'gzip')
            filename = None
            category = None
            length = 0
            nzo_info = {}
            wait = 0
            try:
                fn, header = opener.retrieve(url)
            except:
                fn = None

            if fn:
                for tup in header.items():
                    try:
                        item = tup[0].lower()
                        value = tup[1].strip()
                    except:
                        continue
                    if item in ('category_id', 'x-dnzb-category'):
                        category = value
                    elif item in ('x-dnzb-moreinfo',):
                        nzo_info['more_info'] = value
                    elif item in ('x-dnzb-name',):
                        filename = value
                        if not filename.endswith('.nzb'):
                            filename += '.nzb'
                    elif item == 'x-dnzb-propername':
                        nzo_info['propername'] = value
                    elif item == 'x-dnzb-episodename':
                        nzo_info['episodename'] = value
                    elif item == 'x-dnzb-year':
                        nzo_info['year'] = value
                    elif item == 'x-dnzb-failure':
                        nzo_info['failure'] = value
                    elif item == 'x-dnzb-details':
                        nzo_info['details'] = value
                    elif item in ('content-length',):
                        length = misc.int_conv(value)
                    elif item == 'retry-after':
                        # For NZBFinder
                        wait = misc.int_conv(value)

                    if not filename:
                        for item in tup:
                            if "filename=" in item:
                                filename = item[item.index("filename=") + 9:].strip(';').strip('"')

            if matrix_id:
                fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                if not fn:
                    if retry:
                        logging.info(msg)
                        logging.debug('Retry nzbmatrix item %s after waiting %s sec', matrix_id, wait)
                        self.add(url, future_nzo, wait)
                    else:
                        logging.error(msg)
                        misc.bad_fetch(future_nzo, clean_matrix_url(url), msg, retry=True)
                    continue
                category = get_matrix_category(url, category)

                if del_bookmark:
                    # No retries of nzbmatrix bookmark removals
                    continue
            else:
                if wait:
                    # For sites that have a rate-limiting attribute
                    msg = ''
                    retry = True
                    fn = None
                else:
                    fn, msg, retry, wait = _analyse_others(fn, url)
                if not fn:
                    if retry:
                        logging.info('Retry URL %s', url)
                        self.add(url, future_nzo, wait)
                    else:
                        misc.bad_fetch(future_nzo, url, msg, retry=True)
                    continue

            if not filename:
                filename = os.path.basename(url) + '.nzb'

            pp = future_nzo.pp
            script = future_nzo.script
            cat = future_nzo.cat
            if (cat is None or cat == '*') and category:
                cat = misc.cat_convert(category)
            priority = future_nzo.priority
            nzbname = future_nzo.custom_name

            # Check if nzb file
            if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                res, nzo_ids = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat,
                                                            priority=priority, nzbname=nzbname,
                                                            nzo_info=nzo_info, url=future_nzo.url)
                if res == 0:
                    NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                else:
                    if res == -2:
                        logging.info('Incomplete NZB, retry after 5 min %s', url)
                        when = 300
                    elif res == -1:
                        # Error, but no reason to retry. Warning is already given
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                        continue
                    else:
                        logging.info('Unknown error fetching NZB, retry after 2 min %s', url)
                        when = 120
                    self.add(url, future_nzo, when)
            # Check if a supported archive
            else:
                if dirscanner.ProcessArchiveFile(filename, fn, pp, script, cat, priority=priority,
                                                 nzbname=nzbname, url=future_nzo.url)[0] == 0:
                    NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                else:
                    # Not a supported filetype, not an nzb (text/html etc.)
                    try:
                        os.remove(fn)
                    except:
                        pass
                    logging.info('Unknown filetype when fetching NZB, retry after 30s %s', url)
                    self.add(url, future_nzo, 30)
        except:
            logging.error('URLGRABBER CRASHED', exc_info=True)
            logging.debug("URLGRABBER Traceback: ", exc_info=True)
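# Minimal sketch of the add() used throughout this final variant (assumed signature,
# inferred from calls such as self.add(url, future_nzo, when)): an optional delay
# stamps future_nzo.wait so the top of the loop keeps requeueing the job until the
# wait has passed.
#
#     def add(self, url, future_nzo, when=None):
#         if when and future_nzo:
#             future_nzo.wait = time.time() + when   # defer until this moment
#         self.queue.put((url, future_nzo))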