def scan_jobs(self, all=False, action=True):
    """ Scan "incomplete" for missing folders.
        'all' is True: include active folders
        'action' is True: do the recovery action
        Returns list of orphaned folders
    """
    orphans = []
    # Known folder names: jobs currently in the queue, unless scanning everything
    known = [] if all else [nzo.work_name for nzo in self.__nzo_list]
    # Anything waiting, active or retryable in History is a known item too
    history_items = sabnzbd.api.build_history(output=True)[0]
    for item in history_items:
        if item['retry'] or item['loaded'] or item['status'] == Status.QUEUED:
            known.append(platform_encode(os.path.basename(item['path'])))

    # Repair any unregistered folder found in the download dir
    for candidate in globber_full(cfg.download_dir.get_path()):
        folder_name = os.path.basename(candidate)
        is_orphan = (os.path.isdir(candidate)
                     and folder_name not in known
                     and folder_name not in IGNORED_FOLDERS)
        if is_orphan:
            if action:
                logging.info('Repairing job %s', candidate)
                self.repair_job(candidate)
            orphans.append(folder_name)
        elif action:
            logging.info('Skipping repair for job %s', candidate)
    return orphans
def try_sfv_check(nzo, workdir, setname):
    """ Attempt to verify set using SFV file
        Return True if verified, False when failed
        When setname is '', all SFV files will be used, otherwise only the matching one
        When setname is '' and no SFV files are found, True is returned
    """
    # Get list of SFV names; shortest name first, minimizes the chance on a mismatch.
    # Use a key-based sort: the cmp-style sort(lambda x, y: len(x) - len(y))
    # is Python-2-only and fails on Python 3.
    sfvs = globber_full(workdir, '*.sfv')
    sfvs.sort(key=len)
    par_error = False
    found = False
    for sfv in sfvs:
        if setname.lower() in os.path.basename(sfv).lower():
            found = True
            nzo.status = Status.VERIFYING
            nzo.set_unpack_info('Repair', T('Trying SFV verification'))
            nzo.set_action_line(T('Trying SFV verification'), '...')
            failed = sfv_check(sfv)
            if failed:
                # Build the failure report listing every file that did not verify
                fail_msg = T('Some files failed to verify against "%s"') % unicoder(os.path.basename(sfv))
                msg = fail_msg + '; '
                msg += '; '.join(failed)
                nzo.set_unpack_info('Repair', msg)
                par_error = True
            else:
                nzo.set_unpack_info('Repair', T('Verified successfully using SFV files'))
            if setname:
                break

    # Show error in GUI
    if found and par_error:
        nzo.status = Status.FAILED
        nzo.fail_msg = fail_msg
    return (found or not setname) and not par_error
def repair_job(self, folder, new_nzb=None, password=None):
    """ Reconstruct admin for a single job folder, optionally with new NZB """
    def _fully_verified(admin_path):
        """ Return True when all sets have been successfully verified """
        state = sabnzbd.load_data(VERIFIED_FILE, admin_path, remove=False) or {'x': False}
        return all(state.values())

    job_name = os.path.basename(folder)
    admin_path = os.path.join(folder, JOB_ADMIN)
    upload_name = getattr(new_nzb, 'filename', '')

    if upload_name:
        # A replacement NZB was supplied: drop stored NZBs and use the new one
        remove_all(admin_path, '*.gz')
        logging.debug('Repair job %s with new NZB (%s)', job_name, upload_name)
        nzo_id = sabnzbd.add_nzbfile(new_nzb, pp=None, script=None, cat=None, priority=None,
                                     nzbname=job_name, reuse=True, password=password)[1]
        return nzo_id

    # No replacement NZB: look for a stored one, unless every set already verified
    stored = []
    if not _fully_verified(admin_path):
        stored = globber_full(admin_path, '*.gz')
    if stored:
        logging.debug('Repair job %s by reparsing stored NZB', job_name)
        nzo_id = sabnzbd.add_nzbfile(stored[0], pp=None, script=None, cat=None, priority=None,
                                     nzbname=job_name, reuse=True, password=password)[1]
    else:
        # Rebuild the job from the folder contents alone
        logging.debug('Repair job %s without stored NZB', job_name)
        nzo = NzbObject(job_name, pp=None, script=None, nzb='', cat=None, priority=None,
                        nzbname=job_name, reuse=True)
        nzo.password = password
        self.add(nzo)
        nzo_id = nzo.nzo_id
    return nzo_id
def try_sfv_check(nzo, workdir, setname):
    """ Attempt to verify set using SFV file
        Return True if verified, False when failed
        When setname is '', all SFV files will be used, otherwise only the matching one
        When setname is '' and no SFV files are found, True is returned
    """
    # Get list of SFV names; shortest name first, minimizes the chance on a mismatch.
    # Use a key-based sort: the cmp-style sort(lambda x, y: len(x) - len(y))
    # is Python-2-only and fails on Python 3.
    sfvs = globber_full(workdir, "*.sfv")
    sfvs.sort(key=len)
    par_error = False
    found = False
    for sfv in sfvs:
        if setname.lower() in os.path.basename(sfv).lower():
            found = True
            nzo.set_unpack_info("Repair", T("Trying SFV verification"))
            failed = sfv_check(sfv)
            if failed:
                # Build the failure report listing every file that did not verify
                msg = T('Some files failed to verify against "%s"') % unicoder(os.path.basename(sfv))
                msg += "; "
                msg += "; ".join(failed)
                nzo.set_unpack_info("Repair", msg)
                par_error = True
            else:
                nzo.set_unpack_info("Repair", T("Verified successfully using SFV files"))
            if setname:
                break
    return (found or not setname) and not par_error
def send_back(self, nzo):
    """ Send back job to queue after successful pre-check """
    try:
        # Pick the first stored NZB of the job; IndexError when none exist
        nzb_path = globber_full(nzo.workpath, '*.gz')[0]
    except Exception:
        # Best-effort: give up quietly, but don't use a bare except so that
        # SystemExit/KeyboardInterrupt still propagate
        logging.debug('Failed to find NZB file after pre-check (%s)', nzo.nzo_id)
        return

    res, nzo_ids = ProcessSingleFile(nzo.work_name + '.nzb', nzb_path, keep=True, reuse=True)
    if res == 0 and nzo_ids:
        nzo = self.replace_in_q(nzo, nzo_ids[0])
        # Reset reuse flag to make pause/abort on encryption possible
        nzo.reuse = False
def check_compatibility(self, data, repair=False):
    """ Do compatibility checks on the loaded data.
        'data' is the tuple loaded from the queue admin file (or falsy).
        'repair' suppresses the panic/exit on an incompatible queuefile.
        BUGFIX: the original body referenced 'repair' without it being a
        parameter or local, raising NameError whenever an incompatible
        queuefile was found. Added as a keyword parameter with a default
        so existing callers (check_compatibility(data)) keep working.
        Returns the list of nzo_ids from the queuefile (possibly empty).
    """
    nzo_ids = []
    if not data:
        # Warn about old queue
        if sabnzbd.OLD_QUEUE and cfg.warned_old_queue() < QUEUE_VERSION:
            logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
            cfg.warned_old_queue.set(QUEUE_VERSION)
            sabnzbd.config.save_config()
    else:
        # Try to process
        try:
            queue_vers, nzo_ids, dummy = data
            if not queue_vers == QUEUE_VERSION:
                nzo_ids = []
                logging.error(T('Incompatible queuefile found, cannot proceed'))
                if not repair:
                    panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                    exit_sab(2)
        except ValueError:
            # Tuple unpack failed: the queuefile is corrupt
            nzo_ids = []
            logging.error(T('Error loading %s, corrupt file detected'),
                          os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))

    # We need to do a repair in case of old-style pickles
    if not cfg.converted_nzo_pickles():
        for nzo_id in nzo_ids:
            folder, _id = os.path.split(nzo_id)
            path = get_admin_path(folder, future=False)
            # This will update them but preserve queue-order
            if os.path.exists(os.path.join(path, _id)):
                self.repair_job(os.path.dirname(path))

        # Remove any future-jobs, we can't save those
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            os.remove(item)

        # Done converting
        cfg.converted_nzo_pickles.set(True)
        sabnzbd.config.save_config()
        nzo_ids = []
    return nzo_ids
def read_queue(self, repair):
    """ Read queue from disk, supporting repair modes
        0 = no repairs
        1 = use existing queue, add missing "incomplete" folders
        2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Read the queue from the saved files; mode 2 skips this entirely
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        # Process the data and check compatibility
        nzo_ids = self.check_compatibility(data)

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        folder, _id = os.path.split(nzo_id)
        path = get_admin_path(folder, future=False)

        # Try as normal job
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job (admin stored in the future-queue folder)
            path = get_admin_path(folder, future=True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue.
    # When nothing was restored, scan all folders (not folders == True).
    if repair:
        self.scan_jobs(not folders)

    # Handle any lost future jobs
    for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
        path, nzo_id = os.path.split(item)
        if nzo_id not in self.__nzo_table:
            if nzo_id.startswith('SABnzbd_nzo'):
                nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                if nzo:
                    self.add(nzo, save=True)
            else:
                # Stray file in the future-queue folder: best-effort removal
                try:
                    os.remove(item)
                except:
                    pass
def read_queue(self, repair):
    """ Read queue from disk, supporting repair modes
        0 = no repairs
        1 = use existing queue, add missing "incomplete" folders
        2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Load the stored queue and validate it; mode 2 skips this entirely
        stored = sabnzbd.load_admin(QUEUE_FILE_NAME)
        nzo_ids = self.check_compatibility(repair, stored)

    # Restore every job listed in the queue file, preserving order
    restored_folders = []
    for nzo_id in nzo_ids:
        job_folder, admin_id = os.path.split(nzo_id)
        # Try as a normal job first, fall back to a future job
        nzo = sabnzbd.load_data(admin_id, get_admin_path(job_folder, future=False), remove=False)
        if not nzo:
            nzo = sabnzbd.load_data(admin_id, get_admin_path(job_folder, future=True))
        if nzo:
            self.add(nzo, save=False, quiet=True)
            restored_folders.append(job_folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        self.scan_jobs(not restored_folders)

    # Recover or clean up leftover future jobs
    future_dir = os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)
    for item in globber_full(future_dir):
        path, nzo_id = os.path.split(item)
        if nzo_id in self.__nzo_table:
            continue
        if nzo_id.startswith('SABnzbd_nzo'):
            nzo = sabnzbd.load_data(nzo_id, path, remove=True)
            if nzo:
                self.add(nzo, save=True)
        else:
            try:
                remove_file(item)
            except:
                pass
def check_compatibility(self, repair, data):
    """ Do compatibility checks on the loaded data """
    nzo_ids = []
    if not data:
        # Nothing loaded: warn (once per version) if an old-style queue exists
        if sabnzbd.OLD_QUEUE and cfg.warned_old_queue() < QUEUE_VERSION:
            logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
            cfg.warned_old_queue.set(QUEUE_VERSION)
            sabnzbd.config.save_config()
    else:
        # Unpack and validate the stored queue tuple
        try:
            queue_vers, nzo_ids, dummy = data
            if queue_vers != QUEUE_VERSION:
                nzo_ids = []
                logging.error(T('Incompatible queuefile found, cannot proceed'))
                if not repair:
                    panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                    exit_sab(2)
        except ValueError:
            # Unpack failed: the queuefile is corrupt
            nzo_ids = []
            logging.error(T('Error loading %s, corrupt file detected'),
                          os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))

    # We need to do a repair in case of old-style pickles
    if not cfg.converted_nzo_pickles():
        for nzo_id in nzo_ids:
            job_folder, admin_id = os.path.split(nzo_id)
            admin_path = get_admin_path(job_folder, future=False)
            # This will update them but preserve queue-order
            if os.path.exists(os.path.join(admin_path, admin_id)):
                self.repair_job(os.path.dirname(admin_path))

        # Remove any future-jobs, we can't save those
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            remove_file(item)

        # Done converting
        cfg.converted_nzo_pickles.set(True)
        sabnzbd.config.save_config()
        nzo_ids = []
    return nzo_ids
def read_queue(self, repair):
    """ Read queue from disk, supporting repair modes
        0 = no repairs
        1 = use existing queue, add missing "incomplete" folders
        2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        if not data:
            try:
                # Try previous queue file (old naming scheme)
                queue_vers, nzo_ids, dummy = sabnzbd.load_admin(QUEUE_FILE_TMPL % '9')
            except:
                nzo_ids = []
            if nzo_ids:
                # An old queue exists: tell the user, but don't load it here
                logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
                nzo_ids = []
        else:
            try:
                queue_vers, nzo_ids, dummy = data
                if not queue_vers == QUEUE_VERSION:
                    nzo_ids = []
                    logging.error(T('Incompatible queuefile found, cannot proceed'))
                    if not repair:
                        panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                        exit_sab(2)
            except ValueError:
                # Tuple unpack failed: the queuefile is corrupt
                nzo_ids = []
                logging.error(T('Error loading %s, corrupt file detected'),
                              os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                if not repair:
                    return

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        folder, _id = os.path.split(nzo_id)
        # Try as normal job
        path = get_admin_path(folder, False)
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job
            path = get_admin_path(folder, True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        self.scan_jobs(not folders)

    # Handle any lost future jobs
    for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
        path, nzo_id = os.path.split(item)
        if nzo_id not in self.__nzo_table:
            if nzo_id.startswith('SABnzbd_nzo'):
                nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                if nzo:
                    self.add(nzo, save=True)
            else:
                # Stray file in the future-queue folder: best-effort removal
                try:
                    os.remove(item)
                except:
                    pass
def read_queue(self, repair):
    """ Read queue from disk, supporting repair modes
        0 = no repairs
        1 = use existing queue, add missing "incomplete" folders
        2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        if not data:
            # No current queue file; probe for a pre-conversion queue file
            try:
                queue_vers, nzo_ids, dummy = sabnzbd.load_admin(QUEUE_FILE_TMPL % '9')
            except:
                nzo_ids = []
            if nzo_ids:
                # Don't load the old queue, just tell the user it is there
                logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
                nzo_ids = []
        else:
            try:
                queue_vers, nzo_ids, dummy = data
                if queue_vers != QUEUE_VERSION:
                    nzo_ids = []
                    logging.error(T('Incompatible queuefile found, cannot proceed'))
                    if not repair:
                        panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                        exit_sab(2)
            except ValueError:
                # Unpack failed: the queuefile is corrupt
                nzo_ids = []
                logging.error(T('Error loading %s, corrupt file detected'),
                              os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                if not repair:
                    return

    # Restore every job listed in the queue file, preserving order
    restored = []
    for nzo_id in nzo_ids:
        job_folder, admin_id = os.path.split(nzo_id)
        # Try as a normal job first, fall back to a future job
        nzo = sabnzbd.load_data(admin_id, get_admin_path(job_folder, False), remove=False)
        if not nzo:
            nzo = sabnzbd.load_data(admin_id, get_admin_path(job_folder, True))
        if nzo:
            self.add(nzo, save=False, quiet=True)
            restored.append(job_folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        self.scan_jobs(not restored)

    # Recover or clean up leftover future jobs
    for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
        path, nzo_id = os.path.split(item)
        if nzo_id in self.__nzo_table:
            continue
        if nzo_id.startswith('SABnzbd_nzo'):
            nzo = sabnzbd.load_data(nzo_id, path, remove=True)
            if nzo:
                self.add(nzo, save=True)
        else:
            try:
                os.remove(item)
            except:
                pass