def try_sfv_check(nzo, workdir):
    """Attempt to verify a set using SFV file(s).

    Returns None when no SFV set is present, otherwise True/False
    depending on whether verification succeeded.
    """
    # Collect the SFV files in the working directory
    sfvs = globber_full(workdir, "*.sfv")

    # Nothing named *.sfv? Scan all files for obfuscated SFV content
    if not sfvs:
        for candidate in globber_full(workdir, "*"):
            if is_sfv_file(candidate):
                logging.debug("Found and will use obfuscated SFV file: %s", candidate)
                sfvs.append(candidate)
        if not sfvs:
            # Still no SFV available, so nothing to verify
            return None

    if not sfv_check(sfvs, nzo, workdir):
        # Verification failed: record the failure on the job
        print_sfv = [os.path.basename(sfv) for sfv in sfvs]
        fail_msg = T('Some files failed to verify against "%s"') % "; ".join(print_sfv)
        nzo.set_unpack_info("Repair", fail_msg)
        nzo.status = Status.FAILED
        nzo.fail_msg = fail_msg
        return False

    # Success
    nzo.set_unpack_info("Repair", T("Verified successfully using SFV files"))
    return True
def scan_jobs(self, all_jobs=False, action=True):
    """Scan "incomplete" for missing folders.

    'all_jobs' True: include active folders
    'action' True: perform the recovery action
    Returns the list of orphaned folders.
    """
    result = []

    # Folders from the download queue (skipped when scanning everything)
    registered = [] if all_jobs else [nzo.work_name for nzo in self.__nzo_list]

    # Retryable folders from History
    items = sabnzbd.api.build_history(output=True)[0]
    # Anything waiting or active or retryable is a known item
    for item in items:
        if item["retry"] or item["loaded"] or item["status"] == Status.QUEUED:
            registered.append(os.path.basename(item["path"]))

    # Repair unregistered folders
    for folder in globber_full(cfg.download_dir.get_path()):
        name = os.path.basename(folder)
        if os.path.isdir(folder) and name not in registered and name not in IGNORED_FOLDERS:
            if action:
                logging.info("Repairing job %s", folder)
                self.repair_job(folder)
            result.append(name)
        elif action:
            logging.info("Skipping repair for job %s", folder)
    return result
def read_queue(self, repair):
    """Read queue from disk, supporting repair modes.

    0 = no repairs
    1 = use existing queue, add missing "incomplete" folders
    2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Try to process the queue file
        try:
            data = sabnzbd.load_admin(QUEUE_FILE_NAME)
            if data:
                queue_vers, nzo_ids, _ = data
                if not queue_vers == QUEUE_VERSION:
                    nzo_ids = []
                    logging.error(T("Incompatible queuefile found, cannot proceed"))
                    if not repair:
                        panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                        exit_sab(2)
        except Exception:
            # Narrowed from a bare "except:": a bare except would also swallow
            # the SystemExit raised by exit_sab() above, preventing the
            # intended shutdown on an incompatible queuefile.
            nzo_ids = []
            logging.error(
                T("Error loading %s, corrupt file detected"),
                os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME),
            )

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        folder, _id = os.path.split(nzo_id)
        path = get_admin_path(folder, future=False)

        # Try as normal job
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job
            path = get_admin_path(folder, future=True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        logging.info("Starting queue repair")
        self.scan_jobs(not folders)
        # Handle any lost future jobs
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            path, nzo_id = os.path.split(item)
            if nzo_id not in self.__nzo_table:
                if nzo_id.startswith("SABnzbd_nzo"):
                    nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                    if nzo:
                        self.add(nzo, save=True)
                else:
                    try:
                        remove_file(item)
                    except Exception:
                        # Best-effort cleanup of stray admin files; narrowed
                        # from a bare except so KeyboardInterrupt/SystemExit
                        # still propagate.
                        pass
def send_back(self, nzo):
    """Send back job to queue after successful pre-check."""
    try:
        # First stored *.gz file is the job's NZB; IndexError when none exist
        nzb_path = globber_full(nzo.workpath, "*.gz")[0]
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are not swallowed; any lookup failure means there is nothing to
        # re-queue.
        logging.info("Failed to find NZB file after pre-check (%s)", nzo.nzo_id)
        return

    # Need to remove it first, otherwise it might still be downloading
    self.remove(nzo, add_to_history=False, cleanup=False)

    res, nzo_ids = process_single_nzb(nzo.filename, nzb_path, keep=True, reuse=nzo.downpath, nzo_id=nzo.nzo_id)
    if res == 0 and nzo_ids:
        # Reset reuse flag to make pause/abort on encryption possible
        self.__nzo_table[nzo_ids[0]].reuse = None
def repair_job(self, repair_folder, new_nzb=None, password=None):
    """Reconstruct admin for a single job folder, optionally with a new NZB.

    Returns the nzo_id of the repaired job, or None when the folder
    does not exist.
    """
    # Nothing to do for a missing folder
    if not repair_folder or not os.path.exists(repair_folder):
        return None

    name = os.path.basename(repair_folder)
    admin_path = os.path.join(repair_folder, JOB_ADMIN)

    # If Retry was used and a new NZB was uploaded
    if getattr(new_nzb, "filename", None):
        remove_all(admin_path, "*.gz", keep_folder=True)
        logging.debug("Repair job %s with new NZB (%s)", name, new_nzb.filename)
        _, nzo_ids = sabnzbd.add_nzbfile(new_nzb, nzbname=name, reuse=repair_folder, password=password)
        return nzo_ids[0]

    # Was this file already post-processed?
    verified = sabnzbd.load_data(VERIFIED_FILE, admin_path, remove=False)
    stored_nzbs = []
    if not verified or not all(verified[x] for x in verified):
        stored_nzbs = globber_full(admin_path, "*.gz")

    if stored_nzbs:
        logging.debug("Repair job %s by re-parsing stored NZB", name)
        _, nzo_ids = sabnzbd.add_nzbfile(stored_nzbs[0], nzbname=name, reuse=repair_folder, password=password)
        return nzo_ids[0]

    logging.debug("Repair job %s without stored NZB", name)
    nzo = NzbObject(name, nzbname=name, reuse=repair_folder)
    nzo.password = password
    self.add(nzo)
    return nzo.nzo_id
def send_back(self, old_nzo: NzbObject):
    """Send back job to queue after successful pre-check."""
    try:
        # First stored *.gz file is the job's NZB; IndexError when none exist
        nzb_path = globber_full(old_nzo.admin_path, "*.gz")[0]
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are not swallowed; any lookup failure means there is nothing to
        # re-queue.
        logging.info("Failed to find NZB file after pre-check (%s)", old_nzo.nzo_id)
        return

    # Store old position and create new NZO
    old_position = self.__nzo_list.index(old_nzo)
    res, nzo_ids = process_single_nzb(
        old_nzo.filename, nzb_path, keep=True, reuse=old_nzo.download_path, nzo_id=old_nzo.nzo_id
    )
    if res == 0 and nzo_ids:
        # Swap to old position
        new_nzo = self.get_nzo(nzo_ids[0])
        self.__nzo_list.remove(new_nzo)
        self.__nzo_list.insert(old_position, new_nzo)
        # Reset reuse flag to make pause/abort on encryption possible
        self.__nzo_table[nzo_ids[0]].reuse = None