def read_queue(self, repair):
    """Read queue from disk, supporting repair modes
    0 = no repairs
    1 = use existing queue, add missing "incomplete" folders
    2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Try to process the queue file
        try:
            data = sabnzbd.load_admin(QUEUE_FILE_NAME)
            if data:
                # Queue admin is a 3-tuple: (version, list of nzo_ids, <unused>)
                queue_vers, nzo_ids, _ = data
                if not queue_vers == QUEUE_VERSION:
                    nzo_ids = []
                    logging.error(T("Incompatible queuefile found, cannot proceed"))
                    if not repair:
                        panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                        exit_sab(2)
        except Exception:
            # BUGFIX: this was a bare "except:", which also swallowed the
            # SystemExit presumably raised by exit_sab(2) above, so a fatally
            # incompatible queue never actually stopped the program.
            # "except Exception" still catches corrupt-pickle errors while
            # letting SystemExit/KeyboardInterrupt propagate.
            nzo_ids = []
            logging.error(
                T("Error loading %s, corrupt file detected"),
                os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME),
            )

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        folder, _id = os.path.split(nzo_id)
        path = get_admin_path(folder, future=False)

        # Try as normal job
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job
            path = get_admin_path(folder, future=True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        logging.info("Starting queue repair")
        self.scan_jobs(not folders)
        # Handle any lost future jobs
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            path, nzo_id = os.path.split(item)
            if nzo_id not in self.__nzo_table:
                if nzo_id.startswith("SABnzbd_nzo"):
                    nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                    if nzo:
                        self.add(nzo, save=True)
                else:
                    # Stray file in the future-queue folder: best-effort removal,
                    # narrowed from a bare "except:" to filesystem errors only
                    try:
                        remove_file(item)
                    except OSError:
                        pass
def read_queue(self, repair):
    """
    Read queue from disk, supporting repair modes
    0 = no repairs
    1 = use existing queue, add missing "incomplete" folders
    2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Attempt to restore the queue listing from its admin file
        queue_file = os.path.join(cfg.cache_dir.get_path(), QUEUE_FILE_NAME)
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        if data:
            try:
                # Admin data is (version, list of nzo_ids, <unused>)
                stored_version, nzo_ids, _unused = data
                if stored_version != QUEUE_VERSION:
                    nzo_ids = []
                    logging.error(Ta('Incompatible queuefile found, cannot proceed'))
                    if not repair:
                        panic_queue(queue_file)
                        exit_sab(2)
            except ValueError:
                # Unpacking failed, so the admin file is corrupt
                nzo_ids = []
                logging.error(Ta('Error loading %s, corrupt file detected'), queue_file)
                if not repair:
                    return

    # Restore the jobs named in the queue file, preserving their order
    restored_folders = []
    for nzo_id in nzo_ids:
        folder, admin_name = os.path.split(nzo_id)
        # Look in the regular admin location first, then among future jobs
        nzo = sabnzbd.load_data(admin_name, get_admin_path(bool(folder), folder, False), remove=False)
        if not nzo:
            nzo = sabnzbd.load_data(admin_name, get_admin_path(bool(folder), folder, True))
        if nzo:
            self.add(nzo, save=False, quiet=True)
            restored_folders.append(folder)

    if repair:
        # Pick up any "incomplete" folders the queue file did not mention
        self.scan_jobs(not restored_folders)
        # Recover future jobs that lost their queue entry
        future_dir = os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)
        for entry in globber(future_dir):
            admin_dir, nzo_id = os.path.split(entry)
            if nzo_id not in self.__nzo_table:
                nzo = sabnzbd.load_data(nzo_id, admin_dir, remove=True)
                if nzo:
                    self.add(nzo, save=True)
def read_queue(self, repair):
    """
    Read queue from disk, supporting repair modes
    0 = no repairs
    1 = use existing queue, add missing "incomplete" folders
    2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Read the queue from the saved files
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        if data:
            try:
                # Queue admin is a 3-tuple: (version, list of nzo_ids, <unused>)
                queue_vers, nzo_ids, dummy = data
                if not queue_vers == QUEUE_VERSION:
                    # Version mismatch: drop the ids; abort unless repairing
                    nzo_ids = []
                    logging.error(Ta("Incompatible queuefile found, cannot proceed"))
                    if not repair:
                        panic_queue(os.path.join(cfg.cache_dir.get_path(), QUEUE_FILE_NAME))
                        exit_sab(2)
            except ValueError:
                # Unpacking failed, so the admin file is corrupt
                nzo_ids = []
                logging.error(
                    Ta("Error loading %s, corrupt file detected"),
                    os.path.join(cfg.cache_dir.get_path(), QUEUE_FILE_NAME),
                )
                if not repair:
                    return

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        folder, _id = os.path.split(nzo_id)

        # Try as normal job
        path = get_admin_path(bool(folder), folder, False)
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job
            path = get_admin_path(bool(folder), folder, True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        self.scan_jobs(not folders)
        # Handle any lost future jobs
        for path in globber(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            path, nzo_id = os.path.split(path)
            if nzo_id not in self.__nzo_table:
                nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                if nzo:
                    self.add(nzo, save=True)
def check_compatibility(self, data, repair=False):
    """Do compatibility checks on the loaded queue data.

    Args:
        data: tuple loaded from the queue admin file, expected to unpack as
            (queue_version, nzo_ids, <unused>); falsy when nothing was loaded.
        repair: when True, an incompatible queue is simply discarded instead
            of triggering panic_queue()/exit_sab(). BUGFIX: this used to be
            read as an undefined name, raising NameError whenever an
            incompatible queuefile was found; it is now a real parameter with
            a backward-compatible default (False = strict, original intent).

    Returns:
        List of nzo_id strings still considered valid (may be empty).
    """
    nzo_ids = []
    if not data:
        # Warn about old queue
        if sabnzbd.OLD_QUEUE and cfg.warned_old_queue() < QUEUE_VERSION:
            logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
            cfg.warned_old_queue.set(QUEUE_VERSION)
            sabnzbd.config.save_config()
    else:
        # Try to process
        try:
            queue_vers, nzo_ids, dummy = data
            if not queue_vers == QUEUE_VERSION:
                nzo_ids = []
                logging.error(T('Incompatible queuefile found, cannot proceed'))
                if not repair:
                    panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                    exit_sab(2)
        except ValueError:
            # Unpacking failed, so the admin file is corrupt
            nzo_ids = []
            logging.error(T('Error loading %s, corrupt file detected'),
                          os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))

    # We need to do a repair in case of old-style pickles
    if not cfg.converted_nzo_pickles():
        for nzo_id in nzo_ids:
            folder, _id = os.path.split(nzo_id)
            path = get_admin_path(folder, future=False)
            # This will update them but preserve queue-order
            # (redundant trailing "continue" removed; it was the last
            # statement of the loop body and had no effect)
            if os.path.exists(os.path.join(path, _id)):
                self.repair_job(os.path.dirname(path))

        # Remove any future-jobs, we can't save those
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            os.remove(item)

        # Done converting
        cfg.converted_nzo_pickles.set(True)
        sabnzbd.config.save_config()
        nzo_ids = []
    return nzo_ids
def check_compatibility(self, repair, data):
    """Do compatibility checks on the loaded data.

    repair: truthy when running in repair mode; suppresses panic/exit on an
            incompatible queuefile.
    data:   tuple from the queue admin file, expected to unpack as
            (queue_version, nzo_ids, <unused>); falsy when nothing was loaded.
    Returns the list of nzo_id strings still considered valid (may be empty).
    """
    nzo_ids = []
    if not data:
        # Warn about old queue
        if sabnzbd.OLD_QUEUE and cfg.warned_old_queue() < QUEUE_VERSION:
            logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
            # Remember that we warned, so the message is shown only once per version
            cfg.warned_old_queue.set(QUEUE_VERSION)
            sabnzbd.config.save_config()
    else:
        # Try to process
        try:
            # Queue admin is a 3-tuple: (version, list of nzo_ids, <unused>)
            queue_vers, nzo_ids, dummy = data
            if not queue_vers == QUEUE_VERSION:
                nzo_ids = []
                logging.error(T('Incompatible queuefile found, cannot proceed'))
                if not repair:
                    panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                    exit_sab(2)
        except ValueError:
            # Unpacking failed, so the admin file is corrupt
            nzo_ids = []
            logging.error(T('Error loading %s, corrupt file detected'),
                          os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))

    # We need to do a repair in case of old-style pickles
    if not cfg.converted_nzo_pickles():
        for nzo_id in nzo_ids:
            folder, _id = os.path.split(nzo_id)
            path = get_admin_path(folder, future=False)
            # This will update them but preserve queue-order
            if os.path.exists(os.path.join(path, _id)):
                self.repair_job(os.path.dirname(path))
                continue
        # Remove any future-jobs, we can't save those
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            remove_file(item)
        # Done converting
        cfg.converted_nzo_pickles.set(True)
        sabnzbd.config.save_config()
        nzo_ids = []
    return nzo_ids
def read_queue(self, repair):
    """
    Read queue from disk, supporting repair modes
    0 = no repairs
    1 = use existing queue, add missing "incomplete" folders
    2 = Discard all queue admin, reconstruct from "incomplete" folders
    """
    nzo_ids = []
    if repair < 2:
        # Read the queue from the saved files
        data = sabnzbd.load_admin(QUEUE_FILE_NAME)
        if not data:
            # Nothing under the current name: probe for a previous-format
            # queue file so we can at least warn the user about it
            try:
                # Try previous queue file
                queue_vers, nzo_ids, dummy = sabnzbd.load_admin(QUEUE_FILE_TMPL % '9')
            except Exception:
                # BUGFIX: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit. "except Exception" still covers
                # the TypeError/ValueError raised when load_admin returns
                # None or a malformed tuple.
                nzo_ids = []
            if nzo_ids:
                logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
                nzo_ids = []
        else:
            try:
                # Queue admin is a 3-tuple: (version, list of nzo_ids, <unused>)
                queue_vers, nzo_ids, dummy = data
                if not queue_vers == QUEUE_VERSION:
                    nzo_ids = []
                    logging.error(T('Incompatible queuefile found, cannot proceed'))
                    if not repair:
                        panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                        exit_sab(2)
            except ValueError:
                # Unpacking failed, so the admin file is corrupt
                nzo_ids = []
                logging.error(T('Error loading %s, corrupt file detected'),
                              os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                if not repair:
                    return

    # First handle jobs in the queue file
    folders = []
    for nzo_id in nzo_ids:
        folder, _id = os.path.split(nzo_id)

        # Try as normal job
        path = get_admin_path(folder, False)
        nzo = sabnzbd.load_data(_id, path, remove=False)
        if not nzo:
            # Try as future job
            path = get_admin_path(folder, True)
            nzo = sabnzbd.load_data(_id, path)
        if nzo:
            self.add(nzo, save=False, quiet=True)
            folders.append(folder)

    # Scan for any folders in "incomplete" that are not yet in the queue
    if repair:
        self.scan_jobs(not folders)
        # Handle any lost future jobs
        for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
            path, nzo_id = os.path.split(item)
            if nzo_id not in self.__nzo_table:
                if nzo_id.startswith('SABnzbd_nzo'):
                    nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                    if nzo:
                        self.add(nzo, save=True)
                else:
                    # Stray file in the future-queue folder: best-effort removal,
                    # narrowed from a bare "except:" to filesystem errors only
                    try:
                        os.remove(item)
                    except OSError:
                        pass