Example #1
    def read_queue(self, repair):
        """ Read queue from disk, supporting repair modes
            0 = no repairs
            1 = use existing queue, add missing "incomplete" folders
            2 = Discard all queue admin, reconstruct from "incomplete" folders
        """
        nzo_ids = []
        if repair < 2:
            # Read the queue from the saved files
            data = sabnzbd.load_admin(QUEUE_FILE_NAME)
            if data:
                try:
                    queue_vers, nzo_ids, dummy = data
                    if not queue_vers == QUEUE_VERSION:
                        nzo_ids = []
                        logging.error(Ta("Incompatible queuefile found, cannot proceed"))
                        if not repair:
                            panic_queue(os.path.join(cfg.cache_dir.get_path(), QUEUE_FILE_NAME))
                            exit_sab(2)
                except ValueError:
                    nzo_ids = []
                    logging.error(
                        Ta("Error loading %s, corrupt file detected"),
                        os.path.join(cfg.cache_dir.get_path(), QUEUE_FILE_NAME),
                    )
                    if not repair:
                        return

        # First handle jobs in the queue file
        folders = []
        for nzo_id in nzo_ids:
            folder, _id = os.path.split(nzo_id)
            # Try as normal job
            path = get_admin_path(bool(folder), folder, False)
            nzo = sabnzbd.load_data(_id, path, remove=False)
            if not nzo:
                # Try as future job
                path = get_admin_path(bool(folder), folder, True)
                nzo = sabnzbd.load_data(_id, path)
            if nzo:
                self.add(nzo, save=False, quiet=True)
                folders.append(folder)

        # Scan for any folders in "incomplete" that are not yet in the queue
        if repair:
            self.scan_jobs(not folders)
            # Handle any lost future jobs
            for path in globber(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
                path, nzo_id = os.path.split(path)
                if nzo_id not in self.__nzo_table:
                    nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                    if nzo:
                        self.add(nzo, save=True)
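
Example #1 treats the loaded queue admin as a (queue_vers, nzo_ids, dummy) tuple and falls back to an empty id list on a version mismatch or a ValueError from unpacking. Below is a minimal, self-contained sketch of that validation step; QUEUE_VERSION's value and the helper name are illustrative stand-ins, not SABnzbd's actual API.

import logging

QUEUE_VERSION = 10  # assumed value, for illustration only


def extract_queue_ids(data):
    """Return the job id list from a loaded queue tuple, or [] when the data
    is missing, written by an incompatible version, or structurally corrupt."""
    if not data:
        return []
    try:
        queue_vers, nzo_ids, _dummy = data
    except ValueError:
        # Wrong tuple shape: the queue file is corrupt
        logging.error("Corrupt queue data detected")
        return []
    if queue_vers != QUEUE_VERSION:
        logging.error("Incompatible queue version: %s", queue_vers)
        return []
    return nzo_ids


# A well-formed tuple yields its ids; a truncated tuple is rejected
print(extract_queue_ids((QUEUE_VERSION, ["folder/SABnzbd_nzo_abc"], None)))
print(extract_queue_ids(("garbage",)))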
Example #2
    def read_queue(self, repair):
        """ Read queue from disk, supporting repair modes
            0 = no repairs
            1 = use existing queue, add missing "incomplete" folders
            2 = Discard all queue admin, reconstruct from "incomplete" folders
        """
        nzo_ids = []
        if repair < 2:
            # Read the queue from the saved files
            data = sabnzbd.load_admin(QUEUE_FILE_NAME)

            # Process the data and check compatibility
            nzo_ids = self.check_compatibility(repair, data)

        # First handle jobs in the queue file
        folders = []
        for nzo_id in nzo_ids:
            folder, _id = os.path.split(nzo_id)
            path = get_admin_path(folder, future=False)

            # Try as normal job
            nzo = sabnzbd.load_data(_id, path, remove=False)
            if not nzo:
                # Try as future job
                path = get_admin_path(folder, future=True)
                nzo = sabnzbd.load_data(_id, path)
            if nzo:
                self.add(nzo, save=False, quiet=True)
                folders.append(folder)

        # Scan for any folders in "incomplete" that are not yet in the queue
        if repair:
            self.scan_jobs(not folders)
            # Handle any lost future jobs
            for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
                path, nzo_id = os.path.split(item)
                if nzo_id not in self.__nzo_table:
                    if nzo_id.startswith('SABnzbd_nzo'):
                        nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                        if nzo:
                            self.add(nzo, save=True)
                    else:
                        try:
                            remove_file(item)
                        except:
                            pass
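
The loop over globber_full() results in Example #2 keeps only admin files whose name starts with "SABnzbd_nzo" and deletes anything else found in the future-queue folder. Below is a hedged sketch of that filter pattern using only the standard library; glob and os.remove stand in for SABnzbd's globber_full() and remove_file(), and the directory layout is hypothetical.

import glob
import os


def recover_future_jobs(future_dir, known_ids):
    """Yield paths that look like saved jobs and are not yet queued;
    silently remove stray files, mirroring the loop in Example #2."""
    for item in glob.glob(os.path.join(future_dir, "*")):
        nzo_id = os.path.basename(item)
        if nzo_id in known_ids:
            continue  # already present in the queue table
        if nzo_id.startswith("SABnzbd_nzo"):
            yield item  # candidate for reloading via load_data()
        else:
            try:
                os.remove(item)  # stray file: clean it up
            except OSError:
                pass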
Example #3
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add)
    """
    assert isinstance(nzo, sabnzbd.nzbstuff.NzbObject)
    filename = nzo.final_name
    growler.send_notification(T('Post-processing'), nzo.final_name, 'pp')
    logging.info('Par2 check starting on %s', filename)

    ## Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath, remove=False) or {}

    ## Collect the par files
    if nzo.partable:
        par_table = nzo.partable.copy()
    else:
        par_table = {}
    repair_sets = par_table.keys()

    re_add = False
    par_error = False
    single = len(repair_sets) == 1

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() > 0 and 'sample' in setname.lower():
                continue
            if not verified.get(setname, False):
                logging.info("Running repair on set %s", setname)
                parfile_nzf = par_table[setname]
                if os.path.exists(os.path.join(nzo.downpath, parfile_nzf.filename)) or parfile_nzf.extrapars:
                    need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, setname, single=single)
                    re_add = re_add or need_re_add
                    if not res and not need_re_add and cfg.sfv_check():
                        res = try_sfv_check(nzo, workdir, setname)
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res
    else:
        logging.info("No par2 sets for %s", filename)
        nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % unicoder(filename))
        if cfg.sfv_check():
            par_error = not try_sfv_check(nzo, workdir, '')
            verified[''] = not par_error

    if re_add:
        logging.info('Readded %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        sabnzbd.nzbqueue.add_nzo(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Par2 check finished on %s', filename)
    return par_error, re_add
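
A key detail in parring() is the verified dictionary: it is reloaded with remove=False, consulted per set so already-verified sets are skipped on a second pass, and written back with save_data() at the end. Here is a minimal sketch of that bookkeeping, with plain pickle files standing in for sabnzbd.load_data()/save_data(); the file name and helper names are illustrative only.

import os
import pickle

VERIFIED_FILE = "__verified__"  # assumed file name, for illustration


def load_verified(workpath):
    try:
        with open(os.path.join(workpath, VERIFIED_FILE), "rb") as f:
            return pickle.load(f)
    except (OSError, pickle.PickleError):
        return {}


def save_verified(workpath, verified):
    with open(os.path.join(workpath, VERIFIED_FILE), "wb") as f:
        pickle.dump(verified, f)


def verify_sets(workpath, repair_sets, verify_one):
    """Run verify_one(setname) -> bool only for sets not yet verified;
    persist the results so a later pass can skip them."""
    verified = load_verified(workpath)
    par_error = False
    for setname in repair_sets:
        if verified.get(setname, False):
            continue  # verified on an earlier pass
        res = verify_one(setname)
        verified[setname] = res
        par_error = par_error or not res
    save_verified(workpath, verified)
    return par_error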
Example #4
    def load_article(self, article):
        data = None
        nzo = article.nzf.nzo

        if article in self.__article_list:
            data = self.__article_table.pop(article)
            self.__article_list.remove(article)
            self.__cache_size -= len(data)
        elif article.art_id:
            data = sabnzbd.load_data(article.art_id, nzo.workpath, remove=True,
                                     do_pickle=False, silent=True)

        if article in nzo.saved_articles:
            nzo.remove_saved_article(article)

        return data
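
The lookup order in load_article() is: in-memory cache first (popping the entry and shrinking the byte counter), then disk via sabnzbd.load_data(..., remove=True) when the article was flushed out earlier. Below is a self-contained sketch of that cache-then-disk pattern with generic names; it is not the real ArticleCache interface.

class TinyArticleCache:
    """Generic cache-then-disk lookup mirroring load_article() above."""

    def __init__(self, disk_loader):
        self._table = {}                 # key -> cached bytes
        self._size = 0                   # running total of cached bytes
        self._disk_loader = disk_loader  # stands in for load_data(remove=True)

    def store(self, key, data):
        self._table[key] = data
        self._size += len(data)

    def load(self, key):
        if key in self._table:
            data = self._table.pop(key)  # reading evicts the entry
            self._size -= len(data)
            return data
        # Not in memory: fall back to disk (which also deletes the file)
        return self._disk_loader(key)


cache = TinyArticleCache(disk_loader=lambda key: None)
cache.store("article-1", b"yenc data")
print(cache.load("article-1"))  # b'yenc data', now evicted
print(cache.load("article-1"))  # None: neither cached nor on disk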
Example #5
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add) """
    filename = nzo.final_name
    notifier.send_notification(T('Post-processing'), filename, 'pp', nzo.cat)
    logging.info('Starting verification and repair of %s', filename)

    # Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath, remove=False) or {}
    repair_sets = nzo.extrapars.keys()

    re_add = False
    par_error = False
    single = len(repair_sets) == 1

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
                continue
            if not verified.get(setname, False):
                logging.info("Running verification and repair on set %s", setname)
                parfile_nzf = nzo.partable[setname]

                # Check if file maybe wasn't deleted and if we maybe have more files in the parset
                if os.path.exists(os.path.join(nzo.downpath, parfile_nzf.filename)) or nzo.extrapars[setname]:
                    need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, setname, single=single)

                    # Was it aborted?
                    if not nzo.pp_active:
                        re_add = False
                        par_error = True
                        break

                    re_add = re_add or need_re_add
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res

    else:
        # We must not have found any par2..
        logging.info("No par2 sets for %s", filename)
        nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % unicoder(filename))
        if cfg.sfv_check() and not verified.get('', False):
            par_error = not try_sfv_check(nzo, workdir, '')
            verified[''] = not par_error
        # If still no success, do RAR-check
        if not par_error and cfg.enable_unrar():
            par_error = not try_rar_check(nzo, workdir, '')
            verified[''] = not par_error

    if re_add:
        logging.info('Re-added %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        nzo.status = Status.FETCHING
        sabnzbd.nzbqueue.NzbQueue.do.add(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Verification and repair finished for %s', filename)
    return par_error, re_add
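
Compared with Example #3, this version checks nzo.pp_active after every par2_repair() call and breaks out with par_error = True and re_add = False when post-processing was aborted mid-set. Below is a small sketch of that abort-aware loop shape; the callables stand in for par2_repair() and the pp_active flag.

def repair_all(repair_sets, repair_one, is_active):
    """Return (par_error, re_add); is_active() stands in for nzo.pp_active."""
    re_add = False
    par_error = False
    for setname in repair_sets:
        need_re_add, res = repair_one(setname)
        if not is_active():
            # Aborted mid-repair: flag an error and do not re-queue
            return True, False
        re_add = re_add or need_re_add
        par_error = par_error or not res
    return par_error, re_add


# Example: the second set fails, nothing asks for a re-download
print(repair_all(["set1", "set2"],
                 repair_one=lambda s: (False, s == "set1"),
                 is_active=lambda: True))  # (True, False)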
Example #6
def all_verified(path):
    """ Return True when all sets have been successfully verified """
    verified = sabnzbd.load_data(VERIFIED_FILE, path, remove=False) or {'x': False}
    return all(verified[x] for x in verified)
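
The or {'x': False} fallback is what makes all_verified() safe when no verification data was ever saved: the sentinel entry forces all() to return False instead of the vacuous True an empty dict would give. A quick illustration of that behaviour:

def all_verified_like(loaded):
    """Same logic as all_verified(), but taking the loaded dict directly."""
    verified = loaded or {'x': False}
    return all(verified[x] for x in verified)


print(all_verified_like(None))                           # False: nothing saved yet
print(all_verified_like({'set1': True, 'set2': False}))  # False: one set failed
print(all_verified_like({'set1': True, 'set2': True}))   # True: everything verified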
Example #7
    def read_queue(self, repair):
        """ Read queue from disk, supporting repair modes
            0 = no repairs
            1 = use existing queue, add missing "incomplete" folders
            2 = Discard all queue admin, reconstruct from "incomplete" folders
        """
        nzo_ids = []
        if repair < 2:
            # Read the queue from the saved files
            data = sabnzbd.load_admin(QUEUE_FILE_NAME)
            if not data:
                try:
                    # Try previous queue file
                    queue_vers, nzo_ids, dummy = sabnzbd.load_admin(QUEUE_FILE_TMPL % '9')
                except:
                    nzo_ids = []
                if nzo_ids:
                    logging.warning(T('Old queue detected, use Status->Repair to convert the queue'))
                    nzo_ids = []
            else:
                try:
                    queue_vers, nzo_ids, dummy = data
                    if not queue_vers == QUEUE_VERSION:
                        nzo_ids = []
                        logging.error(T('Incompatible queuefile found, cannot proceed'))
                        if not repair:
                            panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                            exit_sab(2)
                except ValueError:
                    nzo_ids = []
                    logging.error(T('Error loading %s, corrupt file detected'),
                                  os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
                    if not repair:
                        return

        # First handle jobs in the queue file
        folders = []
        for nzo_id in nzo_ids:
            folder, _id = os.path.split(nzo_id)
            # Try as normal job
            path = get_admin_path(folder, False)
            nzo = sabnzbd.load_data(_id, path, remove=False)
            if not nzo:
                # Try as future job
                path = get_admin_path(folder, True)
                nzo = sabnzbd.load_data(_id, path)
            if nzo:
                self.add(nzo, save=False, quiet=True)
                folders.append(folder)

        # Scan for any folders in "incomplete" that are not yet in the queue
        if repair:
            self.scan_jobs(not folders)
            # Handle any lost future jobs
            for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
                path, nzo_id = os.path.split(item)
                if nzo_id not in self.__nzo_table:
                    if nzo_id.startswith('SABnzbd_nzo'):
                        nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                        if nzo:
                            self.add(nzo, save=True)
                    else:
                        try:
                            os.remove(item)
                        except:
                            pass
Example #8
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add) """
    filename = nzo.final_name
    notifier.send_notification(T('Post-processing'), filename, 'pp')
    logging.info('Starting verification and repair of %s', filename)

    # Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath, remove=False) or {}
    repair_sets = nzo.partable.keys()

    re_add = False
    par_error = False
    single = len(repair_sets) == 1

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
                continue
            if not verified.get(setname, False):
                logging.info("Running verification and repair on set %s", setname)
                parfile_nzf = nzo.partable[setname]
                if os.path.exists(os.path.join(nzo.downpath, parfile_nzf.filename)) or parfile_nzf.extrapars:
                    need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, setname, single=single)

                    # Was it aborted?
                    if not nzo.pp_active:
                        re_add = False
                        par_error = True
                        break

                    re_add = re_add or need_re_add
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res
    else:
        # Obfuscated par2 check
        logging.info('No par2 sets found, running obfuscated check on %s', filename)

        # Get the NZF's and sort them based on size
        nzfs_sorted = sorted(nzo.finished_files, key=lambda x: x.bytes)

        # We will have to make 'fake' par files that are recognized
        par2_vol = 0
        par2_filename = None

        for nzf_try in nzfs_sorted:
            # run through list of files, looking for par2 signature..
            logging.debug("Checking par2 signature of %s", nzf_try.filename)
            try:
                nzf_path = os.path.join(workdir, nzf_try.filename)
                if is_parfile(nzf_path):
                    # We need 1 base-name so they are recognized as 1 set
                    if not par2_filename:
                        par2_filename = nzf_path

                    # Rename so handle_par2() picks it up
                    newpath = '%s.vol%d+%d.par2' % (par2_filename, par2_vol, par2_vol + 1)
                    renamer(nzf_path, newpath)
                    nzf_try.filename = os.path.split(newpath)[1]

                    # Let the magic happen
                    nzo.handle_par2(nzf_try, file_done=True)
                    par2_vol += 1
            except:
                pass
        if par2_vol > 0:
            # Pars found, we do it again
            par_error, re_add = parring(nzo, workdir)
        else:
            # We must not have found any par2..
            logging.info("No par2 sets for %s", filename)
            nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % unicoder(filename))
            if cfg.sfv_check() and not verified.get('', False):
                par_error = not try_sfv_check(nzo, workdir, '')
                verified[''] = not par_error
            # If still no success, do RAR-check
            if not par_error and cfg.enable_unrar():
                par_error = not try_rar_check(nzo, workdir, '')
                verified[''] = not par_error
    if re_add:
        logging.info('Re-added %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        sabnzbd.nzbqueue.add_nzo(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Verification and repair finished for %s', filename)
    return par_error, re_add
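
When no named par2 sets exist, this version walks the finished files smallest-first and renames every file that passes is_parfile() to a synthetic "<base>.volN+M.par2" name, so handle_par2() groups them into a single set before parring() recurses. The sketch below covers only the naming scheme; the actual par2 detection and renaming are left to SABnzbd's is_parfile() and renamer().

import os


def synthetic_par2_names(detected_paths):
    """Map each detected (possibly obfuscated) par2 file to a
    '<base>.volN+M.par2' name sharing one base, as in the loop above."""
    renames = {}
    base = None
    for vol, path in enumerate(detected_paths):
        if base is None:
            base = path  # the first hit fixes the shared base name
        renames[path] = '%s.vol%d+%d.par2' % (base, vol, vol + 1)
    return renames


hits = ['/tmp/job/abc123', '/tmp/job/def456']
for old, new in synthetic_par2_names(hits).items():
    print(old, '->', os.path.basename(new))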
Example #9
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add) """
    filename = nzo.final_name
    notifier.send_notification(T('Post-processing'), filename, 'pp', nzo.cat)
    logging.info('Starting verification and repair of %s', filename)

    # Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath,
                                 remove=False) or {}
    repair_sets = nzo.extrapars.keys()

    re_add = False
    par_error = False
    single = len(repair_sets) == 1

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
                continue
            if not verified.get(setname, False):
                logging.info("Running verification and repair on set %s",
                             setname)
                parfile_nzf = nzo.partable[setname]

                # Check if file maybe wasn't deleted and if we maybe have more files in the parset
                if os.path.exists(
                        os.path.join(
                            nzo.downpath,
                            parfile_nzf.filename)) or nzo.extrapars[setname]:
                    need_re_add, res = par2_repair(parfile_nzf,
                                                   nzo,
                                                   workdir,
                                                   setname,
                                                   single=single)

                    # Was it aborted?
                    if not nzo.pp_active:
                        re_add = False
                        par_error = True
                        break

                    re_add = re_add or need_re_add
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res

    else:
        # We must not have found any par2..
        logging.info("No par2 sets for %s", filename)
        nzo.set_unpack_info('Repair',
                            T('[%s] No par2 sets') % unicoder(filename))
        if cfg.sfv_check() and not verified.get('', False):
            par_error = not try_sfv_check(nzo, workdir, '')
            verified[''] = not par_error
        # If still no success, do RAR-check
        if not par_error and cfg.enable_unrar():
            par_error = not try_rar_check(nzo, workdir, '')
            verified[''] = not par_error

    if re_add:
        logging.info('Re-added %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        nzo.status = Status.FETCHING
        sabnzbd.nzbqueue.NzbQueue.do.add(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Verification and repair finished for %s', filename)
    return par_error, re_add
Example #10
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add) """
    filename = nzo.final_name
    notifier.send_notification(T('Post-processing'), filename, 'pp', nzo.cat)
    logging.info('Starting verification and repair of %s', filename)

    # Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath,
                                 remove=False) or {}
    repair_sets = nzo.partable.keys()

    re_add = False
    par_error = False
    single = len(repair_sets) == 1

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
                continue
            if not verified.get(setname, False):
                logging.info("Running verification and repair on set %s",
                             setname)
                parfile_nzf = nzo.partable[setname]
                if os.path.exists(
                        os.path.join(
                            nzo.downpath,
                            parfile_nzf.filename)) or parfile_nzf.extrapars:
                    need_re_add, res = par2_repair(parfile_nzf,
                                                   nzo,
                                                   workdir,
                                                   setname,
                                                   single=single)

                    # Was it aborted?
                    if not nzo.pp_active:
                        re_add = False
                        par_error = True
                        break

                    re_add = re_add or need_re_add
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res
    else:
        # Obfuscated par2 check
        logging.info('No par2 sets found, running obfuscated check on %s',
                     filename)

        # Get the NZF's and sort them based on size
        nzfs_sorted = sorted(nzo.finished_files, key=lambda x: x.bytes)

        # We will have to make 'fake' par files that are recognized
        par2_vol = 0
        par2_filename = None

        for nzf_try in nzfs_sorted:
            # run through list of files, looking for par2 signature..
            logging.debug("Checking par2 signature of %s", nzf_try.filename)
            try:
                nzf_path = os.path.join(workdir, nzf_try.filename)
                if (is_parfile(nzf_path)):
                    # We need 1 base-name so they are recognized as 1 set
                    if not par2_filename:
                        par2_filename = nzf_path

                    # Rename so handle_par2() picks it up
                    newpath = '%s.vol%d+%d.par2' % (par2_filename, par2_vol,
                                                    par2_vol + 1)
                    renamer(nzf_path, newpath)
                    nzf_try.filename = os.path.split(newpath)[1]

                    # Let the magic happen
                    nzo.handle_par2(nzf_try, file_done=True)
                    par2_vol += 1
            except:
                pass
        if par2_vol > 0:
            # Pars found, we do it again
            par_error, re_add = parring(nzo, workdir)
        else:
            # We must not have found any par2..
            logging.info("No par2 sets for %s", filename)
            nzo.set_unpack_info('Repair',
                                T('[%s] No par2 sets') % unicoder(filename))
            if cfg.sfv_check() and not verified.get('', False):
                par_error = not try_sfv_check(nzo, workdir, '')
                verified[''] = not par_error
            # If still no success, do RAR-check
            if not par_error and cfg.enable_unrar():
                par_error = not try_rar_check(nzo, workdir, '')
                verified[''] = not par_error
    if re_add:
        logging.info('Re-added %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        sabnzbd.nzbqueue.NzbQueue.do.add(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Verification and repair finished for %s', filename)
    return par_error, re_add
Example #11
    def read_queue(self, repair):
        """ Read queue from disk, supporting repair modes
            0 = no repairs
            1 = use existing queue, add missing "incomplete" folders
            2 = Discard all queue admin, reconstruct from "incomplete" folders
        """
        nzo_ids = []
        if repair < 2:
            # Try to process the queue file
            try:
                data = sabnzbd.load_admin(QUEUE_FILE_NAME)
                if data:
                    queue_vers, nzo_ids, _ = data
                    if not queue_vers == QUEUE_VERSION:
                        nzo_ids = []
                        logging.error(
                            T("Incompatible queuefile found, cannot proceed"))
                        if not repair:
                            panic_queue(
                                os.path.join(cfg.admin_dir.get_path(),
                                             QUEUE_FILE_NAME))
                            exit_sab(2)
            except:
                nzo_ids = []
                logging.error(
                    T("Error loading %s, corrupt file detected"),
                    os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME),
                )

        # First handle jobs in the queue file
        folders = []
        for nzo_id in nzo_ids:
            folder, _id = os.path.split(nzo_id)
            path = get_admin_path(folder, future=False)

            # Try as normal job
            nzo = sabnzbd.load_data(_id, path, remove=False)
            if not nzo:
                # Try as future job
                path = get_admin_path(folder, future=True)
                nzo = sabnzbd.load_data(_id, path)
            if nzo:
                self.add(nzo, save=False, quiet=True)
                folders.append(folder)

        # Scan for any folders in "incomplete" that are not yet in the queue
        if repair:
            self.scan_jobs(not folders)
            # Handle any lost future jobs
            for item in globber_full(
                    os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
                path, nzo_id = os.path.split(item)
                if nzo_id not in self.__nzo_table:
                    if nzo_id.startswith("SABnzbd_nzo"):
                        nzo = sabnzbd.load_data(nzo_id, path, remove=True)
                        if nzo:
                            self.add(nzo, save=True)
                    else:
                        try:
                            remove_file(item)
                        except:
                            pass