Example #1
def is_cloaked(nzo, path, names):
    """ Return True if this is likely to be a cloaked encrypted post """
    fname = unicoder(get_filename(path)).lower()
    fname = os.path.splitext(fname)[0]
    for name in names:
        name = get_filename(name.lower())
        name, ext = os.path.splitext(unicoder(name))
        if ext == u'.rar' and fname.startswith(name) and (len(fname) - len(
                name)) < 8 and len(names) < 3 and not RE_SUBS.search(fname):
            # Only warn once
            if nzo.encrypted == 0:
                logging.warning(
                    T('Job "%s" is probably encrypted due to RAR with same name inside this RAR'
                      ), nzo.final_name)
                nzo.encrypted = 1
            return True
        elif 'password' in name and ext not in SAFE_EXTS:
            # Only warn once
            if nzo.encrypted == 0:
                logging.warning(
                    T('Job "%s" is probably encrypted: "password" in filename "%s"'
                      ), nzo.final_name, name)
                nzo.encrypted = 1
            return True
    return False
Example #2
def is_cloaked(path, names):
    """ Return True if this is likely to be a cloaked encrypted post """
    fname = unicoder(os.path.split(path)[1]).lower()
    for name in names:
        name = unicoder(name.lower())
        if fname == name or 'password' in name:
            return True
    return False
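
A quick way to exercise this variant outside SABnzbd is to stub out unicoder; the stand-in below is hypothetical and only mimics what the real sabnzbd.encoding helper does, for this demo.

# Hypothetical stand-in for sabnzbd.encoding.unicoder, for demo purposes only.
def unicoder(text):
    if isinstance(text, str):
        return text.decode('utf-8', 'replace')
    return text

print is_cloaked('/incoming/release.rar', ['password.txt'])       # True
print is_cloaked('/incoming/release.rar', ['release.part1.rar'])  # False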
Example #3
def is_cloaked(path, names):
    """ Return True if this is likely to be a cloaked encrypted post """
    fname = unicoder(os.path.split(path)[1]).lower()
    for name in names:
        name = unicoder(name.lower())
        if fname == name or 'password' in name:
            return True
    return False
Example #4
def folders_at_path(path, include_parent = False, show_hidden = False):
    """ Returns a list of dictionaries with the folders contained at the given path
        Give the empty string as the path to list the contents of the root path
        (under Unix this means "/"; on Windows this will be a list of drive letters)
    """
    from sabnzbd.encoding import unicoder

    if path == "":
        if NT:
            entries = [{'name': letter + ':\\', 'path': letter + ':\\'} for letter in get_win_drives()]
            entries.insert(0, {'current_path': 'Root'})
            return entries
        else:
            path = '/'

    # walk up the tree until we find a valid path
    path = sabnzbd.misc.real_path(sabnzbd.DIR_HOME, path)
    while path and not os.path.isdir(path):
        if path == os.path.dirname(path):
            return folders_at_path('', include_parent)
        else:
            path = os.path.dirname(path)

    # fix up the path and find the parent
    path = os.path.abspath(os.path.normpath(path))
    parent_path = os.path.dirname(path)

    # if we're at the root then the next step is the meta-node showing our drive letters
    if path == parent_path and os.name == 'nt':
        parent_path = ""

    file_list = []
    try:
        for filename in os.listdir(path):
            fpath = os.path.join(path, filename)
            try:
                if NT:
                    doit = (win32api.GetFileAttributes(fpath) & MASK) == TMASK and filename != 'PerfLogs'
                elif not show_hidden:
                    doit = not filename.startswith('.')
                else:
                    doit = True
            except:
                doit = False
            if doit:
                file_list.append({ 'name': unicoder(filename), 'path': unicoder(fpath) })
        file_list = filter(lambda entry: os.path.isdir(entry['path']), file_list)
        file_list = filter(lambda entry: entry['name'].lower() not in _JUNKFOLDERS, file_list)
        file_list = sorted(file_list, lambda x, y: cmp(os.path.basename(x['name']).lower(), os.path.basename(y['name']).lower()))
    except:
        # No access, ignore
        pass
    file_list.insert(0, {'current_path': path})
    if include_parent and parent_path != path:
        file_list.insert(1,{ 'name': "..", 'path': parent_path })

    return file_list
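
A sketch of how the returned list might be consumed; the loop below is illustrative and still needs the surrounding sabnzbd module (NT, get_win_drives, sabnzbd.misc.real_path) to actually run.

# Walk the entries: the first dict is the 'current_path' marker, the rest
# are {'name': ..., 'path': ...} folder entries.
import os

for entry in folders_at_path(os.path.expanduser('~'), include_parent=True):
    if 'current_path' in entry:
        print 'Browsing:', entry['current_path']
    else:
        print entry['name'], '->', entry['path']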
Example #5
def try_rar_check(nzo, workdir, setname):
    """ Attempt to verify set using the RARs
        Return True if verified, False when failed
        When setname is '', all RAR files will be used, otherwise only the matching one
        If no RARs are found, returns True
    """
    _, _, rars, _, _ = build_filelists(workdir)

    if setname:
        # Filter based on set
        rars = [
            rar for rar in rars if os.path.basename(rar).startswith(setname)
        ]

    # Sort
    rars.sort(rar_sort)

    # Test
    if rars:
        nzo.status = Status.VERIFYING
        nzo.set_unpack_info('Repair',
                            T('Trying RAR-based verification'),
                            set=setname)
        nzo.set_action_line(T('Trying RAR-based verification'), '...')
        try:
            # Set path to unrar and open the file
            # Requires de-unicode for RarFile to work!
            rarfile.UNRAR_TOOL = sabnzbd.newsunpack.RAR_COMMAND
            zf = rarfile.RarFile(rars[0])

            # Skip if it's encrypted
            if zf.needs_password():
                msg = T('[%s] RAR-based verification failed: %s') % (unicoder(
                    os.path.basename(rars[0])), T('Passworded'))
                nzo.set_unpack_info('Repair', msg, set=setname)
                return True

            # Will throw exception if something is wrong
            zf.testrar()
            # Success!
            msg = T('RAR files verified successfully')
            nzo.set_unpack_info('Repair', msg, set=setname)
            logging.info(msg)
            return True
        except rarfile.Error as e:
            nzo.fail_msg = T('RAR files failed to verify')
            msg = T('[%s] RAR-based verification failed: %s') % (unicoder(
                os.path.basename(
                    rars[0])), unicoder(e.message.replace('\r\n', ' ')))
            nzo.set_unpack_info('Repair', msg, set=setname)
            logging.info(msg)
            return False
    else:
        # No rar-files, so just continue
        return True
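
The rarfile calls above can be tried in isolation. A minimal sketch, assuming an unrar binary on the PATH and a local test.rar archive:

# Standalone sketch of the rarfile API used by try_rar_check (assumes an
# 'unrar' binary on the PATH and a test.rar archive in the working directory).
import rarfile

rf = rarfile.RarFile('test.rar')
if rf.needs_password():
    print 'Archive is passworded'
else:
    rf.testrar()  # raises rarfile.Error when the archive is damaged
    print 'Archive verified'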
Example #6
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add)
    """
    filename = nzo.final_name
    osx.sendGrowlMsg(T('Post-processing'), nzo.final_name,
                     osx.NOTIFICATION['pp'])
    logging.info('Par2 check starting on %s', filename)

    ## Collect the par files
    if nzo.partable:
        par_table = nzo.partable.copy()
    else:
        par_table = {}
    repair_sets = par_table.keys()

    re_add = False
    par_error = False

    if repair_sets:

        for set_ in repair_sets:
            logging.info("Running repair on set %s", set_)
            parfile_nzf = par_table[set_]
            need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, set_)
            if need_re_add:
                re_add = True
            else:
                par_error = par_error or not res

        if re_add:
            logging.info('Readded %s to queue', filename)
            nzo.priority = REPAIR_PRIORITY
            sabnzbd.nzbqueue.add_nzo(nzo)
            sabnzbd.downloader.Downloader.do.resume_from_postproc()

        logging.info('Par2 check finished on %s', filename)

    else:
        # See if alternative SFV check is possible
        sfv = None
        if cfg.sfv_check():
            for sfv in globber(workdir, '*.sfv'):
                par_error = par_error or not sfv_check(sfv)
            if par_error:
                nzo.set_unpack_info(
                    'Repair',
                    T('Some files failed to verify against "%s"') %
                    unicoder(os.path.basename(sfv)))

        if not sfv:
            logging.info("No par2 sets for %s", filename)
            nzo.set_unpack_info('Repair',
                                T('[%s] No par2 sets') % unicoder(filename))

    return par_error, re_add
Example #7
def folders_at_path(path, include_parent=False):
    """ Returns a list of dictionaries with the folders contained at the given path
        Give the empty string as the path to list the contents of the root path
        under Unix this means "/", on Windows this will be a list of drive letters)
        from sabnzbd.encoding import unicoder
        assert os.path.isabs(path) or path == ""
    """
    from sabnzbd.encoding import unicoder

    # walk up the tree until we find a valid path
    while path and not os.path.isdir(path):
        if path == os.path.dirname(path):
            path = ''
            break
        else:
            path = os.path.dirname(path)

    if path == "":
        if os.name == 'nt':
            entries = [{
                'name': letter + ':\\',
                'path': letter + ':\\'
            } for letter in get_win_drives()]
            entries.insert(0, {'current_path': 'Root'})
            return entries
        else:
            path = '/'

    # fix up the path and find the parent
    path = os.path.abspath(os.path.normpath(path))
    parent_path = os.path.dirname(path)

    # if we're at the root then the next step is the meta-node showing our drive letters
    if path == parent_path and os.name == 'nt':
        parent_path = ""

    file_list = [{
        'name': unicoder(filename),
        'path': unicoder(os.path.join(path, filename))
    } for filename in os.listdir(path)]
    file_list = filter(lambda entry: os.path.isdir(entry['path']), file_list)
    file_list = sorted(file_list, lambda x, y: cmp(os.path.basename(x['name']).lower(), os.path.basename(y['name']).lower()))

    file_list.insert(0, {'current_path': path})
    if include_parent and parent_path != path:
        file_list.append({'name': "..", 'path': parent_path})

    return file_list
Example #8
def is_cloaked(path, names):
    """ Return True if this is likely to be a cloaked encrypted post """
    fname = unicoder(os.path.split(path)[1]).lower()
    fname = os.path.splitext(fname)[0]
    for name in names:
        name = os.path.split(name.lower())[1]
        name, ext = os.path.splitext(unicoder(name))
        if ext == u'.rar' and fname.startswith(name) and (len(fname) - len(name)) < 8 and len(names) < 3 and not RE_SUBS.search(fname):
            logging.debug('File %s is probably encrypted due to RAR with same name inside this RAR', fname)
            return True
        elif 'password' in name:
            logging.debug('RAR %s is probably encrypted: "password" in filename %s', fname, name)
            return True
    return False
Example #9
def is_cloaked(path, names):
    """ Return True if this is likely to be a cloaked encrypted post """
    fname = unicoder(os.path.split(path)[1]).lower()
    fname = os.path.splitext(fname)[0]
    for name in names:
        name = os.path.split(name.lower())[1]
        name, ext = os.path.splitext(unicoder(name))
        if ext == u'.rar' and fname.startswith(name) and (len(fname) - len(name)) < 8 and not RE_SUBS.search(fname):
            logging.debug('File %s is probably encrypted due to RAR with same name inside this RAR', fname)
            return True
        elif 'password' in name:
            logging.debug('RAR %s is probably encrypted: "password" in filename %s', fname, name)
            return True
    return False
Example #10
def unpack_history_info(item):
    """ Expands the single line stage_log from the DB
        into a python dictionary for use in the history display
    """
    # Stage Name is separated by ::: stage lines by ; and stages by \r\n
    lst = item['stage_log']
    if lst:
        try:
            lines = lst.split('\r\n')
        except:
            logging.error(
                T('Invalid stage logging in history for %s') + ' (\\r\\n)',
                unicoder(item['name']))
            logging.debug('Lines: %s', lst)
            lines = []
        lst = [None for x in STAGES]
        for line in lines:
            stage = {}
            try:
                key, logs = line.split(':::')
            except:
                logging.debug('Missing key:::logs "%s"', line)
                key = line
                logs = ''
            stage['name'] = key
            stage['actions'] = []
            try:
                logs = logs.split(';')
            except:
                logging.error(
                    T('Invalid stage logging in history for %s') + ' (;)',
                    unicoder(item['name']))
                logging.debug('Logs: %s', logs)
                logs = []
            for log in logs:
                stage['actions'].append(log)
            try:
                lst[STAGES[key]] = stage
            except KeyError:
                lst.append(stage)
        # Remove unused stages
        item['stage_log'] = [x for x in lst if x is not None]

    if item['script_log']:
        item['script_log'] = ''
    # The action line is only available for items in the postproc queue
    if not item.has_key('action_line'):
        item['action_line'] = ''
    return item
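
The separators this parser expects are ':::' between a stage name and its logs, ';' between log lines and '\r\n' between stages. A synthetic record makes the round trip visible:

# Synthetic history record showing the ':::', ';' and '\r\n' separators.
item = {'name': 'My.Job',
        'script_log': '',
        'stage_log': 'Download:::Completed\r\nRepair:::Quick check OK;Renamed 2 files'}
result = unpack_history_info(item)
# result['stage_log'] now resembles:
#   [{'name': 'Download', 'actions': ['Completed']},
#    {'name': 'Repair', 'actions': ['Quick check OK', 'Renamed 2 files']}]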
Example #11
def try_rar_check(nzo, workdir, setname):
    """ Attempt to verify set using the RARs
        Return True if verified, False when failed
        When setname is '', all RAR files will be used, otherwise only the matching one
        If no RARs are found, returns True
    """
    _, _, rars, _, _ = build_filelists(workdir)

    if setname:
        # Filter based on set
        rars = [rar for rar in rars if os.path.basename(rar).startswith(setname)]

    # Sort
    rars.sort(rar_sort)

    # Test
    if rars:
        nzo.status = Status.VERIFYING
        nzo.set_unpack_info('Repair', T('Trying RAR-based verification'))
        nzo.set_action_line(T('Trying RAR-based verification'), '...')
        try:
            # Set path to unrar and open the file
            # Requires de-unicode for RarFile to work!
            rarfile.UNRAR_TOOL = sabnzbd.newsunpack.RAR_COMMAND
            zf = rarfile.RarFile(rars[0])

            # Skip if it's encrypted
            if zf.needs_password():
                msg = T('[%s] RAR-based verification failed: %s') % (unicoder(os.path.basename(rars[0])), T('Passworded'))
                nzo.set_unpack_info('Repair', msg)
                return True

            # Will throw exception if something is wrong
            zf.testrar()
            # Success!
            msg = T('RAR files verified successfully')
            nzo.set_unpack_info('Repair', msg)
            logging.info(msg)
            return True
        except rarfile.Error as e:
            nzo.fail_msg = T('RAR files failed to verify')
            msg = T('[%s] RAR-based verification failed: %s') % (unicoder(os.path.basename(rars[0])), unicoder(e.message.replace('\r\n', ' ')))
            nzo.set_unpack_info('Repair', msg)
            logging.info(msg)
            return False
    else:
        # No rar-files, so just continue
        return True
Example #12
def send_growl(title, msg, gtype):
    """ Send Growl message
    """
    global _GROWL, _GROWL_REG

    for n in (0, 1):
        if not _GROWL_REG: _GROWL = None
        if not _GROWL:
            _GROWL, error = register_growl()
        if _GROWL:
            assert isinstance(_GROWL, GrowlNotifier)
            _GROWL_REG = True
            if not isinstance(msg, str) and not isinstance(msg, unicode):
                msg = str(msg)
            logging.debug('Send to Growl: %s %s %s', gtype, latin1(title), latin1(msg))
            try:
                ret = _GROWL.notify(
                    noteType = Tx(NOTIFICATION.get(gtype, 'other')),
                    title = title,
                    description = unicoder(msg),
                )
                if ret is None or isinstance(ret, bool):
                    return None
                elif ret[0] == '401':
                    _GROWL = False
                else:
                    logging.debug('Growl error %s', ret)
                    return 'Growl error %s', ret
            except socket.error, err:
                error = 'Growl error %s' % err
                logging.debug(error)
                return error
            except:
                error = 'Growl error (unknown)'
                logging.debug(error)
                return error
        else:
            return error
    return None
Example #13
def try_sfv_check(nzo, workdir, setname):
    """ Attempt to verify set using SFV file
        Return True if verified, False when failed
        When setname is '', all SFV files will be used, otherwise only the matching one
        When setname is '' and no SFV files are found, True is returned
        """
    # Get list of SFV names; shortest name first, minimizes the chance of a mismatch
    sfvs = globber(workdir, '*.sfv')
    sfvs.sort(lambda x, y: len(x) - len(y))
    par_error = False
    found = False
    for sfv in sfvs:
        if setname in os.path.basename(sfv):
            found = True
            nzo.set_unpack_info('Repair', T('Trying SFV verification'))
            failed = sfv_check(sfv)
            if failed:
                msg = T('Some files failed to verify against "%s"') % unicoder(
                    os.path.basename(sfv))
                msg += '; '
                msg += '; '.join(failed)
                nzo.set_unpack_info('Repair', msg)
                par_error = True
            else:
                nzo.set_unpack_info('Repair',
                                    T('Verified successfully using SFV files'))
            if setname:
                break
    return (found or not setname) and not par_error
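
SFV files are plain text with one 'filename CRC32' pair per line; sfv_check above is SABnzbd's own helper. A minimal stand-alone checker might look like this sketch (illustrative, not the project's implementation):

# Minimal stand-alone SFV verifier (illustrative; not SABnzbd's sfv_check).
# Returns the names of files whose CRC32 does not match the .sfv entry.
import binascii
import os

def simple_sfv_check(sfv_path):
    failed = []
    base = os.path.dirname(sfv_path)
    for line in open(sfv_path):
        line = line.strip()
        if not line or line.startswith(';'):
            continue  # SFV comment lines start with ';'
        name, crc = line.rsplit(None, 1)
        try:
            data = open(os.path.join(base, name), 'rb').read()
            ok = '%08x' % (binascii.crc32(data) & 0xffffffff) == crc.lower()
        except IOError:
            ok = False
        if not ok:
            failed.append(name)
    return failed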
Example #14
def bad_fetch(nzo, url, msg='', content=False):
    """ Create History entry for failed URL Fetch
        msg : message to be logged
        content : report in history that the cause is a bad NZB file
    """
    if msg:
        msg = unicoder(msg)
    else:
        msg = ''

    nzo.status = Status.FAILED

    if url:
        nzo.filename = url
        nzo.final_name = url.strip()

    if content:
        # Bad content
        msg = T('Unusable NZB file')
    else:
        # Failed fetch
        msg = T('URL Fetching failed; %s') % msg

    nzo.fail_msg = msg

    growler.send_notification(T('URL Fetching failed; %s') % '', '%s\n%s' % (msg, url), 'other')
    if cfg.email_endjob() > 0:
        emailer.badfetch_mail(msg, url)

    NzbQueue.do.remove(nzo.nzo_id, add_to_history=True)
Example #15
def bad_fetch(nzo, url, msg='', content=False):
    """ Create History entry for failed URL Fetch
        msg : message to be logged
        content : report in history that the cause is a bad NZB file
    """
    if msg:
        msg = unicoder(msg)
    else:
        msg = ''

    nzo.status = Status.FAILED

    if url:
        nzo.filename = url
        nzo.final_name = url.strip()

    if content:
        # Bad content
        msg = T('Unusable NZB file')
    else:
        # Failed fetch
        msg = T('URL Fetching failed; %s') % msg

    nzo.fail_msg = msg

    notifier.send_notification(T('URL Fetching failed; %s') % '', '%s\n%s' % (msg, url), 'other', nzo.cat)
    if cfg.email_endjob() > 0:
        emailer.badfetch_mail(msg, url)

    NzbQueue.do.remove(nzo.nzo_id, add_to_history=True)
Example #16
    def fail_to_history(self, nzo, url, msg='', content=False):
        """ Create History entry for failed URL Fetch
            msg: message to be logged
            content: report in history that the cause is a bad NZB file
        """
        # Remove the "Trying to fetch" part
        if url:
            nzo.filename = url
            nzo.final_name = url.strip()

        if content:
            # Bad content
            msg = T('Unusable NZB file')
        else:
            # Failed fetch
            msg = T('URL Fetching failed; %s') % unicoder(msg)

        # Mark as failed
        nzo.status = Status.FAILED
        nzo.fail_msg = msg

        notifier.send_notification(
            T('URL Fetching failed; %s') % '', '%s\n%s' % (msg, url), 'other',
            nzo.cat)
        if cfg.email_endjob() > 0:
            emailer.badfetch_mail(msg, url)

        # Parse category to make sure script is set correctly after a grab
        nzo.cat, _, nzo.script, _ = misc.cat_to_opts(nzo.cat,
                                                     script=nzo.script)

        # Add to history and run script if desired
        NzbQueue.do.remove(nzo.nzo_id, add_to_history=False)
        PostProcessor.do.process(nzo)
Example #17
def send_growl(title, msg, gtype):
    """ Send Growl message
    """
    global _GROWL, _GROWL_REG

    for n in (0, 1):
        if not _GROWL_REG: _GROWL = None
        if not _GROWL:
            _GROWL, error = register_growl()
        if _GROWL:
            assert isinstance(_GROWL, GrowlNotifier)
            _GROWL_REG = True
            if not isinstance(msg, str) and not isinstance(msg, unicode):
                msg = str(msg)
            logging.debug('Send to Growl: %s %s %s', gtype, latin1(title), latin1(msg))
            try:
                ret = _GROWL.notify(
                    noteType = Tx(NOTIFICATION.get(gtype, 'other')),
                    title = title,
                    description = unicoder(msg),
                )
                if ret is None or isinstance(ret, bool):
                    return None
                elif ret[0] == '401':
                    _GROWL = False
                else:
                    logging.debug('Growl error %s', ret)
                    return 'Growl error %s', ret
            except socket.error, err:
                error = 'Growl error %s' % err
                logging.debug(error)
                return error
            except:
                error = 'Growl error (unknown)'
                logging.debug(error)
                return error
        else:
            return error
    return None
Example #18
def show_error_dialog(msg):
    """ Show a pop-up when program cannot start
        Windows-only, otherwise only print to console
    """
    if sabnzbd.WIN32:
        ctypes.windll.user32.MessageBoxW(0, unicoder(msg), T('Fatal error'), 0)
    print msg
Example #19
File: panic.py Project: sabnzbd/sabnzbd
def show_error_dialog(msg):
    """ Show a pop-up when program cannot start
        Windows-only, otherwise only print to console
    """
    if sabnzbd.WIN32:
        ctypes.windll.user32.MessageBoxW(0, unicoder(msg), T('Fatal error'), 0)
    print msg
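
The dialog is the plain Win32 MessageBoxW API reached through ctypes; on Windows it can be tried directly:

# Direct form of the Win32 call behind show_error_dialog (Windows only;
# the trailing 0 is MB_OK, a single OK button).
import ctypes
ctypes.windll.user32.MessageBoxW(0, u'Something went wrong', u'Fatal error', 0)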
Example #20
    def fail_to_history(self, nzo, url, msg='', content=False):
        """ Create History entry for failed URL Fetch
            msg: message to be logged
            content: report in history that the cause is a bad NZB file
        """
        # Remove the "Trying to fetch" part
        if url:
            nzo.filename = url
            nzo.final_name = url.strip()

        if content:
            # Bad content
            msg = T('Unusable NZB file')
        else:
            # Failed fetch
            msg = T('URL Fetching failed; %s') % unicoder(msg)

        # Mark as failed
        nzo.status = Status.FAILED
        nzo.fail_msg = msg

        notifier.send_notification(T('URL Fetching failed; %s') % '', '%s\n%s' % (msg, url), 'other', nzo.cat)
        if cfg.email_endjob() > 0:
            emailer.badfetch_mail(msg, url)

        # Parse category to make sure script is set correctly after a grab
        nzo.cat, _, nzo.script, _ = misc.cat_to_opts(nzo.cat, script=nzo.script)

        # Add to history and run script if desired
        NzbQueue.do.remove(nzo.nzo_id, add_to_history=False)
        PostProcessor.do.process(nzo)
Example #21
def try_sfv_check(nzo, workdir, setname):
    """ Attempt to verify set using SFV file
        Return True if verified, False when failed
        When setname is '', all SFV files will be used, otherwise only the matching one
        When setname is '' and no SFV files are found, True is returned
        """
    # Get list of SFV names; shortest name first, minimizes the chance of a mismatch
    sfvs = globber(workdir, '*.sfv')
    sfvs.sort(lambda x, y: len(x) - len(y))
    par_error = False
    found = False
    for sfv in sfvs:
        if setname in os.path.basename(sfv):
            found = True
            nzo.set_unpack_info('Repair', T('Trying SFV verification'))
            failed = sfv_check(sfv)
            if failed:
                msg = T('Some files failed to verify against "%s"') % unicoder(os.path.basename(sfv))
                msg += '; '
                msg += '; '.join(failed)
                nzo.set_unpack_info('Repair', msg)
                par_error = True
            else:
                nzo.set_unpack_info('Repair', T('Verified successfully using SFV files'))
            if setname:
                break
    return (found or not setname) and not par_error
Example #22
def prepare_extraction_path(nzo):
    """ Based on the information that we have, generate
        the extraction path and create the directory.
        Separated so it can be called from DirectUnpacker
    """
    one_folder = False
    marker_file = None
    # Determine class directory
    catdir = config.get_categories(nzo.cat).dir()
    if catdir.endswith('*'):
        catdir = catdir.strip('*')
        one_folder = True
    complete_dir = real_path(cfg.complete_dir.get_path(), catdir)
    complete_dir = long_path(complete_dir)

    # TV/Movie/Date Renaming code part 1 - detect and construct paths
    if cfg.enable_meta():
        file_sorter = Sorter(nzo, nzo.cat)
    else:
        file_sorter = Sorter(None, nzo.cat)
    complete_dir = file_sorter.detect(nzo.final_name, complete_dir)
    if file_sorter.sort_file:
        one_folder = False

    complete_dir = sanitize_and_trim_path(complete_dir)

    if one_folder:
        workdir_complete = create_dirs(complete_dir)
    else:
        workdir_complete = get_unique_path(os.path.join(
            complete_dir, nzo.final_name),
                                           create_dir=True)
        marker_file = set_marker(workdir_complete)

    if not workdir_complete or not os.path.exists(workdir_complete):
        logging.error(
            T('Cannot create final folder %s') %
            unicoder(os.path.join(complete_dir, nzo.final_name)))
        raise IOError

    if cfg.folder_rename() and not one_folder:
        prefixed_path = prefix(workdir_complete, '_UNPACK_')
        tmp_workdir_complete = get_unique_path(prefix(workdir_complete,
                                                      '_UNPACK_'),
                                               create_dir=False)

        try:
            renamer(workdir_complete, tmp_workdir_complete)
        except:
            pass  # On failure, just use the original name

        # Is the unique path different? Then we also need to modify the final path
        if prefixed_path != tmp_workdir_complete:
            workdir_complete = workdir_complete + os.path.splitext(
                tmp_workdir_complete)[1]
    else:
        tmp_workdir_complete = workdir_complete

    return tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file
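
The prefix helper that plants the _UNPACK_ marker is a SABnzbd utility; a plausible sketch of its behavior (an assumption, not the verified implementation):

# Plausible sketch of prefix(): insert a marker before the last path
# component (assumption; not the verified SABnzbd code).
import os

def prefix(path, pre):
    parent, leaf = os.path.split(path)
    return os.path.join(parent, pre + leaf)

# prefix('/complete/Show.S01E01', '_UNPACK_') -> '/complete/_UNPACK_Show.S01E01'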
Example #23
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add)
    """
    assert isinstance(nzo, sabnzbd.nzbstuff.NzbObject)
    filename = nzo.final_name
    growler.send_notification(T('Post-processing'), nzo.final_name, 'pp')
    logging.info('Par2 check starting on %s', filename)

    ## Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath,
                                 remove=False) or {}

    ## Collect the par files
    if nzo.partable:
        par_table = nzo.partable.copy()
    else:
        par_table = {}
    repair_sets = par_table.keys()

    re_add = False
    par_error = False

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() > 0 and 'sample' in setname.lower():
                continue
            if not verified.get(setname, False):
                logging.info("Running repair on set %s", setname)
                parfile_nzf = par_table[setname]
                if not os.path.exists(
                        os.path.join(nzo.downpath, parfile_nzf.filename)):
                    continue
                need_re_add, res = par2_repair(parfile_nzf, nzo, workdir,
                                               setname)
                re_add = re_add or need_re_add
                if not res and not need_re_add and cfg.sfv_check():
                    res = try_sfv_check(nzo, workdir, setname)
                verified[setname] = res
                par_error = par_error or not res
    else:
        logging.info("No par2 sets for %s", filename)
        nzo.set_unpack_info('Repair',
                            T('[%s] No par2 sets') % unicoder(filename))
        if cfg.sfv_check():
            par_error = not try_sfv_check(nzo, workdir, '')
            verified[''] = not par_error

    if re_add:
        logging.info('Readded %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        sabnzbd.nzbqueue.add_nzo(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Par2 check finished on %s', filename)
    return par_error, re_add
Example #24
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add)
    """
    filename = nzo.final_name
    growler.send_notification(T('Post-processing'), nzo.final_name, 'pp')
    logging.info('Par2 check starting on %s', filename)

    ## Collect the par files
    if nzo.partable:
        par_table = nzo.partable.copy()
    else:
        par_table = {}
    repair_sets = par_table.keys()

    re_add = False
    par_error = False

    if repair_sets:

        for set_ in repair_sets:
            logging.info("Running repair on set %s", set_)
            parfile_nzf = par_table[set_]
            need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, set_)
            if need_re_add:
                re_add = True
            else:
                par_error = par_error or not res

        if re_add:
            logging.info('Readded %s to queue', filename)
            nzo.priority = REPAIR_PRIORITY
            sabnzbd.nzbqueue.add_nzo(nzo)
            sabnzbd.downloader.Downloader.do.resume_from_postproc()

        logging.info('Par2 check finished on %s', filename)

    if (par_error and not re_add) or not repair_sets:
        # See if alternative SFV check is possible
        if cfg.sfv_check():
            sfvs = globber(workdir, '*.sfv')
        else:
            sfvs = None
        if sfvs:
            par_error = False
            nzo.set_unpack_info('Repair', T('Trying SFV verification'))
            for sfv in sfvs:
                if not sfv_check(sfv):
                    nzo.set_unpack_info('Repair', T('Some files failed to verify against "%s"') % unicoder(os.path.basename(sfv)))
                    par_error = True
            if not par_error:
                nzo.set_unpack_info('Repair', T('Verified successfully using SFV files'))
        elif not repair_sets:
            logging.info("No par2 sets for %s", filename)
            nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % unicoder(filename))

    if not par_error:
        verified_flag_file(workdir, create=True)
    return par_error, re_add
Example #25
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add)
    """
    assert isinstance(nzo, sabnzbd.nzbstuff.NzbObject)
    filename = nzo.final_name
    growler.send_notification(T('Post-processing'), nzo.final_name, 'pp')
    logging.info('Par2 check starting on %s', filename)

    ## Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath, remove=False) or {}

    ## Collect the par files
    if nzo.partable:
        par_table = nzo.partable.copy()
    else:
        par_table = {}
    repair_sets = par_table.keys()

    re_add = False
    par_error = False
    single = len(repair_sets) == 1

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() > 0 and 'sample' in setname.lower():
                continue
            if not verified.get(setname, False):
                logging.info("Running repair on set %s", setname)
                parfile_nzf = par_table[setname]
                if os.path.exists(os.path.join(nzo.downpath, parfile_nzf.filename)) or parfile_nzf.extrapars:
                    need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, setname, single=single)
                    re_add = re_add or need_re_add
                    if not res and not need_re_add and cfg.sfv_check():
                        res = try_sfv_check(nzo, workdir, setname)
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res
    else:
        logging.info("No par2 sets for %s", filename)
        nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % unicoder(filename))
        if cfg.sfv_check():
            par_error = not try_sfv_check(nzo, workdir, '')
            verified[''] = not par_error

    if re_add:
        logging.info('Readded %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        sabnzbd.nzbqueue.add_nzo(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Par2 check finished on %s', filename)
    return par_error, re_add
Example #26
def unpack_history_info(item):
    '''
        Expands the single line stage_log from the DB
        into a python dictionary for use in the history display
    '''
    # Stage Name is separated by ::: stage lines by ; and stages by \r\n
    if item['stage_log']:
        try:
            lines = item['stage_log'].split('\r\n')
        except:
            logging.error(
                T('Invalid stage logging in history for %s') + ' (\\r\\n)',
                unicoder(item['name']))
            logging.debug('Lines: %s', item['stage_log'])
            lines = []
        item['stage_log'] = []
        for line in lines:
            stage = {}
            try:
                key, logs = line.split(':::')
            except:
                logging.debug('Missing key:::logs "%s"', line)
                key = line
                logs = ''
            stage['name'] = key
            stage['actions'] = []
            try:
                logs = logs.split(';')
            except:
                logging.error(
                    T('Invalid stage logging in history for %s') + ' (;)',
                    unicoder(item['name']))
                logging.debug('Logs: %s', logs)
                logs = []
            for log in logs:
                stage['actions'].append(log)
            item['stage_log'].append(stage)
    if item['script_log']:
        item['script_log'] = zlib.decompress(item['script_log'][:])
    # The action line is only available for items in the postproc queue
    if not item.has_key('action_line'):
        item['action_line'] = ''
    return item
Example #27
def bad_fetch(nzo, url, msg='', retry=False, content=False):
    """ Create History entry for failed URL Fetch
        msg : message to be logged
        retry : make retry link in history
        content : report in history that the cause is a bad NZB file
    """
    msg = unicoder(msg)

    pp = nzo.pp
    if pp is None:
        pp = ''
    else:
        pp = '&pp=%s' % str(pp)
    cat = nzo.cat
    if cat:
        cat = '&cat=%s' % urllib.quote(cat)
    else:
        cat = ''
    script = nzo.script
    if script:
        script = '&script=%s' % urllib.quote(script)
    else:
        script = ''

    nzo.status = 'Failed'

    if url:
        nzo.filename = url
        nzo.final_name = url.strip()

    if content:
        # Bad content
        msg = T('Unusable NZB file')
    else:
        # Failed fetch
        msg = ' (' + msg + ')'

    if retry:
        nzbname = nzo.custom_name
        if nzbname:
            nzbname = '&nzbname=%s' % urllib.quote(nzbname)
        else:
            nzbname = ''
        text = T('URL Fetching failed; %s'
                 ) + ', <a href="./retry?session=%s&url=%s%s%s%s%s">' + T(
                     'Try again') + '</a>'
        parms = (msg, cfg.api_key(), urllib.quote(url), pp, cat, script,
                 nzbname)
        nzo.fail_msg = text % parms
    else:
        nzo.fail_msg = msg

    from sabnzbd.nzbqueue import NzbQueue
    assert isinstance(NzbQueue.do, NzbQueue)
    NzbQueue.do.remove(nzo.nzo_id, add_to_history=True)
Example #28
def folders_at_path(path, include_parent = False):
    """ Returns a list of dictionaries with the folders contained at the given path
        Give the empty string as the path to list the contents of the root path
        under Unix this means "/", on Windows this will be a list of drive letters)
        from sabnzbd.encoding import unicoder
        assert os.path.isabs(path) or path == ""
    """
    from sabnzbd.encoding import unicoder

    # walk up the tree until we find a valid path
    while path and not os.path.isdir(path):
        if path == os.path.dirname(path):
            path = ''
            break
        else:
            path = os.path.dirname(path)

    if path == "":
        if os.name == 'nt':
            entries = [{'name': letter + ':\\', 'path': letter + ':\\'} for letter in get_win_drives()]
            entries.insert(0, {'current_path': 'Root'})
            return entries
        else:
            path = '/'

    # fix up the path and find the parent
    path = os.path.abspath(os.path.normpath(path))
    parent_path = os.path.dirname(path)

    # if we're at the root then the next step is the meta-node showing our drive letters
    if path == parent_path and os.name == 'nt':
        parent_path = ""

    file_list = [{ 'name': unicoder(filename), 'path': unicoder(os.path.join(path, filename)) } for filename in os.listdir(path)]
    file_list = filter(lambda entry: os.path.isdir(entry['path']), file_list)
    file_list = sorted(file_list, lambda x, y: cmp(os.path.basename(x['name']).lower(), os.path.basename(y['name']).lower()))

    file_list.insert(0, {'current_path': path})
    if include_parent and parent_path != path:
        file_list.append({ 'name': "..", 'path': parent_path })

    return file_list
Example #29
def is_cloaked(nzo, path, names):
    """ Return True if this is likely to be a cloaked encrypted post """
    fname = unicoder(os.path.split(path)[1]).lower()
    fname = os.path.splitext(fname)[0]
    for name in names:
        name = os.path.split(name.lower())[1]
        name, ext = os.path.splitext(unicoder(name))
        if ext == u'.rar' and fname.startswith(name) and (len(fname) - len(name)) < 8 and len(names) < 3 and not RE_SUBS.search(fname):
            # Only warn once
            if nzo.encrypted == 0:
                logging.warning(T('Job "%s" is probably encrypted due to RAR with same name inside this RAR'), nzo.final_name)
                nzo.encrypted = 1
            return True
        elif 'password' in name:
            # Only warn once
            if nzo.encrypted == 0:
                logging.warning(T('Job "%s" is probably encrypted: "password" in filename "%s"'), nzo.final_name, name)
                nzo.encrypted = 1
            return True
    return False
Example #30
File: misc.py Project: lad1337/sabnzbd
def bad_fetch(nzo, url, msg='', retry=False, content=False):
    """ Create History entry for failed URL Fetch
        msg : message to be logged
        retry : make retry link in history
        content : report in history that the cause is a bad NZB file
    """
    msg = unicoder(msg)

    pp = nzo.pp
    if pp is None:
        pp = ''
    else:
        pp = '&pp=%s' % str(pp)
    cat = nzo.cat
    if cat:
        cat = '&cat=%s' % urllib.quote(cat)
    else:
        cat = ''
    script = nzo.script
    if script:
        script = '&script=%s' % urllib.quote(script)
    else:
        script = ''

    nzo.status = 'Failed'

    if url:
        nzo.filename = url
        nzo.final_name = url.strip()

    if content:
        # Bad content
        msg = T('Unusable NZB file')
    else:
        # Failed fetch
        msg = ' (' + msg + ')'

    if retry:
        nzbname = nzo.custom_name
        if nzbname:
            nzbname = '&nzbname=%s' % urllib.quote(nzbname)
        else:
            nzbname = ''
        text = T('URL Fetching failed; %s') + ', <a href="./retry?session=%s&url=%s%s%s%s%s">' + T('Try again') + '</a>'
        parms = (msg, cfg.api_key(), urllib.quote(url), pp, cat, script, nzbname)
        nzo.fail_msg = text % parms
    else:
        nzo.fail_msg = msg

    from sabnzbd.nzbqueue import NzbQueue
    assert isinstance(NzbQueue.do, NzbQueue)
    NzbQueue.do.remove(nzo.nzo_id, add_to_history=True)
Example #31
def prepare_extraction_path(nzo):
    """ Based on the information that we have, generate
        the extraction path and create the directory.
        Separated so it can be called from DirectUnpacker
    """
    one_folder = False
    marker_file = None
    # Determine class directory
    catdir = config.get_categories(nzo.cat).dir()
    if catdir.endswith('*'):
        catdir = catdir.strip('*')
        one_folder = True
    complete_dir = real_path(cfg.complete_dir.get_path(), catdir)
    complete_dir = long_path(complete_dir)

    # TV/Movie/Date Renaming code part 1 - detect and construct paths
    if cfg.enable_meta():
        file_sorter = Sorter(nzo, nzo.cat)
    else:
        file_sorter = Sorter(None, nzo.cat)
    complete_dir = file_sorter.detect(nzo.final_name, complete_dir)
    if file_sorter.sort_file:
        one_folder = False

    complete_dir = sanitize_and_trim_path(complete_dir)

    if one_folder:
        workdir_complete = create_dirs(complete_dir)
    else:
        workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
        marker_file = set_marker(workdir_complete)

    if not workdir_complete or not os.path.exists(workdir_complete):
        logging.error(T('Cannot create final folder %s') % unicoder(os.path.join(complete_dir, nzo.final_name)))
        raise IOError

    if cfg.folder_rename() and not one_folder:
        prefixed_path = prefix(workdir_complete, '_UNPACK_')
        tmp_workdir_complete = get_unique_path(prefix(workdir_complete, '_UNPACK_'), create_dir=False)

        try:
            renamer(workdir_complete, tmp_workdir_complete)
        except:
            pass  # On failure, just use the original name

        # Is the unique path different? Then we also need to modify the final path
        if prefixed_path != tmp_workdir_complete:
            workdir_complete = workdir_complete + os.path.splitext(tmp_workdir_complete)[1]
    else:
        tmp_workdir_complete = workdir_complete

    return tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file
Example #32
File: osxmenu.py Project: jreeder/sabnzbd
    def queueUpdate(self):
        try:
            qnfo = NzbQueue.do.queue_info(max_jobs=10)
            pnfo_list = qnfo[QNFO_PNFO_LIST_FIELD]

            bytesleftprogess = 0
            bpsnow = BPSMeter.do.get_bps()
            self.info = ""

            self.menu_queue = NSMenu.alloc().init()

            if len(pnfo_list):

                menu_queue_item = NSMenuItem.alloc(
                ).initWithTitle_action_keyEquivalent_(
                    T('Queue First 10 Items'), '', '')
                self.menu_queue.addItem_(menu_queue_item)
                self.menu_queue.addItem_(NSMenuItem.separatorItem())

                job_nb = 1
                for pnfo in pnfo_list:
                    if job_nb > 10:
                        break
                    filename = unicoder(pnfo[PNFO_FILENAME_FIELD])
                    msgid = pnfo[PNFO_MSGID_FIELD]
                    bytesleft = pnfo[PNFO_BYTES_LEFT_FIELD] / MEBI
                    bytesleftprogess += pnfo[PNFO_BYTES_LEFT_FIELD]
                    bytes = pnfo[PNFO_BYTES_FIELD] / MEBI
                    nzo_id = pnfo[PNFO_NZO_ID_FIELD]
                    timeleft = self.calc_timeleft(bytesleftprogess, bpsnow)

                    job = "%s\t(%d/%d MB) %s" % (filename, bytesleft, bytes,
                                                 timeleft)
                    job_nb += 1
                    menu_queue_item = NSMenuItem.alloc(
                    ).initWithTitle_action_keyEquivalent_(job, '', '')
                    self.menu_queue.addItem_(menu_queue_item)

                self.info = "%d nzb(s)\t( %d / %d MB )" % (
                    len(pnfo_list), (qnfo[QNFO_BYTES_LEFT_FIELD] / MEBI),
                    (qnfo[QNFO_BYTES_FIELD] / MEBI))

            else:
                menu_queue_item = NSMenuItem.alloc(
                ).initWithTitle_action_keyEquivalent_(T('Empty'), '', '')
                self.menu_queue.addItem_(menu_queue_item)

            self.queue_menu_item.setSubmenu_(self.menu_queue)

        except:
            logging.info("[osx] queueUpdate Exception %s" %
                         (sys.exc_info()[0]))
Example #33
def fix_keys(data):
    """ Convert keys of each dictionary in tuple 'data' to unicode """
    new_data = []
    if isinstance(data, list):
        for n in xrange(len(data)):
            if isinstance(data[n], dict):
                new = {}
                for key in data[n]:
                    new[unicoder(key)] = data[n][key]
            else:
                new = data[n]
            new_data.append(new)
    return new_data
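
Only the dictionary keys are converted; values and non-dict elements pass through untouched (assuming unicoder turns byte-string keys into unicode):

# fix_keys touches dictionary keys only; values pass through unchanged.
rows = [{'name': 'job1', 'size': 42}, 'not-a-dict']
print fix_keys(rows)
# -> [{u'name': 'job1', u'size': 42}, 'not-a-dict']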
Example #34
def fix_keys(data):
    """ Convert keys of each dictionary in tuple 'data' to unicode """
    new_data = []
    if isinstance(data, list):
        for n in xrange(len(data)):
            if isinstance(data[n], dict):
                new = {}
                for key in data[n]:
                    new[unicoder(key)] = data[n][key]
            else:
                new = data[n]
            new_data.append(new)
    return new_data
Example #35
File: notifier.py Project: sabnzbd/sabnzbd
def send_growl(title, msg, gtype, test=None):
    """ Send Growl message """
    global _GROWL, _GROWL_REG, _GROWL_DATA

    # support testing values from UI
    if test:
        growl_server = test.get('growl_server') or None
        growl_password = test.get('growl_password') or None
    else:
        growl_server = sabnzbd.cfg.growl_server()
        growl_password = sabnzbd.cfg.growl_password()

    for n in (0, 1):
        if not _GROWL_REG:
            _GROWL = None
        if (growl_server, growl_password) != _GROWL_DATA:
            reset_growl()
        if not _GROWL:
            _GROWL, error = register_growl(growl_server, growl_password)
        if _GROWL:
            _GROWL_REG = True
            if isinstance(msg, unicode):
                msg = msg.encode('utf-8')
            elif not isinstance(msg, str):
                msg = str(msg)
            logging.debug('Send to Growl: %s %s %s', gtype, title, msg)
            try:
                ret = _GROWL.notify(
                    noteType=Tx(NOTIFICATION.get(gtype, 'other')),
                    title=title,
                    description=unicoder(msg),
                )
                if ret is None or isinstance(ret, bool):
                    return None
                elif ret[0] == '401':
                    _GROWL = False
                else:
                    logging.debug('Growl error %s', ret)
                    return 'Growl error %s', ret
            except (gntp.errors.NetworkError, gntp.errors.AuthError) as err:
                error = 'Growl error %s' % err
                logging.debug(error)
                return error
            except:
                error = 'Growl error (unknown)'
                logging.debug(error)
                return error
        else:
            return error
    return None
Example #36
File: postproc.py Project: lad1337/sabnzbd
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add)
    """
    filename = nzo.final_name
    osx.sendGrowlMsg(T('Post-processing'), nzo.final_name, osx.NOTIFICATION['pp'])
    logging.info('Par2 check starting on %s', filename)

    ## Collect the par files
    if nzo.partable:
        par_table = nzo.partable.copy()
    else:
        par_table = {}
    repair_sets = par_table.keys()

    re_add = False
    par_error = False

    if repair_sets:

        for set_ in repair_sets:
            logging.info("Running repair on set %s", set_)
            parfile_nzf = par_table[set_]
            need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, set_)
            if need_re_add:
                re_add = True
            else:
                par_error = par_error or not res

        if re_add:
            logging.info('Readded %s to queue', filename)
            nzo.priority = REPAIR_PRIORITY
            sabnzbd.nzbqueue.add_nzo(nzo)
            sabnzbd.downloader.Downloader.do.resume_from_postproc()

        logging.info('Par2 check finished on %s', filename)

    else:
        # See if alternative SFV check is possible
        sfv = None
        if cfg.sfv_check():
            for sfv in globber(workdir, '*.sfv'):
                par_error = par_error or not sfv_check(sfv)
            if par_error:
                nzo.set_unpack_info('Repair', T('Some files failed to verify against "%s"') % unicoder(os.path.basename(sfv)))

        if not sfv:
            logging.info("No par2 sets for %s", filename)
            nzo.set_unpack_info('Repair', T('[%s] No par2 sets') % unicoder(filename))

    return par_error, re_add
Example #37
def send_growl(title, msg, gtype, test=None):
    """ Send Growl message """
    global _GROWL, _GROWL_REG, _GROWL_DATA

    # support testing values from UI
    if test:
        growl_server = test.get('growl_server') or None
        growl_password = test.get('growl_password') or None
    else:
        growl_server = sabnzbd.cfg.growl_server()
        growl_password = sabnzbd.cfg.growl_password()

    for n in (0, 1):
        if not _GROWL_REG:
            _GROWL = None
        if (growl_server, growl_password) != _GROWL_DATA:
            reset_growl()
        if not _GROWL:
            _GROWL, error = register_growl(growl_server, growl_password)
        if _GROWL:
            _GROWL_REG = True
            if isinstance(msg, unicode):
                msg = msg.encode('utf-8')
            elif not isinstance(msg, str):
                msg = str(msg)
            logging.debug('Send to Growl: %s %s %s', gtype, title, msg)
            try:
                ret = _GROWL.notify(
                    noteType=Tx(NOTIFICATION.get(gtype, 'other')),
                    title=title,
                    description=unicoder(msg),
                )
                if ret is None or isinstance(ret, bool):
                    return None
                elif ret[0] == '401':
                    _GROWL = False
                else:
                    logging.debug('Growl error %s', ret)
                    return 'Growl error %s', ret
            except (gntp.errors.NetworkError, gntp.errors.AuthError) as err:
                error = 'Growl error %s' % err
                logging.debug(error)
                return error
            except:
                error = 'Growl error (unknown)'
                logging.debug(error)
                return error
        else:
            return error
    return None
Example #38
def set_serv_parms(service, args):
    """ Set the service command line parameters in Registry """
    import _winreg

    uargs = []
    for arg in args:
        uargs.append(unicoder(arg))

    try:
        key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE, _SERVICE_KEY + service)
        _winreg.SetValueEx(key, _SERVICE_PARM, None, _winreg.REG_MULTI_SZ, uargs)
        _winreg.CloseKey(key)
    except WindowsError:
        return False
    return True
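
Reading the parameters back would use the matching _winreg calls; the sketch below assumes the same _SERVICE_KEY and _SERVICE_PARM constants and is not necessarily how SABnzbd itself does it.

# Sketch of the read-back counterpart (assumes the same _SERVICE_KEY and
# _SERVICE_PARM constants; Windows only).
def get_serv_parms(service):
    import _winreg
    try:
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, _SERVICE_KEY + service)
        value, _ = _winreg.QueryValueEx(key, _SERVICE_PARM)
        _winreg.CloseKey(key)
        return value
    except WindowsError:
        return []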
Example #39
File: misc.py Project: labrys/sabnzbd
def set_serv_parms(service, args):
    """ Set the service command line parameters in Registry """
    import _winreg

    uargs = []
    for arg in args:
        uargs.append(unicoder(arg))

    try:
        key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE, _SERVICE_KEY + service)
        _winreg.SetValueEx(key, _SERVICE_PARM, None, _winreg.REG_MULTI_SZ, uargs)
        _winreg.CloseKey(key)
    except WindowsError:
        return False
    return True
Example #40
    def queueUpdate(self):
        try:
            qnfo = NzbQueue.do.queue_info(start=0, limit=10)
            pnfo_list = qnfo.list

            bytesleftprogess = 0
            bpsnow = BPSMeter.do.get_bps()
            self.info = ""

            self.menu_queue = NSMenu.alloc().init()

            if len(pnfo_list):

                menu_queue_item = NSMenuItem.alloc(
                ).initWithTitle_action_keyEquivalent_(
                    T('Queue First 10 Items'), '', '')
                self.menu_queue.addItem_(menu_queue_item)
                self.menu_queue.addItem_(NSMenuItem.separatorItem())

                for pnfo in pnfo_list:
                    filename = unicoder(pnfo.filename)
                    bytesleft = pnfo.bytes_left / MEBI
                    bytesleftprogess += pnfo.bytes_left
                    bytes = pnfo.bytes / MEBI
                    nzo_id = pnfo.nzo_id
                    timeleft = self.calc_timeleft_(bytesleftprogess, bpsnow)

                    job = "%s\t(%d/%d MB) %s" % (filename, bytesleft, bytes,
                                                 timeleft)
                    menu_queue_item = NSMenuItem.alloc(
                    ).initWithTitle_action_keyEquivalent_(job, '', '')
                    self.menu_queue.addItem_(menu_queue_item)

                self.info = "%d nzb(s)\t( %d / %d MB )" % (qnfo.q_size_list,
                                                           (qnfo.bytes_left /
                                                            MEBI),
                                                           (qnfo.bytes / MEBI))

            else:
                menu_queue_item = NSMenuItem.alloc(
                ).initWithTitle_action_keyEquivalent_(T('Empty'), '', '')
                self.menu_queue.addItem_(menu_queue_item)

            self.queue_menu_item.setSubmenu_(self.menu_queue)

        except:
            logging.info("[osx] queueUpdate Exception %s" %
                         (sys.exc_info()[0]))
Example #41
File: database.py Project: Jypy/iSABnzbd
def unpack_history_info(item):
    """ Expands the single line stage_log from the DB
        into a python dictionary for use in the history display
    """
    # Stage Name is separated by ::: stage lines by ; and stages by \r\n
    lst = item["stage_log"]
    if lst:
        try:
            lines = lst.split("\r\n")
        except:
            logging.error(T("Invalid stage logging in history for %s") + " (\\r\\n)", unicoder(item["name"]))
            logging.debug("Lines: %s", lst)
            lines = []
        lst = [None for x in STAGES]
        for line in lines:
            stage = {}
            try:
                key, logs = line.split(":::")
            except:
                logging.debug('Missing key:::logs "%s"', line)
                key = line
                logs = ""
            stage["name"] = key
            stage["actions"] = []
            try:
                logs = logs.split(";")
            except:
                logging.error(T("Invalid stage logging in history for %s") + " (;)", unicoder(item["name"]))
                logging.debug("Logs: %s", logs)
                logs = []
            for log in logs:
                stage["actions"].append(log)
            try:
                lst[STAGES[key]] = stage
            except KeyError:
                lst.append(stage)
        # Remove unused stages
        item["stage_log"] = [x for x in lst if x is not None]

    if item["script_log"]:
        item["script_log"] = zlib.decompress(item["script_log"][:])
    # The action line is only available for items in the postproc queue
    if not item.has_key("action_line"):
        item["action_line"] = ""
    return item
Example #42
def unpack_history_info(item):
    """ Expands the single line stage_log from the DB
        into a python dictionary for use in the history display
    """
    # Stage Name is separated by ::: stage lines by ; and stages by \r\n
    lst = item['stage_log']
    if lst:
        try:
            lines = lst.split('\r\n')
        except:
            logging.error(T('Invalid stage logging in history for %s') + ' (\\r\\n)', unicoder(item['name']))
            logging.debug('Lines: %s', lst)
            lines = []
        lst = [None for x in STAGES]
        for line in lines:
            stage = {}
            try:
                key, logs = line.split(':::')
            except:
                logging.debug('Missing key:::logs "%s"', line)
                key = line
                logs = ''
            stage['name'] = key
            stage['actions'] = []
            try:
                logs = logs.split(';')
            except:
                logging.error(T('Invalid stage logging in history for %s') + ' (;)', unicoder(item['name']))
                logging.debug('Logs: %s', logs)
                logs = []
            for log in logs:
                stage['actions'].append(log)
            try:
                lst[STAGES[key]] = stage
            except KeyError:
                lst.append(stage)
        # Remove unused stages
        item['stage_log'] = [x for x in lst if x is not None]

    if item['script_log']:
        item['script_log'] = ''
    # The action line is only available for items in the postproc queue
    if not item.has_key('action_line'):
        item['action_line'] = ''
    return item
Example #43
0
    def queueUpdate(self):
        try:
            qnfo = NzbQueue.do.queue_info(max_jobs=10)
            pnfo_list = qnfo[QNFO_PNFO_LIST_FIELD]

            bytesleftprogess = 0
            bpsnow = BPSMeter.do.get_bps()
            self.info = ""

            self.menu_queue = NSMenu.alloc().init()

            if len(pnfo_list):

                menu_queue_item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(T('Queue First 10 Items'), '', '')
                self.menu_queue.addItem_(menu_queue_item)
                self.menu_queue.addItem_(NSMenuItem.separatorItem())

                job_nb = 1
                for pnfo in pnfo_list:
                    if job_nb > 10:
                        break
                    filename = unicoder(pnfo[PNFO_FILENAME_FIELD])
                    msgid = pnfo[PNFO_MSGID_FIELD]
                    bytesleft = pnfo[PNFO_BYTES_LEFT_FIELD] / MEBI
                    bytesleftprogess += pnfo[PNFO_BYTES_LEFT_FIELD]
                    bytes = pnfo[PNFO_BYTES_FIELD] / MEBI
                    nzo_id = pnfo[PNFO_NZO_ID_FIELD]
                    timeleft = self.calc_timeleft(bytesleftprogess, bpsnow)

                    job = "%s\t(%d/%d MB) %s" % (filename, bytesleft, bytes, timeleft)
                    job_nb += 1
                    menu_queue_item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(job, '', '')
                    self.menu_queue.addItem_(menu_queue_item)

                self.info = "%d nzb(s)\t( %d / %d MB )" % (len(pnfo_list),(qnfo[QNFO_BYTES_LEFT_FIELD] / MEBI), (qnfo[QNFO_BYTES_FIELD] / MEBI))

            else:
                menu_queue_item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(T('Empty'), '', '')
                self.menu_queue.addItem_(menu_queue_item)

            self.queue_menu_item.setSubmenu_(self.menu_queue)

        except:
            logging.info("[osx] queueUpdate Exception %s" % (sys.exc_info()[0]))
Example #44
0
def decode_factory(text):
    """ Recursively looks through the supplied argument
        and converts any text to Unicode
    """
    if isinstance(text, str):
        return unicoder(text)

    elif isinstance(text, list):
        new_text = []
        for t in text:
            new_text.append(decode_factory(t))
        return new_text

    elif isinstance(text, dict):
        new_text = {}
        for key in text:
            new_text[key] = decode_factory(text[key])
        return new_text
    else:
        return text
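A short usage sketch for decode_factory under Python 2, the version this code targets (str is the byte type there). The unicoder stub stands in for sabnzbd.encoding.unicoder, which tries several codecs:

def unicoder(text):
    # Stub for illustration; the real helper is more forgiving
    return text.decode('utf-8', 'replace')

nested = {'name': 'caf\xc3\xa9', 'files': ['a.rar', 'b.rar'], 'size': 42}
print(decode_factory(nested))
# Values become unicode (u'caf\xe9', u'a.rar', ...); the non-string 42
# falls through the final else branch untouched.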
Example #45
0
File: upload.py Project: sabnzbd/sabnzbd
def upload_file(url, fp):
    """ Function for uploading nzbs to a running sabnzbd instance """
    try:
        fp = unicoder(fp).encode('utf-8')
        fp = urllib.quote_plus(fp)
        url = '%s&mode=addlocalfile&name=%s' % (url, fp)
        # Add local apikey if it wasn't already in the registered URL
        apikey = cfg.api_key()
        if apikey and 'apikey' not in url:
            url = '%s&apikey=%s' % (url, apikey)
        if 'apikey' not in url:
            # Use alternative login method
            username = cfg.username()
            password = cfg.password()
            if username and password:
                url = '%s&ma_username=%s&ma_password=%s' % (url, username, password)
        get_from_url(url)
    except:
        logging.error("Failed to upload file: %s", fp)
        logging.info("Traceback: ", exc_info=True)
Example #46
0
def decode_factory(text):
    """ Recursively looks through the supplied argument
        and converts any text to Unicode
    """
    if isinstance(text, str):
        return unicoder(text)

    elif isinstance(text, list):
        new_text = []
        for t in text:
            new_text.append(decode_factory(t))
        return new_text

    elif isinstance(text, dict):
        new_text = {}
        for key in text:
            new_text[key] = decode_factory(text[key])
        return new_text
    else:
        return text
Example #47
0
File: upload.py Project: gitgift/sabnzbd
def upload_file(url, fp):
    """ Function for uploading nzbs to a running sabnzbd instance """
    try:
        fp = unicoder(fp).encode('utf-8')
        fp = urllib.quote_plus(fp)
        url = '%s&mode=addlocalfile&name=%s' % (url, fp)
        # Add local apikey if it wasn't already in the registered URL
        apikey = cfg.api_key()
        if apikey and 'apikey' not in url:
            url = '%s&apikey=%s' % (url, apikey)
        if 'apikey' not in url:
            # Use alternative login method
            username = cfg.username()
            password = cfg.password()
            if username and password:
                url = '%s&ma_username=%s&ma_password=%s' % (url, username, password)
        sabnzbd.newsunpack.get_from_url(url)
    except:
        logging.error("Failed to upload file: %s", fp)
        logging.info("Traceback: ", exc_info=True)
Example #48
0
File: sorting.py Project: sabnzbd/sabnzbd
def path_subst(path, mapping):
    """ Replace the sort sting elements by real values.
        Non-elements are copied literally.
        path = the sort string
        mapping = array of tuples that maps all elements to their values
    """
    # Added ugly hack to prevent %ext from being masked by %e
    newpath = []
    plen = len(path)
    n = 0
    while n < plen:
        result = path[n]
        if result == '%':
            for key, value in mapping:
                if path.startswith(key, n) and not path.startswith('%ext', n):
                    n += len(key) - 1
                    result = value
                    break
        newpath.append(result)
        n += 1
    return u''.join([unicoder(x) for x in newpath])
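The effect of the '%ext' guard is easiest to see with a concrete mapping. A small usage sketch, assuming the function above plus sabnzbd.encoding.unicoder are importable; the sort elements here are illustrative, not SABnzbd's full set:

mapping = [('%t', 'Some Show'), ('%e', '05'), ('%ext', 'mkv')]
print(path_subst('%t - %e.%ext', mapping))
# -> Some Show - 05.%ext
# Without the guard, '%e' would also match the start of '%ext' and yield
# 'Some Show - 05.05xt'. With it, '%ext' survives literally so a later
# stage can substitute the real extension.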
Example #49
0
def path_subst(path, mapping):
    """ Replace the sort sting elements by real values.
        Non-elements are copied literally.
        path = the sort string
        mapping = array of tuples that maps all elements to their values
    """
    # Added ugly hack to prevent %ext from being masked by %e
    newpath = []
    plen = len(path)
    n = 0
    while n < plen:
        result = path[n]
        if result == '%':
            for key, value in mapping:
                if path.startswith(key, n) and not path.startswith('%ext', n):
                    n += len(key) - 1
                    result = value
                    break
        newpath.append(result)
        n += 1
    return u''.join([unicoder(x) for x in newpath])
Example #50
0
def unpack_history_info(item):
    '''
        Expands the single line stage_log from the DB
        into a python dictionary for use in the history display
    '''
    # Stage name is separated from its log lines by ':::', log lines by ';', and stages by '\r\n'
    if item['stage_log']:
        try:
            lines = item['stage_log'].split('\r\n')
        except:
            logging.error(T('Invalid stage logging in history for %s') + ' (\\r\\n)', unicoder(item['name']))
            logging.debug('Lines: %s', item['stage_log'])
            lines = []
        item['stage_log'] = []
        for line in lines:
            stage = {}
            try:
                key, logs = line.split(':::')
            except:
                logging.debug('Missing key:::logs "%s"', line)
                key = line
                logs = ''
            stage['name'] = key
            stage['actions'] = []
            try:
                logs = logs.split(';')
            except:
                logging.error(T('Invalid stage logging in history for %s') + ' (;)', unicoder(item['name']))
                logging.debug('Logs: %s', logs)
                logs = []
            for log in logs:
                stage['actions'].append(log)
            item['stage_log'].append(stage)
    if item['script_log']:
        item['script_log'] = zlib.decompress(item['script_log'][:])
    # The action line is only available for items in the postproc queue
    if not item.has_key('action_line'):
        item['action_line'] = ''
    return item
Example #51
0
File: osxmenu.py Project: labrys/sabnzbd
    def queueUpdate(self):
        try:
            qnfo = NzbQueue.do.queue_info(start=0, limit=10)
            pnfo_list = qnfo.list

            bytesleftprogess = 0
            bpsnow = BPSMeter.do.get_bps()
            self.info = ""

            self.menu_queue = NSMenu.alloc().init()

            if len(pnfo_list):

                menu_queue_item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(T('Queue First 10 Items'), '', '')
                self.menu_queue.addItem_(menu_queue_item)
                self.menu_queue.addItem_(NSMenuItem.separatorItem())

                for pnfo in pnfo_list:
                    filename = unicoder(pnfo.filename)
                    bytesleft = pnfo.bytes_left / MEBI
                    bytesleftprogess += pnfo.bytes_left
                    bytes = pnfo.bytes / MEBI
                    nzo_id = pnfo.nzo_id
                    timeleft = self.calc_timeleft(bytesleftprogess, bpsnow)

                    job = "%s\t(%d/%d MB) %s" % (filename, bytesleft, bytes, timeleft)
                    menu_queue_item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(job, '', '')
                    self.menu_queue.addItem_(menu_queue_item)

                self.info = "%d nzb(s)\t( %d / %d MB )" % (qnfo.q_size_list, (qnfo.bytes_left / MEBI), (qnfo.bytes / MEBI))

            else:
                menu_queue_item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(T('Empty'), '', '')
                self.menu_queue.addItem_(menu_queue_item)

            self.queue_menu_item.setSubmenu_(self.menu_queue)

        except:
            logging.info("[osx] queueUpdate Exception %s" % (sys.exc_info()[0]))
Example #52
0
def process_job(nzo):
    """ Process one job """
    assert isinstance(nzo, sabnzbd.nzbstuff.NzbObject)
    start = time.time()

    # keep track of whether we can continue
    all_ok = True
    # keep track of par problems
    par_error = False
    # keep track of any unpacking errors
    unpack_error = False
    # Signal empty download, for when 'empty_postproc' is enabled
    empty = False
    nzb_list = []
    # These need to be initialized in case of a crash
    workdir_complete = ''
    postproc_time = 0
    script_log = ''
    script_line = ''
    crash_msg = ''

    ## Get the job flags
    nzo.save_attribs()
    flag_repair, flag_unpack, flag_delete = nzo.repair_opts
    # Normalize PP
    if flag_delete: flag_unpack = True
    if flag_unpack: flag_repair = True

    # Get the NZB name
    filename = nzo.final_name
    msgid = nzo.msgid

    if cfg.allow_streaming() and not (flag_repair or flag_unpack or flag_delete):
        # After streaming, force +D
        nzo.set_pp(3)
        nzo.status = Status.FAILED
        nzo.save_attribs()
        all_ok = False

    if nzo.fail_msg:  # Special case: aborted due to too much missing data
        nzo.status = Status.FAILED
        nzo.save_attribs()
        all_ok = False
        par_error = unpack_error = True

    try:

        # Get the folder containing the download result
        workdir = nzo.downpath
        tmp_workdir_complete = None

        # if no files are present (except __admin__), fail the job
        if all_ok and len(globber(workdir)) < 2:
            if nzo.precheck:
                enough, ratio = nzo.check_quality()
                req_ratio = float(cfg.req_completion_rate()) / 100.0
                # Make sure that rounded ratio doesn't equal required ratio
                # when it is actually below required
                if (ratio < req_ratio) and (req_ratio - ratio) < 0.001:
                    ratio = req_ratio - 0.001
                emsg = '%.1f%%' % (ratio * 100.0)
                emsg2 = '%.1f%%' % float(cfg.req_completion_rate())
                emsg = T('Download might fail, only %s of required %s available') % (emsg, emsg2)
            else:
                emsg = T('Download failed - Out of your server\'s retention?')
                empty = True
            nzo.fail_msg = emsg
            nzo.set_unpack_info('Fail', emsg)
            nzo.status = Status.FAILED
            # do not run unpacking or parity verification
            flag_repair = flag_unpack = False
            all_ok = cfg.empty_postproc() and empty
            if not all_ok:
                par_error = unpack_error = True

        script = nzo.script
        cat = nzo.cat

        logging.info('Starting PostProcessing on %s' + \
                     ' => Repair:%s, Unpack:%s, Delete:%s, Script:%s, Cat:%s',
                     filename, flag_repair, flag_unpack, flag_delete, script, cat)

        ## Par processing, if enabled
        if all_ok and flag_repair:
            par_error, re_add = parring(nzo, workdir)
            if re_add:
                # Try to get more par files
                return False

        ## Check if user allows unsafe post-processing
        if flag_repair and cfg.safe_postproc():
            all_ok = all_ok and not par_error

        # Set complete dir to workdir in case we need to abort
        workdir_complete = workdir
        dirname = nzo.final_name
        marker_file = None

        if all_ok:
            one_folder = False
            ## Determine class directory
            if cfg.create_group_folders():
                complete_dir = addPrefixes(cfg.complete_dir.get_path(), nzo.dirprefix)
                complete_dir = create_dirs(complete_dir)
            else:
                catdir = config.get_categories(cat).dir()
                if catdir.endswith('*'):
                    catdir = catdir.strip('*')
                    one_folder = True
                complete_dir = real_path(cfg.complete_dir.get_path(), catdir)

            ## TV/Movie/Date Renaming code part 1 - detect and construct paths
            file_sorter = Sorter(cat)
            complete_dir = file_sorter.detect(dirname, complete_dir)
            if file_sorter.sort_file:
                one_folder = False

            if one_folder:
                workdir_complete = create_dirs(complete_dir)
            else:
                workdir_complete = get_unique_path(os.path.join(complete_dir, dirname), create_dir=True)
                marker_file = set_marker(workdir_complete)

            if not workdir_complete or not os.path.exists(workdir_complete):
                crash_msg = T('Cannot create final folder %s') % unicoder(os.path.join(complete_dir, dirname))
                raise IOError

            if cfg.folder_rename() and not one_folder:
                tmp_workdir_complete = prefix(workdir_complete, '_UNPACK_')
                try:
                    renamer(workdir_complete, tmp_workdir_complete)
                except:
                    pass # On failure, just use the original name
            else:
                tmp_workdir_complete = workdir_complete

            newfiles = []
            ## Run Stage 2: Unpack
            if flag_unpack:
                if all_ok:
                    # Set the current nzo status to "Extracting...". Used in History
                    nzo.status = Status.EXTRACTING
                    logging.info("Running unpack_magic on %s", filename)
                    unpack_error, newfiles = unpack_magic(nzo, workdir, tmp_workdir_complete, flag_delete, one_folder, (), (), (), ())
                    logging.info("unpack_magic finished on %s", filename)
                else:
                    nzo.set_unpack_info('Unpack', T('No post-processing because of failed verification'))

            if cfg.safe_postproc():
                all_ok = all_ok and not unpack_error

            if all_ok:
                ## Move any (left-over) files to destination
                nzo.status = Status.MOVING
                nzo.set_action_line(T('Moving'), '...')
                for root, dirs, files in os.walk(workdir):
                    if not root.endswith(JOB_ADMIN):
                        for file_ in files:
                            path = os.path.join(root, file_)
                            new_path = path.replace(workdir, tmp_workdir_complete)
                            ok, new_path = move_to_path(path, new_path)
                            newfiles.append(new_path)
                            if not ok:
                                nzo.set_unpack_info('Unpack', T('Failed moving %s to %s') % (unicoder(path), unicoder(new_path)))
                                all_ok = False
                                break

            ## Set permissions right
            set_permissions(tmp_workdir_complete)

            if all_ok and marker_file:
                del_marker(os.path.join(tmp_workdir_complete, marker_file))
                remove_from_list(marker_file, newfiles)

            if all_ok:
                ## Remove files matching the cleanup list
                cleanup_list(tmp_workdir_complete, True)

                ## Check if this is an NZB-only download, if so redirect to queue
                ## except when PP was Download-only
                if flag_repair:
                    nzb_list = nzb_redirect(tmp_workdir_complete, nzo.final_name, nzo.pp, script, cat, priority=nzo.priority)
                else:
                    nzb_list = None
                if nzb_list:
                    nzo.set_unpack_info('Download', T('Sent %s to queue') % unicoder(nzb_list))
                    cleanup_empty_directories(tmp_workdir_complete)
                else:
                    cleanup_list(tmp_workdir_complete, False)

        script_output = ''
        script_ret = 0
        if not nzb_list:
            ## Give destination its final name
            if cfg.folder_rename() and tmp_workdir_complete and not one_folder:
                if all_ok:
                    try:
                        newfiles = rename_and_collapse_folder(tmp_workdir_complete, workdir_complete, newfiles)
                    except:
                        logging.error(Ta('Error renaming "%s" to "%s"'), tmp_workdir_complete, workdir_complete)
                        logging.info('Traceback: ', exc_info = True)
                        # Better disable sorting because filenames are all off now
                        file_sorter.sort_file = None
                else:
                    workdir_complete = tmp_workdir_complete.replace('_UNPACK_', '_FAILED_')
                    workdir_complete = get_unique_path(workdir_complete, n=0, create_dir=False)

            if empty:
                job_result = -1
            else:
                job_result = int(par_error) + int(unpack_error)*2

            if cfg.ignore_samples() > 0:
                remove_samples(workdir_complete)

            ## TV/Movie/Date Renaming code part 2 - rename and move files to parent folder
            if all_ok and file_sorter.sort_file:
                if newfiles:
                    file_sorter.rename(newfiles, workdir_complete)
                    workdir_complete, ok = file_sorter.move(workdir_complete)
                else:
                    workdir_complete, ok = file_sorter.rename_with_ext(workdir_complete)
                if not ok:
                    nzo.set_unpack_info('Unpack', T('Failed to move files'))
                    all_ok = False

            ## Run the user script
            script_path = make_script_path(script)
            if (all_ok or not cfg.safe_postproc()) and (not nzb_list) and script_path:
                # Set the current nzo status to "Ext Script...". Used in History
                nzo.status = Status.RUNNING
                nzo.set_action_line(T('Running script'), unicoder(script))
                nzo.set_unpack_info('Script', T('Running user script %s') % unicoder(script), unique=True)
                script_log, script_ret = external_processing(script_path, workdir_complete, nzo.filename,
                                                             msgid, dirname, cat, nzo.group, job_result)
                script_line = get_last_line(script_log)
                if script_log:
                    script_output = nzo.nzo_id
                if script_line:
                    nzo.set_unpack_info('Script', unicoder(script_line), unique=True)
                else:
                    nzo.set_unpack_info('Script', T('Ran %s') % unicoder(script), unique=True)
            else:
                script = ""
                script_line = ""
                script_ret = 0

        ## Email the results
        if (not nzb_list) and cfg.email_endjob():
            if (cfg.email_endjob() == 1) or (cfg.email_endjob() == 2 and (unpack_error or par_error)):
                emailer.endjob(dirname, msgid, cat, all_ok, workdir_complete, nzo.bytes_downloaded,
                               nzo.fail_msg, nzo.unpack_info, script, TRANS(script_log), script_ret)

        if script_output:
            # Can do this only now, otherwise it would show up in the email
            if script_ret:
                script_ret = 'Exit(%s) ' % script_ret
            else:
                script_ret = ''
            if script_line:
                nzo.set_unpack_info('Script',
                                    u'%s%s <a href="./scriptlog?name=%s">(%s)</a>' % (script_ret, unicoder(script_line), urllib.quote(script_output),
                                     T('More')), unique=True)
            else:
                nzo.set_unpack_info('Script',
                                    u'%s<a href="./scriptlog?name=%s">%s</a>' % (script_ret, urllib.quote(script_output),
                                    T('View script output')), unique=True)

        ## Cleanup again, including NZB files
        if all_ok:
            cleanup_list(workdir_complete, False)

        ## Remove newzbin bookmark, if any
        if msgid and all_ok:
            Bookmarks.do.del_bookmark(msgid)
        elif all_ok and isinstance(nzo.url, str):
            sabnzbd.proxy_rm_bookmark(nzo.url)

        ## Force error for empty result
        all_ok = all_ok and not empty

        ## Show final status in history
        if all_ok:
            growler.send_notification(T('Download Completed'), filename, 'complete')
            nzo.status = Status.COMPLETED
        else:
            growler.send_notification(T('Download Failed'), filename, 'complete')
            nzo.status = Status.FAILED

    except:
        logging.error(Ta('Post Processing Failed for %s (%s)'), filename, crash_msg)
        if not crash_msg:
            logging.info("Traceback: ", exc_info = True)
            crash_msg = T('see logfile')
        nzo.fail_msg = T('PostProcessing was aborted (%s)') % unicoder(crash_msg)
        growler.send_notification(T('Download Failed'), filename, 'complete')
        nzo.status = Status.FAILED
        par_error = True
        all_ok = False
        if cfg.email_endjob():
            emailer.endjob(dirname, msgid, cat, all_ok, workdir_complete, nzo.bytes_downloaded,
                           nzo.fail_msg, nzo.unpack_info, '', '', 0)

    if all_ok:
        # If the folder only contains one file OR folder, have that as the path
        # Be aware that series/generic/date sorting may move a single file into a folder containing other files
        workdir_complete = one_file_or_folder(workdir_complete)
        workdir_complete = os.path.normpath(workdir_complete)

    # Log the overall time taken for postprocessing
    postproc_time = int(time.time() - start)

    # Create the history DB instance
    history_db = database.get_history_handle()
    # Add the nzo to the database. Only the path, script and time taken is passed
    # Other information is obtained from the nzo
    history_db.add_history_db(nzo, workdir_complete, nzo.downpath, postproc_time, script_log, script_line)
    # The connection is only used once, so close it here
    history_db.close()

    ## Clean up the NZO
    try:
        logging.info('Cleaning up %s (keep_basic=%s)', filename, str(not all_ok))
        sabnzbd.nzbqueue.NzbQueue.do.cleanup_nzo(nzo, keep_basic=not all_ok)
    except:
        logging.error(Ta('Cleanup of %s failed.'), nzo.final_name)
        logging.info("Traceback: ", exc_info = True)

    ## Remove download folder
    if all_ok:
        try:
            if os.path.exists(workdir):
                logging.debug('Removing workdir %s', workdir)
                remove_all(workdir, recursive=True)
        except:
            logging.error(Ta('Error removing workdir (%s)'), workdir)
            logging.info("Traceback: ", exc_info = True)

    return True
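The three job flags at the top of process_job nest: Delete implies Unpack, and Unpack implies Repair. The normalization logic, isolated as a sketch:

def normalize_pp(flag_repair, flag_unpack, flag_delete):
    # Delete implies Unpack, and Unpack implies Repair
    if flag_delete:
        flag_unpack = True
    if flag_unpack:
        flag_repair = True
    return flag_repair, flag_unpack, flag_delete

print(normalize_pp(False, False, True))   # -> (True, True, True)
print(normalize_pp(False, True, False))   # -> (True, True, False)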
Example #53
0
    def run(self):
        # Input and output
        linebuf = ''
        last_volume_linebuf = ''
        unrar_log = []
        rarfiles = []
        extracted = []
        start_time = time.time()

        # Need to read char-by-char because there's no newline after new-disk message
        while 1:
            if not self.active_instance:
                break

            char = self.active_instance.stdout.read(1)
            linebuf += char

            if not char:
                # End of program
                break

            # Error? Let PP-handle it
            if linebuf.endswith(
                ('ERROR: ', 'Cannot create', 'in the encrypted file',
                 'CRC failed', 'checksum failed',
                 'You need to start extraction from a previous volume',
                 'password is incorrect', 'Write error', 'checksum error',
                 'start extraction from a previous volume',
                 'Unexpected end of archive')):
                logging.info('Error in DirectUnpack of %s: %s',
                             self.cur_setname, linebuf.strip())
                self.abort()

            if linebuf.endswith('\n'):
                # List files we used
                if linebuf.startswith('Extracting from'):
                    filename = TRANS((re.search(EXTRACTFROM_RE,
                                                linebuf.strip()).group(1)))
                    if filename not in rarfiles:
                        rarfiles.append(filename)

                # List files we extracted
                m = re.search(EXTRACTED_RE, linebuf)
                if m:
                    # In case of flat-unpack, UnRar still prints the whole path (?!)
                    unpacked_file = TRANS(m.group(2))
                    if cfg.flat_unpack():
                        unpacked_file = os.path.basename(unpacked_file)
                    extracted.append(
                        real_path(self.unpack_dir_info[0], unpacked_file))

            # Did we reach the end?
            if linebuf.endswith('All OK'):
                # Stop timer and finish
                self.unpack_time += time.time() - start_time
                ACTIVE_UNPACKERS.remove(self)

                # Add to success
                rarfile_path = os.path.join(self.nzo.downpath,
                                            self.rarfile_nzf.filename)
                self.success_sets[self.cur_setname] = (rar_volumelist(
                    rarfile_path, self.nzo.password, rarfiles), extracted)
                logging.info('DirectUnpack completed for %s', self.cur_setname)
                self.nzo.set_action_line(T('Direct Unpack'), T('Completed'))

                # List success in history-info
                msg = T('Unpacked %s files/folders in %s') % (
                    len(extracted), format_time_string(self.unpack_time))
                msg = '%s - %s' % (T('Direct Unpack'), msg)
                self.nzo.set_unpack_info(
                    'Unpack', '[%s] %s' % (unicoder(self.cur_setname), msg))

                # Write current log and clear
                unrar_log.append(linebuf.strip())
                linebuf = ''
                last_volume_linebuf = ''
                logging.debug('DirectUnpack Unrar output %s',
                              '\n'.join(unrar_log))
                unrar_log = []
                rarfiles = []
                extracted = []

                # Are there more files left?
                while self.nzo.files and not self.next_sets:
                    with self.next_file_lock:
                        self.next_file_lock.wait()

                # Is there another set to do?
                if self.next_sets:
                    # Start new instance
                    nzf = self.next_sets.pop(0)
                    self.reset_active()
                    self.cur_setname = nzf.setname
                    # Wait for the 1st volume to appear
                    self.wait_for_next_volume()
                    self.create_unrar_instance()
                    start_time = time.time()
                else:
                    self.killed = True
                    break

            if linebuf.endswith('[C]ontinue, [Q]uit '):
                # Stop timer
                self.unpack_time += time.time() - start_time

                # Wait for the next one..
                self.wait_for_next_volume()

                # Possible that the instance was deleted while locked
                if not self.killed:
                    # If unrar stopped or is killed somehow, writing will cause a crash
                    try:
                        # Give unrar some time to do its thing
                        self.active_instance.stdin.write('C\n')
                        start_time = time.time()
                        time.sleep(0.1)
                    except IOError:
                        self.abort()
                        break

                    # Did we unpack a new volume? Sometimes UnRar hangs on 1 volume
                    if not last_volume_linebuf or last_volume_linebuf != linebuf:
                        # Next volume
                        self.cur_volume += 1
                        self.nzo.set_action_line(T('Direct Unpack'),
                                                 self.get_formatted_stats())
                        logging.info('DirectUnpacked volume %s for %s',
                                     self.cur_volume, self.cur_setname)

                    # If lines did not change and we don't have the next volume, this download is missing files!
                    # In rare occasions we can get stuck forever with repeating lines
                    if last_volume_linebuf == linebuf:
                        if not self.have_next_volume(
                        ) or self.duplicate_lines > 10:
                            logging.info(
                                'DirectUnpack failed due to missing files %s',
                                self.cur_setname)
                            self.abort()
                        else:
                            logging.debug(
                                'Duplicate output line detected: "%s"',
                                last_volume_linebuf)
                            self.duplicate_lines += 1
                    else:
                        self.duplicate_lines = 0
                    last_volume_linebuf = linebuf

            # Show the log
            if linebuf.endswith('\n'):
                unrar_log.append(linebuf.strip())
                linebuf = ''

        # Add last line
        unrar_log.append(linebuf.strip())
        logging.debug('DirectUnpack Unrar output %s', '\n'.join(unrar_log))

        # Make more space
        self.reset_active()
        if self in ACTIVE_UNPACKERS:
            ACTIVE_UNPACKERS.remove(self)

        # Set the thread to killed so it never gets restarted by accident
        self.killed = True
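The reason run() reads stdout one character at a time is stated in its opening comment: UnRar's '[C]ontinue, [Q]uit ' prompt has no trailing newline, so a readline() loop would block on it forever. A stand-in byte stream demonstrates the technique:

import io

stream = io.BytesIO(b'Extracting  archive.part1.rar\n[C]ontinue, [Q]uit ')
linebuf = ''
while True:
    char = stream.read(1)
    if not char:
        break
    linebuf += char.decode('latin-1')
    if linebuf.endswith('[C]ontinue, [Q]uit '):
        print('prompt detected without a newline')
        linebuf = ''
    elif linebuf.endswith('\n'):
        print('full line: %r' % linebuf.strip())
        linebuf = ''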
Example #54
0
def process_job(nzo):
    """ Process one job """
    start = time.time()

    # keep track of whether we can continue
    all_ok = True
    # keep track of par problems
    par_error = False
    # keep track of any unpacking errors
    unpack_error = False
    # Signal empty download, for when 'empty_postproc' is enabled
    empty = False
    nzb_list = []
    # These need to be initialized in case of a crash
    workdir_complete = ''
    script_log = ''
    script_line = ''

    # Get the job flags
    nzo.save_attribs()
    flag_repair, flag_unpack, flag_delete = nzo.repair_opts
    # Normalize PP
    if flag_delete:
        flag_unpack = True
    if flag_unpack:
        flag_repair = True

    # Get the NZB name
    filename = nzo.final_name

    if nzo.fail_msg:  # Special case: aborted due to too much missing data
        nzo.status = Status.FAILED
        nzo.save_attribs()
        all_ok = False
        par_error = True
        unpack_error = 1

    try:
        # Get the folder containing the download result
        workdir = nzo.downpath
        tmp_workdir_complete = None

        # if no files are present (except __admin__), fail the job
        if all_ok and len(globber(workdir)) < 2:
            if nzo.precheck:
                _enough, ratio = nzo.check_quality()
                req_ratio = float(cfg.req_completion_rate()) / 100.0
                # Make sure that rounded ratio doesn't equal required ratio
                # when it is actually below required
                if (ratio < req_ratio) and (req_ratio - ratio) < 0.001:
                    ratio = req_ratio - 0.001
                emsg = '%.1f%%' % (ratio * 100.0)
                emsg2 = '%.1f%%' % float(cfg.req_completion_rate())
                emsg = T(
                    'Download might fail, only %s of required %s available'
                ) % (emsg, emsg2)
            else:
                emsg = T('Download failed - Not on your server(s)')
                empty = True
            emsg += ' - https://sabnzbd.org/not-complete'
            nzo.fail_msg = emsg
            nzo.set_unpack_info('Fail', emsg)
            nzo.status = Status.FAILED
            # do not run unpacking or parity verification
            flag_repair = flag_unpack = False
            all_ok = cfg.empty_postproc() and empty
            if not all_ok:
                par_error = True
                unpack_error = 1

        script = nzo.script
        logging.info(
            'Starting Post-Processing on %s' +
            ' => Repair:%s, Unpack:%s, Delete:%s, Script:%s, Cat:%s', filename,
            flag_repair, flag_unpack, flag_delete, script, nzo.cat)

        # Set complete dir to workdir in case we need to abort
        workdir_complete = workdir

        # Par processing, if enabled
        if all_ok and flag_repair:
            par_error, re_add = parring(nzo, workdir)
            if re_add:
                # Try to get more par files
                return False

        # If we don't need extra par2, we can disconnect
        if sabnzbd.nzbqueue.NzbQueue.do.actives(
                grabs=False) == 0 and cfg.autodisconnect():
            # This was the last job, close server connections
            sabnzbd.downloader.Downloader.do.disconnect()

        # Sanitize the resulting files
        if sabnzbd.WIN32:
            sanitize_files_in_folder(workdir)

        # Check if user allows unsafe post-processing
        if flag_repair and cfg.safe_postproc():
            all_ok = all_ok and not par_error

        if all_ok:
            # Fix encodings
            fix_unix_encoding(workdir)

            # Use dirs generated by direct-unpacker
            if nzo.direct_unpacker and nzo.direct_unpacker.unpack_dir_info:
                tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = nzo.direct_unpacker.unpack_dir_info
            else:
                # Generate extraction path
                tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(
                    nzo)

            newfiles = []
            # Run Stage 2: Unpack
            if flag_unpack:
                # set the current nzo status to "Extracting...". Used in History
                nzo.status = Status.EXTRACTING
                logging.info("Running unpack_magic on %s", filename)
                unpack_error, newfiles = unpack_magic(nzo, workdir,
                                                      tmp_workdir_complete,
                                                      flag_delete, one_folder,
                                                      (), (), (), (), ())
                logging.info("Unpacked files %s", newfiles)

                if sabnzbd.WIN32:
                    # Sanitize the resulting files
                    newfiles = sanitize_files_in_folder(tmp_workdir_complete)
                logging.info("Finished unpack_magic on %s", filename)

            if cfg.safe_postproc():
                all_ok = all_ok and not unpack_error

            if all_ok:
                # Move any (left-over) files to destination
                nzo.status = Status.MOVING
                nzo.set_action_line(T('Moving'), '...')
                for root, _dirs, files in os.walk(workdir):
                    if not root.endswith(JOB_ADMIN):
                        for file_ in files:
                            path = os.path.join(root, file_)
                            new_path = path.replace(workdir,
                                                    tmp_workdir_complete)
                            ok, new_path = move_to_path(path, new_path)
                            if new_path:
                                newfiles.append(new_path)
                            if not ok:
                                nzo.set_unpack_info(
                                    'Unpack',
                                    T('Failed moving %s to %s') %
                                    (unicoder(path), unicoder(new_path)))
                                all_ok = False
                                break

            # Set permissions right
            set_permissions(tmp_workdir_complete)

            if all_ok and marker_file:
                del_marker(os.path.join(tmp_workdir_complete, marker_file))
                remove_from_list(marker_file, newfiles)

            if all_ok:
                # Remove files matching the cleanup list
                cleanup_list(tmp_workdir_complete, True)

                # Check if this is an NZB-only download, if so redirect to queue
                # except when PP was Download-only
                if flag_repair:
                    nzb_list = nzb_redirect(tmp_workdir_complete,
                                            nzo.final_name,
                                            nzo.pp,
                                            script,
                                            nzo.cat,
                                            priority=nzo.priority)
                else:
                    nzb_list = None
                if nzb_list:
                    nzo.set_unpack_info(
                        'Download',
                        T('Sent %s to queue') % unicoder(nzb_list))
                    cleanup_empty_directories(tmp_workdir_complete)
                else:
                    cleanup_list(tmp_workdir_complete, False)

        script_output = ''
        script_ret = 0
        if not nzb_list:
            # Give destination its final name
            if cfg.folder_rename() and tmp_workdir_complete and not one_folder:
                if all_ok:
                    try:
                        newfiles = rename_and_collapse_folder(
                            tmp_workdir_complete, workdir_complete, newfiles)
                    except:
                        logging.error(T('Error renaming "%s" to "%s"'),
                                      clip_path(tmp_workdir_complete),
                                      clip_path(workdir_complete))
                        logging.info('Traceback: ', exc_info=True)
                        # Better disable sorting because filenames are all off now
                        file_sorter.sort_file = None
                else:
                    workdir_complete = tmp_workdir_complete.replace(
                        '_UNPACK_', '_FAILED_')
                    workdir_complete = get_unique_path(workdir_complete,
                                                       n=0,
                                                       create_dir=False)

            if empty:
                job_result = -1
            else:
                job_result = int(par_error) + int(bool(unpack_error)) * 2

            if cfg.ignore_samples():
                remove_samples(workdir_complete)

            # TV/Movie/Date Renaming code part 2 - rename and move files to parent folder
            if all_ok and file_sorter.sort_file:
                if newfiles:
                    file_sorter.rename(newfiles, workdir_complete)
                    workdir_complete, ok = file_sorter.move(workdir_complete)
                else:
                    workdir_complete, ok = file_sorter.rename_with_ext(
                        workdir_complete)
                if not ok:
                    nzo.set_unpack_info('Unpack', T('Failed to move files'))
                    all_ok = False

            # Run the user script
            script_path = make_script_path(script)
            if (all_ok or not cfg.safe_postproc()) and (
                    not nzb_list) and script_path:
                # Set the current nzo status to "Ext Script...". Used in History
                nzo.status = Status.RUNNING
                nzo.set_action_line(T('Running script'), unicoder(script))
                nzo.set_unpack_info('Script',
                                    T('Running user script %s') %
                                    unicoder(script),
                                    unique=True)
                script_log, script_ret = external_processing(
                    script_path, nzo, clip_path(workdir_complete),
                    nzo.final_name, job_result)
                script_line = get_last_line(script_log)
                if script_log:
                    script_output = nzo.nzo_id
                if script_line:
                    nzo.set_unpack_info('Script',
                                        unicoder(script_line),
                                        unique=True)
                else:
                    nzo.set_unpack_info('Script',
                                        T('Ran %s') % unicoder(script),
                                        unique=True)
            else:
                script = ""
                script_line = ""
                script_ret = 0

        # Maybe bad script result should fail job
        if script_ret and cfg.script_can_fail():
            script_error = True
            all_ok = False
            nzo.fail_msg = T('Script exit code is %s') % script_ret
        else:
            script_error = False

        # Email the results
        if (not nzb_list) and cfg.email_endjob():
            if (cfg.email_endjob()
                    == 1) or (cfg.email_endjob() == 2 and
                              (unpack_error or par_error or script_error)):
                emailer.endjob(nzo.final_name, nzo.cat, all_ok,
                               workdir_complete, nzo.bytes_downloaded,
                               nzo.fail_msg, nzo.unpack_info, script,
                               TRANS(script_log), script_ret)

        if script_output:
            # Can do this only now, otherwise it would show up in the email
            if script_ret:
                script_ret = 'Exit(%s) ' % script_ret
            else:
                script_ret = ''
            if len(script_log.rstrip().split('\n')) > 1:
                nzo.set_unpack_info(
                    'Script',
                    u'%s%s <a href="./scriptlog?name=%s">(%s)</a>' %
                    (script_ret, script_line,
                     xml.sax.saxutils.escape(script_output), T('More')),
                    unique=True)
            else:
                # No '(more)' button needed
                nzo.set_unpack_info('Script',
                                    u'%s%s ' % (script_ret, script_line),
                                    unique=True)

        # Cleanup again, including NZB files
        if all_ok:
            cleanup_list(workdir_complete, False)

        # Force error for empty result
        all_ok = all_ok and not empty

        # Update indexer with results
        if cfg.rating_enable():
            if nzo.encrypted > 0:
                Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_ENCRYPTED)
            if empty:
                hosts = map(lambda s: s.host,
                            sabnzbd.downloader.Downloader.do.nzo_servers(nzo))
                if not hosts:
                    hosts = [None]
                for host in hosts:
                    Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_EXPIRED,
                                               host)

    except:
        logging.error(T('Post Processing Failed for %s (%s)'), filename,
                      T('see logfile'))
        logging.info("Traceback: ", exc_info=True)

        nzo.fail_msg = T('PostProcessing was aborted (%s)') % T('see logfile')
        notifier.send_notification(T('Download Failed'), filename, 'failed',
                                   nzo.cat)
        nzo.status = Status.FAILED
        par_error = True
        all_ok = False

        if cfg.email_endjob():
            emailer.endjob(nzo.final_name, nzo.cat, all_ok,
                           clip_path(workdir_complete), nzo.bytes_downloaded,
                           nzo.fail_msg, nzo.unpack_info, '', '', 0)

    if all_ok:
        # If the folder only contains one file OR folder, have that as the path
        # Be aware that series/generic/date sorting may move a single file into a folder containing other files
        workdir_complete = one_file_or_folder(workdir_complete)
        workdir_complete = os.path.normpath(workdir_complete)

    # Clean up the NZO
    try:
        logging.info('Cleaning up %s (keep_basic=%s)', filename,
                     str(not all_ok))
        sabnzbd.nzbqueue.NzbQueue.do.cleanup_nzo(nzo, keep_basic=not all_ok)
    except:
        logging.error(T('Cleanup of %s failed.'), nzo.final_name)
        logging.info("Traceback: ", exc_info=True)

    # Remove download folder
    if all_ok:
        try:
            if os.path.exists(workdir):
                logging.debug('Removing workdir %s', workdir)
                remove_all(workdir, recursive=True)
        except:
            logging.error(T('Error removing workdir (%s)'), clip_path(workdir))
            logging.info("Traceback: ", exc_info=True)

    # Use automatic retry link on par2 errors and encrypted/bad RARs
    if par_error or unpack_error in (2, 3):
        try_alt_nzb(nzo)

    # Show final status in history
    if all_ok:
        notifier.send_notification(T('Download Completed'), filename,
                                   'complete', nzo.cat)
        nzo.status = Status.COMPLETED
    else:
        notifier.send_notification(T('Download Failed'), filename, 'failed',
                                   nzo.cat)
        nzo.status = Status.FAILED

    # Log the overall time taken for postprocessing
    postproc_time = int(time.time() - start)

    # Create the history DB instance
    history_db = database.HistoryDB()
    # Add the nzo to the database. Only the path, script and time taken is passed
    # Other information is obtained from the nzo
    history_db.add_history_db(nzo, clip_path(workdir_complete), nzo.downpath,
                              postproc_time, script_log, script_line)
    # Purge items
    history_db.auto_history_purge()
    # The connection is only used once, so close it here
    history_db.close()
    sabnzbd.history_updated()
    return True
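The job_result handed to the user script above is a tiny bit field: bit 0 is the par error, bit 1 the unpack error, and -1 is reserved for empty downloads. In isolation:

def job_result_code(par_error, unpack_error, empty):
    # Mirrors the computation above: -1 for empty, else a 2-bit code
    if empty:
        return -1
    return int(par_error) + int(bool(unpack_error)) * 2

print(job_result_code(True, 0, False))   # -> 1 (par error only)
print(job_result_code(True, 3, False))   # -> 3 (par and unpack errors)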
Example #55
0
def bad_fetch(nzo, url, msg='', retry=False, content=False):
    """ Create History entry for failed URL Fetch
        msg : message to be logged
        retry : make retry link in history
        content : report in history that the cause is a bad NZB file
    """
    if msg:
        msg = unicoder(msg)
    else:
        msg = ''

    pp = nzo.pp
    if pp is None:
        pp = ''
    else:
        pp = '&pp=%s' % str(pp)
    cat = nzo.cat
    if cat:
        cat = '&cat=%s' % urllib.quote(cat)
    else:
        cat = ''
    script = nzo.script
    if script:
        script = '&script=%s' % urllib.quote(script)
    else:
        script = ''

    nzo.status = Status.FAILED

    if url:
        nzo.filename = url
        nzo.final_name = url.strip()

    if content:
        # Bad content
        msg = T('Unusable NZB file')
    else:
        # Failed fetch
        msg = ' (' + msg + ')'

    if retry:
        nzbname = nzo.custom_name
        if nzbname:
            nzbname = '&nzbname=%s' % urllib.quote(nzbname)
        else:
            nzbname = ''
        text = T('URL Fetching failed; %s') + ', <a href="./retry?session=%s&url=%s&job=%s%s%s%s%s">' + T('Try again') + '</a>'
        parms = (msg, cfg.api_key(), urllib.quote(url), nzo.nzo_id, pp, cat, script, nzbname)
        nzo.fail_msg = text % parms
    else:
        nzo.fail_msg = msg

    if isinstance(url, int) or url.isdigit():
        url = 'Newzbin #%s' % url
    growler.send_notification(T('URL Fetching failed; %s') % '', '%s\n%s' % (msg, url), 'other')
    if cfg.email_endjob() > 0:
        #import sabnzbd.emailer
        sabnzbd.emailer.badfetch_mail(msg, url)

    from sabnzbd.nzbqueue import NzbQueue
    assert isinstance(NzbQueue.do, NzbQueue)
    NzbQueue.do.remove(nzo.nzo_id, add_to_history=True)
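bad_fetch assembles its retry link from optional '&key=value' fragments that default to empty strings, so the final URL is built by plain concatenation. The pattern in isolation (Python 2 urllib; the names are illustrative):

import urllib

def fragment(key, value):
    # Empty or missing values contribute nothing to the URL
    if value:
        return '&%s=%s' % (key, urllib.quote(str(value)))
    return ''

print('./retry?session=APIKEY' + fragment('cat', 'tv') + fragment('script', ''))
# -> ./retry?session=APIKEY&cat=tv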
Example #56
0
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add) """
    filename = nzo.final_name
    notifier.send_notification(T('Post-processing'), filename, 'pp', nzo.cat)
    logging.info('Starting verification and repair of %s', filename)

    # Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath,
                                 remove=False) or {}
    repair_sets = nzo.extrapars.keys()

    re_add = False
    par_error = False
    single = len(repair_sets) == 1

    if repair_sets:
        for setname in repair_sets:
            if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
                continue
            if not verified.get(setname, False):
                logging.info("Running verification and repair on set %s",
                             setname)
                parfile_nzf = nzo.partable[setname]

                # Check if file maybe wasn't deleted and if we maybe have more files in the parset
                if os.path.exists(
                        os.path.join(
                            nzo.downpath,
                            parfile_nzf.filename)) or nzo.extrapars[setname]:
                    need_re_add, res = par2_repair(parfile_nzf,
                                                   nzo,
                                                   workdir,
                                                   setname,
                                                   single=single)

                    # Was it aborted?
                    if not nzo.pp_active:
                        re_add = False
                        par_error = True
                        break

                    re_add = re_add or need_re_add
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res

    else:
        # We must not have found any par2 files...
        logging.info("No par2 sets for %s", filename)
        nzo.set_unpack_info('Repair',
                            T('[%s] No par2 sets') % unicoder(filename))
        if cfg.sfv_check() and not verified.get('', False):
            par_error = not try_sfv_check(nzo, workdir, '')
            verified[''] = not par_error
        # If still no success, do RAR-check
        if not par_error and cfg.enable_unrar():
            par_error = not try_rar_check(nzo, workdir, '')
            verified[''] = not par_error

    if re_add:
        logging.info('Re-added %s to queue', filename)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        nzo.status = Status.FETCHING
        sabnzbd.nzbqueue.NzbQueue.do.add(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)

    logging.info('Verification and repair finished for %s', filename)
    return par_error, re_add
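parring() keeps its per-set verification results in a plain dict persisted to VERIFIED_FILE, which is what lets a re-run (after extra par2 files were fetched) skip sets that already verified. A sketch with illustrative set names:

verified = {'movie': True, 'sample': False}
repair_sets = ['movie', 'sample']

for setname in repair_sets:
    if not verified.get(setname, False):
        print('would verify and repair %s' % setname)
# -> would verify and repair sample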
Example #57
0
def process_job(nzo):
    """ Process one job """
    assert isinstance(nzo, sabnzbd.nzbstuff.NzbObject)
    start = time.time()

    # keep track of whether we can continue
    all_ok = True
    # keep track of par problems
    par_error = False
    # keep track of any unpacking errors
    unpack_error = False
    # Signal empty download, for when 'empty_postproc' is enabled
    empty = False
    nzb_list = []
    # These need to be initialized in case of a crash
    workdir_complete = ''
    postproc_time = 0
    script_log = ''
    script_line = ''
    crash_msg = ''

    ## Get the job flags
    nzo.save_attribs()
    flag_repair, flag_unpack, flag_delete = nzo.repair_opts
    # Normalize PP
    if flag_delete: flag_unpack = True
    if flag_unpack: flag_repair = True

    # Get the NZB name
    filename = nzo.final_name
    msgid = nzo.msgid

    if cfg.allow_streaming() and not (flag_repair or flag_unpack
                                      or flag_delete):
        # After streaming, force +D
        nzo.set_pp(3)
        nzo.status = Status.FAILED
        nzo.save_attribs()
        all_ok = False

    if nzo.fail_msg:  # Special case: aborted due to too much missing data
        nzo.status = Status.FAILED
        nzo.save_attribs()
        all_ok = False
        par_error = unpack_error = True

    try:

        # Get the folder containing the download result
        workdir = nzo.downpath
        tmp_workdir_complete = None

        # if no files are present (except __admin__), fail the job
        if all_ok and len(globber(workdir)) < 2:
            if nzo.precheck:
                enough, ratio = nzo.check_quality()
                req_ratio = float(cfg.req_completion_rate()) / 100.0
                # Make sure that rounded ratio doesn't equal required ratio
                # when it is actually below required
                if (ratio < req_ratio) and (req_ratio - ratio) < 0.001:
                    ratio = req_ratio - 0.001
                emsg = '%.1f%%' % (ratio * 100.0)
                emsg2 = '%.1f%%' % float(cfg.req_completion_rate())
                emsg = T(
                    'Download might fail, only %s of required %s available'
                ) % (emsg, emsg2)
            else:
                emsg = T('Download failed - Out of your server\'s retention?')
                empty = True
            nzo.fail_msg = emsg
            nzo.set_unpack_info('Fail', emsg)
            nzo.status = Status.FAILED
            # do not run unpacking or parity verification
            flag_repair = flag_unpack = False
            all_ok = cfg.empty_postproc() and empty
            if not all_ok:
                par_error = unpack_error = True

        script = nzo.script
        cat = nzo.cat

        logging.info('Starting PostProcessing on %s' + \
                     ' => Repair:%s, Unpack:%s, Delete:%s, Script:%s, Cat:%s',
                     filename, flag_repair, flag_unpack, flag_delete, script, cat)

        ## Par processing, if enabled
        if all_ok and flag_repair:
            par_error, re_add = parring(nzo, workdir)
            if re_add:
                # Try to get more par files
                return False

        ## Check if user allows unsafe post-processing
        if flag_repair and cfg.safe_postproc():
            all_ok = all_ok and not par_error

        # Set complete dir to workdir in case we need to abort
        workdir_complete = workdir
        dirname = nzo.final_name
        marker_file = None

        if all_ok:
            one_folder = False
            ## Determine class directory
            if cfg.create_group_folders():
                complete_dir = addPrefixes(cfg.complete_dir.get_path(),
                                           nzo.dirprefix)
                complete_dir = create_dirs(complete_dir)
            else:
                catdir = config.get_categories(cat).dir()
                if catdir.endswith('*'):
                    catdir = catdir.strip('*')
                    one_folder = True
                complete_dir = real_path(cfg.complete_dir.get_path(), catdir)

            ## TV/Movie/Date Renaming code part 1 - detect and construct paths
            if cfg.enable_meta():
                file_sorter = Sorter(nzo, cat)
            else:
                file_sorter = Sorter(None, cat)
            complete_dir = file_sorter.detect(dirname, complete_dir)
            if file_sorter.sort_file:
                one_folder = False

            if one_folder:
                workdir_complete = create_dirs(complete_dir)
            else:
                workdir_complete = get_unique_path(os.path.join(
                    complete_dir, dirname),
                                                   create_dir=True)
                marker_file = set_marker(workdir_complete)

            if not workdir_complete or not os.path.exists(workdir_complete):
                crash_msg = T('Cannot create final folder %s') % unicoder(
                    os.path.join(complete_dir, dirname))
                raise IOError

            if cfg.folder_rename() and not one_folder:
                tmp_workdir_complete = prefix(workdir_complete, '_UNPACK_')
                try:
                    renamer(workdir_complete, tmp_workdir_complete)
                except:
                    pass  # On failure, just use the original name
            else:
                tmp_workdir_complete = workdir_complete

            newfiles = []
            ## Run Stage 2: Unpack
            if flag_unpack:
                if all_ok:
                    #set the current nzo status to "Extracting...". Used in History
                    nzo.status = Status.EXTRACTING
                    logging.info("Running unpack_magic on %s", filename)
                    unpack_error, newfiles = unpack_magic(
                        nzo, workdir, tmp_workdir_complete, flag_delete,
                        one_folder, (), (), (), ())
                    logging.info("unpack_magic finished on %s", filename)
                else:
                    nzo.set_unpack_info(
                        'Unpack',
                        T('No post-processing because of failed verification'))

            if cfg.safe_postproc():
                all_ok = all_ok and not unpack_error

            if all_ok:
                ## Move any (left-over) files to destination
                nzo.status = Status.MOVING
                nzo.set_action_line(T('Moving'), '...')
                for root, dirs, files in os.walk(workdir):
                    if not root.endswith(JOB_ADMIN):
                        for file_ in files:
                            path = os.path.join(root, file_)
                            new_path = path.replace(workdir,
                                                    tmp_workdir_complete)
                            ok, new_path = move_to_path(path, new_path)
                            newfiles.append(new_path)
                            if not ok:
                                nzo.set_unpack_info(
                                    'Unpack',
                                    T('Failed moving %s to %s') %
                                    (unicoder(path), unicoder(new_path)))
                                all_ok = False
                                break

            ## Set permissions right
            set_permissions(tmp_workdir_complete)

            if all_ok and marker_file:
                del_marker(os.path.join(tmp_workdir_complete, marker_file))
                remove_from_list(marker_file, newfiles)

            if all_ok:
                ## Remove files matching the cleanup list
                cleanup_list(tmp_workdir_complete, True)

                ## Check if this is an NZB-only download, if so redirect to queue
                ## except when PP was Download-only
                if flag_repair:
                    nzb_list = nzb_redirect(tmp_workdir_complete,
                                            nzo.final_name,
                                            nzo.pp,
                                            script,
                                            cat,
                                            priority=nzo.priority)
                else:
                    nzb_list = None
                if nzb_list:
                    nzo.set_unpack_info(
                        'Download',
                        T('Sent %s to queue') % unicoder(nzb_list))
                    cleanup_empty_directories(tmp_workdir_complete)
                else:
                    cleanup_list(tmp_workdir_complete, False)

        script_output = ''
        script_ret = 0
        if not nzb_list:
            ## Give destination its final name
            if cfg.folder_rename() and tmp_workdir_complete and not one_folder:
                if all_ok:
                    try:
                        newfiles = rename_and_collapse_folder(
                            tmp_workdir_complete, workdir_complete, newfiles)
                    except:
                        logging.error(Ta('Error renaming "%s" to "%s"'),
                                      tmp_workdir_complete, workdir_complete)
                        logging.info('Traceback: ', exc_info=True)
                        # Better disable sorting because filenames are all off now
                        file_sorter.sort_file = None
                else:
                    workdir_complete = tmp_workdir_complete.replace(
                        '_UNPACK_', '_FAILED_')
                    workdir_complete = get_unique_path(workdir_complete,
                                                       n=0,
                                                       create_dir=False)

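            # job_result is handed to the user script as the PP status:
            # 0 = OK, 1 = par error, 2 = unpack error, 3 = both, -1 = empty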
            if empty:
                job_result = -1
            else:
                job_result = int(par_error) + int(unpack_error) * 2

            if cfg.ignore_samples() > 0:
                remove_samples(workdir_complete)

            ## TV/Movie/Date Renaming code part 2 - rename and move files to parent folder
            if all_ok and file_sorter.sort_file:
                if newfiles:
                    file_sorter.rename(newfiles, workdir_complete)
                    workdir_complete, ok = file_sorter.move(workdir_complete)
                else:
                    workdir_complete, ok = file_sorter.rename_with_ext(
                        workdir_complete)
                if not ok:
                    nzo.set_unpack_info('Unpack', T('Failed to move files'))
                    all_ok = False

            ## Run the user script
            script_path = make_script_path(script)
            if (all_ok or not cfg.safe_postproc()) and (
                    not nzb_list) and script_path:
                #set the current nzo status to "Ext Script...". Used in History
                nzo.status = Status.RUNNING
                nzo.set_action_line(T('Running script'), unicoder(script))
                nzo.set_unpack_info('Script',
                                    T('Running user script %s') %
                                    unicoder(script),
                                    unique=True)
                script_log, script_ret = external_processing(
                    script_path, workdir_complete, nzo.filename, msgid,
                    dirname, cat, nzo.group, job_result,
                    nzo.nzo_info.get('failure', ''))
                script_line = get_last_line(script_log)
                if script_log:
                    script_output = nzo.nzo_id
                if script_line:
                    nzo.set_unpack_info('Script',
                                        unicoder(script_line),
                                        unique=True)
                else:
                    nzo.set_unpack_info('Script',
                                        T('Ran %s') % unicoder(script),
                                        unique=True)
            else:
                script = ""
                script_line = ""
                script_ret = 0

        ## Email the results
        if (not nzb_list) and cfg.email_endjob():
            if (cfg.email_endjob() == 1) or (cfg.email_endjob() == 2 and
                                             (unpack_error or par_error)):
                emailer.endjob(dirname, msgid, cat, all_ok, workdir_complete,
                               nzo.bytes_downloaded,
                               nzo.fail_msg, nzo.unpack_info, script,
                               TRANS(script_log), script_ret)

        if script_output:
            # Can do this only now, otherwise it would show up in the email
            if script_ret:
                script_ret = 'Exit(%s) ' % script_ret
            else:
                script_ret = ''
            if script_line:
                nzo.set_unpack_info(
                    'Script',
                    u'%s%s <a href="./scriptlog?name=%s">(%s)</a>' %
                    (script_ret, unicoder(script_line),
                     urllib.quote(script_output), T('More')),
                    unique=True)
            else:
                nzo.set_unpack_info('Script',
                                    u'%s<a href="./scriptlog?name=%s">%s</a>' %
                                    (script_ret, urllib.quote(script_output),
                                     T('View script output')),
                                    unique=True)

        ## Cleanup again, including NZB files
        if all_ok:
            cleanup_list(workdir_complete, False)

        ## Remove newzbin bookmark, if any
        if msgid and all_ok:
            Bookmarks.do.del_bookmark(msgid)
        elif all_ok and isinstance(nzo.url, str):
            sabnzbd.proxy_rm_bookmark(nzo.url)

        ## Force error for empty result
        all_ok = all_ok and not empty

        ## Update indexer with results
        if nzo.encrypted > 0:
            Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_ENCRYPTED)
        if empty:
            hosts = map(lambda s: s.host,
                        sabnzbd.downloader.Downloader.do.nzo_servers(nzo))
            if not hosts: hosts = [None]
            for host in hosts:
                Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_EXPIRED,
                                           host)

        ## Show final status in history
        if all_ok:
            growler.send_notification(T('Download Completed'), filename,
                                      'complete')
            nzo.status = Status.COMPLETED
        else:
            growler.send_notification(T('Download Failed'), filename,
                                      'complete')
            nzo.status = Status.FAILED

    except:
        logging.error(Ta('Post Processing Failed for %s (%s)'), filename,
                      crash_msg)
        if not crash_msg:
            logging.info("Traceback: ", exc_info=True)
            crash_msg = T('see logfile')
        nzo.fail_msg = T('PostProcessing was aborted (%s)') % unicoder(
            crash_msg)
        growler.send_notification(T('Download Failed'), filename, 'complete')
        nzo.status = Status.FAILED
        par_error = True
        all_ok = False
        if cfg.email_endjob():
            emailer.endjob(dirname, msgid, cat, all_ok, workdir_complete,
                           nzo.bytes_downloaded, nzo.fail_msg, nzo.unpack_info,
                           '', '', 0)

    if all_ok:
        # If the folder only contains one file OR folder, have that as the path
        # Be aware that series/generic/date sorting may move a single file into a folder containing other files
        workdir_complete = one_file_or_folder(workdir_complete)
        workdir_complete = os.path.normpath(workdir_complete)

    # Log the overall time taken for postprocessing
    postproc_time = int(time.time() - start)

    # Create the history DB instance
    history_db = database.get_history_handle()
    # Add the nzo to the database. Only the path, script and time taken are
    # passed; other information is obtained from the nzo
    history_db.add_history_db(nzo, workdir_complete, nzo.downpath,
                              postproc_time, script_log, script_line)
    # The connection is only used once, so close it here
    history_db.close()

    ## Clean up the NZO
    try:
        logging.info('Cleaning up %s (keep_basic=%s)', filename,
                     str(not all_ok))
        sabnzbd.nzbqueue.NzbQueue.do.cleanup_nzo(nzo, keep_basic=not all_ok)
    except:
        logging.error(Ta('Cleanup of %s failed.'), nzo.final_name)
        logging.info("Traceback: ", exc_info=True)

    ## Remove download folder
    if all_ok:
        try:
            if os.path.exists(workdir):
                logging.debug('Removing workdir %s', workdir)
                remove_all(workdir, recursive=True)
        except:
            logging.error(Ta('Error removing workdir (%s)'), workdir)
            logging.info("Traceback: ", exc_info=True)

    return True
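
The _UNPACK_ dance above is worth isolating: the final folder is created first, renamed to a temporary _UNPACK_ name while files are moved and extracted, and only renamed back (or to _FAILED_) once the outcome is known. A minimal standalone sketch, assuming the folder already exists (finalize_with_marker is an illustrative name, not a SABnzbd helper):

import os

def finalize_with_marker(final_dir, work):
    """ Run `work` inside a temporary _UNPACK_ folder, then commit the name """
    base = os.path.basename(final_dir.rstrip(os.sep))
    tmp_dir = os.path.join(os.path.dirname(final_dir), '_UNPACK_' + base)
    os.rename(final_dir, tmp_dir)      # hide the folder while it is busy
    try:
        work(tmp_dir)                  # unpack / move files in here
    except Exception:
        # Keep the partial result, but mark the folder as failed
        os.rename(tmp_dir, tmp_dir.replace('_UNPACK_', '_FAILED_'))
        raise
    os.rename(tmp_dir, final_dir)      # give destination its final name
    return final_dir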
Example #58
0
File: rss.py Project: labrys/sabnzbd
    def run_feed(self, feed=None, download=False, ignoreFirst=False, force=False, readout=True):
        """ Run the query for one URI and apply filters """
        self.shutdown = False

        def dup_title(title):
            """ Check if this title was in this or other feeds
                Return matching feed name
            """
            title = title.lower()
            for fd in self.jobs:
                for lk in self.jobs[fd]:
                    item = self.jobs[fd][lk]
                    if item.get('status', ' ')[0] == 'D' and \
                       item.get('title', '').lower() == title:
                        return fd
            return ''

        if not feed:
            return 'No such feed'

        newlinks = []
        new_downloads = []

        # Preparations, get options
        try:
            feeds = config.get_rss()[feed]
        except KeyError:
            logging.error(T('Incorrect RSS feed description "%s"'), feed)
            logging.info("Traceback: ", exc_info=True)
            return T('Incorrect RSS feed description "%s"') % feed

        uri = feeds.uri()
        defCat = feeds.cat()
        import sabnzbd.api
        if not notdefault(defCat) or defCat not in sabnzbd.api.list_cats(default=False):
            defCat = None
        defPP = feeds.pp()
        if not notdefault(defPP):
            defPP = None
        defScript = feeds.script()
        if not notdefault(defScript):
            defScript = None
        defPrio = feeds.priority()
        if not notdefault(defPrio):
            defPrio = None

        # Preparations, convert filters to regexes
        regexes = []
        reTypes = []
        reCats = []
        rePPs = []
        rePrios = []
        reScripts = []
        reEnabled = []
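        # Each filter entry is a 7-tuple:
        # (category, pp, script, type, match text, priority, enabled)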
        for filter in feeds.filters():
            reCat = filter[0]
            if defCat in ('', '*'):
                reCat = None
            reCats.append(reCat)
            rePPs.append(filter[1])
            reScripts.append(filter[2])
            reTypes.append(filter[3])
            if filter[3] in ('<', '>', 'F'):
                regexes.append(filter[4])
            else:
                regexes.append(convert_filter(filter[4]))
            rePrios.append(filter[5])
            reEnabled.append(filter[6] != '0')
        regcount = len(regexes)

        # Set first if this is the very first scan of this URI
        first = (feed not in self.jobs) and ignoreFirst

        # Add sabnzbd's custom User Agent
        feedparser.USER_AGENT = 'SABnzbd+/%s' % sabnzbd.version.__version__

        # Check for nzbs.org
        if 'nzbs.org/' in uri and '&dl=1' not in uri:
            uri += '&dl=1'

        # Read the RSS feed
        msg = None
        entries = None
        if readout:
            uri = uri.replace(' ', '%20')
            logging.debug("Running feedparser on %s", uri)
            d = feedparser.parse(uri.replace('feed://', 'http://'))
            logging.debug("Done parsing %s", uri)
            if not d:
                msg = T('Failed to retrieve RSS from %s: %s') % (uri, '?')
                logging.info(msg)
                return unicoder(msg)

            status = d.get('status', 999)
            if status in (401, 402, 403):
                msg = T('Do not have valid authentication for feed %s') % feed
                logging.info(msg)
                return unicoder(msg)
            if 500 <= status <= 599:
                msg = T('Server side error (server code %s); could not get %s on %s') % (status, feed, uri)
                logging.info(msg)
                return unicoder(msg)

            entries = d.get('entries')
            if 'bozo_exception' in d and not entries:
                msg = str(d['bozo_exception'])
                if 'CERTIFICATE_VERIFY_FAILED' in msg:
                    msg = T('Server %s uses an untrusted HTTPS certificate') % get_urlbase(uri)
                    logging.error(msg)
                else:
                    msg = T('Failed to retrieve RSS from %s: %s') % (uri, xml_name(msg))
                logging.info(msg)
                return unicoder(msg)
            if not entries:
                msg = T('RSS Feed %s was empty') % uri
                logging.info(msg)

        if feed not in self.jobs:
            self.jobs[feed] = {}
        jobs = self.jobs[feed]
        if readout:
            if not entries:
                return unicoder(msg)
        else:
            entries = jobs.keys()
            # Sort in the order the jobs came from the feed
            entries.sort(lambda x, y: jobs[x].get('order', 0) - jobs[y].get('order', 0))

        order = 0
        # Filter out valid new links
        for entry in entries:
            if self.shutdown:
                return

            if readout:
                try:
                    link, category, size = _get_link(uri, entry)
                except (AttributeError, IndexError):
                    link = None
                    category = u''
                    size = 0L
                    logging.info(T('Incompatible feed') + ' ' + uri)
                    logging.info("Traceback: ", exc_info=True)
                    return T('Incompatible feed')
                title = entry.title
            else:
                link = entry
                category = jobs[link].get('orgcat', '')
                if category in ('', '*'):
                    category = None
                title = jobs[link].get('title', '')
                size = jobs[link].get('size', 0L)

            if link:
                # Make sure spaces are quoted in the URL
                link = link.strip().replace(' ', '%20')

                newlinks.append(link)

                if link in jobs:
                    jobstat = jobs[link].get('status', ' ')[0]
                else:
                    jobstat = 'N'
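                # First letter of 'status' encodes the job state: N = new,
                # G = matched (good), B = rejected (bad), D = downloaded;
                # a trailing '*' flags items from the feed's first scan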
                if jobstat in 'NGB' or (jobstat == 'X' and readout):
                    # Match this title against all filters
                    logging.debug('Trying title %s', title)
                    result = False
                    myCat = defCat
                    myPP = defPP
                    myScript = defScript
                    myPrio = defPrio
                    n = 0
                    if 'F' in reTypes:
                        season, episode = sabnzbd.newsunpack.analyse_show(title)[1:3]
                        season = int_conv(season)
                        episode = int_conv(episode)
                    else:
                        season = episode = 0

                    # Match against all filters until a positive or negative match
                    logging.debug('Size %s for %s', size, title)
                    for n in xrange(regcount):
                        if reEnabled[n]:
                            if category and reTypes[n] == 'C':
                                found = re.search(regexes[n], category)
                                if not found:
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break
                            elif reTypes[n] == '<' and size and from_units(regexes[n]) < size:
                                # "Size at most" : too large
                                logging.debug('Filter rejected on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == '>' and size and from_units(regexes[n]) > size:
                                # "Size at least" : too small
                                logging.debug('Filter rejected on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == 'F' and not ep_match(season, episode, regexes[n]):
                                # "Starting from SxxEyy", too early episode
                                logging.debug('Filter requirement match on rule %d', n)
                                result = False
                                break
                            else:
                                if regexes[n]:
                                    found = re.search(regexes[n], title)
                                else:
                                    found = False
                                if reTypes[n] == 'M' and not found:
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break
                                if found and reTypes[n] == 'A':
                                    logging.debug("Filter matched on rule %d", n)
                                    result = True
                                    break
                                if found and reTypes[n] == 'R':
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break

                    if len(reCats):
                        if notdefault(reCats[n]):
                            myCat = reCats[n]
                        elif category and not defCat:
                            myCat = cat_convert(category)
                        if myCat:
                            myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
                        else:
                            myCat = catPP = catScript = catPrio = None
                        if notdefault(rePPs[n]):
                            myPP = rePPs[n]
                        elif not (reCats[n] or category):
                            myPP = catPP
                        if notdefault(reScripts[n]):
                            myScript = reScripts[n]
                        elif not (notdefault(reCats[n]) or category):
                            myScript = catScript
                        if rePrios[n] not in (str(DEFAULT_PRIORITY), ''):
                            myPrio = rePrios[n]
                        elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
                            myPrio = catPrio

                    if cfg.no_dupes() and dup_title(title):
                        if cfg.no_dupes() == 1:
                            logging.info("Ignoring duplicate job %s", title)
                            continue
                        else:
                            myPrio = DUP_PRIORITY

                    act = download and not first
                    if link in jobs:
                        act = act and not jobs[link].get('status', '').endswith('*')
                        act = act or force
                        star = first or jobs[link].get('status', '').endswith('*')
                    else:
                        star = first
                    if result:
                        _HandleLink(jobs, link, title, size, 'G', category, myCat, myPP, myScript,
                                    act, star, order, priority=myPrio, rule=str(n))
                        if act:
                            new_downloads.append(title)
                    else:
                        _HandleLink(jobs, link, title, size, 'B', category, myCat, myPP, myScript,
                                    False, star, order, priority=myPrio, rule=str(n))
            order += 1

        # Send email if wanted and not "forced"
        if new_downloads and cfg.email_rss() and not force:
            emailer.rss_mail(feed, new_downloads)

        remove_obsolete(jobs, newlinks)
        return ''
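
The accept/reject loop in this example reduces to a small core. A hedged sketch of just the 'M' (must match), 'A' (accept) and 'R' (reject) rule types; the rule tuples and case-insensitive matching are illustrative, not the exact SABnzbd filter format:

import re

def match_title(title, rules):
    """ rules: list of (rtype, pattern) pairs, evaluated in order """
    for rtype, pattern in rules:
        found = re.search(pattern, title, re.I)
        if rtype == 'M' and not found:
            return False          # requirement not met
        if rtype == 'A' and found:
            return True           # explicit accept
        if rtype == 'R' and found:
            return False          # explicit reject
    return False                  # like the original: default is no match

rules = [('M', r'720p'), ('R', r'french'), ('A', r'x264')]
print(match_title('Show.S01E02.720p.x264-GRP', rules))   # True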
Example #59
0
def folders_at_path(path, include_parent=False):
    """ Returns a list of dictionaries with the folders contained at the given path
        Give the empty string as the path to list the contents of the root path
        under Unix this means "/", on Windows this will be a list of drive letters)
    """
    from sabnzbd.encoding import unicoder

    if path == "":
        if NT:
            entries = [{
                'name': letter + ':\\',
                'path': letter + ':\\'
            } for letter in get_win_drives()]
            entries.insert(0, {'current_path': 'Root'})
            return entries
        else:
            path = '/'

    # walk up the tree until we find a valid path
    path = sabnzbd.misc.real_path(sabnzbd.DIR_HOME, path)
    while path and not os.path.isdir(path):
        if path == os.path.dirname(path):
            return folders_at_path('', include_parent)
        else:
            path = os.path.dirname(path)

    # fix up the path and find the parent
    path = os.path.abspath(os.path.normpath(path))
    parent_path = os.path.dirname(path)

    # if we're at the root then the next step is the meta-node showing our drive letters
    if path == parent_path and os.name == 'nt':
        parent_path = ""

    file_list = []
    try:
        for filename in os.listdir(path):
            fpath = os.path.join(path, filename)
            try:
                if NT:
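                    # MASK/TMASK are module-level attribute masks; the test
                    # presumably keeps plain directories that are not hidden
                    # or system entries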
                    doit = (win32api.GetFileAttributes(fpath)
                            & MASK) == TMASK and filename != 'PerfLogs'
                else:
                    doit = not filename.startswith('.')
            except:
                doit = False
            if doit:
                file_list.append({
                    'name': unicoder(filename),
                    'path': unicoder(fpath)
                })
        file_list = filter(lambda entry: os.path.isdir(entry['path']),
                           file_list)
        file_list = filter(
            lambda entry: entry['name'].lower() not in _JUNKFOLDERS, file_list)
        file_list = sorted(file_list,
                           lambda x, y: cmp(os.path.basename(x['name']).lower(),
                                            os.path.basename(y['name']).lower()))
    except:
        # No access, ignore
        pass
    file_list.insert(0, {'current_path': path})
    if include_parent and parent_path != path:
        file_list.insert(1, {'name': "..", 'path': parent_path})

    return file_list
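
As a usage sketch: the returned list mixes a 'current_path' header dict with folder entries, so callers must filter on the keys. The output below assumes a Unix machine, listing from the root:

entries = folders_at_path('', include_parent=True)
print(entries[0]['current_path'])            # e.g. '/'
for entry in entries[1:]:
    if 'name' in entry:                      # skip any header dicts
        print('%-25s %s' % (entry['name'], entry['path']))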
Example #60
0
File: rss.py Project: rivy/sabnzbd
    def run_feed(self, feed=None, download=False, ignoreFirst=False, force=False, readout=True):
        """ Run the query for one URI and apply filters """
        self.shutdown = False

        if not feed:
            return 'No such feed'

        newlinks = []
        new_downloads = []

        # Preparations, get options
        try:
            feeds = config.get_rss()[feed]
        except KeyError:
            logging.error(T('Incorrect RSS feed description "%s"'), feed)
            logging.info("Traceback: ", exc_info=True)
            return T('Incorrect RSS feed description "%s"') % feed

        uris = feeds.uri()
        defCat = feeds.cat()
        import sabnzbd.api
        if not notdefault(defCat) or defCat not in sabnzbd.api.list_cats(default=False):
            defCat = None
        defPP = feeds.pp()
        if not notdefault(defPP):
            defPP = None
        defScript = feeds.script()
        if not notdefault(defScript):
            defScript = None
        defPrio = feeds.priority()
        if not notdefault(defPrio):
            defPrio = None

        # Preparations, convert filters to regexes
        regexes = []
        reTypes = []
        reCats = []
        rePPs = []
        rePrios = []
        reScripts = []
        reEnabled = []
        for filter in feeds.filters():
            reCat = filter[0]
            if defCat in ('', '*'):
                reCat = None
            reCats.append(reCat)
            rePPs.append(filter[1])
            reScripts.append(filter[2])
            reTypes.append(filter[3])
            if filter[3] in ('<', '>', 'F', 'S'):
                regexes.append(filter[4])
            else:
                regexes.append(convert_filter(filter[4]))
            rePrios.append(filter[5])
            reEnabled.append(filter[6] != '0')
        regcount = len(regexes)

        # Set first if this is the very first scan of this URI
        first = (feed not in self.jobs) and ignoreFirst

        # Add sabnzbd's custom User Agent
        feedparser.USER_AGENT = 'SABnzbd+/%s' % sabnzbd.version.__version__

        # Read the RSS feed
        msg = None
        entries = None
        if readout:
            all_entries = []
            for uri in uris:
                uri = uri.replace(' ', '%20')
                logging.debug("Running feedparser on %s", uri)
                feed_parsed = feedparser.parse(uri.replace('feed://', 'http://'))
                logging.debug("Done parsing %s", uri)

                if not feed_parsed:
                    msg = T('Failed to retrieve RSS from %s: %s') % (uri, '?')
                    logging.info(msg)

                status = feed_parsed.get('status', 999)
                if status in (401, 402, 403):
                    msg = T('Do not have valid authentication for feed %s') % feed
                    logging.info(msg)

                if 500 <= status <= 599:
                    msg = T('Server side error (server code %s); could not get %s on %s') % (status, feed, uri)
                    logging.info(msg)

                entries = feed_parsed.get('entries')
                if 'bozo_exception' in feed_parsed and not entries:
                    msg = str(feed_parsed['bozo_exception'])
                    if 'CERTIFICATE_VERIFY_FAILED' in msg:
                        msg = T('Server %s uses an untrusted HTTPS certificate') % get_urlbase(uri)
                        msg += ' - https://sabnzbd.org/certificate-errors'
                        logging.error(msg)
                    else:
                        msg = T('Failed to retrieve RSS from %s: %s') % (uri, xml_name(msg))
                    logging.info(msg)

                if not entries:
                    msg = T('RSS Feed %s was empty') % uri
                    logging.info(msg)
                else:
                    all_entries.extend(entries)
            entries = all_entries

        if feed not in self.jobs:
            self.jobs[feed] = {}
        jobs = self.jobs[feed]
        if readout:
            if not entries:
                return unicoder(msg)
        else:
            entries = jobs.keys()
            # Sort in the order the jobs came from the feed
            entries.sort(lambda x, y: jobs[x].get('order', 0) - jobs[y].get('order', 0))

        order = 0
        # Filter out valid new links
        for entry in entries:
            if self.shutdown:
                return

            if readout:
                try:
                    link, category, size, age, season, episode = _get_link(uri, entry)
                except (AttributeError, IndexError):
                    link = None
                    category = u''
                    size = 0L
                    age = None
                    logging.info(T('Incompatible feed') + ' ' + uri)
                    logging.info("Traceback: ", exc_info=True)
                    return T('Incompatible feed')
                title = entry.title

                # If there are multiple feeds, remove duplicates based on title and size
                if len(uris) > 1:
                    skip_job = False
                    for job_link, job in jobs.items():
                        # Allow 5% size deviation because indexers might differ slightly for the same release
                        if job.get('title') == title and link != job_link and (job.get('size')*0.95) < size < (job.get('size')*1.05):
                            logging.info("Ignoring job %s from other feed", title)
                            skip_job = True
                            break
                    if skip_job:
                        continue
            else:
                link = entry
                category = jobs[link].get('orgcat', '')
                if category in ('', '*'):
                    category = None
                title = jobs[link].get('title', '')
                size = jobs[link].get('size', 0L)
                age = jobs[link].get('age')
                season = jobs[link].get('season', 0)
                episode = jobs[link].get('episode', 0)

            if link:
                # Make sure spaces are quoted in the URL
                link = link.strip().replace(' ', '%20')

                newlinks.append(link)

                if link in jobs:
                    jobstat = jobs[link].get('status', ' ')[0]
                else:
                    jobstat = 'N'
                if jobstat in 'NGB' or (jobstat == 'X' and readout):
                    # Match this title against all filters
                    logging.debug('Trying title %s', title)
                    result = False
                    myCat = defCat
                    myPP = defPP
                    myScript = defScript
                    myPrio = defPrio
                    n = 0
                    if ('F' in reTypes or 'S' in reTypes) and (not season or not episode):
                        season, episode = sabnzbd.newsunpack.analyse_show(title)[1:3]
                        season = int_conv(season)
                        episode = int_conv(episode)

                    # Match against all filters until a positive or negative match
                    logging.debug('Size %s for %s', size, title)
                    for n in xrange(regcount):
                        if reEnabled[n]:
                            if category and reTypes[n] == 'C':
                                found = re.search(regexes[n], category)
                                if not found:
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break
                            elif reTypes[n] == '<' and size and from_units(regexes[n]) < size:
                                # "Size at most" : too large
                                logging.debug('Filter rejected on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == '>' and size and from_units(regexes[n]) > size:
                                # "Size at least" : too small
                                logging.debug('Filter rejected on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == 'F' and not ep_match(season, episode, regexes[n]):
                                # "Starting from SxxEyy", too early episode
                                logging.debug('Filter requirement match on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == 'S' and season and episode and ep_match(season, episode, regexes[n], title):
                                logging.debug('Filter matched on rule %d', n)
                                result = True
                                break
                            else:
                                if regexes[n]:
                                    found = re.search(regexes[n], title)
                                else:
                                    found = False
                                if reTypes[n] == 'M' and not found:
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break
                                if found and reTypes[n] == 'A':
                                    logging.debug("Filter matched on rule %d", n)
                                    result = True
                                    break
                                if found and reTypes[n] == 'R':
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break

                    if len(reCats):
                        if not result and defCat:
                            # Apply Feed-category on non-matched items
                            myCat = defCat
                        elif result and notdefault(reCats[n]):
                            # Use the matched info
                            myCat = reCats[n]
                        elif category and not defCat:
                            # No feed default set: convert the indexer's own category
                            myCat = cat_convert(category)

                        if myCat:
                            myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
                        else:
                            myCat = catPP = catScript = catPrio = None
                        if notdefault(rePPs[n]):
                            myPP = rePPs[n]
                        elif not (reCats[n] or category):
                            myPP = catPP
                        if notdefault(reScripts[n]):
                            myScript = reScripts[n]
                        elif not (notdefault(reCats[n]) or category):
                            myScript = catScript
                        if rePrios[n] not in (str(DEFAULT_PRIORITY), ''):
                            myPrio = rePrios[n]
                        elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
                            myPrio = catPrio

                    if cfg.no_dupes() and self.check_duplicate(title):
                        if cfg.no_dupes() == 1:
                            # Dupe-detection: Discard
                            logging.info("Ignoring duplicate job %s", title)
                            continue
                        elif cfg.no_dupes() == 3:
                            # Dupe-detection: Fail
                            # We accept it so the Queue can send it to the History
                            logging.info("Found duplicate job %s", title)
                        else:
                            # Dupe-detection: Pause
                            myPrio = DUP_PRIORITY

                    act = download and not first
                    if link in jobs:
                        act = act and not jobs[link].get('status', '').endswith('*')
                        act = act or force
                        star = first or jobs[link].get('status', '').endswith('*')
                    else:
                        star = first
                    if result:
                        _HandleLink(jobs, link, title, size, age, season, episode, 'G', category, myCat, myPP, myScript,
                                    act, star, order, priority=myPrio, rule=str(n))
                        if act:
                            new_downloads.append(title)
                    else:
                        _HandleLink(jobs, link, title, size, age, season, episode, 'B', category, myCat, myPP, myScript,
                                    False, star, order, priority=myPrio, rule=str(n))
            order += 1

        # Send email if wanted and not "forced"
        if new_downloads and cfg.email_rss() and not force:
            emailer.rss_mail(feed, new_downloads)

        remove_obsolete(jobs, newlinks)
        return msg
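
The cross-feed duplicate test above (same title, size within 5%) trips over jobs whose stored size is missing: None * 0.95 raises a TypeError. A standalone sketch with that guard added (is_cross_feed_dupe is an illustrative name, not part of SABnzbd):

def is_cross_feed_dupe(title, size, jobs, link=None):
    """ True when another job has the same title and a size within 5% """
    for job_link, job in jobs.items():
        other_size = job.get('size') or 0
        if (job.get('title') == title and job_link != link and
                other_size and other_size * 0.95 < size < other_size * 1.05):
            return True
    return False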