Code example #1
File: dirscanner.py Project: 12345z/sabnzbd
def ProcessSingleFile(filename, path, pp=None, script=None, cat=None, catdir=None, keep=False,
                      priority=None, nzbname=None, reuse=False, nzo_info=None, dup_check=True, url=''):
    """ Analyse file and create a job from it
        Supports NZB, NZB.GZ and GZ.NZB-in-disguise
        returns: -2==Error/retry, -1==Error, 0==OK, 1==OK-but-cannot-delete
    """
    from sabnzbd.nzbqueue import add_nzo
    if catdir is None:
        catdir = cat

    try:
        f = open(path, 'rb')
        b1 = f.read(1)
        b2 = f.read(1)
        f.close()

        if (b1 == '\x1f' and b2 == '\x8b'):
            # gzip file or gzip in disguise
            name = filename.replace('.nzb.gz', '.nzb')
            f = gzip.GzipFile(path, 'rb')
        else:
            name = filename
            f = open(path, 'rb')
        data = f.read()
        f.close()
    except:
        logging.warning(Ta('Cannot read %s'), path)
        logging.info("Traceback: ", exc_info = True)
        return -2


    if name:
        name, cat = name_to_cat(name, catdir)
        # The name is used as the name of the folder, so sanitize it using folder-specific sanitization
        name = misc.sanitize_foldername(name)

    try:
        nzo = nzbstuff.NzbObject(name, 0, pp, script, data, cat=cat, priority=priority, nzbname=nzbname,
                                 nzo_info=nzo_info, reuse=reuse, dup_check=dup_check)
        nzo.url = url
    except TypeError:
        # Duplicate, ignore
        nzo = None
    except:
        if data.find("<nzb") >= 0 and data.find("</nzb") < 0:
            # Looks like an incomplete file, retry
            return -2
        else:
            return -1

    if nzo:
        add_nzo(nzo)
    try:
        if not keep: os.remove(path)
    except:
        logging.error(Ta('Error removing %s'), path)
        logging.info("Traceback: ", exc_info = True)
        return 1

    return 0
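For orientation, here is a minimal usage sketch (not part of the listing above). It assumes the sabnzbd package is importable and already initialised; the file path, category and post-processing value are purely illustrative. In SABnzbd this function is normally invoked by the directory scanner for each file found in a watched folder.

# Hedged sketch: hand one NZB file to ProcessSingleFile and check the result code.
# Assumes sabnzbd is initialised elsewhere; path, cat and pp values are illustrative.
import os
from sabnzbd.dirscanner import ProcessSingleFile

path = '/tmp/example.nzb'                    # hypothetical watched-folder file
res = ProcessSingleFile(os.path.basename(path), path,
                        pp=3, script=None, cat='tv',
                        keep=True)           # keep=True leaves the source file on disk
if res == 0:
    print 'Job added to the queue'
elif res == -2:
    print 'Read/parse problem, worth retrying later'
else:
    print 'Error, giving up'                 # -1 (with keep=True, 1 cannot occur)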
Code example #2
File: rss.py Project: cpl183/sabnzbd
def _HandleLink(
    jobs, link, title, flag, orgcat, cat, pp, script, download, star, order, priority=NORMAL_PRIORITY, rule=0
):
    """ Process one link """
    if script == "":
        script = None
    if pp == "":
        pp = None

    jobs[link] = {}
    jobs[link]["order"] = order
    jobs[link]["orgcat"] = orgcat
    if special_rss_site(link):
        nzbname = None
    else:
        nzbname = sanitize_foldername(title)
    m = RE_NEWZBIN.search(link)
    if m and m.group(1).lower() == "newz" and m.group(2) and m.group(3):
        if download:
            jobs[link]["status"] = "D"
            jobs[link]["title"] = title
            logging.info("Adding %s (%s) to queue", m.group(3), title)
            sabnzbd.add_msgid(m.group(3), pp=pp, script=script, cat=cat, priority=priority, nzbname=nzbname)
        else:
            if star:
                jobs[link]["status"] = flag + "*"
            else:
                jobs[link]["status"] = flag
            jobs[link]["title"] = title
            jobs[link]["url"] = m.group(3)
            jobs[link]["cat"] = cat
            jobs[link]["pp"] = pp
            jobs[link]["script"] = script
            jobs[link]["prio"] = str(priority)
    else:
        if download:
            jobs[link]["status"] = "D"
            jobs[link]["title"] = title
            logging.info("Adding %s (%s) to queue", link, title)
            sabnzbd.add_url(link, pp=pp, script=script, cat=cat, priority=priority, nzbname=nzbname)
        else:
            if star:
                jobs[link]["status"] = flag + "*"
            else:
                jobs[link]["status"] = flag
            jobs[link]["title"] = title
            jobs[link]["url"] = link
            jobs[link]["cat"] = cat
            jobs[link]["pp"] = pp
            jobs[link]["script"] = script
            jobs[link]["prio"] = str(priority)

    jobs[link]["time"] = time.time()
    jobs[link]["rule"] = rule
Code example #3
File: rss.py Project: WhiteStatic/sabnzbd
def _HandleLink(jobs, link, title, flag, orgcat, cat, pp, script, download, star, order,
                priority=NORMAL_PRIORITY, rule=0):
    """ Process one link """
    if script == '': script = None
    if pp == '': pp = None

    jobs[link] = {}
    jobs[link]['order'] = order
    jobs[link]['orgcat'] = orgcat
    if special_rss_site(link):
        nzbname = None
    else:
        nzbname = sanitize_foldername(title)
    m = RE_NEWZBIN.search(link)
    if m and m.group(1).lower() == 'newz' and m.group(2) and m.group(3):
        if download:
            jobs[link]['status'] = 'D'
            jobs[link]['title'] = title
            logging.info("Adding %s (%s) to queue", m.group(3), title)
            sabnzbd.add_msgid(m.group(3), pp=pp, script=script, cat=cat, priority=priority, nzbname=nzbname)
        else:
            if star:
                jobs[link]['status'] = flag + '*'
            else:
                jobs[link]['status'] = flag
            jobs[link]['title'] = title
            jobs[link]['url'] = m.group(3)
            jobs[link]['cat'] = cat
            jobs[link]['pp'] = pp
            jobs[link]['script'] = script
            jobs[link]['prio'] = str(priority)
    else:
        if download:
            jobs[link]['status'] = 'D'
            jobs[link]['title'] = title
            logging.info("Adding %s (%s) to queue", link, title)
            sabnzbd.add_url(link, pp=pp, script=script, cat=cat, priority=priority, nzbname=nzbname)
        else:
            if star:
                jobs[link]['status'] = flag + '*'
            else:
                jobs[link]['status'] = flag
            jobs[link]['title'] = title
            jobs[link]['url'] = link
            jobs[link]['cat'] = cat
            jobs[link]['pp'] = pp
            jobs[link]['script'] = script
            jobs[link]['prio'] = str(priority)

    jobs[link]['time'] = time.time()
    jobs[link]['rule'] = rule
Code example #4
def _HandleLink(jobs, link, title, flag, orgcat, cat, pp, script, download, star, order,
                priority=NORMAL_PRIORITY, rule=0):
    """ Process one link """
    if script == '': script = None
    if pp == '': pp = None

    jobs[link] = {}
    jobs[link]['order'] = order
    jobs[link]['orgcat'] = orgcat
    if special_rss_site(link):
        nzbname = None
    else:
        nzbname = sanitize_foldername(title)
    m = RE_NEWZBIN.search(link)
    if m and m.group(1).lower() == 'newz' and m.group(2) and m.group(3):
        if download:
            jobs[link]['status'] = 'D'
            jobs[link]['title'] = title
            logging.info("Adding %s (%s) to queue", m.group(3), title)
            sabnzbd.add_msgid(m.group(3), pp=pp, script=script, cat=cat, priority=priority, nzbname=nzbname)
        else:
            if star:
                jobs[link]['status'] = flag + '*'
            else:
                jobs[link]['status'] = flag
            jobs[link]['title'] = title
            jobs[link]['url'] = m.group(3)
            jobs[link]['cat'] = cat
            jobs[link]['pp'] = pp
            jobs[link]['script'] = script
            jobs[link]['prio'] = str(priority)
    else:
        if download:
            jobs[link]['status'] = 'D'
            jobs[link]['title'] = title
            logging.info("Adding %s (%s) to queue", link, title)
            sabnzbd.add_url(link, pp=pp, script=script, cat=cat, priority=priority, nzbname=nzbname)
        else:
            if star:
                jobs[link]['status'] = flag + '*'
            else:
                jobs[link]['status'] = flag
            jobs[link]['title'] = title
            jobs[link]['url'] = link
            jobs[link]['cat'] = cat
            jobs[link]['pp'] = pp
            jobs[link]['script'] = script
            jobs[link]['prio'] = str(priority)

    jobs[link]['time'] = time.time()
    jobs[link]['rule'] = rule
Code example #5
File: sorting.py Project: mattias197711/sabnzbd
def eval_sort(sorttype, expression, name=None, multipart=''):
    """ Preview a sort expression, to be used by API """
    from sabnzbd.api import Ttemplate
    path = ''
    name = sanitize_foldername(name)
    if sorttype == 'series':
        name = name or ('%s S01E05 - %s [DTS]' %
                        (Ttemplate('show-name'), Ttemplate('ep-name')))
        sorter = SeriesSorter(None, name, path, 'tv')
    elif sorttype == 'movie':
        name = name or (Ttemplate('movie-sp-name') + ' (2009)')
        sorter = MovieSorter(None, name, path, 'tv')
    elif sorttype == 'date':
        name = name or (Ttemplate('show-name') + ' 2009-01-02')
        sorter = DateSorter(None, name, path, 'tv')
    else:
        return None
    sorter.sort_string = expression
    sorter.match(force=True)
    path = sorter.get_final_path()
    path = os.path.normpath(os.path.join(path, sorter.filename_set))
    fname = Ttemplate('orgFilename')
    fpath = path
    if sorttype == 'movie' and '%1' in multipart:
        fname = fname + multipart.replace('%1', '1')
        fpath = fpath + multipart.replace('%1', '1')
    if '%fn' in path:
        path = path.replace('%fn', fname + '.mkv')
    else:
        if sorter.rename_or_not:
            path = fpath + '.mkv'
        else:
            if sabnzbd.WIN32:
                path += '\\'
            else:
                path += '/'
    return path
Code example #6
File: sorting.py Project: sabnzbd/sabnzbd
def eval_sort(sorttype, expression, name=None, multipart=''):
    """ Preview a sort expression, to be used by API """
    from sabnzbd.api import Ttemplate
    path = ''
    name = sanitize_foldername(name)
    if sorttype == 'series':
        name = name or ('%s S01E05 - %s [DTS]' % (Ttemplate('show-name'), Ttemplate('ep-name')))
        sorter = SeriesSorter(None, name, path, 'tv')
    elif sorttype == 'movie':
        name = name or (Ttemplate('movie-sp-name') + ' (2009)')
        sorter = MovieSorter(None, name, path, 'tv')
    elif sorttype == 'date':
        name = name or (Ttemplate('show-name') + ' 2009-01-02')
        sorter = DateSorter(None, name, path, 'tv')
    else:
        return None
    sorter.sort_string = expression
    sorter.match(force=True)
    path = sorter.get_final_path()
    path = os.path.normpath(os.path.join(path, sorter.filename_set))
    fname = Ttemplate('orgFilename')
    fpath = path
    if sorttype == 'movie' and '%1' in multipart:
        fname = fname + multipart.replace('%1', '1')
        fpath = fpath + multipart.replace('%1', '1')
    if '%fn' in path:
        path = path.replace('%fn', fname + '.mkv')
    else:
        if sorter.rename_or_not:
            path = fpath + '.mkv'
        else:
            if sabnzbd.WIN32:
                path += '\\'
            else:
                path += '/'
    return path
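For reference, a hedged usage sketch of this preview helper. It assumes the sabnzbd package is importable and initialised; the sort expression and its %-placeholders are only illustrative, since they are interpreted by the sorter classes rather than by eval_sort itself.

# Sketch: preview where a series job would be sorted to, using a sample expression.
# Because no job name is passed, eval_sort() substitutes dummy show/episode names.
from sabnzbd.sorting import eval_sort

preview = eval_sort('series', '%sn/Season %s/%sn - %sx%0e')   # expression is illustrative
if preview is None:
    print 'Unknown sort type'
else:
    print 'Preview path:', preview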
Code example #7
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo) = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue
            if future_nzo and future_nzo.wait and future_nzo.wait > time.time():
                # Requeue when too early and still active

                self.add(url, future_nzo)
                continue
            url = url.replace(' ', '')

            try:
                del_bookmark = not future_nzo
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing', url)
                        continue

                # Add nzbmatrix credentials if needed
                url, matrix_id = _matrix_url(url)

                # _grab_url cannot reside in a function, because the tempfile
                # would not survive the end of the function
                if del_bookmark:
                    logging.info('Removing nzbmatrix bookmark %s', matrix_id)
                else:
                    logging.info('Grabbing URL %s', url)
                opener = urllib.FancyURLopener({})
                opener.prompt_user_passwd = None
                opener.addheaders = []
                opener.addheader('User-Agent', 'SABnzbd+/%s' % sabnzbd.version.__version__)
                if not [True for item in _BAD_GZ_HOSTS if item in url]:
                    opener.addheader('Accept-encoding','gzip')
                filename = None
                category = None
                length = 0
                nzo_info = {}
                try:
                    fn, header = opener.retrieve(url)
                except:
                    fn = None

                if fn:
                    for tup in header.items():
                        try:
                            item = tup[0].lower()
                            value = tup[1].strip()
                        except:
                            continue
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo',):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name',):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item in ('content-length',):
                            length = misc.int_conv(value)

                        if not filename:
                            for item in tup:
                                if "filename=" in item:
                                    filename = item[item.index("filename=") + 9:].strip(';').strip('"')

                if matrix_id:
                    fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                    if not fn:
                        if retry:
                            logging.info(msg)
                            logging.debug('Retry nzbmatrix item %s after waiting %s sec', matrix_id, wait)
                            self.add(url, future_nzo, wait)
                        else:
                            logging.error(msg)
                            misc.bad_fetch(future_nzo, clean_matrix_url(url), msg, retry=True)
                        continue
                    category = _MATRIX_MAP.get(category, category)

                    if del_bookmark:
                        # No retries of nzbmatrix bookmark removals
                        continue

                else:
                    fn, msg, retry, wait = _analyse_others(fn, url)
                    if not fn:
                        if retry:
                            logging.info('Retry URL %s', url)
                            self.add(url, future_nzo, wait)
                        else:
                            misc.bad_fetch(future_nzo, url, msg, retry=True)
                        continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                # Sanitize and trim name, preserving the extension
                filename, ext = os.path.splitext(filename)
                filename = misc.sanitize_foldername(filename)
                filename += '.' + misc.sanitize_foldername(ext)

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                    res, nzo_ids = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat, priority=priority, \
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url)
                    if res == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                    else:
                        if res == -2:
                            logging.info('Incomplete NZB, retry after 5 min %s', url)
                            when = 300
                        else:
                            logging.info('Unknown error fetching NZB, retry after 2 min %s', url)
                            when = 120
                        self.add(url, future_nzo, when)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(filename, fn, pp, script, cat, priority=priority, url=future_nzo.url)[0] == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        logging.info('Unknown filetype when fetching NZB, retry after 30s %s', url)
                        self.add(url, future_nzo, 30)
            except:
                logging.error('URLGRABBER CRASHED', exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Code example #8
File: dirscanner.py Project: TinyHTPC/xbmc-dev-repo
def ProcessArchiveFile(filename, path, pp=None, script=None, cat=None, catdir=None, keep=False,
                       priority=None, url='', nzbname=None):
    """ Analyse ZIP file and create job(s).
        Accepts ZIP files with ONLY nzb/nfo/folder files in them.
        returns (status, nzo_ids)
            status: -1==Error/Retry, 0==OK, 1==Ignore
    """
    from sabnzbd.nzbqueue import add_nzo
    nzo_ids = []
    if catdir is None:
        catdir = cat

    filename, cat = name_to_cat(filename, catdir)

    if zipfile.is_zipfile(path):
        try:
            zf = zipfile.ZipFile(path)
        except:
            return -1, []
    elif is_rarfile(path):
        try:
            zf = RarFile(path)
        except:
            return -1, []
    else:
        return 1, []

    status = 1
    names = zf.namelist()
    names.sort()
    nzbcount = 0
    for name in names:
        name = name.lower()
        if not (name.endswith('.nzb') or name.endswith('.nfo') or name.endswith('/')):
            status = 1
            break
        elif name.endswith('.nzb'):
            status = 0
            nzbcount += 1
    if status == 0:
        if nzbcount != 1:
            nzbname = None
        for name in names:
            if name.lower().endswith('.nzb'):
                try:
                    data = zf.read(name)
                except:
                    zf.close()
                    return -1, []
                name = re.sub(r'\[.*nzbmatrix.com\]', '', name)
                name = os.path.basename(name)
                name = misc.sanitize_foldername(name)
                if data:
                    try:
                        nzo = nzbstuff.NzbObject(name, 0, pp, script, data, cat=cat, url=url,
                                                 priority=priority, nzbname=nzbname)
                    except:
                        nzo = None
                    if nzo:
                        nzo_ids.append(add_nzo(nzo))
                        nzo.update_rating()
        zf.close()
        try:
            if not keep: os.remove(path)
        except:
            logging.error(Ta('Error removing %s'), path)
            logging.info("Traceback: ", exc_info = True)
            status = 1
    else:
        zf.close()
        status = 1

    return status, nzo_ids
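A hedged usage sketch for the archive variant follows. It assumes sabnzbd is importable and initialised; the archive path is hypothetical and, as the docstring requires, the archive must contain only .nzb/.nfo entries and folders.

# Sketch: queue every NZB found inside a ZIP/RAR via ProcessArchiveFile.
import os
from sabnzbd.dirscanner import ProcessArchiveFile

path = '/tmp/batch.zip'    # hypothetical archive of .nzb files
status, nzo_ids = ProcessArchiveFile(os.path.basename(path), path, cat='tv', keep=True)
if status == 0:
    print 'Queued %d job(s): %s' % (len(nzo_ids), nzo_ids)
elif status == 1:
    print 'Ignored: not an archive containing only NZBs'
else:
    print 'Error reading the archive, worth retrying'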
Code example #9
File: dirscanner.py Project: TinyHTPC/xbmc-dev-repo
def ProcessSingleFile(filename, path, pp=None, script=None, cat=None, catdir=None, keep=False,
                      priority=None, nzbname=None, reuse=False, nzo_info=None, dup_check=True, url=''):
    """ Analyse file and create a job from it
        Supports NZB, NZB.GZ and GZ.NZB-in-disguise
        returns (status, nzo_ids)
            status: -2==Error/retry, -1==Error, 0==OK, 1==OK-but-cannot-delete
    """
    from sabnzbd.nzbqueue import add_nzo
    nzo_ids = []
    if catdir is None:
        catdir = cat

    try:
        f = open(path, 'rb')
        b1 = f.read(1)
        b2 = f.read(1)
        f.close()

        if (b1 == '\x1f' and b2 == '\x8b'):
            # gzip file or gzip in disguise
            name = filename.replace('.nzb.gz', '.nzb')
            f = gzip.GzipFile(path, 'rb')
        else:
            name = filename
            f = open(path, 'rb')
        data = f.read()
        f.close()
    except:
        logging.warning(Ta('Cannot read %s'), path)
        logging.info("Traceback: ", exc_info = True)
        return -2, nzo_ids


    if name:
        name, cat = name_to_cat(name, catdir)
        # The name is used as the name of the folder, so sanitize it using folder-specific sanitization
        if not nzbname:
            # Prevent the embedded password from being damaged by sanitizing and trimming
            nzbname = os.path.split(name)[1]
        name = misc.sanitize_foldername(name)

    try:
        nzo = nzbstuff.NzbObject(name, 0, pp, script, data, cat=cat, priority=priority, nzbname=nzbname,
                                 nzo_info=nzo_info, url=url, reuse=reuse, dup_check=dup_check)
    except TypeError:
        # Duplicate, ignore
        nzo = None
    except ValueError:
        # Empty, but correct file
        return -1, nzo_ids
    except:
        if data.find("<nzb") >= 0 and data.find("</nzb") < 0:
            # Looks like an incomplete file, retry
            return -2, nzo_ids
        else:
            return -1, nzo_ids

    if nzo:
        nzo_ids.append(add_nzo(nzo))
        nzo.update_rating()
    try:
        if not keep: os.remove(path)
    except:
        logging.error(Ta('Error removing %s'), path)
        logging.info("Traceback: ", exc_info = True)
        return 1, nzo_ids

    return 0, nzo_ids
Code example #10
File: newzbin.py Project: Adrellias/sabnzbd
def _grabnzb(msgid):
    """ Grab one msgid from newzbin """

    msg = ''
    retry = (60, None, None, None)
    nzo_info = {'msgid': msgid}

    logging.info('Fetching NZB for Newzbin report #%s', msgid)

    headers = {'User-agent' : 'SABnzbd+/%s' % sabnzbd.version.__version__}

    # Connect to Newzbin
    try:
        if _HAVE_SSL:
            conn = httplib.HTTPSConnection(cfg.newzbin_url())
        else:
            conn = httplib.HTTPConnection(cfg.newzbin_url())

        postdata = { 'username': cfg.newzbin_username(), 'password': cfg.newzbin_password(), 'reportid': msgid }
        postdata = urllib.urlencode(postdata)

        headers['Content-type'] = 'application/x-www-form-urlencoded'

        fetchurl = '/api/dnzb/'
        conn.request('POST', fetchurl, postdata, headers)
        response = conn.getresponse()

        # Save debug info if we have to
        data = response.read()

    except:
        _warn_user('Problem accessing Newzbin server, wait 1 min.')
        logging.info("Traceback: ", exc_info = True)
        return retry

    # Get the filename
    rcode = response.getheader('X-DNZB-RCode')
    rtext = response.getheader('X-DNZB-RText')
    try:
        nzo_info['more_info'] = response.getheader('X-DNZB-MoreInfo')
    except:
        # Only some reports will generate a moreinfo header
        pass
    if not (rcode or rtext):
        logging.error(T('Newzbin server changed its protocol'))
        return retry

    # Official return codes:
    # 200 = OK, NZB content follows
    # 400 = Bad Request, please supply all parameters
    #       (this generally means reportid or fileid is missing; missing user/pass gets you a 401)
    # 401 = Unauthorised, check username/password?
    # 402 = Payment Required, not Premium
    # 404 = Not Found, data doesn't exist?
    #       (only working for reportids, see Technical Limitations)
    # 450 = Try Later, wait <x> seconds for counter to reset
    #       (for an explanation of this, see DNZB Rate Limiting)
    # 500 = Internal Server Error, please report to Administrator
    # 503 = Service Unavailable, site is currently down

    if rcode in ('500', '503'):
        _warn_user('Newzbin has a server problem (%s, %s), wait 5 min.' % (rcode, rtext))
        return retry

    _access_ok()

    if rcode == '450':
        wait_re = re.compile(r'wait (\d+) seconds')
        try:
            wait = int(wait_re.findall(rtext)[0])
        except:
            wait = 60
        if wait > 60:
            wait = 60
        logging.debug("Newzbin says we should wait for %s sec", wait)
        return int(wait+1), None, None, None

    if rcode == '402':
        msg = Ta('You have no credit on your Newzbin account')
        return None, None, None, msg

    if rcode == '401':
        msg = Ta('Unauthorised, check your newzbin username/password')
        return None, None, None, msg

    if rcode in ('400', '404'):
        msg = Ta('Newzbin report %s not found') % msgid
        return None, None, None, msg

    if rcode != '200':
        msg = Ta('Newzbin gives undocumented error code (%s, %s)') % (rcode, rtext)
        return 60, None, None, msg

    # Process data
    report_name = response.getheader('X-DNZB-Name')
    report_cat  = response.getheader('X-DNZB-Category')
    if not (report_name and report_cat):
        msg = Ta('Newzbin server fails to give info for %s') %  msgid
        return 60, None, None, msg

    # sanitize report_name
    newname = sanitize_foldername(report_name)
    if len(newname) > 80:
        newname = newname[0:79].strip('. ')
    newname += ".nzb"

    logging.info('Successfully fetched report %s - %s (cat=%s) (%s)', msgid, report_name, report_cat, newname)

    return (newname, data, report_cat, nzo_info)
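The meaning of the returned 4-tuple depends on which branch produced it. Below is a short sketch of how a caller might unpack it, inferred from the return statements above rather than copied from the real caller in newzbin.py; it assumes _grabnzb is in scope and the msgid is illustrative.

# Interpreting _grabnzb()'s return value:
#   (filename, data, category, nzo_info)     on success (rcode 200)
#   (seconds_to_wait, None, None, msg/None)  for temporary problems
#   (None, None, None, msg)                  for permanent failures
first, data, cat, info = _grabnzb('12345678')
if isinstance(first, int):
    print 'Temporary problem, retry in %s seconds' % first
elif first is None:
    print 'Giving up: %s' % info
else:
    print 'Fetched %s (%d bytes, category %s)' % (first, len(data), cat)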
Code example #11
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo, retry_count) = self.queue.get()
            if not url:
                continue
            url = url.replace(' ', '')

            try:
                del_bookmark = not future_nzo
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing',
                                      url)
                        continue

                # Add nzbmatrix credentials if needed
                url, matrix_id = _matrix_url(url)

                # When still waiting for nzbmatrix wait period, requeue
                if matrix_id and self.matrix_wait > time.time():
                    self.queue.put((url, future_nzo, retry_count))
                    continue

                # _grab_url cannot reside in a function, because the tempfile
                # would not survive the end of the function
                if del_bookmark:
                    logging.info('Removing nzbmatrix bookmark %s', matrix_id)
                else:
                    logging.info('Grabbing URL %s', url)
                opener = urllib.FancyURLopener({})
                opener.prompt_user_passwd = None
                opener.addheaders = []
                opener.addheader('User-Agent',
                                 'SABnzbd+/%s' % sabnzbd.version.__version__)
                opener.addheader('Accept-encoding', 'gzip')
                filename = None
                category = None
                length = 0
                nzo_info = {}
                try:
                    fn, header = opener.retrieve(url)
                except:
                    fn = None

                if fn:
                    for tup in header.items():
                        try:
                            item = tup[0].lower()
                            value = tup[1].strip()
                        except:
                            continue
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo', ):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name', ):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item in ('content-length', ):
                            length = misc.int_conv(value)

                        if not filename:
                            for item in tup:
                                if "filename=" in item:
                                    filename = item[item.index("filename=") +
                                                    9:].strip(';').strip('"')

                if matrix_id:
                    fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                    if retry and wait > 0:
                        self.matrix_wait = time.time() + wait
                        logging.debug('Retry URL %s after waiting', url)
                        self.queue.put((url, future_nzo, retry_count))
                        continue
                    category = _MATRIX_MAP.get(category, category)
                else:
                    msg = ''
                    retry = True

                # Check if the filepath is specified; if not, check whether a retry is allowed.
                if not fn:
                    retry_count -= 1
                    if retry_count > 0 and retry:
                        logging.info('Retry URL %s', url)
                        self.queue.put((url, future_nzo, retry_count))
                    elif not del_bookmark:
                        misc.bad_fetch(future_nzo, url, msg, retry=True)
                    continue

                if del_bookmark:
                    continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                # Sanitize and trim name, preserving the extension
                filename, ext = os.path.splitext(filename)
                filename = misc.sanitize_foldername(filename)
                filename += '.' + misc.sanitize_foldername(ext)

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                    res = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat, priority=priority, \
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url)
                    if res == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id,
                                           add_to_history=False)
                    elif res == -2:
                        retry_count -= 1
                        if retry_count > 0:
                            logging.info('Incomplete NZB, retry %s', url)
                            self.queue.put((url, future_nzo, retry_count))
                        else:
                            misc.bad_fetch(future_nzo,
                                           url,
                                           retry=True,
                                           content=True)
                    else:
                        misc.bad_fetch(future_nzo,
                                       url,
                                       retry=True,
                                       content=True)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(filename,
                                                     fn,
                                                     pp,
                                                     script,
                                                     cat,
                                                     priority=priority,
                                                     url=future_nzo.url) == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id,
                                           add_to_history=False)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        misc.bad_fetch(future_nzo,
                                       url,
                                       retry=True,
                                       content=True)
            except:
                logging.error('URLGRABBER CRASHED', exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Code example #12
File: newzbin.py Project: claytonbrown/sabnzbd
def _grabnzb(msgid):
    """ Grab one msgid from newzbin """

    msg = ''
    retry = (60, None, None, None)
    nzo_info = {'msgid': msgid}

    logging.info('Fetching NZB for Newzbin report #%s', msgid)

    headers = {'User-agent' : 'SABnzbd+/%s' % sabnzbd.version.__version__}

    # Connect to Newzbin
    try:
        if _HAVE_SSL:
            conn = httplib.HTTPSConnection('www.newzbin.com')
        else:
            conn = httplib.HTTPConnection('www.newzbin.com')

        postdata = { 'username': cfg.newzbin_username(), 'password': cfg.newzbin_password(), 'reportid': msgid }
        postdata = urllib.urlencode(postdata)

        headers['Content-type'] = 'application/x-www-form-urlencoded'

        fetchurl = '/api/dnzb/'
        conn.request('POST', fetchurl, postdata, headers)
        response = conn.getresponse()

        # Save debug info if we have to
        data = response.read()

    except:
        _warn_user('Problem accessing Newzbin server, wait 1 min.')
        logging.info("Traceback: ", exc_info = True)
        return retry

    # Get the filename
    rcode = response.getheader('X-DNZB-RCode')
    rtext = response.getheader('X-DNZB-RText')
    try:
        nzo_info['more_info'] = response.getheader('X-DNZB-MoreInfo')
    except:
        # Only some reports will generate a moreinfo header
        pass
    if not (rcode or rtext):
        logging.error(T('Newzbin server changed its protocol'))
        return None, None, None, None

    # Official return codes:
    # 200 = OK, NZB content follows
    # 400 = Bad Request, please supply all parameters
    #       (this generally means reportid or fileid is missing; missing user/pass gets you a 401)
    # 401 = Unauthorised, check username/password?
    # 402 = Payment Required, not Premium
    # 404 = Not Found, data doesn't exist?
    #       (only working for reportids, see Technical Limitations)
    # 450 = Try Later, wait <x> seconds for counter to reset
    #       (for an explanation of this, see DNZB Rate Limiting)
    # 500 = Internal Server Error, please report to Administrator
    # 503 = Service Unavailable, site is currently down

    if rcode in ('500', '503'):
        _warn_user('Newzbin has a server problem (%s, %s), wait 5 min.' % (rcode, rtext))
        return retry

    _access_ok()

    if rcode == '450':
        wait_re = re.compile(r'wait (\d+) seconds')
        try:
            wait = int(wait_re.findall(rtext)[0])
        except:
            wait = 60
        if wait > 60:
            wait = 60
        logging.debug("Newzbin says we should wait for %s sec", wait)
        return int(wait+1), None, None, None

    if rcode == '402':
        msg = Ta('You have no credit on your Newzbin account')
        return None, None, None, msg

    if rcode == '401':
        msg = Ta('Unauthorised, check your newzbin username/password')
        return None, None, None, msg

    if rcode in ('400', '404'):
        msg = Ta('Newzbin report %s not found') % msgid
        return None, None, None, msg

    if rcode != '200':
        msg = Ta('Newzbin gives undocumented error code (%s, %s)') % (rcode, rtext)
        return None, None, None, msg

    # Process data
    report_name = response.getheader('X-DNZB-Name')
    report_cat  = response.getheader('X-DNZB-Category')
    if not (report_name and report_cat):
        msg = Ta('Newzbin server fails to give info for %s') %  msgid
        return None, None, None, msg

    # sanitize report_name
    newname = sanitize_foldername(report_name)
    if len(newname) > 80:
        newname = newname[0:79].strip('. ')
    newname += ".nzb"

    logging.info('Successfully fetched report %s - %s (cat=%s) (%s)', msgid, report_name, report_cat, newname)

    return (newname, data, report_cat, nzo_info)
Code example #13
File: urlgrabber.py Project: 12345z/sabnzbd
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo, retry_count) = self.queue.get()
            if not url:
                continue
            url = url.replace(' ', '')

            try:
                del_bookmark = not future_nzo
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing', url)
                        continue

                # Add nzbmatrix credentials if needed
                url, matrix_id = _matrix_url(url)

                # When still waiting for nzbmatrix wait period, requeue
                if matrix_id and self.matrix_wait > time.time():
                    self.queue.put((url, future_nzo, retry_count))
                    continue

                # _grab_url cannot reside in a function, because the tempfile
                # would not survive the end of the function
                if del_bookmark:
                    logging.info('Removing nzbmatrix bookmark %s', matrix_id)
                else:
                    logging.info('Grabbing URL %s', url)
                opener = urllib.FancyURLopener({})
                opener.prompt_user_passwd = None
                opener.addheaders = []
                opener.addheader('User-Agent', 'SABnzbd+/%s' % sabnzbd.version.__version__)
                opener.addheader('Accept-encoding','gzip')
                filename = None
                category = None
                length = 0
                nzo_info = {}
                try:
                    fn, header = opener.retrieve(url)
                except:
                    fn = None

                if fn:
                    for tup in header.items():
                        try:
                            item = tup[0].lower()
                            value = tup[1].strip()
                        except:
                            continue
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo',):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name',):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item in ('content-length',):
                            length = misc.int_conv(value)

                        if not filename:
                            for item in tup:
                                if "filename=" in item:
                                    filename = item[item.index("filename=") + 9:].strip(';').strip('"')

                if matrix_id:
                    fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                    if retry and wait > 0:
                        self.matrix_wait = time.time() + wait
                        logging.debug('Retry URL %s after waiting', url)
                        self.queue.put((url, future_nzo, retry_count))
                        continue
                    category = _MATRIX_MAP.get(category, category)
                else:
                    msg = ''
                    retry = True

                # Check if the filepath is specified; if not, check whether a retry is allowed.
                if not fn:
                    retry_count -= 1
                    if retry_count > 0 and retry:
                        logging.info('Retry URL %s', url)
                        self.queue.put((url, future_nzo, retry_count))
                    elif not del_bookmark:
                        misc.bad_fetch(future_nzo, url, msg, retry=True)
                    continue

                if del_bookmark:
                    continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                # Sanitize and trim name, preserving the extension
                filename, ext = os.path.splitext(filename)
                filename = misc.sanitize_foldername(filename)
                filename += '.' + misc.sanitize_foldername(ext)

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                    res = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat, priority=priority, \
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url)
                    if res == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                    elif res == -2:
                        retry_count -= 1
                        if retry_count > 0:
                            logging.info('Incomplete NZB, retry %s', url)
                            self.queue.put((url, future_nzo, retry_count))
                        else:
                            misc.bad_fetch(future_nzo, url, retry=True, content=True)
                    else:
                        misc.bad_fetch(future_nzo, url, retry=True, content=True)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(filename, fn, pp, script, cat, priority=priority, url=future_nzo.url) == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        misc.bad_fetch(future_nzo, url, retry=True, content=True)
            except:
                logging.error('URLGRABBER CRASHED', exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Code example #14
File: urlgrabber.py Project: theweirdone/sabnzbd
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo) = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue
            if future_nzo and future_nzo.wait and future_nzo.wait > time.time():
                # Requeue when too early and still active

                self.add(url, future_nzo)
                continue
            url = url.replace(' ', '')

            try:
                del_bookmark = not future_nzo
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing',
                                      url)
                        continue

                # Add nzbmatrix credentials if needed
                url, matrix_id = _matrix_url(url)

                # _grab_url cannot reside in a function, because the tempfile
                # would not survive the end of the function
                if del_bookmark:
                    logging.info('Removing nzbmatrix bookmark %s', matrix_id)
                else:
                    logging.info('Grabbing URL %s', url)
                if '.nzbsrus.' in url:
                    opener = urllib.URLopener({})
                else:
                    opener = urllib.FancyURLopener({})
                opener.prompt_user_passwd = None
                opener.addheaders = []
                opener.addheader('User-Agent',
                                 'SABnzbd+/%s' % sabnzbd.version.__version__)
                if not [True for item in _BAD_GZ_HOSTS if item in url]:
                    opener.addheader('Accept-encoding', 'gzip')
                filename = None
                category = None
                length = 0
                nzo_info = {}
                try:
                    fn, header = opener.retrieve(url)
                except:
                    fn = None

                if fn:
                    for tup in header.items():
                        try:
                            item = tup[0].lower()
                            value = tup[1].strip()
                        except:
                            continue
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo', ):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name', ):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item in ('content-length', ):
                            length = misc.int_conv(value)

                        if not filename:
                            for item in tup:
                                if "filename=" in item:
                                    filename = item[item.index("filename=") +
                                                    9:].strip(';').strip('"')

                if matrix_id:
                    fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                    if not fn:
                        if retry:
                            logging.info(msg)
                            logging.debug(
                                'Retry nzbmatrix item %s after waiting %s sec',
                                matrix_id, wait)
                            self.add(url, future_nzo, wait)
                        else:
                            logging.error(msg)
                            misc.bad_fetch(future_nzo,
                                           clean_matrix_url(url),
                                           msg,
                                           retry=True)
                        continue
                    category = get_matrix_category(url, category)

                    if del_bookmark:
                        # No retries of nzbmatrix bookmark removals
                        continue

                else:
                    fn, msg, retry, wait = _analyse_others(fn, url)
                    if not fn:
                        if retry:
                            logging.info('Retry URL %s', url)
                            self.add(url, future_nzo, wait)
                        else:
                            misc.bad_fetch(future_nzo, url, msg, retry=True)
                        continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                # Sanitize and trim name, preserving the extension
                filename, ext = os.path.splitext(filename)
                filename = misc.sanitize_foldername(filename)
                filename += '.' + misc.sanitize_foldername(ext)

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                    res, nzo_ids = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat, priority=priority, \
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url)
                    if res == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id,
                                           add_to_history=False)
                    else:
                        if res == -2:
                            logging.info(
                                'Incomplete NZB, retry after 5 min %s', url)
                            when = 300
                        elif res == -1:
                            # Error, but no reason to retry. Warning is already given
                            NzbQueue.do.remove(future_nzo.nzo_id,
                                               add_to_history=False)
                            continue
                        else:
                            logging.info(
                                'Unknown error fetching NZB, retry after 2 min %s',
                                url)
                            when = 120
                        self.add(url, future_nzo, when)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(
                            filename,
                            fn,
                            pp,
                            script,
                            cat,
                            priority=priority,
                            url=future_nzo.url)[0] == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id,
                                           add_to_history=False)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        logging.info(
                            'Unknown filetype when fetching NZB, retry after 30s %s',
                            url)
                        self.add(url, future_nzo, 30)
            except:
                logging.error('URLGRABBER CRASHED', exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)