Example #1
    def auto_history_purge(self):
        """ Remove history items based on the configured history-retention """
        if sabnzbd.cfg.history_retention() == "0":
            return

        if sabnzbd.cfg.history_retention() == "-1":
            # Delete all non-failed ones
            self.remove_completed()

        if "d" in sabnzbd.cfg.history_retention():
            # How many days to keep?
            days_to_keep = int_conv(
                sabnzbd.cfg.history_retention().strip()[:-1])
            seconds_to_keep = int(time.time()) - days_to_keep * 86400
            if days_to_keep > 0:
                logging.info(
                    "Removing completed jobs older than %s days from history",
                    days_to_keep)
                return self.execute(
                    """DELETE FROM history WHERE status = ? AND completed < ?""",
                    (Status.COMPLETED, seconds_to_keep),
                    save=True,
                )
        else:
            # How many to keep?
            to_keep = int_conv(sabnzbd.cfg.history_retention())
            if to_keep > 0:
                logging.info(
                    "Removing all but last %s completed jobs from history",
                    to_keep)
                return self.execute(
                    """DELETE FROM history WHERE status = ? AND id NOT IN ( SELECT id FROM history WHERE status = ? ORDER BY completed DESC LIMIT ? )""",
                    (Status.COMPLETED, Status.COMPLETED, to_keep),
                    save=True,
                )
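
For reference, auto_history_purge accepts the retention setting in several textual forms; the summary below is inferred from the branches above, not from external documentation:

# history_retention values as interpreted by auto_history_purge:
#   "0"    keep everything (no purge)
#   "-1"   remove all completed (non-failed) jobs immediately
#   "30d"  remove completed jobs older than 30 days
#   "100"  keep only the 100 most recent completed jobs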
Example #2
def test_nntp_server_dict(kwargs):
    # Grab the host/port/user/pass/connections/ssl
    host = kwargs.get('host', '').strip()

    if not host:
        return False, T('The hostname is not set.')

    username = kwargs.get('username', '').strip()
    password = kwargs.get('password', '').strip()
    server = kwargs.get('server', '').strip()
    connections = int_conv(kwargs.get('connections', 0))
    ssl = int_conv(kwargs.get('ssl', 0))
    ssl_verify = int_conv(kwargs.get('ssl_verify', 1))
    ssl_ciphers = kwargs.get('ssl_ciphers')
    port = int_conv(kwargs.get('port', 0))

    if not connections:
        return False, T('There are no connections set. Please set at least one connection.')

    if not port:
        if ssl:
            port = 563
        else:
            port = 119

    return test_nntp_server(host, port, server, username=username, password=password,
                            ssl=ssl, ssl_verify=ssl_verify, ssl_ciphers=ssl_ciphers)
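
A hypothetical call illustrating the dict this helper expects; host is the only required key, connections must be non-zero, and port falls back to the NNTP defaults (563 with SSL, 119 without):

result, message = test_nntp_server_dict({
    'host': 'news.example.com',
    'username': 'user',
    'password': 'secret',
    'connections': 8,
    'ssl': 1,  # port defaults to 563 when ssl is set and no port is given
})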
Example #3
def test_nntp_server_dict(kwargs):
    # Grab the host/port/user/pass/connections/ssl
    host = kwargs.get("host", "").strip()
    if not host:
        return False, T("The hostname is not set.")
    username = kwargs.get("username", "").strip()
    password = kwargs.get("password", "").strip()
    server = kwargs.get("server", "").strip()
    connections = int_conv(kwargs.get("connections", 0))
    if not connections:
        return False, T(
            "There are no connections set. Please set at least one connection."
        )
    ssl = int_conv(kwargs.get("ssl", 0))
    ssl_verify = int_conv(kwargs.get("ssl_verify", 1))
    ssl_ciphers = kwargs.get("ssl_ciphers")
    port = int_conv(kwargs.get("port", 0))

    if not port:
        if ssl:
            port = 563
        else:
            port = 119

    return test_nntp_server(
        host,
        port,
        server,
        username=username,
        password=password,
        ssl=ssl,
        ssl_verify=ssl_verify,
        ssl_ciphers=ssl_ciphers,
    )
Example #4
def test_nntp_server_dict(kwargs):
    # Grab the host/port/user/pass/connections/ssl
    host = kwargs.get('host', '').strip()
    if not host:
        return False, T('The hostname is not set.')
    username = kwargs.get('username', '').strip()
    password = kwargs.get('password', '').strip()
    server = kwargs.get('server', '').strip()
    connections = int_conv(kwargs.get('connections', 0))
    if not connections:
        return False, T(
            'There are no connections set. Please set at least one connection.'
        )
    ssl = int_conv(kwargs.get('ssl', 0))
    ssl_verify = int_conv(kwargs.get('ssl_verify', 1))
    port = int_conv(kwargs.get('port', 0))
    if not port:
        if ssl:
            port = 563
        else:
            port = 119

    return test_nntp_server(host,
                            port,
                            server,
                            username=username,
                            password=password,
                            ssl=ssl,
                            ssl_verify=ssl_verify)
Example #5
def analyze_rar_filename(filename):
    """ Extract volume number and setname from rar-filenames
        Both ".part01.rar" and ".r01"
    """
    m = RAR_NR.search(filename)
    if m:
        if m.group(4):
            # Special since starts with ".rar", ".r00"
            return m.group(1), int_conv(m.group(4)) + 2
        return m.group(1), int_conv(m.group(3))
    else:
        # Detect if first of "rxx" set
        if filename.endswith('.rar'):
            return os.path.splitext(filename)[0], 1
    return None, None
Example #6
def analyze_rar_filename(filename):
    """Extract volume number and setname from rar-filenames
    Both ".part01.rar" and ".r01"
    """
    m = RAR_NR.search(filename)
    if m:
        if m.group(4):
            # Special since starts with ".rar", ".r00"
            return m.group(1), int_conv(m.group(4)) + 2
        return m.group(1), int_conv(m.group(3))
    else:
        # Detect if first of "rxx" set
        if filename.endswith(".rar"):
            return os.path.splitext(filename)[0], 1
    return None, None
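
Hypothetical inputs and outputs, assuming RAR_NR follows SABnzbd's usual pattern of capturing the setname in group 1 and the ".partNN.rar" / ".rNN" counters in groups 3 and 4:

analyze_rar_filename("movie.part01.rar")  # ("movie", 1)
analyze_rar_filename("movie.r00")         # ("movie", 2): ".r00" is the volume after ".rar"
analyze_rar_filename("movie.rar")         # ("movie", 1): first volume of an ".rxx" set
analyze_rar_filename("notes.txt")         # (None, None)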
Example #7
    def test_int_conv(self):
        assert 0 == misc.int_conv("0")
        assert 10 == misc.int_conv("10")
        assert 10 == misc.int_conv(10)
        assert 10 == misc.int_conv(10.0)
        assert 0 == misc.int_conv(None)
        assert 1 == misc.int_conv(True)
        assert 0 == misc.int_conv(object)
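
Every example on this page relies on misc.int_conv. The test above pins down its contract; a minimal sketch consistent with those assertions (the real SABnzbd implementation may differ in detail):

def int_conv(value):
    """Convert any value to int, falling back to 0 when conversion fails."""
    try:
        return int(value)
    except (ValueError, TypeError):
        return 0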
Example #8
def ep_match(season, episode, expr, title=None):
    """ Return True if season, episode is at or above expected
        Optionally `title` can be matched
    """
    m = _RE_SP.search(expr)
    if m:
        # Make sure they are all integers for comparison
        req_season = int(m.group(1))
        req_episode = int(m.group(2))
        season = int_conv(season)
        episode = int_conv(episode)
        if season > req_season or (season == req_season and episode >= req_episode):
            if title:
                show = expr[:m.start()].replace('.', ' ').replace('_', ' ').strip()
                show = show.replace(' ', '[._ ]+')
                return bool(re.search(show, title, re.I))
            else:
                return True
        else:
            return False
    else:
        return True
Example #9
def ep_match(season, episode, expr, title=None):
    """ Return True if season, episode is at or above expected
        Optionally `title` can be matched
    """
    m = _RE_SP.search(expr)
    if m:
        # Make sure they are all integers for comparison
        req_season = int(m.group(1))
        req_episode = int(m.group(2))
        season = int_conv(season)
        episode = int_conv(episode)
        if season > req_season or (season == req_season and episode >= req_episode):
            if title:
                show = expr[: m.start()].replace(".", " ").replace("_", " ").strip()
                show = show.replace(" ", "[._ ]+")
                return bool(re.search(show, title, re.I))
            else:
                return True
        else:
            return False
    else:
        return True
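
Illustrative calls with hypothetical shows, assuming _RE_SP matches an "SxxEyy" marker in the expression:

ep_match(2, 5, "Some.Show.S02E03")                      # True: S02E05 is at or past S02E03
ep_match(1, 9, "Some.Show.S02E03")                      # False: season 1 is before season 2
ep_match(2, 3, "Some.Show.S02E03", title="Other Show")  # False: the show name does not match
ep_match(2, 3, "no marker here")                        # True: without an SxxEyy marker everything passes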
Example #10
    def auto_history_purge(self):
        """ Remove history items based on the configured history-retention """
        if sabnzbd.cfg.history_retention() == "0":
            return

        if sabnzbd.cfg.history_retention() == "-1":
            # Delete all non-failed ones
            self.remove_completed()

        if "d" in sabnzbd.cfg.history_retention():
            # How many days to keep?
            days_to_keep = int_conv(sabnzbd.cfg.history_retention().strip()[:-1])
            seconds_to_keep = int(time.time()) - days_to_keep * 86400
            if days_to_keep > 0:
                logging.info('Removing completed jobs older than %s days from history', days_to_keep)
                return self.execute("""DELETE FROM history WHERE status = 'Completed' AND completed < ?""", (seconds_to_keep,), save=True)
        else:
            # How many to keep?
            to_keep = int_conv(sabnzbd.cfg.history_retention())
            if to_keep > 0:
                logging.info('Removing all but last %s completed jobs from history', to_keep)
                return self.execute("""DELETE FROM history WHERE id NOT IN ( SELECT id FROM history WHERE status = 'Completed' ORDER BY completed DESC LIMIT ? )""", (to_keep,), save=True)
Example #11
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo) = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue
            if future_nzo and future_nzo.wait and future_nzo.wait > time.time():
                # Re-queue when too early and still active

                self.add(url, future_nzo)
                continue
            url = url.replace(' ', '')

            try:
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing', url)
                        continue

                logging.info('Grabbing URL %s', url)
                req = urllib2.Request(url)
                req.add_header('User-Agent', 'SABnzbd+/%s' % sabnzbd.version.__version__)
                if not [True for item in _BAD_GZ_HOSTS if item in url]:
                    req.add_header('Accept-encoding', 'gzip')
                filename = None
                category = None
                gzipped = False
                nzo_info = {}
                wait = 0
                retry = True
                fn = None
                try:
                    fn = urllib2.urlopen(req)
                except:
                    # Cannot list exceptions here, because of unpredictability over platforms
                    error0 = str(sys.exc_info()[0]).lower()
                    error1 = str(sys.exc_info()[1]).lower()
                    logging.debug('Error "%s" trying to get the url %s', error1, url)
                    if 'certificate_verify_failed' in error1 or 'certificateerror' in error0:
                        msg = T('Server %s uses an untrusted HTTPS certificate') % ''
                        retry = False
                    elif 'nodename nor servname provided' in error1:
                        msg = T('Server name does not resolve')
                        retry = False
                    elif '401' in error1 or 'unauthorized' in error1:
                        msg = T('Unauthorized access')
                        retry = False
                    elif '404' in error1:
                        msg = T('File not on server')
                        retry = False

                new_url = dereferring(url, fn)
                if new_url:
                    self.add(new_url, future_nzo)
                    continue

                if fn:
                    for hdr in fn.headers:
                        try:
                            item = hdr.lower()
                            value = fn.headers[hdr]
                        except:
                            continue
                        if item in ('content-encoding',) and value == 'gzip':
                            gzipped = True
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo',):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name',):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item == 'x-dnzb-propername':
                            nzo_info['propername'] = value
                        elif item == 'x-dnzb-episodename':
                            nzo_info['episodename'] = value
                        elif item == 'x-dnzb-year':
                            nzo_info['year'] = value
                        elif item == 'x-dnzb-failure':
                            nzo_info['failure'] = value
                        elif item == 'x-dnzb-details':
                            nzo_info['details'] = value
                        elif item == 'retry-after':
                            # For NZBFinder
                            wait = misc.int_conv(value)

                        if not filename and "filename=" in value:
                            filename = value[value.index("filename=") + 9:].strip(';').strip('"')

                if wait:
                    # For sites that have a rate-limiting attribute
                    msg = ''
                    retry = True
                    fn = None
                elif retry:
                    fn, msg, retry, wait, data = _analyse(fn, url)

                if not fn:
                    if retry:
                        logging.info('Retry URL %s', url)
                        self.add(url, future_nzo, wait)
                    else:
                        bad_fetch(future_nzo, url, msg)
                    continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                elif '&nzbname=' in filename:
                    # Sometimes the filename contains the full URL, duh!
                    filename = filename[filename.find('&nzbname=') + 9:]

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # process data
                if gzipped:
                    filename = filename + '.gz'
                if not data:
                    data = fn.read()
                fn.close()

                # Sanitize filename first
                filename = misc.sanitize_filename(filename)

                # Write data to temp file
                path = os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)
                path = os.path.join(path, filename)
                f = open(path, 'wb')
                f.write(data)
                f.close()
                del data

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz', '.bz2'):
                    res = dirscanner.ProcessSingleFile(filename, path, pp=pp, script=script, cat=cat, priority=priority,
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url, keep=False,
                                                       nzo_id=future_nzo.nzo_id)[0]
                    if res:
                        if res == -2:
                            logging.info('Incomplete NZB, retry after 5 min %s', url)
                            when = 300
                        elif res == -1:
                            # Error, but no reason to retry. Warning is already given
                            NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                            continue
                        else:
                            logging.info('Unknown error fetching NZB, retry after 2 min %s', url)
                            when = 120
                        self.add(url, future_nzo, when)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(filename, path, pp, script, cat, priority=priority,
                                                     nzbname=nzbname, url=future_nzo.url, keep=False,
                                                     nzo_id=future_nzo.nzo_id)[0]:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(path)  # remove the temp file; fn is the HTTP response, not a path
                        except:
                            pass
                        logging.info('Unknown filetype when fetching NZB, retry after 30s %s', url)
                        self.add(url, future_nzo, 30)
            except:
                logging.error(T('URLGRABBER CRASHED'), exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Example #12
    def __set_priority(self, nzo_id, priority):
        """ Sets the priority on the nzo and places it in the queue at the appropriate position """
        try:
            priority = int_conv(priority)
            nzo = self.__nzo_table[nzo_id]
            nzo_id_pos1 = -1
            pos = -1

            # If priority == STOP_PRIORITY, then send to queue
            if priority == STOP_PRIORITY:
                self.end_job(nzo)
                return

            # Get the current position in the queue
            for i in range(len(self.__nzo_list)):
                if nzo_id == self.__nzo_list[i].nzo_id:
                    nzo_id_pos1 = i
                    break

            # Don't change priority and order if priority is the same as asked
            if priority == self.__nzo_list[nzo_id_pos1].priority:
                return nzo_id_pos1

            nzo.set_priority(priority)
            if sabnzbd.scheduler.analyse(False, priority) and nzo.status in (
                    Status.CHECKING,
                    Status.DOWNLOADING,
                    Status.QUEUED,
            ):
                nzo.status = Status.PAUSED
            elif nzo.status == Status.PAUSED:
                nzo.status = Status.QUEUED
            nzo.save_to_disk()

            if nzo_id_pos1 != -1:
                del self.__nzo_list[nzo_id_pos1]
                if priority == TOP_PRIORITY:
                    # A top priority item (usually a completed download fetching pars)
                    # is added to the top of the queue
                    self.__nzo_list.insert(0, nzo)
                    pos = 0
                elif priority == LOW_PRIORITY:
                    pos = len(self.__nzo_list)
                    self.__nzo_list.append(nzo)
                else:
                    # for high priority we need to add the item at the bottom
                    # of any other high priority items above the normal priority
                    # for normal priority we need to add the item at the bottom
                    # of the normal priority items above the low priority
                    if self.__nzo_list:
                        p = 0
                        added = False
                        for position in self.__nzo_list:
                            if position.priority < priority:
                                self.__nzo_list.insert(p, nzo)
                                pos = p
                                added = True
                                break
                            p += 1
                        if not added:
                            # if there are no other items classed as a lower priority
                            # then it will be added to the bottom of the queue
                            pos = len(self.__nzo_list)
                            self.__nzo_list.append(nzo)
                    else:
                        # if the queue is empty then simple append the item to the bottom
                        self.__nzo_list.append(nzo)
                        pos = 0

            logging.info("Set priority=%s for job %s => position=%s ",
                         priority, self.__nzo_table[nzo_id].final_name, pos)
            return pos

        except:
            return -1
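
The insertion logic assumes priorities are plain integers that sort from most to least urgent; a sketch of the ordering the comparisons above rely on (the actual constant values live elsewhere in SABnzbd):

# Assumed ordering, highest first:
#   TOP_PRIORITY > high > normal > LOW_PRIORITY, with STOP_PRIORITY handled separately.
# __set_priority inserts the job just above the first queued item whose priority
# is strictly lower, so jobs of equal priority keep their FIFO order.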
Example #13
    def run(self):
        # First check IPv6 connectivity
        sabnzbd.EXTERNAL_IPV6 = sabnzbd.test_ipv6()
        logging.debug("External IPv6 test result: %s", sabnzbd.EXTERNAL_IPV6)

        # Then we check SSL certificate checking
        sabnzbd.CERTIFICATE_VALIDATION = sabnzbd.test_cert_checking()
        logging.debug("SSL verification test: %s", sabnzbd.CERTIFICATE_VALIDATION)

        # Kick BPS-Meter to check quota
        sabnzbd.BPSMeter.update()

        # Check server expiration dates
        check_server_expiration()

        while 1:
            now = time.time()

            # Set Article to None so references from this
            # thread do not keep the parent objects alive (see #1628)
            article = None

            for server in self.servers:
                # Skip this server if there's no point searching for new stuff to do
                if not server.busy_threads and server.next_article_search > now:
                    continue

                for nw in server.busy_threads[:]:
                    if (nw.nntp and nw.nntp.error_msg) or (nw.timeout and now > nw.timeout):
                        if nw.nntp and nw.nntp.error_msg:
                            # Already showed error
                            self.__reset_nw(nw)
                        else:
                            self.__reset_nw(nw, "timed out", warn=True)
                        server.bad_cons += 1
                        self.maybe_block_server(server)

                if server.restart:
                    if not server.busy_threads:
                        newid = server.newid
                        server.stop()
                        self.servers.remove(server)
                        if newid:
                            self.init_server(None, newid)
                        self.server_restarts -= 1
                        # Have to leave this loop, because we removed an element
                        break
                    else:
                        # Restart pending, don't add new articles
                        continue

                if (
                    not server.idle_threads
                    or server.restart
                    or self.is_paused()
                    or self.shutdown
                    or self.paused_for_postproc
                    or not server.active
                ):
                    continue

                for nw in server.idle_threads[:]:
                    if nw.timeout:
                        if now < nw.timeout:
                            continue
                        else:
                            nw.timeout = None

                    if not server.info:
                        # Only request info if there's stuff in the queue
                        if not sabnzbd.NzbQueue.is_empty():
                            self.maybe_block_server(server)
                            server.request_info()
                        break

                    article = sabnzbd.NzbQueue.get_article(server, self.servers)

                    if not article:
                        # Skip this server for 0.5 second
                        server.next_article_search = now + 0.5
                        break

                    if server.retention and article.nzf.nzo.avg_stamp < now - server.retention:
                        # Let's get rid of all the articles for this server at once
                        logging.info("Job %s too old for %s, moving on", article.nzf.nzo.final_name, server.host)
                        while article:
                            self.decode(article, None)
                            article = article.nzf.nzo.get_article(server, self.servers)
                        break

                    server.idle_threads.remove(nw)
                    server.busy_threads.append(nw)

                    nw.article = article

                    if nw.connected:
                        self.__request_article(nw)
                    else:
                        try:
                            logging.info("%s@%s: Initiating connection", nw.thrdnum, server.host)
                            nw.init_connect()
                        except:
                            logging.error(
                                T("Failed to initialize %s@%s with reason: %s"),
                                nw.thrdnum,
                                server.host,
                                sys.exc_info()[1],
                            )
                            self.__reset_nw(nw, "failed to initialize", warn=True)

            if self.force_disconnect or self.shutdown:
                for server in self.servers:
                    for nw in server.idle_threads + server.busy_threads:
                        # Send goodbye if we have open socket
                        if nw.nntp:
                            self.__reset_nw(
                                nw,
                                "forcing disconnect",
                                wait=False,
                                count_article_try=False,
                                send_quit=True,
                            )
                    # Make sure server address resolution is refreshed
                    server.info = None
                self.force_disconnect = False

                # Exit-point
                if self.shutdown:
                    logging.info("Shutting down")
                    break

            # Use select to find sockets ready for reading/writing
            readkeys = self.read_fds.keys()
            if readkeys:
                read, _, _ = select.select(readkeys, (), (), 1.0)

                # Add a sleep if there are too few results compared to the number of active connections
                if self.can_be_slowed and len(read) < 1 + len(readkeys) / 10:
                    time.sleep(self.sleep_time)

                # Need to initialize the check during first 20 seconds
                if self.can_be_slowed is None or self.can_be_slowed_timer:
                    # Wait for stable speed to start testing
                    if not self.can_be_slowed_timer and sabnzbd.BPSMeter.get_stable_speed(timespan=10):
                        self.can_be_slowed_timer = time.time()

                    # Check 10 seconds after enabling slowdown
                    if self.can_be_slowed_timer and time.time() > self.can_be_slowed_timer + 10:
                        # Now let's check if it was stable in the last 10 seconds
                        self.can_be_slowed = sabnzbd.BPSMeter.get_stable_speed(timespan=10)
                        self.can_be_slowed_timer = 0
                        logging.debug("Downloader-slowdown: %r", self.can_be_slowed)

            else:
                read = []

                sabnzbd.BPSMeter.reset()

                time.sleep(1.0)

                with DOWNLOADER_CV:
                    while (
                        (sabnzbd.NzbQueue.is_empty() or self.is_paused() or self.paused_for_postproc)
                        and not self.shutdown
                        and not self.force_disconnect
                        and not self.server_restarts
                    ):
                        DOWNLOADER_CV.wait()

            if not read:
                sabnzbd.BPSMeter.update()
                continue

            for selected in read:
                nw = self.read_fds[selected]
                article = nw.article
                server = nw.server

                try:
                    bytes_received, done, skip = nw.recv_chunk()
                except:
                    bytes_received, done, skip = (0, False, False)

                if skip:
                    sabnzbd.BPSMeter.update()
                    continue

                if bytes_received < 1:
                    self.__reset_nw(nw, "server closed connection", wait=False)
                    continue

                else:
                    try:
                        article.nzf.nzo.update_download_stats(sabnzbd.BPSMeter.bps, server.id, bytes_received)
                    except AttributeError:
                        # In case nzf has disappeared because the file was deleted before the update could happen
                        pass

                    if self.bandwidth_limit:
                        limit = self.bandwidth_limit
                        if bytes_received + sabnzbd.BPSMeter.bps > limit:
                            while sabnzbd.BPSMeter.bps > limit:
                                time.sleep(0.01)
                                sabnzbd.BPSMeter.update()
                    sabnzbd.BPSMeter.update(server.id, bytes_received)

                if not done and nw.status_code != 222:
                    if not nw.connected or nw.status_code == 480:
                        done = False
                        try:
                            nw.finish_connect(nw.status_code)
                            if sabnzbd.LOG_ALL:
                                logging.debug(
                                    "%s@%s last message -> %s", nw.thrdnum, nw.server.host, nntp_to_msg(nw.data)
                                )
                            nw.clear_data()
                        except NNTPPermanentError as error:
                            # Handle login problems
                            block = False
                            penalty = 0
                            msg = error.response
                            ecode = int_conv(msg[:3])
                            display_msg = " [%s]" % msg
                            logging.debug("Server login problem: %s, %s", ecode, msg)
                            if ecode in (502, 400, 481, 482) and clues_too_many(msg):
                                # Too many connections: remove this thread and reduce thread-setting for server
                                # Plan to go back to the full number after a penalty timeout
                                if server.active:
                                    errormsg = T("Too many connections to server %s") % display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Too many connections to server %s"), server.host)
                                    # Don't count this for the tries (max_art_tries) on this server
                                    self.__reset_nw(nw, count_article_try=False, send_quit=True)
                                    self.plan_server(server, _PENALTY_TOOMANY)
                                    server.threads -= 1
                            elif ecode in (502, 481, 482) and clues_too_many_ip(msg):
                                # Account sharing?
                                if server.active:
                                    errormsg = T("Probable account sharing") + display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        name = " (%s)" % server.host
                                        logging.warning(T("Probable account sharing") + name)
                                penalty = _PENALTY_SHARE
                                block = True
                            elif ecode in (452, 481, 482, 381) or (ecode == 502 and clues_login(msg)):
                                # Cannot login, block this server
                                if server.active:
                                    errormsg = T("Failed login for server %s") % display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.error(T("Failed login for server %s"), server.host)
                                penalty = _PENALTY_PERM
                                block = True
                            elif ecode in (502, 482):
                                # Cannot connect (other reasons), block this server
                                if server.active:
                                    errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Cannot connect to server %s [%s]"), server.host, msg)
                                if clues_pay(msg):
                                    penalty = _PENALTY_PERM
                                else:
                                    penalty = _PENALTY_502
                                block = True
                            elif ecode == 400:
                                # Temp connection problem?
                                if server.active:
                                    logging.debug("Unspecified error 400 from server %s", server.host)
                                penalty = _PENALTY_VERYSHORT
                                block = True
                            else:
                                # Unknown error, just keep trying
                                if server.active:
                                    errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Cannot connect to server %s [%s]"), server.host, msg)
                                penalty = _PENALTY_UNKNOWN
                                block = True
                            if block or (penalty and server.optional):
                                if server.active:
                                    server.active = False
                                    if penalty and (block or server.optional):
                                        self.plan_server(server, penalty)
                                # Note that this will count towards the tries (max_art_tries) on this server!
                                self.__reset_nw(nw, send_quit=True)
                            continue
                        except:
                            logging.error(
                                T("Connecting %s@%s failed, message=%s"),
                                nw.thrdnum,
                                nw.server.host,
                                nntp_to_msg(nw.data),
                            )
                            # No reset-warning needed, above logging is sufficient
                            self.__reset_nw(nw)

                        if nw.connected:
                            logging.info("Connecting %s@%s finished", nw.thrdnum, nw.server.host)
                            self.__request_article(nw)

                    elif nw.status_code == 223:
                        done = True
                        logging.debug("Article <%s> is present", article.article)

                    elif nw.status_code == 211:
                        done = False
                        logging.debug("group command ok -> %s", nntp_to_msg(nw.data))
                        nw.group = nw.article.nzf.nzo.group
                        nw.clear_data()
                        self.__request_article(nw)

                    elif nw.status_code in (411, 423, 430):
                        done = True
                        logging.debug(
                            "Thread %s@%s: Article %s missing (error=%s)",
                            nw.thrdnum,
                            nw.server.host,
                            article.article,
                            nw.status_code,
                        )
                        nw.clear_data()

                    elif nw.status_code == 500:
                        if article.nzf.nzo.precheck:
                            # Assume "STAT" command is not supported
                            server.have_stat = False
                            logging.debug("Server %s does not support STAT", server.host)
                        else:
                            # Assume "BODY" command is not supported
                            server.have_body = False
                            logging.debug("Server %s does not support BODY", server.host)
                        nw.clear_data()
                        self.__request_article(nw)

                if done:
                    # Successful data, clear "bad" counter
                    server.bad_cons = 0
                    server.errormsg = server.warning = ""
                    if sabnzbd.LOG_ALL:
                        logging.debug("Thread %s@%s: %s done", nw.thrdnum, server.host, article.article)
                    self.decode(article, nw.data)

                    # Reset connection for new activity
                    nw.soft_reset()
                    server.busy_threads.remove(nw)
                    server.idle_threads.append(nw)
                    self.remove_socket(nw)
Example #14
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            (url, future_nzo) = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue

            if future_nzo:
                # Re-queue when too early and still active
                if future_nzo.url_wait and future_nzo.url_wait > time.time():
                    self.add(url, future_nzo)
                    time.sleep(1.0)
                    continue
                # Paused
                if future_nzo.status == Status.PAUSED:
                    self.add(url, future_nzo)
                    time.sleep(1.0)
                    continue

            url = url.replace(' ', '')

            try:
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing', url)
                        continue

                filename = None
                category = None
                gzipped = False
                nzo_info = {}
                wait = 0
                retry = True
                fetch_request = None

                logging.info('Grabbing URL %s', url)
                try:
                    fetch_request = _build_request(url)
                except Exception as e:
                    # Cannot list exceptions here, because of unpredictability over platforms
                    error0 = str(sys.exc_info()[0]).lower()
                    error1 = str(sys.exc_info()[1]).lower()
                    logging.debug('Error "%s" trying to get the url %s', error1, url)
                    if 'certificate_verify_failed' in error1 or 'certificateerror' in error0:
                        msg = T('Server %s uses an untrusted HTTPS certificate') % ''
                        msg += ' - https://sabnzbd.org/certificate-errors'
                        retry = False
                    elif 'nodename nor servname provided' in error1:
                        msg = T('Server name does not resolve')
                        retry = False
                    elif '401' in error1 or 'unauthorized' in error1:
                        msg = T('Unauthorized access')
                        retry = False
                    elif '404' in error1:
                        msg = T('File not on server')
                        retry = False
                    elif hasattr(e, 'headers') and 'retry-after' in e.headers:
                        # Catch when the server sends a retry (e.headers is case-INsensitive)
                        wait = misc.int_conv(e.headers['retry-after'])

                # Check if dereference is used
                new_url = dereferring(url, fetch_request)
                if new_url:
                    self.add(new_url, future_nzo)
                    continue

                if fetch_request:
                    for hdr in fetch_request.headers:
                        try:
                            item = hdr.lower()
                            value = fetch_request.headers[hdr]
                        except:
                            continue
                        if item in ('content-encoding',) and value == 'gzip':
                            gzipped = True
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo',):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name',):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item == 'x-dnzb-propername':
                            nzo_info['propername'] = value
                        elif item == 'x-dnzb-episodename':
                            nzo_info['episodename'] = value
                        elif item == 'x-dnzb-year':
                            nzo_info['year'] = value
                        elif item == 'x-dnzb-failure':
                            nzo_info['failure'] = value
                        elif item == 'x-dnzb-details':
                            nzo_info['details'] = value
                        elif item == 'x-dnzb-password':
                            nzo_info['password'] = value
                        elif item == 'retry-after':
                            wait = misc.int_conv(value)

                        # Rating fields
                        if item in _RARTING_FIELDS:
                            nzo_info[item] = value

                        # Get filename from Content-Disposition header
                        if not filename and "filename=" in value:
                            filename = value[value.index("filename=") + 9:].strip(';').strip('"')

                if wait:
                    # For sites that have a rate-limiting attribute
                    msg = ''
                    retry = True
                    fetch_request = None
                elif retry:
                    fetch_request, msg, retry, wait, data = _analyse(fetch_request, future_nzo)

                if not fetch_request:
                    if retry:
                        logging.info('Retry URL %s', url)
                        self.add(url, future_nzo, wait)
                    else:
                        self.fail_to_history(future_nzo, url, msg)
                    continue

                if not filename:
                    filename = os.path.basename(urllib2.unquote(url))

                    # URL was redirected, maybe the redirect has better filename?
                    # Check if the original URL has extension
                    if url != fetch_request.url and misc.get_ext(filename) not in VALID_NZB_FILES:
                        filename = os.path.basename(urllib2.unquote(fetch_request.url))
                elif '&nzbname=' in filename:
                    # Sometimes the filename contains the full URL, duh!
                    filename = filename[filename.find('&nzbname=') + 9:]

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # process data
                if gzipped:
                    filename += '.gz'
                if not data:
                    try:
                        data = fetch_request.read()
                    except (IncompleteRead, IOError):
                        self.fail_to_history(future_nzo, url, T('Server could not complete request'))
                        fetch_request.close()
                        continue
                fetch_request.close()

                if '<nzb' in data and misc.get_ext(filename) != '.nzb':
                    filename += '.nzb'

                # Sanitize filename first (also removing forbidden Windows-names)
                filename = misc.sanitize_filename(filename)

                # Write data to temp file
                path = os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)
                path = os.path.join(path, filename)
                f = open(path, 'wb')
                f.write(data)
                f.close()
                del data

                # Check if nzb file
                if misc.get_ext(filename) in VALID_NZB_FILES:
                    res = dirscanner.ProcessSingleFile(filename, path, pp=pp, script=script, cat=cat, priority=priority,
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url, keep=False,
                                                       nzo_id=future_nzo.nzo_id)[0]
                    if res:
                        if res == -2:
                            logging.info('Incomplete NZB, retry after 5 min %s', url)
                            when = 300
                        elif res == -1:
                            # Error, but no reason to retry. Warning is already given
                            NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                            continue
                        else:
                            logging.info('Unknown error fetching NZB, retry after 2 min %s', url)
                            when = 120
                        self.add(url, future_nzo, when)

                else:
                    # Check if a supported archive
                    status, zf, exp_ext = dirscanner.is_archive(path)
                    if status == 0:
                        if misc.get_ext(filename) not in ('.rar', '.zip', '.7z'):
                            filename = filename + exp_ext
                            os.rename(path, path + exp_ext)
                            path = path + exp_ext

                        dirscanner.ProcessArchiveFile(filename, path, pp, script, cat, priority=priority,
                                                     nzbname=nzbname, url=future_nzo.url, keep=False,
                                                     nzo_id=future_nzo.nzo_id)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(path)  # remove the temp file; fetch_request is the HTTP response, not a path
                        except:
                            pass
                        logging.info('Unknown filetype when fetching NZB, retry after 30s %s', url)
                        self.add(url, future_nzo, 30)
            except:
                logging.error(T('URLGRABBER CRASHED'), exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Example #15
    def run(self):
        self.shutdown = False
        while not self.shutdown:
            # Set NzbObject object to None so reference from this thread
            # does not keep the object alive in the future (see #1628)
            future_nzo = None
            url, future_nzo = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue

            if future_nzo:
                # Re-queue when too early and still active
                if future_nzo.url_wait and future_nzo.url_wait > time.time():
                    self.add(url, future_nzo)
                    time.sleep(1.0)
                    continue
                # Paused
                if future_nzo.status == Status.PAUSED:
                    self.add(url, future_nzo)
                    time.sleep(1.0)
                    continue

            url = url.replace(" ", "")

            try:
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug("Dropping URL %s, job entry missing",
                                      url)
                        continue

                filename = None
                category = None
                nzo_info = {}
                wait = 0
                retry = True
                fetch_request = None

                logging.info("Grabbing URL %s", url)
                try:
                    fetch_request = _build_request(url)
                except Exception as e:
                    # Cannot list exceptions here, because of unpredictability over platforms
                    error0 = str(sys.exc_info()[0]).lower()
                    error1 = str(sys.exc_info()[1]).lower()
                    logging.debug('Error "%s" trying to get the url %s',
                                  error1, url)
                    if "certificate_verify_failed" in error1 or "certificateerror" in error0:
                        msg = T("Server %s uses an untrusted HTTPS certificate"
                                ) % ""
                        msg += " - https://sabnzbd.org/certificate-errors"
                        retry = False
                    elif "nodename nor servname provided" in error1:
                        msg = T("Server name does not resolve")
                        retry = False
                    elif "401" in error1 or "unauthorized" in error1:
                        msg = T("Unauthorized access")
                        retry = False
                    elif "404" in error1:
                        msg = T("File not on server")
                        retry = False
                    elif hasattr(e, "headers") and "retry-after" in e.headers:
                        # Catch when the server sends a retry (e.headers is case-INsensitive)
                        wait = misc.int_conv(e.headers["retry-after"])

                if fetch_request:
                    for hdr in fetch_request.headers:
                        try:
                            item = hdr.lower()
                            value = fetch_request.headers[hdr]
                        except:
                            continue
                        if item in ("category_id", "x-dnzb-category"):
                            category = value
                        elif item in ("x-dnzb-moreinfo", ):
                            nzo_info["more_info"] = value
                        elif item in ("x-dnzb-name", ):
                            filename = value
                            if not filename.endswith(".nzb"):
                                filename += ".nzb"
                        elif item == "x-dnzb-propername":
                            nzo_info["propername"] = value
                        elif item == "x-dnzb-episodename":
                            nzo_info["episodename"] = value
                        elif item == "x-dnzb-year":
                            nzo_info["year"] = value
                        elif item == "x-dnzb-failure":
                            nzo_info["failure"] = value
                        elif item == "x-dnzb-details":
                            nzo_info["details"] = value
                        elif item == "x-dnzb-password":
                            nzo_info["password"] = value
                        elif item == "retry-after":
                            wait = misc.int_conv(value)

                        # Rating fields
                        if item in _RARTING_FIELDS:
                            nzo_info[item] = value

                        # Get filename from Content-Disposition header
                        if not filename and "filename=" in value:
                            filename = value[value.index("filename=") + 9:].strip(";").strip('"')

                if wait:
                    # For sites that have a rate-limiting attribute
                    msg = ""
                    retry = True
                    fetch_request = None
                elif retry:
                    fetch_request, msg, retry, wait, data = _analyse(
                        fetch_request, future_nzo)

                if not fetch_request:
                    if retry:
                        logging.info("Retry URL %s", url)
                        self.add(url, future_nzo, wait)
                    else:
                        self.fail_to_history(future_nzo, url, msg)
                    continue

                if not filename:
                    filename = os.path.basename(urllib.parse.unquote(url))

                    # URL was redirected, maybe the redirect has better filename?
                    # Check if the original URL has extension
                    if (url != fetch_request.geturl()
                            and sabnzbd.filesystem.get_ext(filename)
                            not in VALID_NZB_FILES + VALID_ARCHIVES):
                        filename = os.path.basename(
                            urllib.parse.unquote(fetch_request.geturl()))
                elif "&nzbname=" in filename:
                    # Sometimes the filename contains the full URL, duh!
                    filename = filename[filename.find("&nzbname=") + 9:]

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == "*") and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # process data
                if not data:
                    try:
                        data = fetch_request.read()
                    except (IncompleteRead, IOError):
                        self.fail_to_history(
                            future_nzo, url,
                            T("Server could not complete request"))
                        fetch_request.close()
                        continue
                fetch_request.close()

                if b"<nzb" in data and sabnzbd.filesystem.get_ext(
                        filename) != ".nzb":
                    filename += ".nzb"

                # Sanitize filename first (also removing forbidden Windows-names)
                filename = sabnzbd.filesystem.sanitize_filename(filename)

                # If no filename, make one
                if not filename:
                    filename = sabnzbd.get_new_id(
                        "url",
                        os.path.join(cfg.admin_dir.get_path(),
                                     FUTURE_Q_FOLDER))

                # Write data to temp file
                path = os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER,
                                    filename)
                with open(path, "wb") as temp_nzb:
                    temp_nzb.write(data)

                # Check if nzb file
                if sabnzbd.filesystem.get_ext(filename) in VALID_ARCHIVES + VALID_NZB_FILES:
                    res, _ = sabnzbd.add_nzbfile(
                        path,
                        pp=pp,
                        script=script,
                        cat=cat,
                        priority=priority,
                        nzbname=nzbname,
                        nzo_info=nzo_info,
                        url=future_nzo.url,
                        keep=False,
                        password=future_nzo.password,
                        nzo_id=future_nzo.nzo_id,
                    )
                    # -2==Error/retry, -1==Error, 0==OK, 1==Empty
                    if res == -2:
                        logging.info("Incomplete NZB, retry after 5 min %s",
                                     url)
                        self.add(url, future_nzo, when=300)
                    elif res == -1:
                        # Error already thrown
                        self.fail_to_history(future_nzo, url)
                    elif res == 1:
                        # No NZB-files inside archive
                        self.fail_to_history(future_nzo, url,
                                             T("Empty NZB file %s") % filename)
                else:
                    logging.info(
                        "Unknown filetype when fetching NZB, retry after 30s %s",
                        url)
                    self.add(url, future_nzo, 30)

                # Always clean up what we wrote to disk
                try:
                    sabnzbd.filesystem.remove_file(path)
                except:
                    pass
            except:
                logging.error(T("URLGRABBER CRASHED"), exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Example #16
0
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo, retry_count) = self.queue.get()
            if not url:
                continue
            url = url.replace(' ', '')

            try:
                del_bookmark = not future_nzo
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing', url)
                        continue

                # Add nzbmatrix credentials if needed
                url, matrix_id = _matrix_url(url)

                # When still waiting for nzbmatrix wait period, requeue
                if matrix_id and self.matrix_wait > time.time():
                    self.queue.put((url, future_nzo, retry_count))
                    continue

                # _grab_url cannot reside in a function, because the tempfile
                # would not survive the end of the function
                if del_bookmark:
                    logging.info('Removing nzbmatrix bookmark %s', matrix_id)
                else:
                    logging.info('Grabbing URL %s', url)
                opener = urllib.FancyURLopener({})
                opener.prompt_user_passwd = None
                opener.addheaders = []
                opener.addheader('User-Agent', 'SABnzbd+/%s' % sabnzbd.version.__version__)
                opener.addheader('Accept-encoding', 'gzip')
                filename = None
                category = None
                length = 0
                nzo_info = {}
                try:
                    fn, header = opener.retrieve(url)
                except:
                    fn = None

                if fn:
                    for tup in header.items():
                        try:
                            item = tup[0].lower()
                            value = tup[1].strip()
                        except:
                            continue
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo',):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name',):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item in ('content-length',):
                            length = misc.int_conv(value)

                        if not filename:
                            for item in tup:
                                if "filename=" in item:
                                    filename = item[item.index("filename=") + 9:].strip(';').strip('"')

                if matrix_id:
                    fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                    if retry and wait > 0:
                        self.matrix_wait = time.time() + wait
                        logging.debug('Retry URL %s after waiting', url)
                        self.queue.put((url, future_nzo, retry_count))
                        continue
                    category = _MATRIX_MAP.get(category, category)
                else:
                    msg = ''
                    retry = True

                # Check if the filepath is specified, if not, check if a retry is allowed.
                if not fn:
                    retry_count -= 1
                    if retry_count > 0 and retry:
                        logging.info('Retry URL %s', url)
                        self.queue.put((url, future_nzo, retry_count))
                    elif not del_bookmark:
                        misc.bad_fetch(future_nzo, url, msg, retry=True)
                    continue

                if del_bookmark:
                    continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                # Sanitize and trim name, preserving the extension
                filename, ext = os.path.splitext(filename)
                filename = misc.sanitize_foldername(filename)
                filename += '.' + misc.sanitize_foldername(ext)

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                    res = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat, priority=priority, \
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url)
                    if res == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                    elif res == -2:
                        retry_count -= 1
                        if retry_count > 0:
                            logging.info('Incomplete NZB, retry %s', url)
                            self.queue.put((url, future_nzo, retry_count))
                        else:
                            misc.bad_fetch(future_nzo, url, retry=True, content=True)
                    else:
                        misc.bad_fetch(future_nzo, url, retry=True, content=True)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(filename, fn, pp, script, cat, priority=priority, url=future_nzo.url) == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        misc.bad_fetch(future_nzo, url, retry=True, content=True)
            except:
                logging.error('URLGRABBER CRASHED', exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Example #17
0
    def run_feed(self, feed=None, download=False, ignoreFirst=False, force=False, readout=True):
        """ Run the query for one URI and apply filters """
        self.shutdown = False

        if not feed:
            return 'No such feed'

        newlinks = []
        new_downloads = []

        # Preparations, get options
        try:
            feeds = config.get_rss()[feed]
        except KeyError:
            logging.error(T('Incorrect RSS feed description "%s"'), feed)
            logging.info("Traceback: ", exc_info=True)
            return T('Incorrect RSS feed description "%s"') % feed

        uris = feeds.uri()
        defCat = feeds.cat()
        import sabnzbd.api
        if not notdefault(defCat) or defCat not in sabnzbd.api.list_cats(default=False):
            defCat = None
        defPP = feeds.pp()
        if not notdefault(defPP):
            defPP = None
        defScript = feeds.script()
        if not notdefault(defScript):
            defScript = None
        defPrio = feeds.priority()
        if not notdefault(defPrio):
            defPrio = None

        # Preparations, convert filters to regex's
        regexes = []
        reTypes = []
        reCats = []
        rePPs = []
        rePrios = []
        reScripts = []
        reEnabled = []
        for filter in feeds.filters():
            reCat = filter[0]
            if reCat in ('', '*'):
                reCat = None
            reCats.append(reCat)
            rePPs.append(filter[1])
            reScripts.append(filter[2])
            reTypes.append(filter[3])
            if filter[3] in ('<', '>', 'F', 'S'):
                regexes.append(filter[4])
            else:
                regexes.append(convert_filter(filter[4]))
            rePrios.append(filter[5])
            reEnabled.append(filter[6] != '0')
        regcount = len(regexes)

        # Set first if this is the very first scan of this URI
        first = (feed not in self.jobs) and ignoreFirst

        # Add sabnzbd's custom User Agent
        feedparser.USER_AGENT = 'SABnzbd+/%s' % sabnzbd.version.__version__

        # Read the RSS feed
        msg = None
        entries = None
        if readout:
            all_entries = []
            for uri in uris:
                uri = uri.replace(' ', '%20')
                logging.debug("Running feedparser on %s", uri)
                feed_parsed = feedparser.parse(uri.replace('feed://', 'http://'))
                logging.debug("Done parsing %s", uri)

                if not feed_parsed:
                    msg = T('Failed to retrieve RSS from %s: %s') % (uri, '?')
                    logging.info(msg)

                status = feed_parsed.get('status', 999)
                if status in (401, 402, 403):
                    msg = T('Do not have valid authentication for feed %s') % feed
                    logging.info(msg)

                if 500 <= status <= 599:
                    msg = T('Server side error (server code %s); could not get %s on %s') % (status, feed, uri)
                    logging.info(msg)

                entries = feed_parsed.get('entries')
                if 'bozo_exception' in feed_parsed and not entries:
                    msg = str(feed_parsed['bozo_exception'])
                    if 'CERTIFICATE_VERIFY_FAILED' in msg:
                        msg = T('Server %s uses an untrusted HTTPS certificate') % get_urlbase(uri)
                        msg += ' - https://sabnzbd.org/certificate-errors'
                        logging.error(msg)
                    else:
                        msg = T('Failed to retrieve RSS from %s: %s') % (uri, xml_name(msg))
                    logging.info(msg)

                if not entries:
                    msg = T('RSS Feed %s was empty') % uri
                    logging.info(msg)
                all_entries.extend(entries)
            entries = all_entries

        if feed not in self.jobs:
            self.jobs[feed] = {}
        jobs = self.jobs[feed]
        if readout:
            if not entries:
                return unicoder(msg)
        else:
            entries = jobs.keys()
            # Sort in the order the jobs came from the feed
            entries.sort(lambda x, y: jobs[x].get('order', 0) - jobs[y].get('order', 0))

        order = 0
        # Filter out valid new links
        for entry in entries:
            if self.shutdown:
                return

            if readout:
                try:
                    link, category, size, age, season, episode = _get_link(uri, entry)
                except (AttributeError, IndexError):
                    link = None
                    category = u''
                    size = 0L
                    age = None
                    logging.info(T('Incompatible feed') + ' ' + uri)
                    logging.info("Traceback: ", exc_info=True)
                    return T('Incompatible feed')
                title = entry.title

                # If there's multiple feeds, remove the duplicates based on title and size
                if len(uris) > 1:
                    skip_job = False
                    for job_link, job in jobs.items():
                        # Allow 5% size deviation because indexers might have small differences for same release
                        if job.get('title') == title and link != job_link and (job.get('size')*0.95) < size < (job.get('size')*1.05):
                            logging.info("Ignoring job %s from other feed", title)
                            skip_job = True
                            break
                    if skip_job:
                        continue
            else:
                link = entry
                category = jobs[link].get('orgcat', '')
                if category in ('', '*'):
                    category = None
                title = jobs[link].get('title', '')
                size = jobs[link].get('size', 0L)
                age = jobs[link].get('age')
                season = jobs[link].get('season', 0)
                episode = jobs[link].get('episode', 0)

            if link:
                # Make sure spaces are quoted in the URL
                link = link.strip().replace(' ', '%20')

                newlinks.append(link)

                if link in jobs:
                    jobstat = jobs[link].get('status', ' ')[0]
                else:
                    jobstat = 'N'
                if jobstat in 'NGB' or (jobstat == 'X' and readout):
                    # Match this title against all filters
                    logging.debug('Trying title %s', title)
                    result = False
                    myCat = defCat
                    myPP = defPP
                    myScript = defScript
                    myPrio = defPrio
                    n = 0
                    if ('F' in reTypes or 'S' in reTypes) and (not season or not episode):
                        season, episode = sabnzbd.newsunpack.analyse_show(title)[1:3]
                        season = int_conv(season)
                        episode = int_conv(episode)

                    # Match against all filters until a positive or negative match
                    logging.debug('Size %s for %s', size, title)
                    for n in xrange(regcount):
                        if reEnabled[n]:
                            if category and reTypes[n] == 'C':
                                found = re.search(regexes[n], category)
                                if not found:
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break
                            elif reTypes[n] == '<' and size and from_units(regexes[n]) < size:
                                # "Size at most" : too large
                                logging.debug('Filter rejected on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == '>' and size and from_units(regexes[n]) > size:
                                # "Size at least" : too small
                                logging.debug('Filter rejected on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == 'F' and not ep_match(season, episode, regexes[n]):
                                # "Starting from SxxEyy", too early episode
                                logging.debug('Filter requirement match on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == 'S' and season and episode and ep_match(season, episode, regexes[n], title):
                                logging.debug('Filter matched on rule %d', n)
                                result = True
                                break
                            else:
                                if regexes[n]:
                                    found = re.search(regexes[n], title)
                                else:
                                    found = False
                                if reTypes[n] == 'M' and not found:
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break
                                if found and reTypes[n] == 'A':
                                    logging.debug("Filter matched on rule %d", n)
                                    result = True
                                    break
                                if found and reTypes[n] == 'R':
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break

                    if reCats:
                        if not result and defCat:
                            # Apply Feed-category on non-matched items
                            myCat = defCat
                        elif result and notdefault(reCats[n]):
                            # Use the matched info
                            myCat = reCats[n]
                        elif category and not defCat:
                            # No result and no Feed-category
                            myCat = cat_convert(category)

                        if myCat:
                            myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
                        else:
                            myCat = catPP = catScript = catPrio = None
                        if notdefault(rePPs[n]):
                            myPP = rePPs[n]
                        elif not (reCats[n] or category):
                            myPP = catPP
                        if notdefault(reScripts[n]):
                            myScript = reScripts[n]
                        elif not (notdefault(reCats[n]) or category):
                            myScript = catScript
                        if rePrios[n] not in (str(DEFAULT_PRIORITY), ''):
                            myPrio = rePrios[n]
                        elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
                            myPrio = catPrio

                    if cfg.no_dupes() and self.check_duplicate(title):
                        if cfg.no_dupes() == 1:
                            # Dupe-detection: Discard
                            logging.info("Ignoring duplicate job %s", title)
                            continue
                        elif cfg.no_dupes() == 3:
                            # Dupe-detection: Fail
                            # We accept it so the Queue can send it to the History
                            logging.info("Found duplicate job %s", title)
                        else:
                            # Dupe-detection: Pause
                            myPrio = DUP_PRIORITY

                    act = download and not first
                    if link in jobs:
                        act = act and not jobs[link].get('status', '').endswith('*')
                        act = act or force
                        star = first or jobs[link].get('status', '').endswith('*')
                    else:
                        star = first
                    if result:
                        _HandleLink(jobs, link, title, size, age, season, episode, 'G', category, myCat, myPP, myScript,
                                    act, star, order, priority=myPrio, rule=str(n))
                        if act:
                            new_downloads.append(title)
                    else:
                        _HandleLink(jobs, link, title, size, age, season, episode, 'B', category, myCat, myPP, myScript,
                                    False, star, order, priority=myPrio, rule=str(n))
            order += 1

        # Send email if wanted and not "forced"
        if new_downloads and cfg.email_rss() and not force:
            emailer.rss_mail(feed, new_downloads)

        remove_obsolete(jobs, newlinks)
        return msg
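
The filter loop in this example walks the rules in order and stops at the first decisive one: an 'M' (must-match) rule rejects when it fails, 'A' accepts on match, 'R' rejects on match. A reduced sketch of that control flow, with a made-up rule list (Python 3):

import re

def match_title(title, rules):
    # rules: list of (ftype, pattern) where 'M' = must match,
    # 'A' = accept on match, 'R' = reject on match
    result = False
    for ftype, pattern in rules:
        found = re.search(pattern, title) if pattern else None
        if ftype == "M" and not found:
            return False
        if ftype == "A" and found:
            return True
        if ftype == "R" and found:
            return False
    return result

rules = [("R", r"\bFRENCH\b"), ("A", r"1080p")]
print(match_title("Some.Show.S01E01.1080p", rules))  # -> True
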
Example #18
0
    def __set_priority(self, nzo_id, priority):
        """ Sets the priority on the nzo and places it in the queue at the appropriate position """
        try:
            priority = int_conv(priority)
            nzo = self.__nzo_table[nzo_id]
            nzo_id_pos1 = -1
            pos = -1

            # If priority == STOP_PRIORITY, then send to queue
            if priority == STOP_PRIORITY:
                self.end_job(nzo)
                return

            # Get the current position in the queue
            for i in xrange(len(self.__nzo_list)):
                if nzo_id == self.__nzo_list[i].nzo_id:
                    nzo_id_pos1 = i
                    break

            # Don't change priority and order if priority is the same as asked
            if nzo_id_pos1 != -1 and priority == self.__nzo_list[nzo_id_pos1].priority:
                return nzo_id_pos1

            nzo.set_priority(priority)
            if sabnzbd.scheduler.analyse(False, priority) and \
               nzo.status in (Status.CHECKING, Status.DOWNLOADING, Status.QUEUED):
                nzo.status = Status.PAUSED
            elif nzo.status == Status.PAUSED:
                nzo.status = Status.QUEUED
            nzo.save_to_disk()

            if nzo_id_pos1 != -1:
                del self.__nzo_list[nzo_id_pos1]
                if priority == TOP_PRIORITY:
                    # A top priority item (usually a completed download fetching pars)
                    # is added to the top of the queue
                    self.__nzo_list.insert(0, nzo)
                    pos = 0
                elif priority == LOW_PRIORITY:
                    pos = len(self.__nzo_list)
                    self.__nzo_list.append(nzo)
                else:
                    # for high priority we need to add the item at the bottom
                    # of any other high priority items above the normal priority
                    # for normal priority we need to add the item at the bottom
                    # of the normal priority items above the low priority
                    if self.__nzo_list:
                        p = 0
                        added = False
                        for position in self.__nzo_list:
                            if position.priority < priority:
                                self.__nzo_list.insert(p, nzo)
                                pos = p
                                added = True
                                break
                            p += 1
                        if not added:
                            # if there are no other items classed as a lower priority
                            # then it will be added to the bottom of the queue
                            pos = len(self.__nzo_list)
                            self.__nzo_list.append(nzo)
                    else:
                        # if the queue is empty then simply append the item to the bottom
                        self.__nzo_list.append(nzo)
                        pos = 0

            logging.info('Set priority=%s for job %s => position=%s ', priority, self.__nzo_table[nzo_id].final_name, pos)
            return pos

        except:
            return -1
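
The insertion step above scans from the head of the queue and places the job above the first entry whose priority ranks below it, falling back to the tail. The same idea in isolation; the Job class is invented for the demo (Python 3):

class Job:
    def __init__(self, name, priority):
        self.name = name
        self.priority = priority

def insert_by_priority(queue, job):
    # Higher numeric priority sorts first: insert above the first
    # entry that ranks below the new job
    for pos, existing in enumerate(queue):
        if existing.priority < job.priority:
            queue.insert(pos, job)
            return pos
    queue.append(job)
    return len(queue) - 1

q = [Job("a", 2), Job("b", 0)]
print(insert_by_priority(q, Job("c", 1)))  # -> 1, between "a" and "b"
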
Example #19
0
    def run(self):
        # First check IPv6 connectivity
        sabnzbd.EXTERNAL_IPV6 = sabnzbd.test_ipv6()
        logging.debug("External IPv6 test result: %s", sabnzbd.EXTERNAL_IPV6)

        # Then we check SSL certificate checking
        sabnzbd.CERTIFICATE_VALIDATION = sabnzbd.test_cert_checking()
        logging.debug("SSL verification test: %s",
                      sabnzbd.CERTIFICATE_VALIDATION)

        # Kick BPS-Meter to check quota
        BPSMeter.do.update()

        while 1:
            for server in self.servers:
                for nw in server.busy_threads[:]:
                    if (nw.nntp and nw.nntp.error_msg) or (
                            nw.timeout and time.time() > nw.timeout):
                        if nw.nntp and nw.nntp.error_msg:
                            self.__reset_nw(nw, "", warn=False)
                        else:
                            self.__reset_nw(nw, "timed out")
                        server.bad_cons += 1
                        self.maybe_block_server(server)
                if server.restart:
                    if not server.busy_threads:
                        newid = server.newid
                        server.stop(self.read_fds, self.write_fds)
                        self.servers.remove(server)
                        if newid:
                            self.init_server(None, newid)
                        self.__restart -= 1
                        sabnzbd.nzbqueue.NzbQueue.do.reset_all_try_lists()
                        # Have to leave this loop, because we removed element
                        break
                    else:
                        # Restart pending, don't add new articles
                        continue

                if (not server.idle_threads or server.restart
                        or self.is_paused() or self.shutdown or self.postproc):
                    continue

                if not server.active:
                    continue

                for nw in server.idle_threads[:]:
                    if nw.timeout:
                        if time.time() < nw.timeout:
                            continue
                        else:
                            nw.timeout = None

                    if not server.info:
                        # Only request info if there's stuff in the queue
                        if not sabnzbd.nzbqueue.NzbQueue.do.is_empty():
                            self.maybe_block_server(server)
                            request_server_info(server)
                        break

                    article = sabnzbd.nzbqueue.NzbQueue.do.get_article(
                        server, self.servers)

                    if not article:
                        break

                    if server.retention and article.nzf.nzo.avg_stamp < time.time() - server.retention:
                        # Let's get rid of all the articles for this server at once
                        logging.info("Job %s too old for %s, moving on",
                                     article.nzf.nzo.final_name, server.host)
                        while article:
                            self.decode(article, None)
                            article = article.nzf.nzo.get_article(
                                server, self.servers)
                        break

                    server.idle_threads.remove(nw)
                    server.busy_threads.append(nw)

                    nw.article = article

                    if nw.connected:
                        self.__request_article(nw)
                    else:
                        try:
                            logging.info("%s@%s: Initiating connection",
                                         nw.thrdnum, server.host)
                            nw.init_connect(self.write_fds)
                        except:
                            logging.error(
                                T("Failed to initialize %s@%s with reason: %s"
                                  ),
                                nw.thrdnum,
                                server.host,
                                sys.exc_info()[1],
                            )
                            self.__reset_nw(nw, "failed to initialize")

            # Exit-point
            if self.shutdown:
                empty = True
                for server in self.servers:
                    if server.busy_threads:
                        empty = False
                        break

                if empty:
                    for server in self.servers:
                        server.stop(self.read_fds, self.write_fds)

                    logging.info("Shutting down")
                    break

            if self.force_disconnect:
                for server in self.servers:
                    for nw in server.idle_threads + server.busy_threads:
                        send_quit = nw.connected and server.active
                        self.__reset_nw(nw,
                                        "forcing disconnect",
                                        warn=False,
                                        wait=False,
                                        send_quit=send_quit)
                    # Make sure server address resolution is refreshed
                    server.info = None

                self.force_disconnect = False

            # Use select to find sockets ready for reading/writing
            readkeys = self.read_fds.keys()
            writekeys = self.write_fds.keys()

            if readkeys or writekeys:
                read, write, error = select.select(readkeys, writekeys, (), 1.0)

                # Why check so often when so few things happened?
                if self.can_be_slowed and len(readkeys) >= 8 and len(read) <= 2:
                    time.sleep(0.01)

                # Need to initialize the check during first 20 seconds
                if self.can_be_slowed is None or self.can_be_slowed_timer:
                    # Wait for stable speed to start testing
                    if not self.can_be_slowed_timer and BPSMeter.do.get_stable_speed(timespan=10):
                        self.can_be_slowed_timer = time.time()

                    # Check 10 seconds after enabling slowdown
                    if self.can_be_slowed_timer and time.time() > self.can_be_slowed_timer + 10:
                        # Now let's check if it was stable in the last 10 seconds
                        self.can_be_slowed = BPSMeter.do.get_stable_speed(timespan=10)
                        self.can_be_slowed_timer = 0
                        logging.debug("Downloader-slowdown: %r",
                                      self.can_be_slowed)

            else:
                read, write, error = ([], [], [])

                BPSMeter.do.reset()

                time.sleep(1.0)

                DOWNLOADER_CV.acquire()
                while ((sabnzbd.nzbqueue.NzbQueue.do.is_empty()
                        or self.is_paused() or self.postproc)
                       and not self.shutdown and not self.__restart):
                    DOWNLOADER_CV.wait()
                DOWNLOADER_CV.release()

                self.force_disconnect = False

            for selected in write:
                nw = self.write_fds[selected]

                fileno = nw.nntp.sock.fileno()

                if fileno not in self.read_fds:
                    self.read_fds[fileno] = nw

                if fileno in self.write_fds:
                    self.write_fds.pop(fileno)

            if not read:
                BPSMeter.do.update()
                continue

            for selected in read:
                nw = self.read_fds[selected]
                article = nw.article
                server = nw.server

                if article:
                    nzo = article.nzf.nzo

                try:
                    bytes_received, done, skip = nw.recv_chunk()
                except:
                    bytes_received, done, skip = (0, False, False)

                if skip:
                    BPSMeter.do.update()
                    continue

                if bytes_received < 1:
                    self.__reset_nw(nw,
                                    "server closed connection",
                                    warn=False,
                                    wait=False)
                    continue

                else:
                    if self.bandwidth_limit:
                        limit = self.bandwidth_limit
                        if bytes_received + BPSMeter.do.bps > limit:
                            while BPSMeter.do.bps > limit:
                                time.sleep(0.05)
                                BPSMeter.do.update()
                    BPSMeter.do.update(server.id, bytes_received)
                    nzo.update_download_stats(BPSMeter.do.bps, server.id,
                                              bytes_received)

                if not done and nw.status_code != 222:
                    if not nw.connected or nw.status_code == 480:
                        done = False
                        try:
                            nw.finish_connect(nw.status_code)
                            if sabnzbd.LOG_ALL:
                                logging.debug("%s@%s last message -> %s",
                                              nw.thrdnum, nw.server.host,
                                              nntp_to_msg(nw.data))
                            nw.clear_data()
                        except NNTPPermanentError as error:
                            # Handle login problems
                            block = False
                            penalty = 0
                            msg = error.response
                            ecode = int_conv(msg[:3])
                            display_msg = " [%s]" % msg
                            logging.debug("Server login problem: %s, %s",
                                          ecode, msg)
                            if ecode in (502, 400, 481, 482) and clues_too_many(msg):
                                # Too many connections: remove this thread and reduce thread-setting for server
                                # Plan to go back to the full number after a penalty timeout
                                if server.active:
                                    errormsg = T("Too many connections to server %s") % display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Too many connections to server %s"), server.host)
                                    self.__reset_nw(nw, None, warn=False, destroy=True, send_quit=True)
                                    self.plan_server(server, _PENALTY_TOOMANY)
                                    server.threads -= 1
                            elif ecode in (502, 481, 482) and clues_too_many_ip(msg):
                                # Account sharing?
                                if server.active:
                                    errormsg = T("Probable account sharing") + display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        name = " (%s)" % server.host
                                        logging.warning(T("Probable account sharing") + name)
                                penalty = _PENALTY_SHARE
                                block = True
                            elif ecode in (452, 481, 482, 381) or (ecode == 502 and clues_login(msg)):
                                # Cannot login, block this server
                                if server.active:
                                    errormsg = T("Failed login for server %s") % display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.error(T("Failed login for server %s"), server.host)
                                penalty = _PENALTY_PERM
                                block = True
                            elif ecode in (502, 482):
                                # Cannot connect (other reasons), block this server
                                if server.active:
                                    errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Cannot connect to server %s [%s]"), server.host, msg)
                                if clues_pay(msg):
                                    penalty = _PENALTY_PERM
                                else:
                                    penalty = _PENALTY_502
                                block = True
                            elif ecode == 400:
                                # Temp connection problem?
                                if server.active:
                                    logging.debug(
                                        "Unspecified error 400 from server %s",
                                        server.host)
                                penalty = _PENALTY_VERYSHORT
                                block = True
                            else:
                                # Unknown error, just keep trying
                                if server.active:
                                    errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Cannot connect to server %s [%s]"), server.host, msg)
                                penalty = _PENALTY_UNKNOWN
                                block = True
                            if block or (penalty and server.optional):
                                if server.active:
                                    server.active = False
                                    if penalty and (block or server.optional):
                                        self.plan_server(server, penalty)
                                    sabnzbd.nzbqueue.NzbQueue.do.reset_all_try_lists()
                                self.__reset_nw(nw, None, warn=False, send_quit=True)
                            continue
                        except:
                            logging.error(
                                T("Connecting %s@%s failed, message=%s"),
                                nw.thrdnum,
                                nw.server.host,
                                nntp_to_msg(nw.data),
                            )
                            # No reset-warning needed, above logging is sufficient
                            self.__reset_nw(nw, None, warn=False)

                        if nw.connected:
                            logging.info("Connecting %s@%s finished",
                                         nw.thrdnum, nw.server.host)
                            self.__request_article(nw)

                    elif nw.status_code == 223:
                        done = True
                        logging.debug("Article <%s> is present",
                                      article.article)

                    elif nw.status_code == 211:
                        done = False
                        logging.debug("group command ok -> %s",
                                      nntp_to_msg(nw.data))
                        nw.group = nw.article.nzf.nzo.group
                        nw.clear_data()
                        self.__request_article(nw)

                    elif nw.status_code in (411, 423, 430):
                        done = True
                        logging.debug(
                            "Thread %s@%s: Article %s missing (error=%s)",
                            nw.thrdnum,
                            nw.server.host,
                            article.article,
                            nw.status_code,
                        )
                        nw.clear_data()

                    elif nw.status_code == 500:
                        if nzo.precheck:
                            # Assume "STAT" command is not supported
                            server.have_stat = False
                            logging.debug("Server %s does not support STAT",
                                          server.host)
                        else:
                            # Assume "BODY" command is not supported
                            server.have_body = False
                            logging.debug("Server %s does not support BODY",
                                          server.host)
                        nw.clear_data()
                        self.__request_article(nw)

                if done:
                    server.bad_cons = 0  # Successful data, clear "bad" counter
                    server.errormsg = server.warning = ""
                    if sabnzbd.LOG_ALL:
                        logging.debug("Thread %s@%s: %s done", nw.thrdnum,
                                      server.host, article.article)
                    self.decode(article, nw.data)

                    nw.soft_reset()
                    server.busy_threads.remove(nw)
                    server.idle_threads.append(nw)
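
The login-error branch above keys everything off the three-digit status code at the start of the NNTP response line (ecode = int_conv(msg[:3])). A tolerant standalone version of that parse; the function name is made up (Python 3):

def nntp_status_code(response):
    # NNTP responses start with a three-digit code,
    # e.g. "502 Too many connections"; fall back to 0 on garbage
    try:
        return int(response[:3])
    except (ValueError, TypeError):
        return 0

print(nntp_status_code("481 Too many connections for your account"))  # -> 481
print(nntp_status_code(""))                                           # -> 0
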
Example #20
0
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo) = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue
            if future_nzo and future_nzo.wait and future_nzo.wait > time.time():
                # Requeue when too early and still active
                self.add(url, future_nzo)
                continue
            url = url.replace(' ', '')

            try:
                del_bookmark = not future_nzo
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing', url)
                        continue

                # Add nzbmatrix credentials if needed
                url, matrix_id = _matrix_url(url)

                # _grab_url cannot reside in a function, because the tempfile
                # would not survive the end of the function
                if del_bookmark:
                    logging.info('Removing nzbmatrix bookmark %s', matrix_id)
                else:
                    logging.info('Grabbing URL %s', url)
                opener = urllib.FancyURLopener({})
                opener.prompt_user_passwd = None
                opener.addheaders = []
                opener.addheader('User-Agent', 'SABnzbd+/%s' % sabnzbd.version.__version__)
                if not any(item in url for item in _BAD_GZ_HOSTS):
                    opener.addheader('Accept-encoding', 'gzip')
                filename = None
                category = None
                length = 0
                nzo_info = {}
                try:
                    fn, header = opener.retrieve(url)
                except:
                    fn = None

                if fn:
                    for tup in header.items():
                        try:
                            item = tup[0].lower()
                            value = tup[1].strip()
                        except:
                            continue
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo',):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name',):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item in ('content-length',):
                            length = misc.int_conv(value)

                        if not filename:
                            for item in tup:
                                if "filename=" in item:
                                    filename = item[item.index("filename=") + 9:].strip(';').strip('"')

                if matrix_id:
                    fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                    if not fn:
                        if retry:
                            logging.info(msg)
                            logging.debug('Retry nzbmatrix item %s after waiting %s sec', matrix_id, wait)
                            self.add(url, future_nzo, wait)
                        else:
                            logging.error(msg)
                            misc.bad_fetch(future_nzo, clean_matrix_url(url), msg, retry=True)
                        continue
                    category = _MATRIX_MAP.get(category, category)

                    if del_bookmark:
                        # No retries of nzbmatrix bookmark removals
                        continue

                else:
                    fn, msg, retry, wait = _analyse_others(fn, url)
                    if not fn:
                        if retry:
                            logging.info('Retry URL %s', url)
                            self.add(url, future_nzo, wait)
                        else:
                            misc.bad_fetch(future_nzo, url, msg, retry=True)
                        continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                # Sanitize and trim name, preserving the extension
                filename, ext = os.path.splitext(filename)
                filename = misc.sanitize_foldername(filename)
                filename += '.' + misc.sanitize_foldername(ext)

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                    res, nzo_ids = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat, priority=priority, \
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url)
                    if res == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                    else:
                        if res == -2:
                            logging.info('Incomplete NZB, retry after 5 min %s', url)
                            when = 300
                        else:
                            logging.info('Unknown error fetching NZB, retry after 2 min %s', url)
                            when = 120
                        self.add(url, future_nzo, when)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(filename, fn, pp, script, cat, priority=priority, url=future_nzo.url)[0] == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        logging.info('Unknown filetype when fetching NZB, retry after 30s %s', url)
                        self.add(url, future_nzo, 30)
            except:
                logging.error('URLGRABBER CRASHED', exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Example #21
0
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            (url, future_nzo) = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue

            if future_nzo:
                # Re-queue when too early and still active
                if future_nzo.wait and future_nzo.wait > time.time():
                    self.add(url, future_nzo)
                    time.sleep(1.0)
                    continue
                # Paused
                if future_nzo.status == Status.PAUSED:
                    self.add(url, future_nzo)
                    time.sleep(1.0)
                    continue

            url = url.replace(' ', '')

            try:
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing', url)
                        continue

                filename = None
                category = None
                gzipped = False
                nzo_info = {}
                wait = 0
                retry = True
                fn = None

                logging.info('Grabbing URL %s', url)
                try:
                    fn = _build_request(url)
                except Exception, e:
                    # Cannot list exceptions here, because of unpredictability over platforms
                    error0 = str(sys.exc_info()[0]).lower()
                    error1 = str(sys.exc_info()[1]).lower()
                    logging.debug('Error "%s" trying to get the url %s', error1, url)
                    if 'certificate_verify_failed' in error1 or 'certificateerror' in error0:
                        msg = T('Server %s uses an untrusted HTTPS certificate') % ''
                        msg += ' - https://sabnzbd.org/certificate-errors'
                        retry = False
                    elif 'nodename nor servname provided' in error1:
                        msg = T('Server name does not resolve')
                        retry = False
                    elif '401' in error1 or 'unauthorized' in error1:
                        msg = T('Unauthorized access')
                        retry = False
                    elif '404' in error1:
                        msg = T('File not on server')
                        retry = False
                    elif hasattr(e, 'headers') and 'retry-after' in e.headers:
                        # Catch if the server send retry (e.headers is case-INsensitive)
                        wait = misc.int_conv(e.headers['retry-after'])

                new_url = dereferring(url, fn)
                if new_url:
                    self.add(new_url, future_nzo)
                    continue

                if fn:
                    for hdr in fn.headers:
                        try:
                            item = hdr.lower()
                            value = fn.headers[hdr]
                        except:
                            continue
                        if item in ('content-encoding',) and value == 'gzip':
                            gzipped = True
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo',):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name',):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item == 'x-dnzb-propername':
                            nzo_info['propername'] = value
                        elif item == 'x-dnzb-episodename':
                            nzo_info['episodename'] = value
                        elif item == 'x-dnzb-year':
                            nzo_info['year'] = value
                        elif item == 'x-dnzb-failure':
                            nzo_info['failure'] = value
                        elif item == 'x-dnzb-details':
                            nzo_info['details'] = value
                        elif item == 'x-dnzb-password':
                            nzo_info['password'] = value
                        elif item == 'retry-after':
                            wait = misc.int_conv(value)

                        # Rating fields
                        if item in _RARTING_FIELDS:
                            nzo_info[item] = value

                        if not filename and "filename=" in value:
                            filename = value[value.index("filename=") + 9:].strip(';').strip('"')

                if wait:
                    # For sites that have a rate-limiting attribute
                    msg = ''
                    retry = True
                    fn = None
                elif retry:
                    fn, msg, retry, wait, data = _analyse(fn, url)

                if not fn:
                    if retry:
                        logging.info('Retry URL %s', url)
                        self.add(url, future_nzo, wait)
                    else:
                        bad_fetch(future_nzo, url, msg)
                    continue

                if not filename:
                    filename = os.path.basename(url)
                elif '&nzbname=' in filename:
                    # Sometimes the filename contains the full URL, duh!
                    filename = filename[filename.find('&nzbname=') + 9:]

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # process data
                if gzipped:
                    filename += '.gz'
                if not data:
                    try:
                        data = fn.read()
                    except (IncompleteRead, IOError):
                        bad_fetch(future_nzo, url, T('Server could not complete request'))
                        fn.close()
                        continue
                fn.close()

                if '<nzb' in data and misc.get_ext(filename) != '.nzb':
                    filename += '.nzb'

                # Sanitize filename first (also removing forbidden Windows-names)
                filename = misc.sanitize_filename(filename)

                # Write data to temp file
                path = os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER, filename)
                with open(path, 'wb') as temp_nzb:
                    temp_nzb.write(data)
                del data

                # Check if nzb file
                if misc.get_ext(filename) in ('.nzb', '.gz', '.bz2'):
                    res = dirscanner.ProcessSingleFile(filename, path, pp=pp, script=script, cat=cat, priority=priority,
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url, keep=False,
                                                       nzo_id=future_nzo.nzo_id)[0]
                    if res:
                        if res == -2:
                            logging.info('Incomplete NZB, retry after 5 min %s', url)
                            when = 300
                        elif res == -1:
                            # Error, but no reason to retry. Warning is already given
                            NzbQueue.do.remove(future_nzo.nzo_id, add_to_history=False)
                            continue
                        else:
                            logging.info('Unknown error fetching NZB, retry after 2 min %s', url)
                            when = 120
                        self.add(url, future_nzo, when)
                # Check if a supported archive
                else:
                    status, zf, exp_ext = dirscanner.is_archive(path)
                    if status == 0:
                        if misc.get_ext(filename) not in ('.rar', '.zip', '.7z'):
                            filename = filename + exp_ext
                            os.rename(path, path + exp_ext)
                            path = path + exp_ext

                        dirscanner.ProcessArchiveFile(filename, path, pp, script, cat, priority=priority,
                                                     nzbname=nzbname, url=future_nzo.url, keep=False,
                                                     nzo_id=future_nzo.nzo_id)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        logging.info('Unknown filetype when fetching NZB, retry after 30s %s', url)
                        self.add(url, future_nzo, 30)
            except:
                logging.error(T('URLGRABBER CRASHED'), exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
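
The retry-after handling above reduces to one pattern: record the earliest time the job may be fetched again, put it back on the queue, and skip it while the back-off window is open. A minimal stand-alone sketch of that pattern, assuming a generic queue of (url, job) tuples and a job object with a wait attribute (these names are illustrative, not SABnzbd's API):

import time

def requeue_with_delay(job_queue, url, job, wait_seconds):
    # Stamp the job with the earliest time it may be retried, then put it
    # back on the work queue (mirrors self.add(url, future_nzo, wait) above).
    job.wait = time.time() + wait_seconds
    job_queue.put((url, job))

def still_waiting(job):
    # True while the job is inside its back-off window.
    return getattr(job, 'wait', 0) > time.time()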
Example #22
0
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo) = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue
            if future_nzo and future_nzo.wait and future_nzo.wait > time.time():
                # Re-queue when too early and still active
                self.add(url, future_nzo)
                continue
            url = url.replace(' ', '')

            try:
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing',
                                      url)
                        continue

                logging.info('Grabbing URL %s', url)
                req = urllib2.Request(url)
                req.add_header('User-Agent',
                               'SABnzbd+/%s' % sabnzbd.version.__version__)
                if not [True for item in _BAD_GZ_HOSTS if item in url]:
                    req.add_header('Accept-encoding', 'gzip')
                filename = None
                category = None
                gzipped = False
                nzo_info = {}
                wait = 0
                retry = True
                fn = None
                try:
                    fn = urllib2.urlopen(req)
                except:
                    # Cannot list specific exceptions here; they vary across platforms
                    error0 = str(sys.exc_info()[0]).lower()
                    error1 = str(sys.exc_info()[1]).lower()
                    logging.debug('Error "%s" trying to get the url %s',
                                  error1, url)
                    if 'certificate_verify_failed' in error1 or 'certificateerror' in error0:
                        msg = T('Server %s uses an untrusted HTTPS certificate') % ''
                        retry = False
                    elif 'nodename nor servname provided' in error1:
                        msg = T('Server name does not resolve')
                        retry = False
                    elif '401' in error1 or 'unauthorized' in error1:
                        msg = T('Unauthorized access')
                        retry = False
                    elif '404' in error1:
                        msg = T('File not on server')
                        retry = False

                new_url = dereferring(url, fn)
                if new_url:
                    self.add(new_url, future_nzo)
                    continue

                if fn:
                    for hdr in fn.headers:
                        try:
                            item = hdr.lower()
                            value = fn.headers[hdr]
                        except:
                            continue
                        if item in ('content-encoding', ) and value == 'gzip':
                            gzipped = True
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo', ):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name', ):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item == 'x-dnzb-propername':
                            nzo_info['propername'] = value
                        elif item == 'x-dnzb-episodename':
                            nzo_info['episodename'] = value
                        elif item == 'x-dnzb-year':
                            nzo_info['year'] = value
                        elif item == 'x-dnzb-failure':
                            nzo_info['failure'] = value
                        elif item == 'x-dnzb-details':
                            nzo_info['details'] = value
                        elif item == 'x-dnzb-password':
                            nzo_info['password'] = value
                        elif item == 'retry-after':
                            # For NZBFinder
                            wait = misc.int_conv(value)

                        if not filename and "filename=" in value:
                            filename = value[value.index("filename=") +
                                             9:].strip(';').strip('"')

                if wait:
                    # For sites that have a rate-limiting attribute
                    msg = ''
                    retry = True
                    fn = None
                elif retry:
                    fn, msg, retry, wait, data = _analyse(fn, url)

                if not fn:
                    if retry:
                        logging.info('Retry URL %s', url)
                        self.add(url, future_nzo, wait)
                    else:
                        bad_fetch(future_nzo, url, msg)
                    continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                elif '&nzbname=' in filename:
                    # Sometimes the filename contains the full URL, duh!
                    filename = filename[filename.find('&nzbname=') + 9:]

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # process data
                if gzipped:
                    filename = filename + '.gz'
                if not data:
                    data = fn.read()
                fn.close()

                # Sanitize filename first
                filename = misc.sanitize_filename(filename)

                # Write data to temp file
                path = os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)
                path = os.path.join(path, filename)
                f = open(path, 'wb')
                f.write(data)
                f.close()
                del data

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz', '.bz2'):
                    res = dirscanner.ProcessSingleFile(
                        filename,
                        path,
                        pp=pp,
                        script=script,
                        cat=cat,
                        priority=priority,
                        nzbname=nzbname,
                        nzo_info=nzo_info,
                        url=future_nzo.url,
                        keep=False,
                        nzo_id=future_nzo.nzo_id)[0]
                    if res:
                        if res == -2:
                            logging.info(
                                'Incomplete NZB, retry after 5 min %s', url)
                            when = 300
                        elif res == -1:
                            # Error, but no reason to retry. Warning is already given
                            NzbQueue.do.remove(future_nzo.nzo_id,
                                               add_to_history=False)
                            continue
                        else:
                            logging.info(
                                'Unknown error fetching NZB, retry after 2 min %s',
                                url)
                            when = 120
                        self.add(url, future_nzo, when)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(
                            filename,
                            path,
                            pp,
                            script,
                            cat,
                            priority=priority,
                            nzbname=nzbname,
                            url=future_nzo.url,
                            keep=False,
                            nzo_id=future_nzo.nzo_id)[0]:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        logging.info(
                            'Unknown filetype when fetching NZB, retry after 30s %s',
                            url)
                        self.add(url, future_nzo, 30)
            except:
                logging.error(T('URLGRABBER CRASHED'), exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Example #23
0
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo) = self.queue.get()

            if not url:
                # stop signal, go test self.shutdown
                continue
            if future_nzo and future_nzo.wait and future_nzo.wait > time.time():
                # Requeue when too early and still active
                self.add(url, future_nzo)
                continue
            url = url.replace(' ', '')

            try:
                del_bookmark = not future_nzo
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except AttributeError:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing',
                                      url)
                        continue

                # Add nzbmatrix credentials if needed
                url, matrix_id = _matrix_url(url)

                # _grab_url cannot reside in a function, because the tempfile
                # would not survive the end of the function
                if del_bookmark:
                    logging.info('Removing nzbmatrix bookmark %s', matrix_id)
                else:
                    logging.info('Grabbing URL %s', url)
                if '.nzbsrus.' in url:
                    opener = urllib.URLopener({})
                else:
                    opener = urllib.FancyURLopener({})
                opener.prompt_user_passwd = None
                opener.addheaders = []
                opener.addheader('User-Agent',
                                 'SABnzbd+/%s' % sabnzbd.version.__version__)
                if not [True for item in _BAD_GZ_HOSTS if item in url]:
                    opener.addheader('Accept-encoding', 'gzip')
                filename = None
                category = None
                length = 0
                nzo_info = {}
                wait = 0
                try:
                    fn, header = opener.retrieve(url)
                except:
                    fn = None

                if fn:
                    for tup in header.items():
                        try:
                            item = tup[0].lower()
                            value = tup[1].strip()
                        except:
                            continue
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo', ):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name', ):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item == 'x-dnzb-propername':
                            nzo_info['propername'] = value
                        elif item == 'x-dnzb-episodename':
                            nzo_info['episodename'] = value
                        elif item == 'x-dnzb-year':
                            nzo_info['year'] = value
                        elif item == 'x-dnzb-failure':
                            nzo_info['failure'] = value
                        elif item == 'x-dnzb-details':
                            nzo_info['details'] = value
                        elif item in ('content-length', ):
                            length = misc.int_conv(value)
                        elif item == 'retry-after':
                            # For NZBFinder
                            wait = misc.int_conv(value)

                        if not filename:
                            for item in tup:
                                if "filename=" in item:
                                    filename = item[item.index("filename=") +
                                                    9:].strip(';').strip('"')

                if matrix_id:
                    fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                    if not fn:
                        if retry:
                            logging.info(msg)
                            logging.debug(
                                'Retry nzbmatrix item %s after waiting %s sec',
                                matrix_id, wait)
                            self.add(url, future_nzo, wait)
                        else:
                            logging.error(msg)
                            misc.bad_fetch(future_nzo,
                                           clean_matrix_url(url),
                                           msg,
                                           retry=True)
                        continue
                    category = get_matrix_category(url, category)

                    if del_bookmark:
                        # No retries of nzbmatrix bookmark removals
                        continue

                else:
                    if wait:
                        # For sites that have a rate-limiting attribute
                        msg = ''
                        retry = True
                        fn = None
                    else:
                        fn, msg, retry, wait = _analyse_others(fn, url)
                    if not fn:
                        if retry:
                            logging.info('Retry URL %s', url)
                            self.add(url, future_nzo, wait)
                        else:
                            misc.bad_fetch(future_nzo, url, msg, retry=True)
                        continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                    res, nzo_ids = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat, priority=priority,
                                                                nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url)
                    if res == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id,
                                           add_to_history=False)
                    else:
                        if res == -2:
                            logging.info(
                                'Incomplete NZB, retry after 5 min %s', url)
                            when = 300
                        elif res == -1:
                            # Error, but no reason to retry. Warning is already given
                            NzbQueue.do.remove(future_nzo.nzo_id,
                                               add_to_history=False)
                            continue
                        else:
                            logging.info(
                                'Unknown error fetching NZB, retry after 2 min %s',
                                url)
                            when = 120
                        self.add(url, future_nzo, when)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(
                            filename,
                            fn,
                            pp,
                            script,
                            cat,
                            priority=priority,
                            nzbname=nzbname,
                            url=future_nzo.url)[0] == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id,
                                           add_to_history=False)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        logging.info(
                            'Unknown filetype when fetching NZB, retry after 30s %s',
                            url)
                        self.add(url, future_nzo, 30)
            except:
                logging.error('URLGRABBER CRASHED', exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)
Example #24
0
    def run_feed(self, feed=None, download=False, ignoreFirst=False, force=False, readout=True):
        """ Run the query for one URI and apply filters """
        self.shutdown = False

        def dup_title(title):
            """ Check if this title was in this or other feeds
                Return matching feed name
            """
            title = title.lower()
            for fd in self.jobs:
                for lk in self.jobs[fd]:
                    item = self.jobs[fd][lk]
                    if item.get('status', ' ')[0] == 'D' and \
                       item.get('title', '').lower() == title:
                        return fd
            return ''

        if not feed:
            return 'No such feed'

        newlinks = []
        new_downloads = []

        # Preparations, get options
        try:
            feeds = config.get_rss()[feed]
        except KeyError:
            logging.error(T('Incorrect RSS feed description "%s"'), feed)
            logging.info("Traceback: ", exc_info=True)
            return T('Incorrect RSS feed description "%s"') % feed

        uri = feeds.uri()
        defCat = feeds.cat()
        import sabnzbd.api
        if not notdefault(defCat) or defCat not in sabnzbd.api.list_cats(default=False):
            defCat = None
        defPP = feeds.pp()
        if not notdefault(defPP):
            defPP = None
        defScript = feeds.script()
        if not notdefault(defScript):
            defScript = None
        defPrio = feeds.priority()
        if not notdefault(defPrio):
            defPrio = None

        # Preparations, convert filters to regex's
        regexes = []
        reTypes = []
        reCats = []
        rePPs = []
        rePrios = []
        reScripts = []
        reEnabled = []
        for filter in feeds.filters():
            reCat = filter[0]
            if defCat in ('', '*'):
                reCat = None
            reCats.append(reCat)
            rePPs.append(filter[1])
            reScripts.append(filter[2])
            reTypes.append(filter[3])
            if filter[3] in ('<', '>', 'F'):
                regexes.append(filter[4])
            else:
                regexes.append(convert_filter(filter[4]))
            rePrios.append(filter[5])
            reEnabled.append(filter[6] != '0')
        regcount = len(regexes)

        # Set first if this is the very first scan of this URI
        first = (feed not in self.jobs) and ignoreFirst

        # Add sabnzbd's custom User Agent
        feedparser.USER_AGENT = 'SABnzbd+/%s' % sabnzbd.version.__version__

        # Check for nzbs.org
        if 'nzbs.org/' in uri and '&dl=1' not in uri:
            uri += '&dl=1'

        # Read the RSS feed
        msg = None
        entries = None
        if readout:
            uri = uri.replace(' ', '%20')
            logging.debug("Running feedparser on %s", uri)
            d = feedparser.parse(uri.replace('feed://', 'http://'))
            logging.debug("Done parsing %s", uri)
            if not d:
                msg = T('Failed to retrieve RSS from %s: %s') % (uri, '?')
                logging.info(msg)
                return unicoder(msg)

            status = d.get('status', 999)
            if status in (401, 402, 403):
                msg = T('Do not have valid authentication for feed %s') % feed
                logging.info(msg)
                return unicoder(msg)
            if status >= 500 and status <= 599:
                msg = T('Server side error (server code %s); could not get %s on %s') % (status, feed, uri)
                logging.info(msg)
                return unicoder(msg)

            entries = d.get('entries')
            if 'bozo_exception' in d and not entries:
                msg = str(d['bozo_exception'])
                if 'CERTIFICATE_VERIFY_FAILED' in msg:
                    msg = T('Server %s uses an untrusted HTTPS certificate') % get_urlbase(uri)
                    logging.error(msg)
                else:
                    msg = T('Failed to retrieve RSS from %s: %s') % (uri, xml_name(msg))
                logging.info(msg)
                return unicoder(msg)
            if not entries:
                msg = T('RSS Feed %s was empty') % uri
                logging.info(msg)

        if feed not in self.jobs:
            self.jobs[feed] = {}
        jobs = self.jobs[feed]
        if readout:
            if not entries:
                return unicoder(msg)
        else:
            entries = jobs.keys()
            # Sort in the order the jobs came from the feed
            entries.sort(lambda x, y: jobs[x].get('order', 0) - jobs[y].get('order', 0))

        order = 0
        # Filter out valid new links
        for entry in entries:
            if self.shutdown:
                return

            if readout:
                try:
                    link, category, size = _get_link(uri, entry)
                except (AttributeError, IndexError):
                    link = None
                    category = u''
                    size = 0L
                    logging.info(T('Incompatible feed') + ' ' + uri)
                    logging.info("Traceback: ", exc_info=True)
                    return T('Incompatible feed')
                title = entry.title
            else:
                link = entry
                category = jobs[link].get('orgcat', '')
                if category in ('', '*'):
                    category = None
                title = jobs[link].get('title', '')
                size = jobs[link].get('size', 0L)

            if link:
                # Make sure spaces are quoted in the URL
                link = link.strip().replace(' ', '%20')

                newlinks.append(link)

                if link in jobs:
                    jobstat = jobs[link].get('status', ' ')[0]
                else:
                    jobstat = 'N'
                if jobstat in 'NGB' or (jobstat == 'X' and readout):
                    # Match this title against all filters
                    logging.debug('Trying title %s', title)
                    result = False
                    myCat = defCat
                    myPP = defPP
                    myScript = defScript
                    myPrio = defPrio
                    n = 0
                    if 'F' in reTypes:
                        season, episode = sabnzbd.newsunpack.analyse_show(title)[1:3]
                        season = int_conv(season)
                        episode = int_conv(episode)
                    else:
                        season = episode = 0

                    # Match against all filters until a positive or negative match
                    logging.debug('Size %s for %s', size, title)
                    for n in xrange(regcount):
                        if reEnabled[n]:
                            if category and reTypes[n] == 'C':
                                found = re.search(regexes[n], category)
                                if not found:
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break
                            elif reTypes[n] == '<' and size and from_units(regexes[n]) < size:
                                # "Size at most" : too large
                                logging.debug('Filter rejected on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == '>' and size and from_units(regexes[n]) > size:
                                # "Size at least" : too small
                                logging.debug('Filter rejected on rule %d', n)
                                result = False
                                break
                            elif reTypes[n] == 'F' and not ep_match(season, episode, regexes[n]):
                                # "Starting from SxxEyy", too early episode
                                logging.debug('Filter requirement match on rule %d', n)
                                result = False
                                break
                            else:
                                if regexes[n]:
                                    found = re.search(regexes[n], title)
                                else:
                                    found = False
                                if reTypes[n] == 'M' and not found:
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break
                                if found and reTypes[n] == 'A':
                                    logging.debug("Filter matched on rule %d", n)
                                    result = True
                                    break
                                if found and reTypes[n] == 'R':
                                    logging.debug("Filter rejected on rule %d", n)
                                    result = False
                                    break

                    if len(reCats):
                        if notdefault(reCats[n]):
                            myCat = reCats[n]
                        elif category and not defCat:
                            myCat = cat_convert(category)
                        if myCat:
                            myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
                        else:
                            myCat = catPP = catScript = catPrio = None
                        if notdefault(rePPs[n]):
                            myPP = rePPs[n]
                        elif not (reCats[n] or category):
                            myPP = catPP
                        if notdefault(reScripts[n]):
                            myScript = reScripts[n]
                        elif not (notdefault(reCats[n]) or category):
                            myScript = catScript
                        if rePrios[n] not in (str(DEFAULT_PRIORITY), ''):
                            myPrio = rePrios[n]
                        elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
                            myPrio = catPrio

                    if cfg.no_dupes() and dup_title(title):
                        if cfg.no_dupes() == 1:
                            logging.info("Ignoring duplicate job %s", title)
                            continue
                        else:
                            myPrio = DUP_PRIORITY

                    act = download and not first
                    if link in jobs:
                        act = act and not jobs[link].get('status', '').endswith('*')
                        act = act or force
                        star = first or jobs[link].get('status', '').endswith('*')
                    else:
                        star = first
                    if result:
                        _HandleLink(jobs, link, title, size, 'G', category, myCat, myPP, myScript,
                                    act, star, order, priority=myPrio, rule=str(n))
                        if act:
                            new_downloads.append(title)
                    else:
                        _HandleLink(jobs, link, title, size, 'B', category, myCat, myPP, myScript,
                                    False, star, order, priority=myPrio, rule=str(n))
            order += 1

        # Send email if wanted and not "forced"
        if new_downloads and cfg.email_rss() and not force:
            emailer.rss_mail(feed, new_downloads)

        remove_obsolete(jobs, newlinks)
        return ''
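
In run_feed above, the '<' and '>' size filters compare the parsed limit against the job size: '<' ("size at most") rejects jobs larger than the limit, '>' ("size at least") rejects jobs smaller than it. A stand-alone sketch of that logic follows, with a simplified stand-in for SABnzbd's from_units parser:

def from_units(value):
    # Simplified stand-in for SABnzbd's unit parser; handles "500M"-style input.
    units = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}
    value = value.strip()
    if value and value[-1].upper() in units:
        return float(value[:-1]) * units[value[-1].upper()]
    return float(value or 0)

def size_filter_rejects(filter_type, limit, size):
    # Mirrors the run_feed checks: a zero/unknown size is never rejected.
    if filter_type == '<':   # "size at most": reject when too large
        return bool(size) and from_units(limit) < size
    if filter_type == '>':   # "size at least": reject when too small
        return bool(size) and from_units(limit) > size
    return False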
Example #25
0
    def run(self):
        logging.info('URLGrabber starting up')
        self.shutdown = False

        while not self.shutdown:
            # Don't pound the website!
            time.sleep(5.0)

            (url, future_nzo, retry_count) = self.queue.get()
            if not url:
                continue
            url = url.replace(' ', '')

            try:
                del_bookmark = not future_nzo
                if future_nzo:
                    # If nzo entry deleted, give up
                    try:
                        deleted = future_nzo.deleted
                    except:
                        deleted = True
                    if deleted:
                        logging.debug('Dropping URL %s, job entry missing',
                                      url)
                        continue

                # Add nzbmatrix credentials if needed
                url, matrix_id = _matrix_url(url)

                # If still within the nzbmatrix wait period, requeue
                if matrix_id and self.matrix_wait > time.time():
                    self.queue.put((url, future_nzo, retry_count))
                    continue

                # _grab_url cannot reside in a function, because the tempfile
                # would not survive the end of the function
                if del_bookmark:
                    logging.info('Removing nzbmatrix bookmark %s', matrix_id)
                else:
                    logging.info('Grabbing URL %s', url)
                opener = urllib.FancyURLopener({})
                opener.prompt_user_passwd = None
                opener.addheaders = []
                opener.addheader('User-Agent',
                                 'SABnzbd+/%s' % sabnzbd.version.__version__)
                opener.addheader('Accept-encoding', 'gzip')
                filename = None
                category = None
                length = 0
                nzo_info = {}
                try:
                    fn, header = opener.retrieve(url)
                except:
                    fn = None

                if fn:
                    for tup in header.items():
                        try:
                            item = tup[0].lower()
                            value = tup[1].strip()
                        except:
                            continue
                        if item in ('category_id', 'x-dnzb-category'):
                            category = value
                        elif item in ('x-dnzb-moreinfo', ):
                            nzo_info['more_info'] = value
                        elif item in ('x-dnzb-name', ):
                            filename = value
                            if not filename.endswith('.nzb'):
                                filename += '.nzb'
                        elif item in ('content-length', ):
                            length = misc.int_conv(value)

                        if not filename:
                            for item in tup:
                                if "filename=" in item:
                                    filename = item[item.index("filename=") +
                                                    9:].strip(';').strip('"')

                if matrix_id:
                    fn, msg, retry, wait = _analyse_matrix(fn, matrix_id)
                    if retry and wait > 0:
                        self.matrix_wait = time.time() + wait
                        logging.debug('Retry URL %s after waiting', url)
                        self.queue.put((url, future_nzo, retry_count))
                        continue
                    category = _MATRIX_MAP.get(category, category)
                else:
                    msg = ''
                    retry = True

                # Check if the filepath is specified; if not, check whether a retry is allowed
                if not fn:
                    retry_count -= 1
                    if retry_count > 0 and retry:
                        logging.info('Retry URL %s', url)
                        self.queue.put((url, future_nzo, retry_count))
                    elif not del_bookmark:
                        misc.bad_fetch(future_nzo, url, msg, retry=True)
                    continue

                if del_bookmark:
                    continue

                if not filename:
                    filename = os.path.basename(url) + '.nzb'
                # Sanitize and trim name, preserving the extension
                filename, ext = os.path.splitext(filename)
                filename = misc.sanitize_foldername(filename)
                filename += '.' + misc.sanitize_foldername(ext)

                pp = future_nzo.pp
                script = future_nzo.script
                cat = future_nzo.cat
                if (cat is None or cat == '*') and category:
                    cat = misc.cat_convert(category)
                priority = future_nzo.priority
                nzbname = future_nzo.custom_name

                # Check if nzb file
                if os.path.splitext(filename)[1].lower() in ('.nzb', '.gz'):
                    res = dirscanner.ProcessSingleFile(filename, fn, pp=pp, script=script, cat=cat, priority=priority,
                                                       nzbname=nzbname, nzo_info=nzo_info, url=future_nzo.url)
                    if res == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id,
                                           add_to_history=False)
                    elif res == -2:
                        retry_count -= 1
                        if retry_count > 0:
                            logging.info('Incomplete NZB, retry %s', url)
                            self.queue.put((url, future_nzo, retry_count))
                        else:
                            misc.bad_fetch(future_nzo,
                                           url,
                                           retry=True,
                                           content=True)
                    else:
                        misc.bad_fetch(future_nzo,
                                       url,
                                       retry=True,
                                       content=True)
                # Check if a supported archive
                else:
                    if dirscanner.ProcessArchiveFile(filename,
                                                     fn,
                                                     pp,
                                                     script,
                                                     cat,
                                                     priority=priority,
                                                     url=future_nzo.url) == 0:
                        NzbQueue.do.remove(future_nzo.nzo_id,
                                           add_to_history=False)
                    else:
                        # Not a supported filetype, not an nzb (text/html etc.)
                        try:
                            os.remove(fn)
                        except:
                            pass
                        misc.bad_fetch(future_nzo,
                                       url,
                                       retry=True,
                                       content=True)
            except:
                logging.error('URLGRABBER CRASHED', exc_info=True)
                logging.debug("URLGRABBER Traceback: ", exc_info=True)