Code Example #1
    def findPropers(self, date=None):

        results = []

        for i in [2, 3, 4]:  # we will look for versions 2, 3 and 4
            """
            The proper search used to fail here because _doSearch accepts a
            plain query string, not a dict of params such as (see the sketch
            after this example):
            params = {
                "q": "v" + str(i).encode('utf-8')
            }
            """
            for curResult in self._doSearch("v" + str(i)):

                match = re.search(r'(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', curResult.findtext('pubDate'))
                if not match:
                    continue

                dateString = match.group(1)
                resultDate = parseDate(dateString).replace(tzinfo=None)

                if date is None or resultDate > date:
                    results.append(classes.Proper(curResult.findtext('title'), curResult.findtext('link'), resultDate))

        return results
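
As the docstring notes, _doSearch only takes a plain query string, so a params dict like the commented-out one cannot be passed in directly. Below is a minimal sketch of how such a dict could be URL-encoded by hand; searchURL, build_feed_url and the example URL are illustrative placeholders, not part of the original provider code.

    # Sketch only: nothing here comes from the original _doSearch implementation.
    from urllib.parse import urlencode  # urllib.urlencode on Python 2

    searchURL = "https://provider.example/rss/?"  # placeholder base URL

    def build_feed_url(version):
        # if the provider feed were queried directly, the params dict from the
        # docstring would have to be URL-encoded into the request URL like this
        params = {"q": "v" + str(version)}
        return searchURL + urlencode(params)

    print(build_feed_url(2))  # https://provider.example/rss/?q=v2
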
Code Example #2
    def findPropers(self, date=None):

        results = []

        for i in [2, 3, 4]:  # we will look for versions 2, 3 and 4
            """
            The proper search used to fail here because _doSearch accepts a
            plain query string, not a dict of params such as:
            params = {
                "q": "v" + str(i).encode('utf-8')
            }
            """
            for curResult in self._doSearch("v" + str(i)):

                match = re.search(
                    r'(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}',
                    curResult.findtext('pubDate'))
                if not match:
                    continue

                dateString = match.group(1)
                resultDate = parseDate(dateString).replace(tzinfo=None)

                if date is None or resultDate > date:
                    results.append(
                        classes.Proper(curResult.findtext('title'),
                                       curResult.findtext('link'), resultDate))

        return results
Code Example #3
File: nzbclub.py Project: Elky666/SickRage
    def findPropers(self, date=None):

        results = []

        for curResult in self._doSearch("(PROPER,REPACK)"):

            (title, url) = self._get_title_and_url(curResult)

            pubDate_node = curResult.find('pubDate')
            pubDate = helpers.get_xml_text(pubDate_node)
            dateStr = re.search(r'(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', pubDate)
            if not dateStr:
                logger.log(u"Unable to figure out the date for entry "+title+", skipping it")
                continue
            else:
                resultDate = parseDate(dateStr.group(1)).replace(tzinfo=None)

            if date is None or resultDate > date:
                results.append(classes.Proper(title, url, resultDate))

        return results
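
All of these providers extract the date the same way: the regex keeps the RFC 2822 timestamp from pubDate but leaves the trailing UTC offset outside the capture group, and dateutil's parse (imported as parseDate) then yields a naive datetime that can be compared with the naive date argument. A small standalone sketch of just that step; the sample pubDate value is made up for illustration.

    import re
    from dateutil.parser import parse as parseDate

    pubDate = "Tue, 05 Mar 2013 14:02:10 +0000"  # made-up sample value

    match = re.search(r'(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', pubDate)
    if match:
        # the offset stays outside the capture group, so the parsed datetime is
        # already naive; replace(tzinfo=None) only guards against stray tz info
        resultDate = parseDate(match.group(1)).replace(tzinfo=None)
        print(resultDate)  # 2013-03-05 14:02:10
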
Code Example #4
    def findPropers(self, date=None):

        results = []

        for curResult in self._doSearch("PROPER | REPACK"):

            (title, url) = self._get_title_and_url(curResult)

            pubDate_node = curResult.getElementsByTagName('pubDate')[0]
            pubDate = helpers.get_xml_text(pubDate_node)
            dateStr = re.search(r'(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', pubDate)
            if not dateStr:
                logger.log(u"Unable to figure out the date for entry "+title+", skipping it")
                continue
            else:
                resultDate = parseDate(dateStr.group(1)).replace(tzinfo=None)

            if date is None or resultDate > date:
                results.append(classes.Proper(title, url, resultDate))

        return results
Code Example #5
File: newzbin.py Project: Bryan792/Sick-Beard
        for cur_item in items:
            title = helpers.get_xml_text(
                cur_item.getElementsByTagName('title')[0])
            if title == 'Feeds Error':
                raise exceptions.AuthException(
                    "The feed wouldn't load, probably because of invalid auth info"
                )
            if sickbeard.USENET_RETENTION is not None:
                try:
                    dateString = helpers.get_xml_text(
                        cur_item.getElementsByTagName('report:postdate')[0])
                    # use the parse function from dateutil (imported as parseDate)
                    # and strip the timezone info, because retention_date is naive
                    # and comparing naive and aware datetimes is not possible
                    post_date = parseDate(dateString).replace(tzinfo=None)
                    retention_date = datetime.now() - timedelta(
                        days=sickbeard.USENET_RETENTION)
                    if post_date < retention_date:
                        logger.log(
                            u"Date " + str(post_date) +
                            " is out of retention range, skipping",
                            logger.DEBUG)
                        continue
                except Exception as e:
                    logger.log(
                        "Error parsing date from Newzbin RSS feed: " + str(e),
                        logger.ERROR)
                    continue

            item_list.append(cur_item)
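
The comment in the retention block explains the tzinfo stripping: datetime.now() is naive, and Python refuses to compare naive and aware datetimes, so the parsed post date has to be made naive first. A minimal standalone sketch of the same check; the retention value and the date string are example values, not taken from any real config or feed.

    from datetime import datetime, timedelta
    from dateutil.parser import parse as parseDate

    USENET_RETENTION = 500  # example retention in days
    dateString = "Tue, 05 Mar 2013 14:02:10 +0000"  # made-up report:postdate value

    # strip the offset so the comparison with the naive datetime.now() works
    post_date = parseDate(dateString).replace(tzinfo=None)
    retention_date = datetime.now() - timedelta(days=USENET_RETENTION)

    if post_date < retention_date:
        print("out of retention range, skipping")
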
Code Example #6
File: newzbin.py Project: jvdpoll/SickBeard-TVRage
            items = data.entries
        except Exception as e:
            logger.log("Error trying to load Newzbin RSS feed: " + ex(e), logger.ERROR)
            return []

        for cur_item in items:
            title = cur_item.title
            if title == 'Feeds Error':
                raise exceptions.AuthException("The feed wouldn't load, probably because of invalid auth info")
            if sickbeard.USENET_RETENTION is not None:
                try:
                    dateString = helpers.get_xml_text(cur_item.getElementsByTagName('report:postdate')[0])
                    # use the parse function from dateutil (imported as parseDate)
                    # and strip the timezone info, because retention_date is naive
                    # and comparing naive and aware datetimes is not possible
                    post_date = parseDate(dateString).replace(tzinfo=None)
                    retention_date = datetime.now() - timedelta(days=sickbeard.USENET_RETENTION)
                    if post_date < retention_date:
                        logger.log(u"Date " + str(post_date) + " is out of retention range, skipping", logger.DEBUG)
                        continue
                except Exception as e:
                    logger.log("Error parsing date from Newzbin RSS feed: " + str(e), logger.ERROR)
                    continue

            item_list.append(cur_item)

        return item_list


    def _getRSSData(self, search=None):