예제 #1
0
 def test_womble(self):
     """Validate that the womble RSS feed parses and each entry is cacheable."""
     RSSFeeds().clearCache()
     feed_data = RSSFeeds().getFeed(
         'https://newshost.co.za/rss/?sec=tv-sd&fr=false')
     # A well-formed parse always exposes both top-level keys.
     self.assertTrue('entries' in feed_data)
     self.assertTrue('feed' in feed_data)
     # Every entry must survive the cache item parser.
     for entry in feed_data['entries']:
         self.assertTrue(TVCache._parseItem(entry))
예제 #2
0
    def getRSSFeed(self, url, post_data=None, items=None):
        """Fetch and parse an RSS feed, honouring provider/global proxy settings.

        :param url: feed URL to fetch
        :param post_data: optional POST payload passed through to the fetcher
        :param items: optional list of item field names to extract
            (defaults to an empty list)
        :return: whatever RSSFeeds.getFeed returns for this provider
        """
        # BUG FIX: `items` previously defaulted to a mutable list ([]), which
        # is created once and shared across every call; use a None sentinel
        # and build a fresh list per call instead.
        if items is None:
            items = []
        handlers = []

        if self.provider.proxy.isEnabled():
            # Provider-level proxy: only a Referer header is needed.
            self.provider.headers.update(
                {'Referer': self.provider.proxy.getProxyURL()})
        elif sickbeard.PROXY_SETTING:
            logger.log("Using proxy for url: " + url, logger.DEBUG)
            # Default to http:// when the configured proxy has no scheme.
            scheme, address = urllib2.splittype(sickbeard.PROXY_SETTING)
            address = sickbeard.PROXY_SETTING if scheme else 'http://' + sickbeard.PROXY_SETTING
            handlers = [
                urllib2.ProxyHandler({
                    'http': address,
                    'https': address
                })
            ]
            self.provider.headers.update({'Referer': address})
        elif 'Referer' in self.provider.headers:
            # No proxy in play: drop any stale Referer from a previous call.
            self.provider.headers.pop('Referer')

        return RSSFeeds(self.providerID).getFeed(
            self.provider.proxy._buildURL(url),
            post_data,
            self.provider.headers,
            items,
            handlers=handlers)
예제 #3
0
    def _search_provider(self, search_params, **kwargs):
        """Run the given searches against the provider's RSS endpoint.

        :param search_params: mapping of mode name -> list of search strings
        :return: de-duplicated list of (title, link, None, None) tuples
        """
        self._authorised()
        results = []

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        base_url = self.urls['browse'] % self.passkey
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                # Transliterate unicode terms; keep the original if the
                # transliteration is empty or not applicable.
                decoded = isinstance(search_string, unicode) and unidecode(search_string)
                search_string = decoded or search_string
                if 'Cache' == mode:
                    search_url = base_url
                else:
                    search_url = base_url + (self.urls['search'] % search_string)

                xml_data = RSSFeeds(self).get_feed(search_url)

                before = len(items[mode])
                if xml_data and 'entries' in xml_data:
                    for entry in xml_data['entries']:
                        try:
                            # Only keep entries that look like download links.
                            if entry['title'] and 'download' in entry['link']:
                                items[mode].append((entry['title'], entry['link'], None, None))
                        except KeyError:
                            continue

                self._log_search(mode, len(items[mode]) - before, search_url)

            results = list(set(results + items[mode]))

        return results
예제 #4
0
 def getRSSFeed(self, url, post_data=None, items=None):
     """Fetch an RSS feed through the provider's proxy-aware fetcher.

     :param url: feed URL to fetch
     :param post_data: optional POST payload forwarded to the fetcher
     :param items: optional list of item field names (defaults to empty list)
     """
     # BUG FIX: a mutable [] default would be created once and shared across
     # calls; use a None sentinel and allocate a fresh list per call.
     if items is None:
         items = []
     if self.provider.proxy.isEnabled():
         self.provider.headers.update(
             {'Referer': self.provider.proxy.getProxyURL()})
     return RSSFeeds(self.providerID).getFeed(
         self.provider.proxy._buildURL(url), post_data,
         self.provider.headers, items)
예제 #5
0
    def __init__(self, name, url, cookies='', search_mode='eponly',
                 search_fallback=False, enable_recentsearch=False,
                 enable_backlog=False):
        """Set up a torrent RSS provider with its feeder and cache helpers."""
        generic.TorrentProvider.__init__(self, name)

        # Normalise the feed URL (strip any trailing slash) and keep the
        # raw cookie string as configured by the user.
        self.url = url.rstrip('/')
        self.cookies = cookies

        self.search_mode = search_mode
        self.search_fallback = search_fallback
        self.enable_recentsearch = enable_recentsearch
        self.enable_backlog = enable_backlog

        self.feeder = RSSFeeds(self)
        self.cache = TorrentRssCache(self)
예제 #6
0
    def __init__(self,
                 name,
                 url,
                 cookies='',
                 search_mode='eponly',
                 search_fallback=False,
                 enable_recentsearch=False,
                 enable_backlog=False):
        """Initialise a torrent RSS provider plus its feed and cache helpers."""
        generic.TorrentProvider.__init__(self, name)

        # Feed location (no trailing slash) and user-supplied cookie string.
        self.url = url.rstrip('/')
        self.cookies = cookies

        self.search_mode = search_mode
        self.search_fallback = search_fallback
        self.enable_recentsearch = enable_recentsearch
        self.enable_backlog = enable_backlog

        self.feeder = RSSFeeds(self)
        self.cache = TorrentRssCache(self)
예제 #7
0
    def __init__(self, name, url, cookies='', search_mode='eponly',
                 search_fallback=False, enable_recentsearch=False,
                 enable_backlog=False):
        """Initialise a torrent RSS provider with normalised config values."""
        # Backlog support must be decided first: the base class needs it.
        self.enable_backlog = bool(tryInt(enable_backlog))
        generic.TorrentProvider.__init__(
            self, name, supports_backlog=self.enable_backlog,
            cache_update_freq=15)

        self.url = url.rstrip('/')
        self.url_base = self.url
        self.cookies = cookies

        # Recent search is forced on whenever backlog is disabled.
        self.enable_recentsearch = (bool(tryInt(enable_recentsearch))
                                    or not self.enable_backlog)
        self.search_mode = search_mode
        self.search_fallback = bool(tryInt(search_fallback))

        self.feeder = RSSFeeds(self)
예제 #8
0
    def get_cache_data(self):
        """Return the latest feed entries for the cache (or [] on failure).

        Falls back to HTML scraping when the API is unusable.
        """
        api_key = self._init_api()
        # A literal False key means the API path is unusable -> scrape HTML.
        if api_key is False:
            return self.search_html()
        if api_key is None:
            return []

        # catid 19,20 = SD and HD TV categories; eng=1 restricts to English.
        params = urllib.urlencode({
            'user': self.username,
            'api': api_key,
            'eng': 1,
            'catid': '19,20'})
        rss_url = self.urls['cache'] % params

        logger.log(self.name + u' cache update URL: ' + rss_url,
                   logger.DEBUG)

        data = RSSFeeds(self).get_feed(rss_url)
        if data and 'entries' in data:
            return data.entries
        return []
예제 #9
0
 def getRSSFeed(self, url, post_data=None, request_headers=None):
     """Delegate feed retrieval to a provider-scoped RSSFeeds instance."""
     feeds = RSSFeeds(self.providerID)
     return feeds.getFeed(url, post_data, request_headers)
예제 #10
0
class TorrentRssProvider(generic.TorrentProvider):
    """Torrent provider driven by a user-configured RSS feed."""

    def __init__(self,
                 name,
                 url,
                 cookies='',
                 search_mode='eponly',
                 search_fallback=False,
                 enable_recentsearch=False,
                 enable_backlog=False):
        """Initialise the provider plus its feed reader and result cache."""
        generic.TorrentProvider.__init__(self, name)

        # Feed URL without trailing slash; raw cookie string as configured.
        self.url = url.rstrip('/')
        self.cookies = cookies

        self.enable_recentsearch = enable_recentsearch
        self.enable_backlog = enable_backlog
        self.search_mode = search_mode
        self.search_fallback = search_fallback

        self.feeder = RSSFeeds(self)
        self.cache = TorrentRssCache(self)

    def image_name(self):
        """Return the generic torrent-RSS provider icon name."""
        return generic.GenericProvider.image_name(self, 'torrentrss')

    def config_str(self):
        """Serialise the provider settings as a pipe-delimited string."""
        return '%s|%s|%s|%d|%s|%d|%d|%d' % (
            self.name or '', self.url or '', self.cookies
            or '', self.enabled, self.search_mode or '', self.search_fallback,
            self.enable_recentsearch, self.enable_backlog)

    def _get_title_and_url(self, item):
        """Extract (title, url) from a parsed feed item.

        Tries magnet URI, then first enclosure, then plain link; either
        value may come back None when the item lacks usable fields.
        """
        title, url = None, None

        if item.title:
            # Normalise whitespace in the release name to dots.
            title = re.sub(r'\s+', '.', u'' + item.title)

        attempt_list = [
            lambda: item.torrent_magneturi, lambda: item.enclosures[0].href,
            lambda: item.link
        ]

        for cur_attempt in attempt_list:
            try:
                url = cur_attempt()
            # BUG FIX: was a bare `except:` which also swallows SystemExit/
            # KeyboardInterrupt; narrowed to match the project's other
            # torrent-RSS provider variant.
            except (StandardError, Exception):
                continue

            if title and url:
                break

        return title, url

    def validate_feed(self):
        """Check the cookie and verify the feed yields at least one item
        that is a valid magnet link or bdecodable torrent.

        :return: (success, error_message_or_None)
        """
        success, err_msg = self._check_cookie()
        if not success:
            return success, err_msg

        try:
            items = self.get_cache_data()

            for item in items:
                title, url = self._get_title_and_url(item)
                if not (title and url):
                    continue
                if url.startswith('magnet:'):
                    # A btih info-hash is enough to call the magnet valid.
                    if re.search('urn:btih:([0-9a-f]{32,40})', url):
                        break
                else:
                    torrent_file = self.get_url(url)
                    try:
                        bdecode(torrent_file)
                        break
                    except Exception:
                        pass
            else:
                # Loop completed without break: nothing validated.
                return False, '%s fetched RSS feed data: %s' % \
                              (('Fail to validate', 'No items found in the')[0 == len(items)], self.url)

            return True, None

        except Exception as e:
            return False, 'Error when trying to load RSS: ' + ex(e)

    def get_cache_data(self):
        """Fetch the feed and return its entries ([] when unavailable)."""
        logger.log(u'TorrentRssCache cache update URL: ' + self.url,
                   logger.DEBUG)

        data = self.feeder.get_feed(self.url)

        return [] if not (data and 'entries' in data) else data.entries
예제 #11
0
 def test_newznab(self):
     """Confirm a newznab capabilities feed parses and exposes categories."""
     RSSFeeds().clearCache()
     feed_data = RSSFeeds().getFeed('http://lolo.sickbeard.com/api?t=caps')
     # Top-level keys present on every successful parse.
     self.assertTrue('entries' in feed_data)
     self.assertTrue('feed' in feed_data)
     # Capabilities responses must carry the category list.
     self.assertTrue('categories' in feed_data.feed)
예제 #12
0
class TorrentRssProvider(generic.TorrentProvider):
    """Torrent provider driven by a user-configured RSS feed."""

    def __init__(self, name, url, cookies='', search_mode='eponly', search_fallback=False,
                 enable_recentsearch=False, enable_backlog=False):
        """Initialise the provider plus its feed reader and result cache."""
        generic.TorrentProvider.__init__(self, name)

        # Feed URL without trailing slash; raw cookie string as configured.
        self.url = url.rstrip('/')
        self.cookies = cookies

        self.enable_recentsearch = enable_recentsearch
        self.enable_backlog = enable_backlog
        self.search_mode = search_mode
        self.search_fallback = search_fallback

        self.feeder = RSSFeeds(self)
        self.cache = TorrentRssCache(self)

    def image_name(self):
        """Return the generic torrent-RSS provider icon name."""
        return generic.GenericProvider.image_name(self, 'torrentrss')

    def config_str(self):
        """Serialise the provider settings as a pipe-delimited string."""
        return '%s|%s|%s|%d|%s|%d|%d|%d' % (self.name or '',
                                            self.url or '',
                                            self.cookies or '',
                                            self.enabled,
                                            self.search_mode or '',
                                            self.search_fallback,
                                            self.enable_recentsearch,
                                            self.enable_backlog)

    def _title_and_url(self, item):
        """Extract (title, url) from a parsed feed item.

        Tries magnet URI, then first enclosure, then plain link; either
        value may come back None when the item lacks usable fields.
        """
        title, url = None, None

        if item.title:
            # Normalise whitespace in the release name to dots.
            title = re.sub(r'\s+', '.', u'' + item.title)

        attempt_list = [lambda: item.torrent_magneturi,

                        lambda: item.enclosures[0].href,

                        lambda: item.link]

        for cur_attempt in attempt_list:
            try:
                url = cur_attempt()
            # BUG FIX: was a bare `except:` which also swallows SystemExit/
            # KeyboardInterrupt; narrowed to match the project's other
            # torrent-RSS provider variant.
            except (StandardError, Exception):
                continue

            if title and url:
                break

        return title, url

    def validate_feed(self):
        """Check the cookie and verify the feed yields at least one item
        that is a valid magnet link or bdecodable torrent.

        :return: (success, error_message_or_None)
        """
        success, err_msg = self._check_cookie()
        if not success:
            return success, err_msg

        try:
            items = self.cache_data()

            for item in items:
                title, url = self._title_and_url(item)
                if not (title and url):
                    continue
                if url.startswith('magnet:'):
                    # A btih info-hash is enough to call the magnet valid.
                    if re.search('urn:btih:([0-9a-f]{32,40})', url):
                        break
                else:
                    torrent_file = self.get_url(url)
                    try:
                        bdecode(torrent_file)
                        break
                    except Exception:
                        pass
            else:
                # Loop completed without break: nothing validated.
                return False, '%s fetched RSS feed data: %s' % \
                              (('Fail to validate', 'No items found in the')[0 == len(items)], self.url)

            return True, None

        except Exception as e:
            return False, 'Error when trying to load RSS: ' + ex(e)

    def cache_data(self):
        """Fetch the feed and return its entries ([] when unavailable)."""
        logger.log(u'TorrentRssCache cache update URL: ' + self.url, logger.DEBUG)

        data = self.feeder.get_feed(self.url)

        return [] if not (data and 'entries' in data) else data.entries
예제 #13
0
 def getRSSFeed(self, url):
     """Fetch and parse *url* using a provider-scoped RSSFeeds instance."""
     feeds = RSSFeeds(self.provider)
     return feeds.get_feed(url)
예제 #14
0
 def get_rss(self, url, **kwargs):
     """Fetch *url* via RSSFeeds, forwarding any extra keyword arguments."""
     feeds = RSSFeeds(self.provider)
     return feeds.get_feed(url, **kwargs)
예제 #15
0
class TorrentRssProvider(generic.TorrentProvider):
    """Torrent provider driven by a user-configured RSS feed."""

    def __init__(self,
                 name,
                 url,
                 cookies='',
                 search_mode='eponly',
                 search_fallback=False,
                 enable_recentsearch=False,
                 enable_backlog=False):
        """Initialise the provider with normalised boolean settings."""
        # Backlog support must be decided first: the base class needs it.
        self.enable_backlog = bool(tryInt(enable_backlog))
        generic.TorrentProvider.__init__(self,
                                         name,
                                         supports_backlog=self.enable_backlog,
                                         cache_update_freq=15)

        self.url = url.rstrip('/')
        self.url_base = self.url
        self.cookies = cookies

        # Recent search is forced on whenever backlog is disabled.
        self.enable_recentsearch = bool(
            tryInt(enable_recentsearch)) or not self.enable_backlog
        self.search_mode = search_mode
        self.search_fallback = bool(tryInt(search_fallback))

        self.feeder = RSSFeeds(self)

    def image_name(self):
        """Return the generic torrent-RSS provider icon name."""
        return generic.GenericProvider.image_name(self, 'torrentrss')

    def config_str(self):
        """Serialise the provider settings as a pipe-delimited string."""
        return '%s|%s|%s|%d|%s|%d|%d|%d' % (
            self.name or '', self.url or '', self.cookies
            or '', self.enabled, self.search_mode or '', self.search_fallback,
            self.enable_recentsearch, self.enable_backlog)

    def _title_and_url(self, item):
        """Extract (title, url) from a parsed feed item.

        Tries magnet URI, then first enclosure, then plain link; either
        value may come back None when the item lacks usable fields.
        """
        title, url = None, None

        if item.title:
            # Normalise whitespace in the release name to dots.
            title = re.sub(r'\s+', '.', u'' + item.title)

        attempt_list = [
            lambda: item.torrent_magneturi, lambda: item.enclosures[0].href,
            lambda: item.link
        ]

        for cur_attempt in attempt_list:
            try:
                url = cur_attempt()
            except (StandardError, Exception):
                continue

            if title and url:
                break

        return title, url

    def validate_feed(self):
        """Check the cookie and verify the feed yields at least one item
        that is a valid magnet link or bdecodable torrent.

        :return: (success, error_message_or_None)
        """
        success, err_msg = self._check_cookie()
        if not success:
            return success, err_msg

        try:
            items = self._search_provider({'Validate': ['']})

            for item in items:
                title, url = self._title_and_url(item)
                if not (title and url):
                    continue
                if url.startswith('magnet:'):
                    # A btih info-hash is enough to call the magnet valid.
                    if re.search('urn:btih:([0-9a-f]{32,40})', url):
                        break
                else:
                    torrent_file = self.get_url(url)
                    try:
                        bdecode(torrent_file)
                        break
                    except (StandardError, Exception):
                        pass
            else:
                # Loop completed without break: nothing validated.
                return False, '%s fetched RSS feed data: %s' % \
                              (('Fail to validate', 'No items found in the')[0 == len(items)], self.url)

            return True, None

        except Exception as e:
            return False, 'Error when trying to load RSS: ' + ex(e)

    def _search_provider(self, search_params, **kwargs):
        """Fetch the configured feed once per search mode and collect entries.

        :param search_params: mapping of mode name -> search strings (the
            strings are unused; the raw feed is fetched for every mode)
        :return: accumulated list of feed entries
        """
        result = []
        for mode in search_params.keys():
            data = self.feeder.get_feed(self.url)

            # IDIOM FIX: replaced the fragile `cond and a or b` chain with an
            # explicit conditional expression (equivalent here, since the
            # fallback value is [] either way).
            result += data.entries if (data and 'entries' in data) else []

            self.log_result(mode, count=len(result), url=self.url)

        return result