Example #1
    def __init__(self, name, url, private):
        self.name = name

        # urls
        self._urls = {'base_url': url}

        # other options
        self.private = private
        self.supports_backlog = True
        self.supports_absolute_numbering = False
        self.anime_only = False
        self.search_mode = 'eponly'
        self.search_fallback = False
        self.enabled = False
        self.enable_daily = True
        self.enable_backlog = True
        self.cache = TVCache(self)
        self.proper_strings = ['PROPER|REPACK|REAL|RERIP']
        self.search_separator = ' '

        # cookies
        self.enable_cookies = False
        self.cookies = ''

        # web session
        self.session = WebSession(cloudflare=True)
Example #2
    def __init__(self,
                 name,
                 url,
                 private,
                 key='',
                 catIDs='5030,5040',
                 search_mode='eponly',
                 search_fallback=False,
                 enable_daily=False,
                 enable_backlog=False,
                 default=False):
        super(NewznabProvider, self).__init__(name, url, private)

        self.key = key

        self.search_mode = search_mode
        self.search_fallback = search_fallback
        self.enable_daily = enable_daily
        self.enable_backlog = enable_backlog
        self.supports_backlog = True

        self.catIDs = catIDs
        self.default = default

        self.cache = TVCache(self, min_time=30)
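
A quick instantiation sketch, using the SickBeard entry that getDefaultProviders in Example #40 passes to this constructor:

    provider = NewznabProvider('SickBeard', 'lolo.sickbeard.com', False,
                               '', '5030,5040', 'eponly', False, False, False, True)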
Example #3
    def __init__(self, provider_obj):

        TVCache.__init__(self, provider_obj)

        # only poll newznab providers every 30 minutes
        self.minTime = 30
        self.last_search = datetime.datetime.now()
Example #4
    def __init__(self, provider_obj):

        TVCache.__init__(self, provider_obj)

        # only poll newznab providers every 30 minutes
        self.minTime = 30
        self.last_search = datetime.datetime.now()
Example #5
    def __init__(self):
        super(NyaaProvider, self).__init__("NyaaTorrents", 'https://nyaa.si', False)

        self.supports_absolute_numbering = True
        self.anime_only = True
        self.confirmed = False

        self.minseed = None
        self.minleech = None

        self.cache = TVCache(self, min_time=20)
Example #6
    def __init__(self):
        super(NyaaProvider, self).__init__("NyaaTorrents", 'https://nyaa.si', False)

        self.supports_absolute_numbering = True
        self.anime_only = True

        # custom settings
        self.custom_settings = {
            'confirmed': False,
            'minseed': 0,
            'minleech': 0
        }

        self.cache = TVCache(self, min_time=20)
Example #7
    def __init__(self, provider_obj):
        TVCache.__init__(self, provider_obj, min_time=30)
        # only poll Binsearch every 30 minutes max

        # compile and save our regular expressions

        # this pulls the title from the URL in the description
        self.descTitleStart = re.compile(r'^.*https?://www\.binsearch\.info/.b=')
        self.descTitleEnd = re.compile('&.*$')

        # these clean up the horrible mess of a title if the above fail
        self.titleCleaners = [
            re.compile(r'.?yEnc.?\(\d+/\d+\)$'),
            re.compile(r' \[\d+/\d+\] '),
        ]
Example #8
    def __init__(self):
        super(NyaaProvider, self).__init__("NyaaTorrents", 'www.nyaa.se',
                                           False)

        self.supports_backlog = True

        self.supports_absolute_numbering = True
        self.anime_only = True
        self.ratio = None

        self.cache = TVCache(self, min_time=15)

        self.minseed = 0
        self.minleech = 0
        self.confirmed = False
Example #9
    def __init__(self, provider_obj):
        TVCache.__init__(self, provider_obj, min_time=30)
        # only poll Binsearch every 30 minutes max

        # compile and save our regular expressions

        # this pulls the title from the URL in the description
        self.descTitleStart = re.compile(r'^.*https?://www\.binsearch\.info/.b=')
        self.descTitleEnd = re.compile('&.*$')

        # these clean up the horrible mess of a title if the above fail
        self.titleCleaners = [
            re.compile(r'.?yEnc.?\(\d+/\d+\)$'),
            re.compile(r' \[\d+/\d+\] '),
        ]
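
The two description regexes above are presumably applied in sequence with re.sub: the first strips everything up to and including the Binsearch '?b=' marker, the second strips the trailing query string, and the titleCleaners run afterwards as a fallback. A minimal sketch of that flow, with a hypothetical description string:

    import re

    desc_start = re.compile(r'^.*https?://www\.binsearch\.info/.b=')
    desc_end = re.compile('&.*$')

    description = 'details at https://www.binsearch.info/?b=Some.Show.S01E01.720p&p=0'
    title = desc_start.sub('', description)  # drop everything through '?b='
    title = desc_end.sub('', title)          # drop the trailing '&p=0'
    print(title)  # Some.Show.S01E01.720p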
Example #10
    def __init__(self):
        super(AlphaRatioProvider, self).__init__("AlphaRatio", 'alpharatio.cc',
                                                 True)
        self.supports_backlog = True
        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None

        self.urls.update({
            'login':
            '******'.format(base_url=self.urls['base_url']),
            'detail':
            '{base_url}/torrents.php?torrentid=%s'.format(
                base_url=self.urls['base_url']),
            'search':
            '{base_url}/torrents.php?searchstr=%s%s'.format(
                base_url=self.urls['base_url']),
            'download':
            '{base_url}/%s'.format(base_url=self.urls['base_url'])
        })

        self.catagories = "&filter_cat[1]=1&filter_cat[2]=1&filter_cat[3]=1&filter_cat[4]=1&filter_cat[5]=1"

        self.proper_strings = ['PROPER', 'REPACK']

        self.cache = TVCache(self, min_time=20)
Example #11
    def __init__(self):
        super(DanishbitsProvider,
              self).__init__('Danishbits', 'https://danishbits.org', True)

        # URLs
        self._urls.update({
            'login':
            '******'.format(**self._urls),
            'search':
            '{base_url}/couchpotato.php'.format(**self._urls),
        })

        # custom settings
        self.custom_settings = {
            'username': '',
            'passkey': '',
            'freeleech': True,
            'minseed': 0,
            'minleech': 0
        }

        # Proper Strings
        self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']

        # Cache
        self.cache = TVCache(self)
Example #12
    def __init__(self):

        super(TorrentBytesProvider, self).__init__("TorrentBytes", 'http://www.torrentbytes.net', True)

        self.supports_backlog = True

        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None
        self.freeleech = False

        self.urls.update({
            'login': '******'.format(base_url=self.urls['base_url']),
            'detail': '{base_url}/details.php?id=%s'.format(base_url=self.urls['base_url']),
            'search': '{base_url}/browse.php?search=%s%s'.format(base_url=self.urls['base_url']),
            'download': '{base_url}/download.php?id=%s&name=%s'.format(base_url=self.urls['base_url'])
        })

        self.categories = "&c41=1&c33=1&c38=1&c32=1&c37=1"

        self.proper_strings = ['PROPER', 'REPACK']

        self.cache = TVCache(self, min_time=20)
Example #13
    def __init__(self):
        super(BLUETIGERSProvider, self).__init__("BLUETIGERS",
                                                 'www.bluetigers.ca', True)

        self.supports_backlog = True

        self.username = None
        self.password = None
        self.ratio = None
        self.token = None
        self.tokenLastUpdate = None

        self.cache = TVCache(self, min_time=10)

        self.urls.update({
            'search':
            '{base_url}/torrents-search.php'.format(
                base_url=self.urls['base_url']),
            'login':
            '******'.format(
                base_url=self.urls['base_url']),
            'download':
            '{base_url}/torrents-details.php?id=%s&hit=1'.format(
                base_url=self.urls['base_url'])
        })
Example #14
    def __init__(self):
        super(ArcheTorrentProvider,
              self).__init__('ArcheTorrent', 'https://www.archetorrent.com',
                             True)
        # Credentials
        self.username = None
        self.password = None

        # Torrent Stats
        self.minseed = None
        self.minleech = None

        # Freelech
        self.freeleech = False

        # URLs
        self.urls.update({
            'login':
            '******'.format(**self.urls),
            'search':
            '{base_url}/torrents-search.php'.format(**self.urls),
            'download':
            '{base_url}/download.php'.format(**self.urls),
        })

        # Proper Strings
        self.proper_strings = ['PROPER']

        # Cache
        self.cache = TVCache(self, min_time=15)
Example #15
    def __init__(self):

        super(SCCProvider, self).__init__("SceneAccess", 'sceneaccess.eu',
                                          True)

        self.supports_backlog = True

        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None

        self.cache = TVCache(self, min_time=20)

        self.urls.update({
            'login':
            '******'.format(base_url=self.urls['base_url']),
            'detail':
            '{base_url}/details?id=%s'.format(base_url=self.urls['base_url']),
            'search':
            '{base_url}/all?search=%s&method=1&%s'.format(
                base_url=self.urls['base_url']),
            'download':
            '{base_url}/%s'.format(base_url=self.urls['base_url'])
        })

        self.categories = {
            'sponly': 'c26=26&c44=44&c45=45',
            # Archive, non-scene HD, non-scene SD; need to include non-scene because WEB-DL packs get added to those categories
            'eponly': 'c27=27&c17=17&c44=44&c45=45&c33=33&c34=34'
        }  # TV HD, TV SD, non-scene HD, non-scene SD, foreign XviD, foreign x264
Example #16
    def __init__(self):
        super(MoreThanTVProvider, self).__init__("MoreThanTV", 'https://www.morethan.tv', True)

        self._urls.update({
            'login': '******'.format(**self._urls),
            'detail': '{base_url}/torrents.php'
                      '?id=%s'.format(**self._urls),
            'search': '{base_url}/torrents.php'
                      '?tags_type=1'
                      '&order_by=time'
                      '&order_way=desc'
                      '&action=basic'
                      '&searchsubmit=1'
                      '&searchstr=%s'.format(**self._urls),
            'download': '{base_url}/torrents.php'
                        '?action=download'
                        '&id=%s'.format(**self._urls)
        })

        self._uid = None
        self._hash = None
        self.username = None
        self.password = None

        self.minseed = None
        self.minleech = None

        self.cookies = None

        self.proper_strings = ['PROPER', 'REPACK']

        self.cache = TVCache(self)
Example #17
    def __init__(self):
        super(TorrentLeechProvider,
              self).__init__("TorrentLeech", 'https://www.torrentleech.org',
                             True)

        self.urls.update({
            'login':
            '******'.format(**self.urls),
            'search':
            '{base_url}/torrents/browse/list/'.format(**self.urls),
            'download':
            '{base_url}/download/%s/%s'.format(**self.urls),
            'details':
            '{base_url}/download/%s/%s'.format(**self.urls),
        })

        self.username = None
        self.password = None

        # self.enable_cookies = True
        # self.required_cookies = ('tluid', 'tlpass')

        self.minseed = None
        self.minleech = None

        self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']

        self.cache = TVCache(self, min_time=20)
Example #18
    def __init__(self,
                 name,
                 url,
                 private,
                 key='',
                 catIDs='5030,5040',
                 search_mode='eponly',
                 search_fallback=False,
                 enable_daily=False,
                 enable_backlog=False,
                 default=False):
        super(NewznabProvider, self).__init__(name, url, private)

        self.key = key

        self.search_mode = search_mode
        self.search_fallback = search_fallback
        self.enable_daily = enable_daily
        self.enable_backlog = enable_backlog
        self.supports_backlog = True

        self.catIDs = catIDs
        self.default = default

        self.cache = TVCache(self, min_time=30)
Example #19
    def __init__(self):
        super(SCCProvider, self).__init__("SceneAccess",
                                          'http://sceneaccess.eu', True)

        self.urls.update({
            'login':
            '******'.format(**self.urls),
            'detail':
            '{base_url}/details?id=%s'.format(**self.urls),
            'search':
            '{base_url}/all?search=%s&method=1&%s'.format(**self.urls),
            'download':
            '{base_url}/%s'.format(**self.urls)
        })

        self.username = None
        self.password = None

        self.minseed = None
        self.minleech = None

        self.categories = {
            'Season': 'c26=26&c44=44&c45=45',
            # Archive, non-scene HD, non-scene SD; need to include non-scene because WEB-DL packs get added to those categories
            'Episode': 'c17=17&c27=27&c33=33&c34=34&c44=44&c45=45',
            # TV HD, TV SD, non-scene HD, non-scene SD, foreign XviD, foreign x264
            'RSS':
            'c17=17&c26=26&c27=27&c33=33&c34=34&c44=44&c45=45'  # Season + Episode
        }

        self.cache = TVCache(self, min_time=20)
Example #20
    def __init__(self):
        super(PretomeProvider, self).__init__("Pretome",
                                              'https://pretome.info', True)

        self.urls.update({
            'login':
            '******'.format(**self.urls),
            'detail':
            '{base_url}/details.php?id=%s'.format(**self.urls),
            'search':
            '{base_url}/browse.php?search=%s%s'.format(**self.urls),
            'download':
            '{base_url}/download.php/%s/%s.torrent'.format(**self.urls)
        })

        self.username = None
        self.password = None
        self.pin = None

        self.minseed = None
        self.minleech = None

        self.categories = "&st=1&cat%5B%5D=7"

        self.proper_strings = ['PROPER', 'REPACK']

        self.cache = TVCache(self, min_time=30)
Example #21
    def __init__(self):
        super(DanishbitsProvider,
              self).__init__('Danishbits', 'https://danishbits.org', True)

        # Credentials
        self.username = None
        self.passkey = None

        # Torrent Stats
        self.minseed = 0
        self.minleech = 0
        self.freeleech = True

        # URLs
        self.urls.update({
            'login':
            '******'.format(**self.urls),
            'search':
            '{base_url}/couchpotato.php'.format(**self.urls),
        })

        # Proper Strings

        # Cache
        self.cache = TVCache(self, min_time=10)
Example #22
    def __init__(self):

        super(TorrentDayProvider, self).__init__("TorrentDay", 'http://torrentday.com', True)

        self.username = None
        self.password = None

        self.freeleech = False
        self.minseed = None
        self.minleech = None

        self.enable_cookies = True

        self.cache = TVCache(self, min_time=10)

        self.urls.update({
            'login': '******'.format(base_url=self.urls['base_url']),
            'search': '{base_url}/V3/API/API.php'.format(base_url=self.urls['base_url']),
            'download': '{base_url}/download.php/%s/%s'.format(base_url=self.urls['base_url'])
        })

        self.cookies = None

        self.categories = {'Season': {'c14': 1}, 'Episode': {'c2': 1, 'c26': 1, 'c7': 1, 'c24': 1},
                           'RSS': {'c2': 1, 'c26': 1, 'c7': 1, 'c24': 1, 'c14': 1}}
Example #23
    def __init__(self):
        super(BitSoupProvider, self).__init__("BitSoup", 'www.bitsoup.me',
                                              True)

        self.urls.update({
            'login':
            '******'.format(base_url=self.urls['base_url']),
            'detail':
            '{base_url}/details.php?id=%s'.format(
                base_url=self.urls['base_url']),
            'search':
            '{base_url}/browse.php'.format(base_url=self.urls['base_url']),
            'download':
            '{base_url}/%s'.format(base_url=self.urls['base_url'])
        })

        self.supports_backlog = True

        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None

        self.cache = TVCache(self, min_time=20)
Example #24
    def __init__(self):
        super(IPTorrentsProvider, self).__init__("IPTorrents", 'iptorrents.eu',
                                                 True)

        self.supports_backlog = True

        self.username = None
        self.password = None
        self.ratio = None
        self.freeleech = False
        self.minseed = None
        self.minleech = None

        self.enable_cookies = True

        self.cache = TVCache(self, min_time=10)

        self.urls.update({
            'login':
            '******'.format(base_url=self.urls['base_url']),
            'search':
            '{base_url}/t?%s%s&q=%s&qf=#torrents'.format(
                base_url=self.urls['base_url'])
        })

        self.categories = '73=&60='
Example #25
    def __init__(self):
        super(FileListProvider, self).__init__('FileList',
                                               'https://filelist.ro', True)

        # Credentials
        self.username = None
        self.password = None

        # Torrent Stats
        self.minseed = None
        self.minleech = None

        # URLs
        self._urls.update({
            "login":
            "******".format(**self._urls),
            "search":
            "{base_url}/browse.php".format(**self._urls),
        })

        # Proper Strings
        self.proper_strings = ["PROPER", "REPACK"]

        # Cache
        self.cache = TVCache(self)
Example #26
    def __init__(self):
        super(NcoreProvider, self).__init__('nCore', 'https://ncore.cc', True)

        # custom settings
        self.custom_settings = {
            'username': '',
            'password': '',
            'minseed': 0,
            'minleech': 0
        }

        categories = [
            'xvidser_hun', 'xvidser', 'dvd_hun', 'dvd', 'dvd9_hun', 'dvd9',
            'hd_hun', 'hd'
        ]

        categories = '&'.join(
            ['kivalasztott_tipus[]=' + x for x in categories])

        self._urls.update({
            'login':
            '******'.format(**self._urls),
            'search':
            ('{base_url}/torrents.php?{cats}&mire=%s&miben=name'
             '&tipus=kivalasztottak_kozott&submit.x=0&submit.y=0&submit=Ok'
             '&tags=&searchedfrompotato=true&jsons=true').format(
                 cats=categories, **self._urls),
        })

        self.cache = TVCache(self)
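
The two-step category build above first lists the raw nCore category slugs, then folds them into repeated query parameters. A sketch of what the join produces, with a shortened list:

    categories = ['xvidser_hun', 'xvidser', 'dvd_hun']
    print('&'.join(['kivalasztott_tipus[]=' + x for x in categories]))
    # kivalasztott_tipus[]=xvidser_hun&kivalasztott_tipus[]=xvidser&kivalasztott_tipus[]=dvd_hun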
Example #27
    def __init__(self):
        super(MoreThanTVProvider, self).__init__("MoreThanTV",
                                                 'www.morethan.tv', True)

        self.supports_backlog = True

        self._uid = None
        self._hash = None
        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None
        # self.freeleech = False

        self.urls.update({
            'login':
            '******'.format(base_url=self.urls['base_url']),
            'detail':
            '{base_url}/torrents.php?id=%s'.format(
                base_url=self.urls['base_url']),
            'search':
            '{base_url}/torrents.php?tags_type=1&order_by=time&order_way=desc&action=basic&searchsubmit=1&searchstr=%s'
            .format(base_url=self.urls['base_url']),
            'download':
            '{base_url}/torrents.php?action=download&id=%s'.format(
                base_url=self.urls['base_url'])
        })

        self.cookies = None

        self.proper_strings = ['PROPER', 'REPACK']

        self.cache = TVCache(self, min_time=10)
Example #28
    def __init__(self):
        super(HDSpaceProvider, self).__init__("HDSpace",
                                              'https://hd-space.org', True)

        self._urls.update({
            'login':
            '******'.format(**self._urls),
            'search':
            '{base_url}/index.php?page=torrents&search=%s&active=1&options=0&category='
            .format(**self._urls),
            'rss':
            '{base_url}/rss_torrents.php?feed=dl'.format(**self._urls)
        })

        self.username = None
        self.password = None

        self.minseed = None
        self.minleech = None

        self.categories = [15, 21, 22, 24, 25,
                           40]  # HDTV/DOC 1080/720, bluray, remux
        for cat in self.categories:
            self.urls['search'] += str(cat) + '%%3B'
            self.urls['rss'] += '&cat[]=' + str(cat)

        self.urls['search'] = self.urls['search'][:-4]  # remove extra %%3B

        self.cache = TVCache(self)
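
The loop above appends each category id plus '%%3B' (a percent-escaped ';', doubled so it survives the later %-formatting of the '%s' placeholder) and then trims the final four characters. A standalone sketch of the same construction:

    search = 'https://hd-space.org/index.php?page=torrents&search=%s&active=1&options=0&category='
    for cat in [15, 21, 22]:
        search += str(cat) + '%%3B'
    search = search[:-4]  # remove the extra trailing '%%3B'
    print(search)  # ...&category=15%%3B21%%3B22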
Example #29
    def __init__(self):
        super(ABNormalProvider, self).__init__("ABNormal",
                                               'https://abnormal.ws', True)

        # Credentials
        self.username = None
        self.password = None

        # Torrent Stats
        self.minseed = None
        self.minleech = None

        # URLs
        self.urls.update({
            'login':
            '******'.format(**self.urls),
            'search':
            '{base_url}/torrents.php'.format(**self.urls),
        })

        # Proper Strings
        self.proper_strings = ['PROPER']

        # Cache
        self.cache = TVCache(self, min_time=30)
Example #30
    def __init__(self):
        super(T411Provider, self).__init__("T411", 'www.t411.li', True)

        self.supports_backlog = True

        self.username = None
        self.password = None
        self.ratio = None
        self.token = None
        self.tokenLastUpdate = None

        self.cache = TVCache(self, min_time=10)

        self.urls.update({
            'search':
            '{base_url}/torrents/search/%s?cid=%s&limit=100'.format(
                base_url=self.urls['base_url']),
            'rss':
            '{base_url}/torrents/top/today'.format(
                base_url=self.urls['base_url']),
            'login':
            '******'.format(base_url=self.urls['base_url']),
            'download':
            '{base_url}/torrents/download/%s'.format(
                base_url=self.urls['base_url'])
        })

        self.subcategories = [433, 637, 455, 639]

        self.minseed = 0
        self.minleech = 0
        self.confirmed = False
Example #31
    def __init__(self):
        super(GFTrackerProvider, self).__init__("GFTracker",
                                                'http://www.thegft.org', True)

        self.urls.update({
            'login':
            '******'.format(**self.urls),
            'search':
            '{base_url}/browse.php?view=%s%s'.format(**self.urls),
            'download':
            '{base_url}/%s'.format(**self.urls)
        })

        self.username = None
        self.password = None

        self.minseed = None
        self.minleech = None

        self.cookies = None

        self.categories = "0&c26=1&c37=1&c19=1&c47=1&c17=1&c4=1&search="

        self.proper_strings = ['PROPER', 'REPACK']

        self.cache = TVCache(self, min_time=20)
Example #32
    def __init__(self):

        super(TorrentLeechProvider, self).__init__("TorrentLeech",
                                                   'torrentleech.org', True)

        self.supports_backlog = True

        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None

        self.urls.update({
            'login':
            '******'.format(
                base_url=self.urls['base_url']),
            'detail':
            '{base_url}/torrent/%s'.format(base_url=self.urls['base_url']),
            'search':
            '{base_url}/torrents/browse/index/query/%s/categories/%s'.format(
                base_url=self.urls['base_url']),
            'download':
            '{base_url}/%s'.format(base_url=self.urls['base_url']),
            'index':
            '{base_url}/torrents/browse/index/categories/%s'.format(
                base_url=self.urls['base_url'])
        })

        self.categories = "2,7,26,27,32,34,35"

        self.proper_strings = ['PROPER', 'REPACK']

        self.cache = TVCache(self, min_time=20)
Example #33
    def __init__(self):
        super(SceneTimeProvider,
              self).__init__("SceneTime", 'https://www.scenetime.com', True)

        self.urls.update({
            'login':
            '******'.format(**self.urls),
            'detail':
            '{base_url}/details.php?id=%s'.format(**self.urls),
            'search':
            '{base_url}/browse_API.php'.format(**self.urls),
            'download':
            '{base_url}/download.php/%s/%s'.format(**self.urls)
        })

        self.username = None
        self.password = None

        self.minseed = None
        self.minleech = None

        self.enable_cookies = True
        self.required_cookies = ('uid', 'pass')

        self.categories = [2, 42, 9, 63, 77, 79, 100, 83]

        self.cache = TVCache(self, min_time=20)
Example #34
    def __init__(self):
        super(NyaaProvider, self).__init__("NyaaTorrents", 'https://nyaa.si', False)

        self.supports_absolute_numbering = True
        self.anime_only = True
        self.confirmed = False

        self.minseed = None
        self.minleech = None

        self.cache = TVCache(self, min_time=20)
Example #35
    def __init__(self):
        super(NyaaProvider, self).__init__("NyaaTorrents",'www.nyaa.se', False)

        self.supports_backlog = True

        self.supports_absolute_numbering = True
        self.anime_only = True
        self.ratio = None

        self.cache = TVCache(self, min_time=15)

        self.minseed = 0
        self.minleech = 0
        self.confirmed = False
Example #36
    def __init__(self, name, url, private):
        self.name = name
        self.urls = {'base_url': url}
        self.private = private
        self.show = None
        self.supports_backlog = False
        self.supports_absolute_numbering = False
        self.anime_only = False
        self.search_mode = 'eponly'
        self.search_fallback = False
        self.enabled = False
        self.enable_daily = False
        self.enable_backlog = False
        self.cache = TVCache(self)
        self.proper_strings = ['PROPER|REPACK|REAL']

        self.enable_cookies = False
        self.cookies = ''
        self.rss_cookies = ''
        self.cookie_jar = dict()
Example #37
    def __init__(self):
        """Initialize the class."""
        super(ZooqleProvider, self).__init__('Zooqle', 'https://zooqle.com', False)

        # URLs
        self.urls.update({
            'search': '{base_url}/search'.format(**self.urls),
            'api': '{base_url}/api/media/%s'.format(**self.urls),
        })

        # Proper Strings
        self.proper_strings = ['PROPER', 'REPACK', 'REAL']

        # Miscellaneous Options

        # Torrent Stats
        self.minseed = None
        self.minleech = None

        # Cache
        self.cache = TVCache(self, min_time=15)
Example #38
    def __init__(self, name, url):
        self.name = name
        self.urls = {'base_url': url}
        self.show = None
        self.supportsBacklog = False
        self.supportsAbsoluteNumbering = False
        self.anime_only = False
        self.search_mode = None
        self.search_fallback = False
        self.enabled = False
        self.enable_daily = False
        self.enable_backlog = False
        self.cache = TVCache(self)
        self.proper_strings = ['PROPER|REPACK|REAL']
        self.private = False

        self.btCacheURLS = [
            'http://torcache.net/torrent/{torrent_hash}.torrent',
            'http://thetorrent.org/torrent/{torrent_hash}.torrent',
            'http://btdig.com/torrent/{torrent_hash}.torrent',
            # 'http://torrage.com/torrent/{torrent_hash}.torrent',
            # 'http://itorrents.org/torrent/{torrent_hash}.torrent',
        ]
Example #39
class NyaaProvider(TorrentProvider):
    def __init__(self):
        super(NyaaProvider, self).__init__("NyaaTorrents",'www.nyaa.se', False)

        self.supports_backlog = True

        self.supports_absolute_numbering = True
        self.anime_only = True
        self.ratio = None

        self.cache = TVCache(self, min_time=15)

        self.minseed = 0
        self.minleech = 0
        self.confirmed = False

    def search(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
        if self.show and not self.show.is_anime:
            return []

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug("Search string: %s" % search_string)

                params = {
                    "page": 'rss',
                    "cats": '1_0',  # All anime
                    "sort": 2,  # Sort Descending By Seeders
                    "order": 1
                }
                if mode != 'RSS':
                    params["term"] = search_string.encode('utf-8')

                searchURL = self.urls['base_url'] + '?' + urllib.urlencode(params)
                sickrage.srCore.srLogger.debug("Search URL: %s" % searchURL)

                summary_regex = ur"(\d+) seeder\(s\), (\d+) leecher\(s\), \d+ download\(s\) - (\d+.?\d* [KMGT]iB)(.*)"
                s = re.compile(summary_regex, re.DOTALL)

                for curItem in self.cache.getRSSFeed(searchURL)['entries'] or []:
                    title = curItem['title']
                    download_url = curItem['link']
                    if not all([title, download_url]):
                        continue

                    seeders, leechers, size, verified = s.findall(curItem['summary'])[0]
                    seeders, leechers = int(seeders), int(leechers)  # regex captures are strings
                    size = convert_size(size)

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            sickrage.srCore.srLogger.debug(
                                    "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                            title, seeders, leechers))
                        continue

                    if self.confirmed and not verified and mode != 'RSS':
                        sickrage.srCore.srLogger.debug(
                                "Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it")
                        continue

                    item = title, download_url, size, seeders, leechers
                    if mode != 'RSS':
                        sickrage.srCore.srLogger.debug("Found result: %s " % title)

                    items[mode].append(item)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results

    def seed_ratio(self):
        return self.ratio
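
The summary regex in the search method above pulls seeders, leechers, size, and a trailing verified marker out of Nyaa's RSS summary line. Applied to a hypothetical summary:

    import re

    s = re.compile(r"(\d+) seeder\(s\), (\d+) leecher\(s\), \d+ download\(s\) - (\d+.?\d* [KMGT]iB)(.*)", re.DOTALL)
    summary = '12 seeder(s), 3 leecher(s), 450 download(s) - 1.2 GiB - Trusted'
    seeders, leechers, size, verified = s.findall(summary)[0]
    # -> ('12', '3', '1.2 GiB', ' - Trusted'); note the captures are strings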
Example #40
class NewznabProvider(NZBProvider):
    type = 'newznab'

    def __init__(self,
                 name,
                 url,
                 private,
                 key='',
                 catIDs='5030,5040',
                 search_mode='eponly',
                 search_fallback=False,
                 enable_daily=False,
                 enable_backlog=False,
                 default=False):
        super(NewznabProvider, self).__init__(name, url, private)

        self.key = key

        self.search_mode = search_mode
        self.search_fallback = search_fallback
        self.enable_daily = enable_daily
        self.enable_backlog = enable_backlog
        self.supports_backlog = True

        self.catIDs = catIDs
        self.default = default

        self.cache = TVCache(self, min_time=30)

    def get_newznab_categories(self):
        """
        Uses the newznab provider url and apikey to get the capabilities.
        Makes use of the default newznab caps param. e.a. http://yournewznab/api?t=caps&apikey=skdfiw7823sdkdsfjsfk
        Returns a tuple with (succes or not, array with dicts [{"id": "5070", "name": "Anime"},
        {"id": "5080", "name": "Documentary"}, {"id": "5020", "name": "Foreign"}...etc}], error message)
        """
        success = False
        categories = []
        message = ""

        self.check_auth()

        params = {"t": "caps"}
        if self.key:
            params['apikey'] = self.key

        try:
            resp = sickrage.srCore.srWebSession.get("{}api?{}".format(self.urls['base_url'], urllib.urlencode(params)))
            data = xmltodict.parse(resp.content)

            for category in data["caps"]["categories"]["category"]:
                if category.get('@name') == 'TV':
                    categories += [{"id": category['@id'], "name": category['@name']}]
                    categories += [{"id": x["@id"], "name": x["@name"]} for x in category["subcat"]]

            success = True
        except Exception as e:
            sickrage.srCore.srLogger.debug("[%s] failed to list categories" % self.name)
            message = "[%s] failed to list categories" % self.name

        return success, categories, message

    def _get_season_search_strings(self, ep_obj):

        to_return = []
        params = {}
        if not ep_obj:
            return to_return

        params['maxage'] = (datetime.datetime.now() - datetime.datetime.combine(ep_obj.airdate,
                                                                                datetime.datetime.min.time())).days + 1
        params['tvdbid'] = ep_obj.show.indexerid

        # season
        if ep_obj.show.air_by_date or ep_obj.show.sports:
            date_str = str(ep_obj.airdate).split('-')[0]
            params['season'] = date_str
            params['q'] = date_str.replace('-', '.')
        else:
            params['season'] = str(ep_obj.scene_season)

        save_q = ' ' + params['q'] if 'q' in params else ''

        # add new query strings for exceptions
        name_exceptions = list(
            set([ep_obj.show.name] + get_scene_exceptions(ep_obj.show.indexerid)))
        for cur_exception in name_exceptions:
            params['q'] = sanitizeSceneName(cur_exception) + save_q
            to_return.append(dict(params))

        return to_return

    def _get_episode_search_strings(self, ep_obj, add_string=''):
        to_return = []
        params = {}
        if not ep_obj:
            return to_return

        params['maxage'] = (datetime.datetime.now() - datetime.datetime.combine(ep_obj.airdate,
                                                                                datetime.datetime.min.time())).days + 1
        params['tvdbid'] = ep_obj.show.indexerid

        if ep_obj.show.air_by_date or ep_obj.show.sports:
            date_str = str(ep_obj.airdate)
            params['season'] = date_str.partition('-')[0]
            params['ep'] = date_str.partition('-')[2].replace('-', '/')
        else:
            params['season'] = ep_obj.scene_season
            params['ep'] = ep_obj.scene_episode

        # add new query strings for exceptions
        name_exceptions = list(
            set([ep_obj.show.name] + get_scene_exceptions(ep_obj.show.indexerid)))
        for cur_exception in name_exceptions:
            params['q'] = sanitizeSceneName(cur_exception)
            if add_string:
                params['q'] += ' ' + add_string

            to_return.append(dict(params))

        return to_return

    def _doGeneralSearch(self, search_string):
        return self.search({'q': search_string})

    def check_auth(self):
        if self.private and not len(self.key):
            sickrage.srCore.srLogger.warning('Invalid api key for {}. Check your settings'.format(self.name))
            return False

        return True

    def _checkAuthFromData(self, data):

        """

        :type data: dict
        """
        if all([x in data for x in ['feed', 'entries']]):
            return self.check_auth()

        try:
            if int(data['bozo']) == 1:
                raise data['bozo_exception']
        except (AttributeError, KeyError):
            pass

        try:
            err_code = data['feed']['error']['code']
            err_desc = data['feed']['error']['description']

            if int(err_code) == 100:
                raise AuthException("Your API key for " + self.name + " is incorrect, check your config.")
            elif int(err_code) == 101:
                raise AuthException("Your account on " + self.name + " has been suspended, contact the administrator.")
            elif int(err_code) == 102:
                raise AuthException(
                    "Your account isn't allowed to use the API on " + self.name + ", contact the administrator")
            raise Exception("Error {}: {}".format(err_code, err_desc))
        except (AttributeError, KeyError):
            pass

        return False

    def search(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
        results = []

        if not self.check_auth():
            return results

        params = {
            "t": "tvsearch",
            "maxage": min(age, sickrage.srCore.srConfig.USENET_RETENTION),
            "limit": 100,
            "offset": 0,
            "cat": self.catIDs or '5030,5040'
        }

        params.update(search_params)

        if self.key:
            params['apikey'] = self.key

        offset = total = 0
        last_search = datetime.datetime.now()
        while total >= offset:
            if (datetime.datetime.now() - last_search).seconds < 5:
                continue  # busy-wait throttle: spin until at least 5 seconds have passed since the last request

            search_url = self.urls['base_url'] + '/api'
            sickrage.srCore.srLogger.debug("Search url: %s?%s" % (search_url, urllib.urlencode(params)))

            data = self.cache.getRSSFeed(search_url, params=params)

            last_search = datetime.datetime.now()

            if not self._checkAuthFromData(data):
                break

            for item in data['entries']:

                (title, url) = self._get_title_and_url(item)

                if title and url:
                    results.append(item)

            # get total and offset attribs
            try:
                if total == 0:
                    total = int(data['feed'].newznab_response['total'] or 0)
                offset = int(data['feed'].newznab_response['offset'] or 0)
            except AttributeError:
                break

            # No items found, prevent from doing another search
            if total == 0:
                break

            if offset != params['offset']:
                sickrage.srCore.srLogger.info("Tell your newznab provider to fix their bloody newznab responses")
                break

            params['offset'] += params['limit']
            if (total > int(params['offset'])) and (offset < 500):
                offset = int(params['offset'])
                # if there are more items available then the amount given in one call, grab some more
                sickrage.srCore.srLogger.debug('%d' % (total - offset) + ' more items to be fetched from provider. ' +
                                               'Fetching another %d' % int(params['limit']) + ' items.')
            else:
                break

        return results

    def find_propers(self, search_date=None):
        # a datetime default in the signature would be evaluated once at import time
        search_date = search_date or datetime.datetime.today()
        results = []
        dbData = []

        for show in [s['doc'] for s in sickrage.srCore.mainDB.db.all('tv_shows', with_doc=True)]:
            for episode in [e['doc'] for e in sickrage.srCore.mainDB.db.get_many('tv_episodes', show['indexer_id'], with_doc=True)]:
                if episode['airdate'] >= str(search_date.toordinal()) \
                        and episode['status'] in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST:

                    self.show = findCertainShow(sickrage.srCore.SHOWLIST, int(show["showid"]))
                    if not self.show: continue

                    curEp = self.show.getEpisode(int(episode["season"]), int(episode["episode"]))
                    searchStrings = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
                    for searchString in searchStrings:
                        for item in self.search(searchString):
                            title, url = self._get_title_and_url(item)
                            if re.match(r'.*(REPACK|PROPER).*', title, re.I):
                                results += [Proper(title, url, datetime.datetime.today(), self.show)]

        return results

    @classmethod
    def getProviders(cls):
        providers = cls.getDefaultProviders()

        try:
            for curProviderStr in sickrage.srCore.srConfig.CUSTOM_PROVIDERS.split('!!!'):
                if not len(curProviderStr):
                    continue

                try:
                    cur_type, curProviderData = curProviderStr.split('|', 1)

                    if cur_type == "newznab":
                        cur_name, cur_url, cur_key, cur_cat = curProviderData.split('|')
                        cur_url = sickrage.srCore.srConfig.clean_url(cur_url)

                        provider = NewznabProvider(
                            cur_name,
                            cur_url,
                            bool(cur_key != '0'),  # cur_key is a string; the original compared it to int 0, which is always unequal
                            key=cur_key,
                            catIDs=cur_cat
                        )

                        providers += [provider]
                except Exception:
                    continue
        except Exception:
            pass

        return providers

    @classmethod
    def getDefaultProviders(cls):
        return [
            cls('SickBeard', 'lolo.sickbeard.com', False, '', '5030,5040', 'eponly', False, False, False, True),
            cls('NZB.Cat', 'nzb.cat', True, '', '5030,5040,5010', 'eponly', True, True, True, True),
            cls('NZBGeek', 'api.nzbgeek.info', True, '', '5030,5040', 'eponly', False, False, False, True),
            cls('NZBs.org', 'nzbs.org', True, '', '5030,5040', 'eponly', False, False, False, True),
            cls('Usenet-Crawler', 'usenet-crawler.com', True, '', '5030,5040', 'eponly', False, False, False, True)
        ]
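
The caps parse in get_newznab_categories above implies a response shaped like the following (a trimmed, hypothetical caps document; xmltodict maps XML attributes to '@'-prefixed keys):

    import xmltodict

    caps_xml = """<caps><categories>
      <category id="5000" name="TV">
        <subcat id="5030" name="SD"/>
        <subcat id="5040" name="HD"/>
      </category>
    </categories></caps>"""

    category = xmltodict.parse(caps_xml)["caps"]["categories"]["category"]
    print([{"id": x["@id"], "name": x["@name"]} for x in category["subcat"]])
    # [{'id': '5030', 'name': 'SD'}, {'id': '5040', 'name': 'HD'}]

One xmltodict caveat: with a single <category> element, ["category"] comes back as a dict rather than a list, so the for-loop in the method above would iterate its keys instead of its elements.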
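
getProviders above expects CUSTOM_PROVIDERS to be a '!!!'-separated list of 'type|name|url|key|catIDs' entries. A hypothetical value and the split it performs:

    custom_provider = 'newznab|MyIndexer|https://indexer.example.com|abcd1234|5030,5040'
    cur_type, data = custom_provider.split('|', 1)
    cur_name, cur_url, cur_key, cur_cat = data.split('|')
    # cur_type='newznab', cur_name='MyIndexer', cur_url='https://indexer.example.com',
    # cur_key='abcd1234', cur_cat='5030,5040'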
Example #41
class GenericProvider(object):
    def __init__(self, name, url):
        self.name = name
        self.urls = {'base_url': url}
        self.show = None
        self.supportsBacklog = False
        self.supportsAbsoluteNumbering = False
        self.anime_only = False
        self.search_mode = None
        self.search_fallback = False
        self.enabled = False
        self.enable_daily = False
        self.enable_backlog = False
        self.cache = TVCache(self)
        self.proper_strings = ['PROPER|REPACK|REAL']
        self.private = False

        self.btCacheURLS = [
            'http://torcache.net/torrent/{torrent_hash}.torrent',
            'http://thetorrent.org/torrent/{torrent_hash}.torrent',
            'http://btdig.com/torrent/{torrent_hash}.torrent',
            # 'http://torrage.com/torrent/{torrent_hash}.torrent',
            # 'http://itorrents.org/torrent/{torrent_hash}.torrent',
        ]

    @property
    def id(self):
        return str(re.sub(r"[^\w\d_]", "_", self.name.strip().lower()))

    @property
    def isEnabled(self):
        return self.enabled

    @property
    def imageName(self):
        return ""

    def _checkAuth(self):
        return True

    def _doLogin(self):
        return True

    @classmethod
    def get_subclasses(cls):
        yield cls
        if cls.__subclasses__():
            for sub in cls.__subclasses__():
                for s in sub.get_subclasses():
                    yield s

    def getResult(self, episodes):
        """
        Returns a result of the correct type for this provider
        """
        try:
            result = {'nzb': NZBSearchResult, 'torrent': TorrentSearchResult}[getattr(self, 'type')](episodes)
        except Exception:
            result = SearchResult(episodes)

        result.provider = self
        return result

    def make_url(self, result):
        urls = []
        filename = ''
        if result.url.startswith('magnet'):
            try:
                torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper()

                try:
                    torrent_name = re.findall('dn=([^&]+)', result.url)[0]
                except Exception:
                    torrent_name = 'NO_DOWNLOAD_NAME'

                if len(torrent_hash) == 32:
                    torrent_hash = b16encode(b32decode(torrent_hash)).upper()

                if not torrent_hash:
                    sickrage.srCore.srLogger.error("Unable to extract torrent hash from magnet: " + result.url)
                    return urls, filename

                urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.btCacheURLS]
                random.shuffle(urls)  # shuffle in place; random.shuffle() returns None, so never assign its result
            except Exception:
                sickrage.srCore.srLogger.error("Unable to extract torrent hash or name from magnet: " + result.url)
                return urls, filename
        else:
            urls = [result.url]

        return urls, filename

    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():  # call the method; the bare attribute is always truthy
            return False

        urls, filename = self.make_url(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            sickrage.srCore.srLogger.info("Downloading a result from " + self.name + " at " + url)

            # Support for Jackett/TorzNab
            if url.endswith('torrent') and filename.endswith('nzb'):
                filename = filename.rsplit('.', 1)[0] + '.' + 'torrent'

            referer = '/'.join(url.split('/')[:3]) + '/'
            if sickrage.srCore.srWebSession.download(url, filename,
                                                     headers={'Referer': referer} if url.startswith('http') else None):

                if self._verify_download(filename):
                    sickrage.srCore.srLogger.info("Saved result to " + filename)
                    return True
                else:
                    sickrage.srCore.srLogger.warning("Could not download %s" % url)
                    remove_file_failed(filename)

        if len(urls):
            sickrage.srCore.srLogger.warning("Failed to download any results")

        return False

    def _verify_download(self, file_name=None):
        """
        Checks the saved file to see if it was actually valid, if not then consider the download a failure.
        """

        # primitive verification of torrents, just make sure we didn't get a text file or something
        if file_name.endswith('torrent'):
            try:
                with open(file_name, 'rb') as file:
                    mime_type = guessParser(StringInputStream(file.read()))._getMimeType()
                    if mime_type == 'application/x-bittorrent':
                        return True
            except Exception as e:
                sickrage.srCore.srLogger.debug("Failed to validate torrent file: {}".format(e.message))

            sickrage.srCore.srLogger.debug("Result is not a valid torrent file")
            return False

        return True

    def searchRSS(self, episodes):
        return self.cache.findNeededEpisodes(episodes)

    def getQuality(self, item, anime=False):
        """
        Figures out the quality of the given RSS item node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns a Quality value obtained from the node's data
        """
        (title, url) = self._get_title_and_url(item)
        quality = Quality.sceneQuality(title, anime)
        return quality

    def search(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
        return []

    def _get_season_search_strings(self, episode):
        return [{}]

    def _get_episode_search_strings(self, eb_obj, add_string=''):
        return [{}]

    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns: A tuple containing two strings representing title and URL respectively
        """

        title = item.get('title', '').replace(' ', '.')
        url = item.get('link', '').replace('&amp;', '&').replace('%26tr%3D', '&tr=')

        return title, url

    def _get_size(self, item):
        """Gets the size from the item"""
        sickrage.srCore.srLogger.error("Provider type doesn't have _get_size() implemented yet")
        return -1

    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):

        if not self._checkAuth():  # call the method; the bare attribute is always truthy
            return

        self.show = show

        results = {}
        itemList = []

        searched_scene_season = None
        for epObj in episodes:
            # search cache for episode result
            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
            if cacheResult:
                if epObj.episode not in results:
                    results[epObj.episode] = cacheResult
                else:
                    results[epObj.episode].extend(cacheResult)

                # found result, search next episode
                continue

            # skip if season already searched
            if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
                continue

            # mark season searched for season pack searches so we can skip later on
            searched_scene_season = epObj.scene_season

            search_strings = []
            if len(episodes) > 1 and search_mode == 'sponly':
                # get season search results
                search_strings = self._get_season_search_strings(epObj)
            elif search_mode == 'eponly':
                # get single episode search results
                search_strings = self._get_episode_search_strings(epObj)

            first = search_strings and isinstance(search_strings[0], dict) and 'rid' in search_strings[0]
            if first:
                sickrage.srCore.srLogger.debug('First search_string has rid')

            for curString in search_strings:
                itemList += self.search(curString, search_mode, len(episodes), epObj=epObj)
                if first:
                    first = False
                    if itemList:
                        sickrage.srCore.srLogger.debug(
                            'First search_string had rid, and returned results, skipping query by string')
                        break
                    else:
                        sickrage.srCore.srLogger.debug(
                            'First search_string had rid, but returned no results, searching with string query')

        # if we found what we needed already from cache then return results and exit
        if len(results) == len(episodes):
            return results

        # sort list by quality
        if len(itemList):
            items = {}
            itemsUnknown = []
            for item in itemList:
                quality = self.getQuality(item, anime=show.is_anime)
                if quality == Quality.UNKNOWN:
                    itemsUnknown += [item]
                else:
                    if quality not in items:
                        items[quality] = [item]
                    else:
                        items[quality].append(item)

            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
            itemList += itemsUnknown or []

        # filter results
        cl = []
        for item in itemList:
            (title, url) = self._get_title_and_url(item)

            # parse the file name
            try:
                myParser = NameParser(False)
                parse_result = myParser.parse(title)
            except InvalidNameException:
                sickrage.srCore.srLogger.debug("Unable to parse the filename " + title + " into a valid episode")
                continue
            except InvalidShowException:
                sickrage.srCore.srLogger.debug("Unable to parse the filename " + title + " into a valid show")
                continue

            showObj = parse_result.show
            quality = parse_result.quality
            release_group = parse_result.release_group
            version = parse_result.version

            addCacheEntry = False
            if not (showObj.air_by_date or showObj.sports):
                if search_mode == 'sponly':
                    if len(parse_result.episode_numbers):
                        sickrage.srCore.srLogger.debug(
                            "This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it")
                        addCacheEntry = True
                    if len(parse_result.episode_numbers) and (
                            parse_result.season_number not in set([ep.season for ep in episodes]) or
                            not [ep for ep in episodes if ep.scene_episode in parse_result.episode_numbers]):
                        sickrage.srCore.srLogger.debug(
                            "The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring")
                        addCacheEntry = True
                else:
                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [
                            ep for ep in episodes
                            if ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
                        sickrage.srCore.srLogger.debug(
                            "The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring")
                        addCacheEntry = True
                    elif len(parse_result.episode_numbers) and not [
                            ep for ep in episodes
                            if ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
                        sickrage.srCore.srLogger.debug(
                            "The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring")
                        addCacheEntry = True

                if not addCacheEntry:
                    # we just use the existing info for normal searches
                    actual_season = parse_result.season_number
                    actual_episodes = parse_result.episode_numbers
            else:
                if not parse_result.is_air_by_date:
                    sickrage.srCore.srLogger.debug(
                        "This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it")
                    addCacheEntry = True
                else:
                    airdate = parse_result.air_date.toordinal()
                    sql_results = main_db.MainDB().select(
                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
                        [showObj.indexerid, airdate])

                    if len(sql_results) != 1:
                        sickrage.srCore.srLogger.warning(
                            "Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it")
                        addCacheEntry = True

                if not addCacheEntry:
                    actual_season = int(sql_results[0]["season"])
                    actual_episodes = [int(sql_results[0]["episode"])]

            # add parsed result to cache for usage later on
            if addCacheEntry:
                sickrage.srCore.srLogger.debug("Adding item from search to cache: " + title)
                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
                if ci is not None:
                    cl.append(ci)
                continue

            # make sure we want the episode
            wantEp = True
            for epNo in actual_episodes:
                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
                    wantEp = False
                    break

            if not wantEp:
                sickrage.srCore.srLogger.info("RESULT:[{}] QUALITY:[{}] IGNORED!".format(title, Quality.qualityStrings[quality]))
                continue

            sickrage.srCore.srLogger.debug("FOUND RESULT:[{}] URL:[{}]".format(title, url))

            # make a result object
            epObj = []
            for curEp in actual_episodes:
                epObj.append(showObj.getEpisode(actual_season, curEp))

            result = self.getResult(epObj)
            result.show = showObj
            result.url = url
            result.name = title
            result.quality = quality
            result.release_group = release_group
            result.version = version
            result.content = None
            result.size = self._get_size(item)

            if len(epObj) == 1:
                epNum = epObj[0].episode
                sickrage.srCore.srLogger.debug("Single episode result.")
            elif len(epObj) > 1:
                epNum = MULTI_EP_RESULT
                sickrage.srCore.srLogger.debug(
                    "Separating multi-episode result to check for later - result contains episodes: " + str(
                        parse_result.episode_numbers))
            elif len(epObj) == 0:
                epNum = SEASON_RESULT
                sickrage.srCore.srLogger.debug("Separating full season result to check for later")

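            # Bucket results by episode number so later snatch logic can weigh
            # single-episode, multi-episode (MULTI_EP_RESULT) and full-season
            # (SEASON_RESULT) candidates separately.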
            if epNum not in results:
                results[epNum] = [result]
            else:
                results[epNum].append(result)

        # check if we have items to add to cache
        if len(cl) > 0:
            self.cache._getDB().mass_action(cl)
            del cl  # cleanup

        return results

    def findPropers(self, search_date=None):

        results = self.cache.listPropers(search_date)

        return [Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
                results]

    def seedRatio(self):
        """
        Providers should override this method if a custom seed ratio is
        enabled; it should return the provider's seed ratio value.
        """
        return ''

    @classmethod
    def getDefaultProviders(cls):
        pass

    @classmethod
    def getProvider(cls, name):
        providerMatch = [x for x in cls.getProviders() if x.name == name]
        if len(providerMatch) == 1:
            return providerMatch[0]

    @classmethod
    def getProviderByID(cls, id):
        providerMatch = [x for x in cls.getProviders() if x.id == id]
        if len(providerMatch) == 1:
            return providerMatch[0]

    @classmethod
    def getProviders(cls):
        modules = []
        for type in [TorrentProvider.type, NZBProvider.type]:
            modules += cls.loadProviders(type)
        return modules

    @classmethod
    def loadProviders(cls, type):
        providers = []
        pregex = re.compile(r'^([^_]*?)\.py$', re.IGNORECASE)
        path = os.path.join(os.path.dirname(__file__), type)
        names = [pregex.match(m) for m in os.listdir(path)]
        providers += [cls.loadProvider(name.group(1), type) for name in names if name]
        return providers

    @classmethod
    def loadProvider(cls, name, type, *args, **kwargs):
        import inspect
        members = dict(
            inspect.getmembers(
                importlib.import_module('.{}.{}'.format(type, name), 'sickrage.providers'),
                lambda x: hasattr(x, 'type') and x not in [NZBProvider, TorrentProvider])
        )
        return [v for v in members.values() if hasattr(v, 'type') and v.type == type][0](
            *args, **kwargs)
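
The loader above is a compact plugin-discovery pattern: provider modules live in a per-type subpackage, loadProviders globs that directory, and loadProvider imports each module and instantiates the first class whose type attribute matches. Below is a minimal standalone sketch of the same idea, assuming a package with per-type subdirectories and no-argument plugin constructors (discover_plugins and its parameters are illustrative, not part of the SiCKRAGE API):

import importlib
import inspect
import os
import re


def discover_plugins(package, plugin_type):
    # Import every module in <package>/<plugin_type> whose filename contains
    # no underscores, then instantiate the first class in each module tagged
    # with a matching `type` attribute.
    plugins = []
    path = os.path.join(os.path.dirname(__file__), plugin_type)
    for filename in os.listdir(path):
        match = re.match(r'^([^_]*?)\.py$', filename, re.IGNORECASE)
        if not match:
            continue
        module = importlib.import_module('.{}.{}'.format(plugin_type, match.group(1)), package)
        for _, klass in inspect.getmembers(module, inspect.isclass):
            if getattr(klass, 'type', None) == plugin_type:
                plugins.append(klass())
                break
    return plugins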
Example #42
class ZooqleProvider(TorrentProvider):
    def __init__(self):
        """Initialize the class."""
        super(ZooqleProvider, self).__init__('Zooqle', 'https://zooqle.com', False)

        # URLs
        self.urls.update({
            'search': '{base_url}/search'.format(**self.urls),
            'api': '{base_url}/api/media/%s'.format(**self.urls),
        })

        # Proper Strings
        self.proper_strings = ['PROPER', 'REPACK', 'REAL']

        # Torrent Stats
        self.minseed = None
        self.minleech = None

        # Cache
        self.cache = TVCache(self, min_time=15)

    def _get_season_search_strings(self, episode):
        search_string = {'Season': []}

        for show_name in set(show_names.allPossibleShowNames(episode.show)):
            for sep in ' ', ' - ':
                season_string = show_name + sep + 'Series '
                if episode.show.air_by_date or episode.show.sports:
                    season_string += str(episode.airdate).split('-')[0]
                elif episode.show.anime:
                    season_string += '%d' % episode.scene_absolute_number
                else:
                    season_string += '%d' % int(episode.scene_season)

                search_string['Season'].append(re.sub(r'\s+', ' ', season_string.replace('.', ' ').strip()))

        return [search_string]

    def _get_episode_search_strings(self, episode, add_string=''):
        search_string = {'Episode': []}

        if not episode:
            return []

        for show_name in set(show_names.allPossibleShowNames(episode.show)):
            for sep in ' ', ' - ':
                ep_string = sanitizeSceneName(show_name) + sep
                if episode.show.air_by_date:
                    ep_string += str(episode.airdate)
                elif episode.show.sports:
                    ep_string += str(episode.airdate) + '|' + episode.airdate.strftime('%b')
                elif episode.show.anime:
                    ep_string += '%i' % int(episode.scene_absolute_number)
                else:
                    ep_string += sickrage.app.naming_ep_type[4] % {'seasonnumber': episode.scene_season,
                                                                   'episodenumber': episode.scene_episode}

                if add_string:
                    ep_string += ' %s' % add_string

                search_string['Episode'].append(re.sub(r'\s+', ' ', ep_string.replace('.', ' ').strip()))

        return [search_string]
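
    # Both search-string builders branch the same way: the bare airdate for
    # air-by-date shows, airdate plus month abbreviation for sports, absolute
    # numbering for anime, and a configured season/episode naming pattern for
    # everything else.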

    def _get_torrent_info(self, torrent_hash):
        try:
            return self.session.get(self.urls['api'] % torrent_hash).json()
        except Exception:
            return {}

    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
        """
        Search a provider and parse the results.

        :param search_strings: A dict with mode (key) and the search value (value)
        :param age: Not used
        :param ep_obj: Not used
        :returns: A list of search results (structure)
        """
        results = []

        # Search Params
        search_params = {
            'q': '* category:TV',
            's': 'dt',
            'v': 't',
            'sd': 'd',
        }

        for mode in search_strings:
            sickrage.app.log.debug('Search mode: {}'.format(mode))

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    sickrage.app.log.debug('Search string: {}'.format(search_string))
                    search_params['q'] = '{} category:TV'.format(search_string)

                search_params['fmt'] = 'rss'
                search_params['pg'] = 1

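                # Page through the RSS feed; the OpenSearch counters in the
                # feed header tell us when the last page has been fetched
                # (start_index + items_per_page exceeds total_results), with a
                # hard stop after ten pages.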
                while search_params['pg'] < 11:
                    data = self.cache.get_rss_feed(self.urls['search'], params=search_params)
                    if not data or not data.get('feed'):
                        sickrage.app.log.debug('No data returned from provider')
                        break

                    results += self.parse(data, mode)

                    total_results = try_int(data['feed'].get('opensearch_totalresults'))
                    start_index = try_int(data['feed'].get('opensearch_startindex'))
                    items_per_page = try_int(data['feed'].get('opensearch_itemsperpage'))
                    if not total_results or start_index + items_per_page > total_results:
                        break

                    search_params['pg'] += 1

        return results

    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        results = []

        if not data.get('entries'):
            sickrage.app.log.debug('Data returned from provider does not contain any torrents')
            return results

        for item in data['entries']:
            try:
                title = item.get('title')
                download_url = item.get('torrent_magneturi')
                if not all([title, download_url]):
                    continue

                seeders = try_int(item['torrent_seeds'])
                leechers = try_int(item['torrent_peers'])
                size = try_int(item['torrent_contentlength'], -1)

                results += [{
                    'title': title,
                    'link': download_url,
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers
                }]

                if mode != 'RSS':
                    sickrage.app.log.debug('Found result: {}'.format(title))
            except Exception:
                sickrage.app.log.error("Failed parsing provider")

        return results
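
A hedged usage sketch for the provider above; the mode key and result fields mirror the code, but the search string is made up, and constructing the provider outside the running application is an assumption that may require more of the framework (session, cache, logger) than shown:

provider = ZooqleProvider()

# 'Episode' is one of the mode keys the search method iterates over; the
# search string itself is purely illustrative.
search_strings = {'Episode': ['Show Name S01E01']}

for item in provider.search(search_strings):
    print('{title} ({seeders} seeders, {leechers} leechers)'.format(**item))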
Example #43
    def __init__(self, provider_obj):
        TVCache.__init__(self, provider_obj)
        self.minTime = 20
Example #44
class NyaaProvider(TorrentProvider):
    def __init__(self):
        super(NyaaProvider, self).__init__("NyaaTorrents", 'https://nyaa.si', False)

        self.supports_absolute_numbering = True
        self.anime_only = True
        self.confirmed = False

        self.minseed = None
        self.minleech = None

        self.cache = TVCache(self, min_time=20)

    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
        """
        Search a provider and parse the results.

        :param search_strings: A dict with mode (key) and the search value (value)
        :param age: Not used
        :param ep_obj: Not used
        :returns: A list of search results (structure)
        """
        results = []

        # Search Params
        search_params = {
            'page': 'rss',
            'c': '1_0',  # All Anime
            'f': 0,  # No filter
            'q': '',
        }

        for mode in search_strings:
            sickrage.app.log.debug('Search mode: {}'.format(mode))

            if self.confirmed:
                search_params['f'] = 2  # Trusted only
                sickrage.app.log.debug('Searching only confirmed torrents')

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    sickrage.app.log.debug('Search string: {}'.format(search_string))
                    search_params['q'] = search_string

                data = self.cache.get_rss_feed(self.urls['base_url'], params=search_params)
                if not data:
                    sickrage.app.log.debug('No data returned from provider')
                    continue
                if not data.get('entries'):
                    sickrage.app.log.debug('Data returned from provider does not contain any {}torrents'.format(
                        'confirmed ' if self.confirmed else ''))
                    continue

                results += self.parse(data['entries'], mode)

        return results

    def parse(self, data, mode, **kwargs):
        """
        Parse search results from data
        :param data: response data
        :param mode: search mode
        :return: search results
        """

        results = []

        for item in data:
            try:
                title = item['title']
                download_url = item['link']
                if not all([title, download_url]):
                    continue

                seeders = try_int(item['nyaa_seeders'])
                leechers = try_int(item['nyaa_leechers'])

                size = convert_size(item['nyaa_size'], -1, units=['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB'])

                results += [
                    {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers}
                ]

                if mode != 'RSS':
                    sickrage.app.log.debug("Found result: {}".format(title))
            except Exception:
                sickrage.app.log.error('Failed parsing provider')

        return results
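
To make the field mapping concrete, here is a minimal sketch of the feedparser entry shape that parse assumes and the result it produces; the entry values are invented, and as with the Zooqle example, instantiating the provider outside the full application may require more framework setup than shown:

# A hypothetical RSS entry with the nyaa_* keys the parser reads.
sample_entry = {
    'title': '[Group] Some Anime - 01 [1080p]',
    'link': 'https://nyaa.si/download/123456.torrent',
    'nyaa_seeders': '15',
    'nyaa_leechers': '3',
    'nyaa_size': '1.4 GiB',
}

provider = NyaaProvider()
results = provider.parse([sample_entry], mode='Episode')
# -> [{'title': '[Group] Some Anime - 01 [1080p]',
#      'link': 'https://nyaa.si/download/123456.torrent',
#      'size': <byte count from convert_size>, 'seeders': 15, 'leechers': 3}]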