Example #1
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = ('S%02dE%02d' % (int(data['season']), int(data['episode']))
                         if 'tvshowtitle' in data else data['year'])
            query = ('%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
                     if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year']))
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.search.format('8', urllib.quote(query))
            else:
                url = self.search.format('4', urllib.quote(query))
            self.hostDict = hostDict + hostprDict
            _html = client.request(url, headers=self.headers)
            threads = []
            for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
                threads.append(workers.Thread(self._get_items, i))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Torrdown Testing - Exception: \n' + str(failure))
            return self._sources
Example #2
 def _get_items(self, url):
     try:
         headers = {'User-Agent': client.agent()}
         r = client.request(url, headers=headers)
         posts = client.parseDOM(r, 'tbody')[0]
         posts = client.parseDOM(posts, 'tr')
         for post in posts:
             data = dom.parse_dom(post, 'a', req='href')[1]
             link = urlparse.urljoin(self.base_link, data.attrs['href'])
             name = data.content
             t = name.split(self.hdlr)[0]
             if not cleantitle.get(re.sub(r'\(|\)', '', t)) == cleantitle.get(self.title):
                 continue
             try:
                 y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
             if not y == self.hdlr:
                 continue
             try:
                 size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024  # GiB/GB are already gigabyte-scale
                 size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                 size = '%.2f GB' % size
             except BaseException:
                 size = '0'
             self.items.append((name, link, size))
         return self.items
     except BaseException:
         return self.items
Example #3
 def _get_items(self, url):
     items = []
     try:
         r = client.request(url, headers=self.headers)
         posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
         posts = [i for i in posts if not 'racker:' in i]
         for post in posts:
             data = client.parseDOM(post, 'a', ret='href')
             url = [i for i in data if 'magnet:' in i][0]
             seeders = re.findall(
                 "<td class='ttable_col2' align='center'><font color='green'><b>(.+?)</b></font></td>", post)[0]
             if self.min_seeders > int(seeders):
                 continue
             name = client.parseDOM(post, 'a', ret='title')[0]
             t = name.split(self.hdlr)[0]
             if not cleantitle.get(re.sub(r'\(|\)', '', t)) == cleantitle.get(self.title): continue
             try:
                 y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
             if not y == self.hdlr: continue
             try:
                 size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024  # GiB/GB are already gigabyte-scale
                 size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                 size = '%.2f GB' % size
             except BaseException:
                 size = '0'
             items.append((name, url, size))
         return items
     except BaseException:
         return items
Example #4
 def _get_sources(self, item):
     try:
         name = item[0]
         quality, info = source_utils.get_release_quality(name, name)
         info.append(item[2])
         data = client.request(item[1])
         url = re.search(r'''href=["'](magnet:\?[^"']+)''', data).groups()[0]
         url = url.split('&tr')[0]
         info = ' | '.join(info)
         if control.setting('torrent.rd_check') == 'true':
             checked = rd_check.rd_cache_check(url)
             if checked:
                 self._sources.append({
                     'source': 'Cached Torrent',
                     'quality': quality,
                     'language': 'en',
                     'url': checked,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
         else:
             self._sources.append({
                 'source': 'Torrent',
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': True
             })
     except Exception:
         pass
Example #5
 def _get_sources(self, item):
     try:
         name = item[0]
         quality, info = source_utils.get_release_quality(item[1], name)
         info.append(item[2])
         data = client.request(item[1], headers=self.headers)
         seeders = re.compile('<span class="seeds">(.+?)</span>').findall(data)[0]
         if self.min_seeders > int(seeders):
             raise Exception()
         data = client.parseDOM(data, 'a', ret='href')
         url = [i for i in data if 'magnet:' in i][0]
         url = url.split('&tr')[0]
         info = ' | '.join(info)
         if control.setting('torrent.rd_check') == 'true':
             checked = rd_check.rd_cache_check(url)
             if checked:
                 self._sources.append(
                     {'source': 'Cached Torrent', 'quality': quality, 'language': 'en', 'url': checked,
                      'info': info, 'direct': False, 'debridonly': True})
         else:
             self._sources.append(
                 {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                  'direct': False, 'debridonly': True})
     except Exception:
         pass
Example #6
 def sources(self, url, hostDict, hostprDict):
     try:
         self._sources = []
         if url is None:
             return self._sources
         if debrid.status() is False:
             raise Exception()
         if debrid.torrent_enabled() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
         query = ('%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
                  if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year']))
         query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         if 'tvshowtitle' in data:
             url = self.search.format('8', urllib.quote(query))
         else:
             url = self.search.format('4', urllib.quote(query))
         self.hostDict = hostDict + hostprDict
         headers = {'User-Agent': client.agent()}
         _html = client.request(url, headers=headers)
         threads = []
         for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
             threads.append(workers.Thread(self._get_items, i))
         [i.start() for i in threads]
         [i.join() for i in threads]
         return self._sources
     except BaseException:
         return self._sources
Example #7
 def _get_sources(self, item):
     try:
         name = item[0]
         quality, info = source_utils.get_release_quality(item[1], name)
         info.append(item[2])
         info = ' | '.join(info)
         data = client.request(item[1])
         data = client.parseDOM(data, 'a', ret='href')
         url = [i for i in data if 'magnet:' in i][0]
         url = url.split('&tr')[0]
         self._sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
     except BaseException:
         pass
Example #8
 def _get_sources(self, item):
     try:
         name = item[0]
         quality, info = source_utils.get_release_quality(name, name)
         info.append(item[2])
         info = ' | '.join(info)
         data = client.request(item[1])
         url = re.search(r'''href=["'](magnet:\?[^"']+)''', data).groups()[0]
         self._sources.append(
             {'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
              'debridonly': True})
     except BaseException:
         pass