Example No. 1
 def sources(self, url, hostDict, hostprDict):
     try:
         self._sources = []
         if url is None: return self._sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data[
             'year']
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
             data['title'], data['year'])
         query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         if 'tvshowtitle' in data:
             url = self.search.format('8', urllib.quote(query))
         else:
             url = self.search.format('4', urllib.quote(query))
         self.hostDict = hostDict + hostprDict
         headers = {'User-Agent': client.agent()}
         _html = client.request(url, headers=headers)
         threads = []
         for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
             threads.append(workers.Thread(self._get_items, i))
         [i.start() for i in threads]
         [i.join() for i in threads]
         return self._sources
     except BaseException:
         return self._sources
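Note: every `sources` method in these examples starts from the same convention. The `url` argument is the query string built earlier by the scraper's `movie`/`episode` method (Examples No. 4 and No. 8 below), and the first two lines flatten `parse_qs`'s list values back into a plain dict. A minimal round-trip sketch (Python 2, matching the snippets; the sample values are placeholders):

    import urllib
    import urlparse

    # what a movie() method produces (cf. Example No. 4)
    url = urllib.urlencode({'imdb': 'tt0000000', 'title': 'Heat', 'year': '1995'})

    # what sources() does with it: parse_qs yields lists, so keep the
    # first element of each list (or '' when the list is empty)
    data = urlparse.parse_qs(url)
    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
    assert data['title'] == 'Heat' and data['year'] == '1995'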
Example No. 2
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         query = '%s %s' % (data['title'], data['year'])
         url = self.search_link % urllib.quote(query)
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         try:
             results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
         except Exception:
             return sources
         items = re.findall('class="browse-movie-bottom">(.+?)</div>\s</div>', results, re.DOTALL)
         if not items:
             return sources
         for entry in items:
             try:
                 try:
                     link, name = \
                         re.findall('<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
                     name = client.replaceHTMLCodes(name)
                     if not cleantitle.get(name) == cleantitle.get(data['title']):
                         continue
                 except Exception:
                     continue
                 y = entry[-4:]
                 if not y == data['year']:
                     continue
                 response = client.request(link)
                 try:
                     entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
                     for torrent in entries:
                         link, name = re.findall(
                             'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                             torrent, re.DOTALL)[0]
                         link = 'magnet:%s' % link
                         link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                         quality, info = source_utils.get_release_quality(name, name)
                         try:
                             size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                             div = 1 if size.endswith(('GB', 'GiB')) else 1024
                             size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                             size = '%.2f GB' % size
                             info.append(size)
                         except Exception:
                             pass
                         info = ' | '.join(info)
                         sources.append(
                             {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
                              'direct': False, 'debridonly': True})
                 except Exception:
                     continue
             except Exception:
                 continue
         return sources
     except Exception:
         return sources
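The size handling in Example No. 2 reappears almost verbatim in most of the torrent examples below: take the last size token in the markup, divide by 1024 when it is in megabytes, and format as GB. As a standalone helper it would look roughly like this (hypothetical name `parse_size_gb`, not part of the original scrapers; note the original character class `[^0-9|/.|/,]` accidentally keeps `|` and `/` as well, which the version below tightens):

    import re

    def parse_size_gb(text):
        # Find the last '1.4 GB' / '700 MiB' style token and normalise it
        # to gigabytes; return '' when no size is present.
        try:
            size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)[-1]
            div = 1 if size.endswith(('GB', 'GiB')) else 1024
            return '%.2f GB' % (float(re.sub(r'[^0-9.,]', '', size).replace(',', '.')) / div)
        except IndexError:
            return ''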
Example No. 3
 def sources(self, url, hostDict, hostprDict):
     try:
         self._sources = []
         self.items = []
         if url is None: return self._sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         self.title = data[
             'tvshowtitle'] if 'tvshowtitle' in data else data['title']
         self.hdlr = 'S%02dE%02d' % (
             int(data['season']), int(data['episode'])
         ) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search.format(urllib.quote(query))
         self._get_items(url)
         self.hostDict = hostDict + hostprDict
         threads = []
         for i in self.items:
             threads.append(workers.Thread(self._get_sources, i))
         [i.start() for i in threads]
         [i.join() for i in threads]
         return self._sources
     except BaseException:
         return self._sources
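Examples No. 1 and No. 3 fan the per-item work out through `workers.Thread`, which, as far as these snippets show, behaves like a thin wrapper over the standard library's `threading.Thread` (an assumption; the wrapper itself is not included here). The equivalent stdlib pattern, written out inside the method body, would be:

    import threading

    threads = [threading.Thread(target=self._get_sources, args=(item,))
               for item in self.items]
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # wait until every worker has appended to self._sources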
Example No. 4
    def movie(self, imdb, title, localtitle, aliases, year):
        if debrid.status() is False:
            return

        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TPB - Exception: \n' + str(failure))
            return
Example No. 5
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
             data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         r = client.request(url)
         try:
             posts = client.parseDOM(r, 'div', attrs={'class': 'box-info'})
             for post in posts:
                 data = client.parseDOM(post, 'a', ret='href')
                 links = [i for i in data if '/torrent/' in i]
                 for u in links:
                     match = '%s %s' % (title, hdlr)
                     match = match.replace('+', '-').replace(' ', '-').replace(':-', '-').replace('---', '-')
                     if not match in u: continue
                     u = self.base_link + u
                     r = client.request(u)
                     r = client.parseDOM(r, 'div', attrs={'class': 'torrent-category-detail clearfix'})
                     for t in r:
                         link = re.findall('href="magnet:(.+?)" onclick=".+?"', t)[0]
                         link = 'magnet:%s' % link
                         link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                         seeds = int(re.compile('<span class="seeds">(.+?)</span>').findall(t)[0])
                         if self.min_seeders > seeds:
                             continue
                         quality, info = source_utils.get_release_quality(link, link)
                         try:
                             size = re.findall('<strong>Total size</strong> <span>(.+?)</span>', t)
                             for size in size:
                                 size = '%s' % size
                                 info.append(size)
                         except BaseException:
                             pass
                         info = ' | '.join(info)
                         sources.append(
                             {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
                              'direct': False, 'debridonly': True})
         except:
             return sources
         return sources
     except:
         return sources
Example No. 6
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         r = client.request(url)
         try:
             posts = client.parseDOM(r, 'tbody', attrs={'id': 'results'})
             for post in posts:
                 link = re.findall('a href="(magnet:.+?)" title="(.+?)"',
                                   post, re.DOTALL)
                 for url, data in link:
                     if not hdlr in data: continue
                     url = url.split('&tr')[0]
                     quality, info = source_utils.get_release_quality(data)
                     if any(x in url for x in [
                             'FRENCH', 'Ita', 'italian', 'TRUEFRENCH',
                             '-lat-', 'Dublado'
                     ]):
                         continue
                     info = ' | '.join(info)
                     sources.append({
                         'source': 'Torrent',
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'info': info,
                         'direct': False,
                         'debridonly': True
                     })
         except:
             return sources
         return sources
     except:
         return sources
Example No. 7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         query = '%s %s' % (data['title'], data['year'])
         url = self.search_link % urllib.quote(query)
         url = urlparse.urljoin(self.base_link, url).replace('%20', '-')
         html = self.scraper(url).content
         try:
             results = client.parseDOM(html, 'div', attrs={'class': 'ava1'})
         except:
             return sources
         for torrent in results:
             links = re.findall(
                 'a data-torrent-id=".+?" href="(magnet:.+?)" class=".+?" title="(.+?)"',
                 torrent, re.DOTALL)
             for link, name in links:
                 link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                 quality, info = source_utils.get_release_quality(
                     name, name)
                 try:
                     size = re.findall(
                         '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                         torrent)[-1]
                     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                     size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                     size = '%.2f GB' % size
                     info.append(size)
                 except Exception:
                     pass
                 info = ' | '.join(info)
                 sources.append({
                     'source': 'Torrent',
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
         return sources
     except:
         return sources
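Example No. 7 fetches pages through `self.scraper(url).content` rather than `client.request`, and later examples call `self.scraper.get(...)`. In scrapers of this kind `self.scraper` is typically a cfscrape session that transparently solves Cloudflare's JavaScript challenge; a plausible construction (an assumption, the snippets never show it) is:

    import cfscrape

    # a requests.Session subclass that passes Cloudflare's JS check
    scraper = cfscrape.create_scraper()
    html = scraper.get('https://example.org/search-page', timeout=10).content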
Example No. 8
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        if debrid.status() is False:
            return

        try:
            if url is None:
                return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url[
                'episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TPB - Exception: \n' + str(failure))
            return
Example No. 9
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % (data['imdb'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            s = self.scraper.get(self.base_link).content
            s = re.findall('\'(http.+?)\'', s) + re.findall('\"(http.+?)\"', s)
            s = [
                i for i in s if urlparse.urlparse(self.base_link).netloc in i
                and len(i.strip('/').split('/')) > 3
            ]
            s = s[0] if s else urlparse.urljoin(self.base_link, 'vv')
            s = s.strip('/')
            url = s + self.search_link % urllib.quote_plus(query)
            r = self.scraper.get(url).content
            r = client.parseDOM(r, 'h2')
            l = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1],
                  re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '',
                         i[1]),
                  re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1]))
                 for i in l]
            r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]]
            r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-',
                                                   i[4])) for i in r]
            r = [
                i for i in r if cleantitle.get(title) == cleantitle.get(i[2])
                and data['year'] == i[3]
            ]
            r = [
                i for i in r if not any(x in i[4] for x in [
                    'HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS',
                    '3D'
                ])
            ]
            r = [i for i in r if '1080p' in i[4]
                 ][:1] + [i for i in r if '720p' in i[4]][:1]
            posts = [(i[1], i[0]) for i in l]
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    t = post[0]
                    u = self.scraper.get(post[1]).content
                    u = re.findall('"(http.+?)"', u) + re.findall(
                        '"(http.+?)"', u)
                    u = [i for i in u if not '/embed/' in i]
                    u = [i for i in u if not 'youtube' in i]
                    items += [(t, i) for i in u]
                except:
                    pass

            seen_urls = set()
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr:
                        raise Exception()
                    url = item[1]

                    if url in seen_urls:
                        continue
                    seen_urls.add(url)

                    quality, info = source_utils.get_release_quality(url, name)
                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            name)[-1]  # items are (title, url) pairs here
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    info = ' | '.join(info)
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check
            return sources
        except:
            return sources
Example No. 10
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         if debrid.status() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if \
             'tvshowtitle' in data else '%s' % (data['title'])
         url = self.search_link % urllib.quote_plus(query).lower()
         url = urlparse.urljoin(self.base_link, url)
         headers = {'Referer': url}
         r = self.scraper.get(url, headers=headers).content
         items = dom_parser2.parse_dom(r, 'h2')
         items = [dom_parser2.parse_dom(i.content, 'a', req=['href', 'rel', 'data-wpel-link']) for i in items]
         items = [(i[0].content, i[0].attrs['href']) for i in items]
         hostDict = hostprDict + hostDict
         for item in items:
             try:
                 name = item[0]
                 name = client.replaceHTMLCodes(name)
                 query = query.lower().replace(' ', '-')
                 if not query in item[1]:
                     continue
                 url = item[1]
                 headers = {'Referer': url}
                 r = self.scraper.get(url, headers=headers).content
                 links = dom_parser2.parse_dom(r, 'a', req=['href', 'rel', 'data-wpel-link'])
                 links = [i.attrs['href'] for i in links]
                 for url in links:
                     try:
                         if hdlr in name.upper() and cleantitle.get(title) in cleantitle.get(name):
                             fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                             fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                             fmt = [i.lower() for i in fmt]
                             if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                             if any(i in ['extras'] for i in fmt): raise Exception()
                             quality, info = source_utils.get_release_quality(name, url)
                             try:
                                 size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                                 div = 1 if size.endswith(('GB', 'GiB')) else 1024
                                 size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                                 size = '%.2f GB' % size
                                 info.append(size)
                             except:
                                 pass
                             info = ' | '.join(info)
                             if not any(x in url for x in ['.rar', '.zip', '.iso']):
                                 url = client.replaceHTMLCodes(url)
                                 url = url.encode('utf-8')
                                 host = \
                                     re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                 if host in hostDict:
                                     host = client.replaceHTMLCodes(host)
                                     host = host.encode('utf-8')
                                     sources.append(
                                         {'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                          'info': info, 'direct': False, 'debridonly': True})
                     except:
                         pass
             except:
                 pass
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check:
             sources = check
         return sources
     except:
         return sources
Example No. 11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s' % (data['title'])
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url).replace('-', '+')
            r = self.scraper.get(url).content
            posts = client.parseDOM(r,
                                    "div",
                                    attrs={"class": "postpage_movie_download"})
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        items.append(i)
                except:
                    pass

            seen_urls = set()
            for item in items:
                try:
                    i = str(item)
                    r = client.request(i)
                    u = client.parseDOM(r,
                                        "div",
                                        attrs={"class": "multilink_lnks"})
                    for t in u:
                        r = client.parseDOM(t, 'a', ret='href')
                        for url in r:
                            if 'www.share-online.biz' in url:
                                continue
                            if url in seen_urls:
                                continue
                            seen_urls.add(url)
                            quality, info = source_utils.get_release_quality(
                                url)
                            if 'SD' in quality:
                                continue
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            if not valid:
                                continue
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                except:
                    pass
            return sources
        except:
            return sources
Example No. 12
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         hostDict = hostprDict + hostDict
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = '%sS%02dE%02d' % (
             data['year'], int(data['season']), int(data['episode'])
         ) if 'tvshowtitle' in data else data['year']
         query = '%s %s S%02dE%02d' % (
             data['tvshowtitle'], data['year'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         items = []
         try:
             url = self.search_link % urllib.quote_plus(query)
             url = urlparse.urljoin(self.base_link, url)
             r = self.scraper.get(url).content
             posts = client.parseDOM(r, 'div', attrs={'class': 'post'})
             dupes = []
             for post in posts:
                 try:
                     t = client.parseDOM(post, 'a')[0]
                     t = re.sub('<.+?>|</.+?>', '', t)
                     x = re.sub(
                         '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                         '', t)
                     if not cleantitle.get(title) in cleantitle.get(x):
                         raise Exception()
                     y = re.findall(
                         '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                         t)[-1].upper()
                     if not y == hdlr: raise Exception()
                     if len(dupes) > 2: raise Exception()
                     dupes += [x]
                     u = client.parseDOM(post, 'a', ret='href')[0]
                     r = self.scraper.get(u).content
                     u = client.parseDOM(r, 'a', ret='href')
                     u = [(i.strip('/').split('/')[-1], i) for i in u]
                     items += u
                 except:
                     pass
         except:
             pass
         for item in items:
             try:
                 name = item[0]
                 name = client.replaceHTMLCodes(name)
                 t = re.sub(
                     '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                     '', name)
                 if not cleantitle.get(t) == cleantitle.get(title):
                     raise Exception()
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr: raise Exception()
                 quality, info = source_utils.get_release_quality(
                     name, item[1])
                 url = item[1]
                 if any(x in url for x in ['.rar', '.zip', '.iso']):
                     raise Exception()
                 url = client.replaceHTMLCodes(url)
                 url = url.encode('utf-8')
                 host = re.findall(
                     '([\w]+[.][\w]+)$',
                     urlparse.urlparse(url.strip().lower()).netloc)[0]
                 if not host in hostDict: raise Exception()
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except:
                 pass
         return sources
     except:
         return sources
Example No. 13
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         category = '+category%3ATV' if 'tvshowtitle' in data else '+category%3AMovies'
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url) + str(category)
         html = client.request(url)
         html = html.replace('&nbsp;', ' ')
         try:
             results = \
                 client.parseDOM(html, 'table', attrs={'class': 'table table-condensed table-torrents vmiddle'})[0]
         except Exception:
             return sources
         rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
         if not rows:
             return sources
         for entry in rows:
             try:
                 try:
                     name = re.findall('<a class=".+?>(.+?)</a>', entry,
                                       re.DOTALL)[0]
                     name = client.replaceHTMLCodes(name).replace(
                         '<hl>', '').replace('</hl>', '')
                     if not cleantitle.get(title) in cleantitle.get(name):
                         continue
                 except Exception:
                     continue
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr:
                     continue
                 try:
                     seeders = int(
                         re.findall(
                             'class="progress prog trans90" title="Seeders: (.+?) \|',
                             entry, re.DOTALL)[0])
                 except Exception:
                     continue
                 if self.min_seeders > seeders:
                     continue
                 try:
                     link = 'magnet:%s' % (re.findall(
                         'href="magnet:(.+?)"', entry, re.DOTALL)[0])
                     link = str(
                         client.replaceHTMLCodes(link).split('&tr')[0])
                 except Exception:
                     continue
                 quality, info = source_utils.get_release_quality(
                     name, name)
                 try:
                     size = re.findall(
                         '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                         entry)[-1]
                     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                     size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                     size = '%.2f GB' % size
                     info.append(size)
                 except Exception:
                     pass
                 info = ' | '.join(info)
                 sources.append({
                     'source': 'Torrent',
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except Exception:
                 continue
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check:
             sources = check
         return sources
     except Exception:
         return sources
Example No. 14
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('[\\\\:;*?"<>|/ \+\']+', '-', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = self.scraper.get(url).content

            r = client.parseDOM(r, "div", attrs={'class': 'entry-content'})[0]
            r = re.sub('shareaholic-canvas.+', '', r, flags=re.DOTALL)

            a_txt = client.parseDOM(r, "a", attrs={'href': '.+?'})
            a_url = client.parseDOM(r, "a", ret="href")
            r = re.sub('<a .+?</a>', '', r, flags=re.DOTALL)
            r = re.sub('<img .+?>', '', r, flags=re.DOTALL)

            size = ''
            pre_txt = []
            pre_url = []
            pres = client.parseDOM(r, "pre", attrs={'style': '.+?'})
            for pre in pres:
                try:
                    size = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', pre)[0]
                except:
                    pass

                url0 = re.findall('https?://[^ <"\'\s]+', pre, re.DOTALL)
                txt0 = [size] * len(url0)
                pre_url = pre_url + url0
                pre_txt = pre_txt + txt0

            r = re.sub('<pre .+?</pre>', '', r, flags=re.DOTALL)

            size = ''
            if not 'tvshowtitle' in data:
                try:
                    size = " " + re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))',
                                            r)[0]
                except:
                    pass

            raw_url = re.findall('https?://[^ <"\'\s]+', r, re.DOTALL)
            raw_txt = [size] * len(raw_url)

            pairs = zip(a_url + pre_url + raw_url, a_txt + pre_txt + raw_txt)

            for pair in pairs:
                try:
                    url = str(pair[0])
                    info = re.sub('<.+?>', '', pair[1])

                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    if not query.lower() in re.sub('[\\\\:;*?"<>|/ \+\'\.]+',
                                                   '-', url + info).lower():
                        raise Exception()

                    size0 = info + " " + size

                    try:
                        size0 = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))',
                                           size0)[0]
                        div = 1 if size0.endswith(('GB', 'GiB')) else 1024
                        size0 = float(re.sub('[^0-9\.]', '', size0)) / div
                        size0 = '%.2f GB' % size0
                    except:
                        size0 = ''

                    quality, info = source_utils.get_release_quality(url, info)
                    info.append(size0)
                    info = ' | '.join(info)

                    url = url.encode('utf-8')
                    hostDict = hostDict + hostprDict

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

                except:
                    pass

            return sources
        except:
            return sources
Example No. 15
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if \
                'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            headers = {'Referer': self.base_link}
            r = self.scraper.get(url, headers=headers).content

            search_results = dom_parser2.parse_dom(r, 'h2')
            search_results = [
                dom_parser2.parse_dom(i.content, 'a', req=['href'])
                for i in search_results
            ]
            search_results = [(i[0].content, i[0].attrs['href'])
                              for i in search_results]

            items = []
            for search_result in search_results:
                try:
                    headers = {'Referer': url}
                    r = self.scraper.get(search_result[1],
                                         headers=headers).content
                    links = dom_parser2.parse_dom(r,
                                                  'a',
                                                  req=[
                                                      'href',
                                                      'rel',
                                                  ])
                    links = [i.attrs['href'] for i in links]
                    for url in links:
                        try:
                            if hdlr in url.upper() and cleantitle.get(
                                    title) in cleantitle.get(url):
                                items.append(url)
                        except:
                            pass
                except:
                    pass

            seen_urls = set()
            for item in items:
                try:
                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls:
                        continue
                    seen_urls.add(url)

                    if any(x in url for x in
                           ['.part', 'extras', 'subs', 'dubbed', 'dub', 'MULTISUBS', 'sample', 'youtube', 'trailer']) \
                            or any(url.endswith(x) for x in ['.rar', '.zip', '.iso', '.sub', '.idx', '.srt']):
                        raise Exception()
                    quality, info = source_utils.get_release_quality(url, url)
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host in hostDict:
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example No. 16
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = (data['tvshowtitle']
                     if 'tvshowtitle' in data else data['title'])
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "-")

            query = urllib.quote_plus(query)
            url = '%s/?s=%s&submit=Find' % (self.base_link, query)

            resp = self.scraper.get(url)

            capture = re.findall(
                r'<script id="rlsbb_script" data-code-rlsbb="(\d*)" .*? src="(.*?)"><',
                resp.text)[0]
            rlsbb_code = capture[0]
            script_url = capture[1]

            resp = self.scraper.get(script_url)
            location_code = re.findall(r'\'/lib/search\' (.*?);', resp.text)[0]

            location_maths = re.findall(
                r'( \(.*?\) )| (\'.*?\') |\+ (\d*) \+|(\'\d*.php\')',
                location_code)

            location_maths = [
                x for i in location_maths for x in i if str(x) != ''
            ]

            location_builder = parseJSString(location_maths)

            url = '%s%s' % (self.searchbase_link, self.search_link %
                            (location_builder, query, rlsbb_code))

            r = self.scraper.get(url).content

            try:
                results = json.loads(r)['results']
            except:
                return None

            if 'tvshowtitle' in data:
                regex = r'.*?(%s) .*?(s%se%s)' % (
                    data['tvshowtitle'].lower(), str(data['season']).zfill(2),
                    str(data['episode']).zfill(2))
            else:
                regex = r'.*?(%s) .*?(%s)' % (data['title'], data['year'])

            post_urls = []

            for post in results:

                if 'old' in post['domain']:
                    continue
                capture = re.findall(regex, post['post_title'].lower())
                capture = [i for i in capture if len(i) > 1]
                if len(capture) >= 1:
                    post_urls.append('http://%s/%s' %
                                     (post['domain'], post['post_name']))

            if len(post_urls) == 0:
                return None

            items = []
            for url in post_urls:
                r = self.scraper.get(url).content
                posts = client.parseDOM(r, "div", attrs={"class": "content"})
                hostDict = hostprDict + hostDict
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                if hdlr in i.upper() and cleantitle.get(
                                        title) in cleantitle.get(i):
                                    items.append(i)
                            except:
                                pass
                    except:
                        pass

            seen_urls = set()

            for item in items:
                try:
                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls:
                        continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(host2.strip().lower()).netloc)[0]

                    if host not in hostDict:
                        continue

                    if any(x in host2 for x in ['.rar', '.zip', '.iso']):
                        continue

                    quality, info = source_utils.get_release_quality(url)

                    info = ' | '.join(info)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': host2,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })

                except:
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check
            return sources
        except:
            return sources
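Example No. 16 depends on a `parseJSString` helper that is not shown. Judging from the regex that feeds it, it receives the fragments of a JavaScript expression that assembles the search endpoint path (quoted string literals, parenthesised arithmetic, and an `NN.php` filename) and concatenates their evaluated values. A minimal sketch under that assumption (the real helper may differ):

    def parseJSString(tokens):
        # tokens look like ["('a' + 'b')", "'search'", '12', "'57.php'"]:
        # quoted fragments are taken literally, everything else is evaluated
        # as arithmetic, and the results are joined into one path component.
        out = ''
        for token in tokens:
            token = token.strip()
            if token.startswith("'") and token.endswith("'"):
                out += token.strip("'")
            else:
                out += str(eval(token))  # site-generated arithmetic only
        return out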
Example No. 17
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         headers = {'Referer': url}
         r = self.scraper.get(url, headers=headers).content
         try:
             posts = client.parseDOM(r,
                                     'h2',
                                     attrs={'class': 'entry-title'})
             for post in posts:
                 data = client.parseDOM(post, 'a', ret='href')
                 for u in data:
                     headers = {'Referer': u}
                     r = self.scraper.get(u, headers=headers).content
                     r = client.parseDOM(
                         r,
                         'div',
                         attrs={'class': 'clearfix entry-content'})
                     for t in r:
                         link = re.findall(
                             'a class="buttn magnet" href="(.+?)"', t)[0]
                         quality, info = source_utils.get_release_quality(u)
                         try:
                             size = re.findall(
                                 '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:gb|gib|mb|mib))',
                                 str(data))[-1]
                             div = 1 if size.endswith(('gb', 'gib')) else 1024
                             size = float(re.sub('[^0-9|/.|/,]', '',
                                                 size)) / div
                             size = '%.2f gb' % size
                             info.append(size)
                         except:
                             pass
                         info = ' | '.join(info)
                         sources.append({
                             'source': 'Torrent',
                             'quality': quality,
                             'language': 'en',
                             'url': link,
                             'info': info,
                             'direct': False,
                             'debridonly': True
                         })
         except:
             return sources
         return sources
     except:
         return sources
Example No. 18
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         posts = client.parseDOM(html, 'item')
         hostDict = hostprDict + hostDict
         items = []
         for post in posts:
             try:
                 t = client.parseDOM(post, 'title')[0]
                 u = client.parseDOM(post, 'a', ret='href')
                 s = re.search(
                     '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                     post)
                 s = s.groups()[0] if s else '0'
                 items += [(t, i, s) for i in u]
             except:
                 pass
         for item in items:
             try:
                 url = item[1]
                 if any(x in url for x in ['.rar', '.zip', '.iso']):
                     raise Exception()
                 url = client.replaceHTMLCodes(url)
                 url = url.encode('utf-8')
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if not valid: raise Exception()
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 name = item[0]
                 name = client.replaceHTMLCodes(name)
                 t = re.sub(
                     '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                     '',
                     name,
                     flags=re.I)
                 if not cleantitle.get(t) == cleantitle.get(title):
                     raise Exception()
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr: raise Exception()
                 quality, info = source_utils.get_release_quality(name, url)
                 try:
                     size = re.findall(
                         '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                         item[2])[-1]
                     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                     size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                     size = '%.2f GB' % size
                     info.append(size)
                 except:
                     pass
                 info = ' | '.join(info)
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except:
                 pass
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check: sources = check
         return sources
     except:
         return sources
Example No. 19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            try:
                links = []
                f = ['S%02dE%02d' % (int(data['season']), int(data['episode']))]
                t = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', data['tvshowtitle'])
                t = t.replace("&", "")
                q = self.search_link + urllib.quote_plus('%s %s' % (t, f[0]))
                q = urlparse.urljoin(self.base_link, q)
                result = self.scraper.get(q).content
                result = json.loads(result)
                result = result['results']
            except:
                links = result = []
            for i in result:
                try:
                    if not cleantitle.get(t) == cleantitle.get(i['showName']): raise Exception()
                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f): raise Exception()
                    quality = i['quality']
                    quality, info = source_utils.get_release_quality(quality)

                    try:
                        size = i['size']
                        size = float(size) / 1024
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)
                    url = i['links']
                    # for x in url.keys(): links.append({'url': url[x], 'quality': quality, 'info': info})
                    links = []
                    for x in url.keys(): links.append({'url': url[x], 'quality': quality})
                    for link in links:
                        try:
                            url = link['url']
                            quality2 = link['quality']
                            # url = url[1]
                            # url = link
                            if len(url) > 1: raise Exception()
                            url = url[0].encode('utf-8')
                            host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            if host not in hostprDict: raise Exception()
                            host = host.encode('utf-8')
                            sources.append(
                                {'source': host, 'quality': quality2, 'language': 'en', 'url': url, 'info': info,
                                 'direct': False, 'debridonly': True})
                        except:
                            pass
                except:
                    pass
            return sources
        except:
            return sources
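
The inner loop above reduces each link to its registrable "domain.tld" and keeps it only when that host appears in the premium-host list. A rough standalone sketch, written against Python 3's urllib.parse (the scrapers themselves target the Python 2 urlparse module); premium_hosts is a stand-in for hostprDict:

import re
from urllib.parse import urlparse

def _host_of(url):
    """Return the trailing 'domain.tld' of a URL's netloc, or None."""
    netloc = urlparse(url.strip().lower()).netloc
    m = re.findall(r'([\w]+[.][\w]+)$', netloc)
    return m[0] if m else None

premium_hosts = ['rapidgator.net', 'uploaded.net']  # stand-in for hostprDict
url = 'http://rapidgator.net/file/abc123'
print(_host_of(url), _host_of(url) in premium_hosts)  # rapidgator.net True
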
Example No. 20
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']), int(data['episode'])) \
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url).replace('-', '+')
         r = self.scraper.get(url).content
         if r is None and 'tvshowtitle' in data:
             season = re.search('S(.*?)E', hdlr)
             season = season.group(1)
             url = title
             r = self.scraper.get(url).content
         for loopCount in range(0, 2):
             if loopCount == 1 or (r is None and 'tvshowtitle' in data):
                 r = self.scraper.get(url).content
             posts = client.parseDOM(r, "h2")
             hostDict = hostprDict + hostDict
             items = []
             for post in posts:
                 try:
                     u = client.parseDOM(post, 'a', ret='href')
                     for i in u:
                         try:
                             name = str(i)
                             items.append(name)
                         except:
                             pass
                 except:
                     pass
             if len(items) > 0: break
         for item in items:
             try:
                 info = []
                 i = str(item)
                 r = self.scraper.get(i).content
                 u = client.parseDOM(r,
                                     "div",
                                     attrs={"class": "entry-content"})
                 for t in u:
                     r = re.compile('a href="(.+?)">.+?<').findall(t)
                     query = query.replace(' ', '.')
                     for url in r:
                         if query not in url:
                             continue
                         if any(x in url for x in ['.rar', '.zip', '.iso']):
                             raise Exception()
                         quality, info = source_utils.get_release_quality(
                             url)
                         valid, host = source_utils.is_host_valid(
                             url, hostDict)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'url': url,
                             'info': info,
                             'direct': False,
                             'debridonly': True
                         })
             except:
                 pass
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check: sources = check
         return sources
     except:
         return sources
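
The two-pass loop above pulls candidate post URLs out of <h2> headings before following each one. A rough stand-in for those client.parseDOM calls using only the re module; the real helper is more tolerant of attribute order and nesting:

import re

def _hrefs_in_h2(html):
    """Collect href values from <a> tags inside <h2> blocks."""
    links = []
    for block in re.findall(r'<h2[^>]*>(.*?)</h2>', html, re.DOTALL):
        links += re.findall(r'<a[^>]+href="([^"]+)"', block)
    return links

sample = '<h2><a href="http://example.invalid/post-1">Post 1</a></h2>'
print(_hrefs_in_h2(sample))  # -> ['http://example.invalid/post-1']
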
Example No. 21
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
         url = self.search_link % (urllib.quote_plus(query).replace(
             '+', '-'))
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         try:
             results = client.parseDOM(
                 html, 'table', attrs={'class': 'forum_header_border'})
             for result in results:
                 if 'magnet:' in result:
                     results = result
                     break
         except Exception:
             return sources
         rows = re.findall(
             '<tr name="hover" class="forum_header_border">(.+?)</tr>',
             results, re.DOTALL)
         if rows is None:
             return sources
         for entry in rows:
             try:
                 try:
                     columns = re.findall('<td\s.+?>(.+?)</td>', entry,
                                          re.DOTALL)
                     derka = re.findall(
                         'href="magnet:(.+?)" class="magnet" title="(.+?)"',
                         columns[2], re.DOTALL)[0]
                     name = derka[1]
                     link = 'magnet:%s' % (str(
                         client.replaceHTMLCodes(derka[0]).split('&tr')[0]))
                     t = name.split(hdlr)[0]
                     if not cleantitle.get(re.sub(
                             r'[()]', '', t)) == cleantitle.get(title):
                         continue
                 except Exception:
                     continue
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr:
                     continue
                 try:
                     seeders = int(
                         re.findall('<font color=".+?">(.+?)</font>',
                                    columns[5], re.DOTALL)[0])
                 except Exception:
                     continue
                 if self.min_seeders > seeders:
                     continue
                 quality, info = source_utils.get_release_quality(
                     name, name)
                 try:
                     size = re.findall(
                         '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                         name)[-1]
                     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                     size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                     size = '%.2f GB' % size
                     info.append(size)
                 except Exception:
                     pass
                 info = ' | '.join(info)
                 sources.append({
                     'source': 'Torrent',
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except Exception:
                 continue
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check:
             sources = check
         return sources
     except Exception:
         return sources
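
The magnet handling above decodes HTML entities and truncates at the first '&tr' parameter so only the info-hash URI is kept. A hedged sketch of that step, with the stdlib html.unescape standing in for client.replaceHTMLCodes:

from html import unescape

def _clean_magnet(raw):
    """Decode entities and drop the tracker list from a magnet URI."""
    return unescape(raw).split('&tr')[0]

raw = 'magnet:?xt=urn:btih:ABC123&amp;tr=udp%3A%2F%2Ftracker.example%3A80'
print(_clean_magnet(raw))  # -> magnet:?xt=urn:btih:ABC123
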
Example No. 22
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []

            if url is None:
                return self._sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = self.scraper.get(url).content

            posts = client.parseDOM(r, 'div', attrs={'class': 'item'})

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    tit = client.parseDOM(post, 'img', ret='alt')[0]
                    c = client.parseDOM(post, 'a', ret='href')[0]
                    name = tit
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '',
                        name,
                        flags=re.I)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    try:
                        y = re.findall(
                            '(?:\.|\(|\[|\s*|)(S\d+E\d+|S\d+)(?:\.|\)|\]|\s*|)',
                            name, re.I)[-1].upper()
                    except Exception:
                        y = re.findall(
                            '(?:\.|\(|\[|\s*|)(\d{4})(?:\.|\)|\]|\s*|)', name,
                            re.I)[0].upper()

                    if not y == hdlr: raise Exception()

                    try:
                        s = re.findall(
                            r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            name)[0]
                    except BaseException:
                        s = '0'

                    items += [(tit, c, s)]
                except Exception:
                    pass
            threads = []
            for item in items:
                threads.append(
                    workers.Thread(self._get_sources, item, hostDict))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            return self._sources
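
The tail of this example hands every candidate item to a worker thread and joins them all before returning. The same fan-out rebuilt on the stdlib threading module; workers.Thread in the addon is a thin wrapper over the same idea, and _get_sources here is only a placeholder:

import threading

results = []
lock = threading.Lock()

def _get_sources(item, hostDict):
    # placeholder for the real per-item scrape
    with lock:
        results.append((item, len(hostDict)))

hostDict = ['example-host.invalid']
items = ['item-a', 'item-b', 'item-c']
threads = [threading.Thread(target=_get_sources, args=(i, hostDict))
           for i in items]
[t.start() for t in threads]
[t.join() for t in threads]
print(results)
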
Example No. 23
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            post = {'query': data['imdb']}
            url = urlparse.urljoin(self.base_link, 'engine/ajax/search.php')
            r = self.scraper.post(url, data=post).content
            urls = client.parseDOM(r, 'a', ret='href')
            urls = [i for i in urls if data['imdb'] not in i]

            hostDict = hostprDict + hostDict
            links = []
            for u in urls:
                try:
                    data = self.scraper.get(u).content
                    data = re.findall('</iframe>(.+?)QuoteEEnd--><br /><br',
                                      data, re.DOTALL)[0]
                    links += re.findall(
                        '''start--><b>(.+?)</b>.+?<b><a href=['"](.+?)['"]''',
                        data, re.DOTALL)

                except Exception:
                    pass
            links = [(i[0], i[1]) for i in links if 'vip' not in i[0].lower()]
            for name, url in links:
                try:
                    name = re.sub('<.+?>', '', name)
                    if '4K' in name:
                        quality = '4K'
                    elif '1080p' in name:
                        quality = '1080p'
                    elif '720p' in name:
                        quality = '720p'
                    else:
                        quality = 'SD'

                    info = []
                    if '3D' in name or '.3D.' in url:
                        info.append('3D')
                        quality = '1080p'
                    if any(i in name.lower() for i in ['hevc', 'h265', 'x265']):
                        info.append('HEVC')

                    info = ' | '.join(info)

                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']):
                        raise Exception()

                    if 'ftp' in url:
                        host = 'CDN'
                        direct = True
                    else:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: raise Exception()
                        direct = False

                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': direct,
                        'debridonly': True
                    })
                except Exception:
                    pass

            return sources
        except Exception:
            return sources
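
Note that the HEVC test above was corrected: the original iterated over the characters of name, so 'hevc' could never match; the substring checks must run against the whole lower-cased string. A compact sketch of the name-based quality mapping, a small stand-in for source_utils.get_release_quality:

def _quality_from_name(name):
    """Tiny stand-in for source_utils.get_release_quality."""
    lowered = name.lower()
    if '2160p' in lowered or '4k' in lowered:
        return '4K'
    if '1080p' in lowered:
        return '1080p'
    if '720p' in lowered:
        return '720p'
    return 'SD'

name = 'Movie.2019.1080p.BluRay.x265-GRP'
print(_quality_from_name(name))                                  # 1080p
print(any(c in name.lower() for c in ('hevc', 'h265', 'x265')))  # True
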
Example No. 24
    def sources(self, url, hostDict, hostprDict):
        try:
            hostDict = hostDict + hostprDict

            sources = []
            query_bases = []
            options = []

            if url is None:
                return sources

            if not debrid.status():
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = (data['tvshowtitle'] if 'tvshowtitle' in data else data['title'])
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            # tvshowtitle
            if 'tvshowtitle' in data:
                query_bases.append('%s ' % (data['tvshowtitle'].replace("-", "")))
                if 'year' in data:
                    query_bases.append('%s %s ' % (data['tvshowtitle'], data['year']))
                options.append('S%02dE%02d' % (int(data['season']), int(data['episode'])))
                options.append('S%02d' % (int(data['season'])))
            else:
                query_bases.append('%s %s ' % (data['title'], data['year']))
                query_bases.append('%s ' % (data['title']))
                options.append('2160p')
                options.append('')

            for option in options:
                for query_base in query_bases:
                    q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query_base + option)
                    q = q.replace("  ", " ").replace(" ", "+")
                    url = self.base_link + self.search_link % q
                    html = self.scraper.get(url)
                    if html.status_code == 200:
                        posts = client.parseDOM(html.content, "div", attrs={"class": "title"})
                        for post in posts:
                            url = client.parseDOM(post, "a", ret='href')
                            if len(url) > 0:
                                html = self.scraper.get(url[0])
                                if html.status_code == 200:
                                    quotes = client.parseDOM(html.content, "div", attrs={"class": "dlinks"})
                                    for quote in quotes:
                                        hrefs = client.parseDOM(quote, "a", ret='href')
                                        if not hrefs:
                                            continue
                                        for href in hrefs:
                                            quality = source_utils.check_sd_url(href)
                                            href = href.encode('utf-8')
                                            valid, host = source_utils.is_host_valid(href, hostDict)
                                            if any(x in href for x in ['.rar', '.zip', '.iso']):
                                                continue
                                            if not valid:
                                                continue
                                            if hdlr in href.upper() and cleantitle.get(title) in cleantitle.get(href):
                                                sources.append(
                                                    {'source': host, 'quality': quality, 'language': 'en', 'url': href,
                                                     'direct': False, 'debridonly': False})
                if len(sources) > 0:
                    return sources
            return sources
        except:
            return sources
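
This provider fans one search out per (query base, option) pair and returns as soon as a pass yields sources. The movie branch above was adjusted so '2160p' and '' land in options rather than query_bases; otherwise the combination loop never ran for movies. A sketch of the query construction, where base_link and search_link are illustrative values:

import re

base_link = 'http://example-indexer.invalid'  # illustrative
search_link = '/search/%s'                    # illustrative

def _search_urls(query_bases, options):
    """Yield one search URL per (base, option) combination."""
    for option in options:
        for base in query_bases:
            q = re.sub(r'[\\/:;*?"\'<>|]', '', base + option)
            q = q.replace('  ', ' ').strip().replace(' ', '+')
            yield base_link + search_link % q

for u in _search_urls(['Some Show ', 'Some Show 2019 '], ['S01E02', 'S01']):
    print(u)
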
Example No. 25
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = urlparse.urljoin(
             self.base_link,
             self.search_link.format(query[0].lower(),
                                     cleantitle.geturl(query)))
         r = client.request(url)
         r = client.parseDOM(r, 'tbody')[0]
         posts = client.parseDOM(r, 'tr')
         posts = [i for i in posts if 'magnet:' in i]
         for post in posts:
             post = post.replace('&nbsp;', ' ')
             name = client.parseDOM(post, 'a', ret='title')[1]
             t = name.split(hdlr)[0]
             if not cleantitle.get(re.sub(r'[()]', '',
                                          t)) == cleantitle.get(title):
                 continue
             try:
                 y = re.findall(
                     '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall(
                     '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if not y == hdlr: continue
             links = client.parseDOM(post, 'a', ret='href')
             magnet = [
                 i.replace('&amp;', '&') for i in links if 'magnet:' in i
             ][0]
             url = magnet.split('&tr')[0]
             quality, info = source_utils.get_release_quality(name, name)
             try:
                 size = re.findall(
                     '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024
                 size = float(
                     re.sub('[^0-9|/.|/,]', '', size.replace(',',
                                                             '.'))) / div
                 size = '%.2f GB' % size
             except BaseException:
                 size = '0'
             info.append(size)
             info = ' | '.join(info)
             sources.append({
                 'source': 'Torrent',
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': True
             })
         return sources
     except BaseException:
         return sources
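
The title guard above cuts the release name at the episode tag and compares normalised forms of both titles. A rough stand-in for the cleantitle.get comparison these scrapers rely on:

import re

def _clean(title):
    """Keep lower-case alphanumerics only, like cleantitle.get."""
    return re.sub(r'[^a-z0-9]', '', title.lower())

release = 'Some.Show.2019.S01E02.720p.WEB.x264-GRP'
t = release.split('S01E02')[0]  # text before the episode tag
print(_clean(t) == _clean('Some Show (2019)'))  # -> True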