Example 1
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         if debrid.status() is False:
             raise Exception()
         html = client.request(url)
         links = re.findall('href="(magnet:.+?)"', html, re.DOTALL)
         for link in links:
             link = str(client.replaceHTMLCodes(link).split('&tr')[0])
             quality, info = source_utils.get_release_quality(link, link)
             try:
                 size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', html)[-1]
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024
                 size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                 size = '%.2f GB' % size
                 info.append(size)
             except:
                 pass
             info = ' | '.join(info)
             sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': False, 'debridonly': True})
         return sources
     except:
         return sources
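Most of the torrent scrapers below repeat the size-normalisation step seen here: pull the last "GB/GiB/MB/MiB" token from the page and convert it to gigabytes. A minimal standalone sketch of just that step, assuming the same suffixes (the helper name and sample string are illustrative, not part of any scraper API):

import re

def parse_size_gb(text):
    # Hypothetical helper mirroring the pattern above; not part of the scraper API.
    matches = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)
    if not matches:
        return 0.0
    size = matches[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024  # MB/MiB -> GB
    value = float(re.sub(r'[^0-9.,]', '', size).replace(',', '.'))
    return value / div

print('%.2f GB' % parse_size_gb('Some.Release.2020.1080p 1.4 GiB seeders: 12'))  # 1.40 GB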
Example 2
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     if debrid.status() is False: return
     try:
         url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
         url = urllib.parse.urlencode(url)
         return url
     except:
         return
Example 3
    def movie(self, imdb, title, localtitle, aliases, year):
        if debrid.status() is False:
            return

        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urlencode(url)
            return url
        except Exception:
            return
Example 4
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        if debrid.status() is False:
            return

        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urlencode(url)
            return url
        except:
            log_utils.log('ZOOGLE - Exception', 1)
            return
Example 5
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        if debrid.status() is False: return
        try:
            if url is None: return

            url = urllib.parse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.parse.urlencode(url)
            return url
        except:
            return
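Examples 2 through 5 share one convention: the metadata dict is serialised with urlencode and later unpacked with parse_qs plus the recurring dict comprehension. A quick round-trip sketch of that convention (the title values are made up):

from urllib.parse import urlencode, parse_qs

# movie()/tvshow() pack the metadata...
url = urlencode({'imdb': 'tt0944947', 'tvshowtitle': 'Game of Thrones', 'year': '2011'})

# ...and episode()/sources() unpack it with the recurring dict comprehension.
data = parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print(data['tvshowtitle'], data['year'])  # Game of Thrones 2011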
Example 6
    def sources(self, url, hostDict, hostprDict):

        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]

            alive = [x for x in threads if x.is_alive() is True]
            while alive:
                alive = [x for x in threads if x.is_alive() is True]
                time.sleep(0.1)
            return self.sources
        except:
            log_utils.log('RMZ - Exception', 1)
            return self.sources
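This example waits for its worker threads by polling is_alive() in a sleep loop; Examples 9-11 below simply join() each thread, which blocks without polling. A minimal sketch of the join-based variant using the standard library, on the assumption that workers.Thread wraps threading.Thread:

import threading, time

results = []

def fetch(item):
    time.sleep(0.1)  # stand-in for a _get_sources() network call
    results.append(item)

threads = [threading.Thread(target=fetch, args=(i,)) for i in range(5)]
[t.start() for t in threads]
[t.join() for t in threads]  # returns once every worker has finished
print(sorted(results))  # [0, 1, 2, 3, 4]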
Example 7
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            self.hostDict = hostDict + hostprDict
            if url is None:
                return sources
            if debrid.status() is False: return
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote(query))
                url = urljoin(self.base_link, url)
            else:
                url = self.moviesearch.format(quote(query))
                url = urljoin(self.base_link, url)

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                link = client.parseDOM(post, 'a', ret='href')[0]
                infohash = re.findall(r'(\w{40})', link, re.I)
                if infohash:
                    url = 'magnet:?xt=urn:btih:' + infohash[0]
                    name = link.split('title=')[1]
                    t = name.split(self.hdlr)[0]
                    if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title): continue
                    try:
                        y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                    except:
                        y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                    if not y == self.hdlr: continue
                    quality, info = source_utils.get_release_quality(name, name)
                    try:
                        size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                                    'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            log_utils.log('lime0 - Exception', 1)
            return sources
Example 8
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()
            data = urllib.parse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.parse.quote_plus(query)
            url = urllib.parse.urljoin(self.base_link, url)

            try:
                r = client.request(url)
                posts = client.parseDOM(r, 'div', attrs={'class': 'tgxtable'})
                for post in posts:
                    links = re.findall('a href="(magnet:.+?)"', post, re.DOTALL)
                    try:
                        size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub(r'[^0-9.]', '', size.replace(',', '.'))) / div
                        size = '%.2f GB' % size
                    except BaseException:
                        size = '0'
                    for url in links:
                        if hdlr not in url:
                            continue
                        url = url.split('&tr')[0]
                        quality, info = source_utils.get_release_quality(url)
                        if any(x in url for x in ['FRENCH', 'Ita', 'italian', 'TRUEFRENCH', '-lat-', 'Dublado']):
                            continue
                        info.append(size)
                        info = ' | '.join(info)
                        sources.append(
                            {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                             'direct': False, 'debridonly': True})
            except:
                return sources
            return sources
        except:
            return sources
Example 9
    def sources(self, url, hostDict, hostprDict):
        self.sources = []
        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)

            self.hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            self.hdlr = self.hdlr.lower()
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('+', '-')

            try:
                r = client.request(url)
                links = re.findall('<a href="(/torrent/.+?)"', r, re.DOTALL)[:20]

                threads = []
                for link in links:
                    threads.append(workers.Thread(self.get_sources, link))
                [i.start() for i in threads]
                [i.join() for i in threads]
                return self.sources
            except:
                log_utils.log('YourBT3 - Exception', 1)
                return self.sources

        except:
            log_utils.log('YourBT3 - Exception', 1)
            return self.sources
Example 10
    def sources(self, url, hostDict, hostprDict):
        self._sources = []
        try:
            if url is None:
                return self._sources

            if debrid.status() is False:
                return self._sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                self.title, int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    self.title, data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.search.format('8', quote(query))
            else:
                url = self.search.format('4', quote(query))

            self.hostDict = hostDict + hostprDict
            headers = {'User-Agent': client.agent()}
            _html = client.request(url, headers=headers)
            threads = []
            for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
                threads.append(workers.Thread(self._get_items, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except BaseException:
            return self._sources
Example 11
    def sources(self, url, hostDict, hostprDict):
        self.sources = []
        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = urllib.parse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')

            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.parse.quote_plus(query)
            url = urllib.parse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)
            r = client.parseDOM(r, 'table', attrs={'class': 'tmain'})[0]
            links = re.findall('<a href="(/torrent/.+?)">(.+?)<', r, re.DOTALL)

            threads = []
            for link in links:
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('TORRENTFUNK')
            return self.sources
Example 12
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         title = cleantitle.get_query(title)
         query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s' % data['imdb']
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         token = cfScraper.get(self.token).content
         token = json.loads(token)["token"]
         if 'tvshowtitle' in data:
             search_link = self.tvsearch.format(token, quote_plus(query), 'format=json_extended')
         else:
             search_link = self.msearch.format(token, data['imdb'], 'format=json_extended')
         control.sleep(250)
         rjson = cfScraper.get(search_link).content
         rjson = ensure_text(rjson, errors='ignore')
         files = json.loads(rjson)['torrent_results']
         for file in files:
             name = file["title"]
             url = file["download"]
             url = url.split('&tr')[0]
             quality, info = source_utils.get_release_quality(name, url)
             try:
                 dsize = float(file["size"]) / 1073741824
                 isize = '%.2f GB' % dsize
             except:
                 dsize, isize = 0.0, ''
             info.insert(0, isize)
             info = ' | '.join(info)
             sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
         return sources
     except:
         log_utils.log('torapi - Exception', 1)
         return sources
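When an API reports the size in bytes, as torapi does here, the conversion is a single divide by 2**30 (1073741824). A small sketch of that step as a helper (the function name is made up):

def bytes_to_gb(size_bytes):
    # Mirrors the try/except around file["size"] above; returns the (dsize, isize) pair.
    try:
        dsize = float(size_bytes) / 1073741824
        return dsize, '%.2f GB' % dsize
    except (TypeError, ValueError):
        return 0.0, ''

print(bytes_to_gb(1610612736))  # (1.5, '1.50 GB')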
Example 13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            try:
                url = self.search_link % quote_plus(query)
                url = urljoin(self.base_link, url)

                r = cfScraper.get(url).content
                r = ensure_text(r, errors='replace')

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []

                for post in posts:
                    try:
                        u = client.parseDOM(post,
                                            "div",
                                            attrs={"class": "postContent"})
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            u[0])[0]
                        u = client.parseDOM(u, "h2")
                        u = client.parseDOM(u, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i, size) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): continue

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    try:
                        dsize, isize = source_utils._size(item[2])
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = ensure_text(url)

                    host = re.findall('([\w]+[.][\w]+)$',
                                      urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'name': name
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example 14
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            try:
                r = client.request(url)
                posts = client.parseDOM(r, 'tr')
                for post in posts:
                    links = re.findall(
                        'a title="Download Torrent Magnet" href="(magnet:.+?)"',
                        post, re.DOTALL)
                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                    except BaseException:
                        dsize, isize = 0.0, ''
                    for url in links:
                        url = unquote_plus(url).split('&tr')[0].replace(
                            '&amp;', '&').replace(' ', '.')
                        if hdlr not in url:
                            continue
                        name = url.split('&dn=')[1]
                        quality, info = source_utils.get_release_quality(
                            name, url)
                        if any(x in url for x in [
                                'FRENCH', 'Ita', 'italian', 'TRUEFRENCH',
                                '-lat-', 'Dublado'
                        ]):
                            continue
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({
                            'source': 'Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True,
                            'size': dsize,
                            'name': name
                        })
            except:
                return sources
            return sources
        except:
            return sources
Example 15
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            try:
                r = client.request(url)
                links = zip(client.parseDOM(r, 'a', attrs={'class': 'btn btn-default magnet-button stats-action banner-button'}, ret='href'), client.parseDOM(r, 'td', attrs={'class': 'size'}))

                for link in links:
                    url = link[0].replace('&amp;', '&')
                    url = re.sub(r'(&tr=.+)&dn=', '&dn=', url)  # some bitlord links put &tr= before &dn=
                    url = url.split('&tr=')[0]
                    if 'magnet' not in url:
                        continue

                    if any(x in url.lower() for x in ['french', 'italian', 'spanish', 'truefrench', 'dublado', 'dubbed']):
                        continue

                    name = url.split('&dn=')[1]
                    t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in name:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = link[1]
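                        # site appears to list a bare number: one digit is treated as GB, anything longer as MB (a heuristic)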
                        size = str(size) + ' GB' if len(str(size)) == 1 else str(size) + ' MB'
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''

                    info.insert(0, isize)
                    info = ' | '.join(info)

                    sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
                return sources

            except:
                return sources

        except:
            from prophetscrapers.modules import log_utils
            log_utils.log('bitlord - Exception', 1)
            return sources
Example 16
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                self.title, int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote_plus(query))
                url = urljoin(self.base_link, url)

            else:
                url = self.moviesearch.format(quote_plus(query))
                url = urljoin(self.base_link, url)

            items = self._get_items(url)

            hostDict = hostDict + hostprDict
            for item in items:
                try:
                    name = item[0]
                    url = item[1]
                    url = url.split('&tr')[0]
                    quality, info = source_utils.get_release_quality(name, url)
                    info.insert(0, item[2])
                    info = ' | '.join(info)

                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': item[3],
                        'name': name
                    })
                except:
                    log_utils.log('glodls0_exc', 1)
                    pass

            return sources
        except:
            log_utils.log('glodls1_exc', 1)
            return sources
Example 17
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                                       if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ',
                           query).lower()

            url = urljoin(self.base_link, self.search_link % quote_plus(query))

            #r = client.request(url)
            #r = requests.get(url).content
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace').replace('&nbsp;', ' ')
            r = client.parseDOM(
                r,
                'div',
                attrs={'style': 'display:table;width:100%;text-align:left'})
            posts = client.parseDOM(r, 'div', attrs={'class': 'one_result'})
            #log_utils.log('posts_is: '+str(posts))
            for post in posts:

                links = client.parseDOM(post,
                                        'div',
                                        attrs={'class': 'fa fa-magnet'})[0]
                url = client.parseDOM(links, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url).split('&tr=')[0]
                name = url.split('&dn=')[1]
                if query not in cleantitle.get_title(name): continue

                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = client.parseDOM(post,
                                           'span',
                                           attrs={'class': 'torrent_size'})[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': name
                })

            return sources
        except:
            log_utils.log('btdig3 - Exception', 1)
            return sources
Example 18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            try:
                links = []

                f = [
                    'S%02dE%02d' % (int(data['season']), int(data['episode']))
                ]
                t = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                           data['tvshowtitle'])
                t = t.replace("&", "")

                q = self.search_link + urllib.quote_plus('%s %s' % (t, f[0]))

                q = urlparse.urljoin(self.base_link, q)
                result = client.request(q)
                result = json.loads(result)

                result = result['results']
            except:
                links = result = []

            for i in result:
                try:
                    if not cleantitle.get(t) == cleantitle.get(i['showName']):
                        raise Exception()

                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]'
                                   ).findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f):
                        raise Exception()

                    quality = i['quality']

                    quality = quality.upper()

                    size = i['size']
                    size = float(size) / 1024
                    size = '[B]%.2f GB[/B]' % size

                    info = size

                    if '4k' in quality:
                        quality = '4K'
                    elif '1080P' in quality:
                        quality = '1080p'
                    elif '720P' in quality:
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = i['links']
                    #for x in url.keys(): links.append({'url': url[x], 'quality': quality, 'info': info})

                    links = []

                    for x in url.keys():
                        links.append({'url': url[x], 'quality': quality})

                    for link in links:
                        try:
                            url = link['url']
                            quality2 = link['quality']
                            #url = url[1]
                            #url = link
                            if len(url) > 1:
                                raise Exception()
                            url = url[0].encode('utf-8')

                            host = re.findall(
                                '([\w]+[.][\w]+)$',
                                urlparse.urlparse(
                                    url.strip().lower()).netloc)[0]
                            if host not in hostprDict:
                                raise Exception()
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality2,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                        except:
                            pass

                except:
                    pass

            return sources
        except:
            return sources
Example 19
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            self.items = []
            if url is None:
                return self._sources

            if debrid.status() is False:
                return self._sources

            self.tvsearch = '%s/sort-category-search/%s/TV/seeders/desc/1/' % (
                self.base_link, '%s')
            self.moviesearch = '%s/sort-category-search/%s/Movies/size/desc/1/' % (
                self.base_link, '%s')

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                self.title, int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            urls = []
            if 'tvshowtitle' in data:
                urls.append(self.tvsearch % (quote(query)))
                '''
                Why spam multiple pages when each page already gives plenty?

                urls.append(self.tvsearch.format(quote(query), '2'))
                urls.append(self.tvsearch.format(quote(query), '3'))
                '''
            else:
                urls.append(self.moviesearch % (quote(query)))
                '''
                Why spam multiple pages when each page already gives plenty?

                urls.append(self.moviesearch.format(quote(query), '2'))
                urls.append(self.moviesearch.format(quote(query), '3'))
                '''
            threads = []
            for url in urls:
                threads.append(workers.Thread(self._get_items, url))
            [i.start() for i in threads]
            [i.join() for i in threads]

            self.hostDict = hostDict + hostprDict
            threads2 = []
            for i in self.items:
                threads2.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads2]
            [i.join() for i in threads2]

            return self._sources
        except:
            log_utils.log('1337x_exc2', 1)
            return self._sources
Example 20
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            r = client.request(url)

            posts = re.findall('<h2 class="title">(.+?)</h2>', r,
                               re.IGNORECASE)

            hostDict = hostprDict + hostDict

            urls = []
            for item in posts:

                try:
                    link, name = re.findall('href="(.+?)" title="(.+?)"', item,
                                            re.IGNORECASE)[0]
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                    name = client.replaceHTMLCodes(name)
                    try:
                        _name = name.lower().replace('permalink to', '')
                    except:
                        _name = name

                    quality, info = source_utils.get_release_quality(
                        name, link)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            name)[-1]
                        dsize, isize = source_utils._size(size)
                    except Exception:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    links = self.links(link)
                    urls += [(i, quality, info, dsize, _name) for i in links]
                except Exception:
                    pass

            for item in urls:
                if 'earn-money' in item[0]:
                    continue

                if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                    continue
                url = client.replaceHTMLCodes(item[0])
                #url = url.encode('utf-8')
                url = ensure_text(url)

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                host = ensure_text(host)

                sources.append({
                    'source': host,
                    'quality': item[1],
                    'language': 'en',
                    'url': url,
                    'info': item[2],
                    'direct': False,
                    'debridonly': True,
                    'size': item[3],
                    'name': item[4]
                })
            return sources
        except Exception:
            return sources
Example 21
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                                       if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ',
                           query).lower()

            url = urljoin(self.base_link, self.search_link % quote_plus(query))

            r = client.request(url)
            #r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace').strip()
            posts = client.parseDOM(r,
                                    'table',
                                    attrs={
                                        'class': 'table2',
                                        'cellspacing': '0'
                                    })[1]
            posts = client.parseDOM(posts, 'tr')[1:]
            for post in posts:

                links = client.parseDOM(post, 'a', ret='href')[0]
                links = client.replaceHTMLCodes(links).lstrip('/')
                infohash = links.split('/')[0]
                name = links.split('/')[1]
                url = 'magnet:?xt=urn:btih:{}'.format(infohash)
                if query not in cleantitle.get_title(name): continue

                quality, info = source_utils.get_release_quality(name)
                try:
                    size = client.parseDOM(post,
                                           'td',
                                           attrs={'class': 'tdnormal'})[1]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': name
                })

            return sources
        except:
            log_utils.log('tdl3 - Exception', 1)
            return sources
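The hash-to-magnet construction used here also appears in Example 7. A standalone sketch with a made-up path; a strict hex character class is safer than the \w{40} used in Example 7, since \w also matches underscores:

import re

link = 'abcdef0123456789abcdef0123456789abcdef01/Some.Show.S01E01.720p.WEB'
match = re.search(r'([a-fA-F0-9]{40})', link)
if match:
    magnet = 'magnet:?xt=urn:btih:' + match.group(1)
    name = link.split('/')[1]
    print(magnet)
    print(name)  # Some.Show.S01E01.720p.WEB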
Example 22
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = data['year']
            title = cleantitle.get_query(title)
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year
            premDate = ''

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, year)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            query = query.replace(" ", "-")

            _base_link = self.base_link if int(
                year) >= 2021 else self.old_base_link

            #url = self.search_link % quote_plus(query)
            #url = urljoin(_base_link, url)

            url = _base_link + query

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')

            if r is None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = _base_link + query
                r = cfScraper.get(url).content
                r = ensure_text(r, errors='replace')

            for loopCount in range(2):
                if loopCount == 1 or (r is None and 'tvshowtitle' in data):

                    #premDate = re.sub('[ \.]', '-', data['premiered'])
                    query = re.sub(r'[\\\\:;*?"<>|/\-\']', '', title)
                    query = query.replace("&", " and ").replace(
                        "  ", " ").replace(
                            " ",
                            "-")  # throw in extra spaces around & just in case
                    #query = query + "-" + premDate

                    url = _base_link + query
                    url = url.replace('The-Late-Show-with-Stephen-Colbert',
                                      'Stephen-Colbert')

                    r = cfScraper.get(url).content
                    r = ensure_text(r, errors='replace')

                posts = client.parseDOM(r, "div", attrs={"class": "content"})
                #hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                name = str(i)
                                if hdlr in name.upper():
                                    items.append(name)
                                #elif len(premDate) > 0 and premDate in name.replace(".", "-"):
                                #items.append(name)
                            except:
                                log_utils.log('RLSBB - Exception', 1)
                                pass
                    except:
                        log_utils.log('RLSBB - Exception', 1)
                        pass

                if len(items) > 0:
                    break

            seen_urls = set()

            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = ensure_text(url)

                    if url in seen_urls:
                        continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall('([\w]+[.][\w]+)$',
                                      urlparse(
                                          host2.strip().lower()).netloc)[0]

                    if host not in hostDict:
                        continue
                    if any(x in host2
                           for x in ['.rar', '.zip', '.iso', '.part']):
                        continue

                    quality, info = source_utils.get_release_quality(host2)

                    #try:
                    #    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    #    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    #    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    #    size = '%.2f GB' % size
                    #    info.append(size)
                    #except:
                    #    pass

                    info = ' | '.join(info)

                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': host2,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    log_utils.log('RLSBB - Exception', 1)
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check
            return sources
        except:
            log_utils.log('RLSBB - Exception', 1)
            return sources
Example 23
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % quote(query)
            url = urljoin(self.base_link, url)

            html = client.request(url)
            html = html.replace('&nbsp;', ' ')

            try:
                results = client.parseDOM(html,
                                          'table',
                                          attrs={'id': 'searchResult'})
            except:
                return sources

            url2 = url.replace('/1/', '/2/')

            html2 = client.request(url2)
            html2 = html2.replace('&nbsp;', ' ')

            try:
                results += client.parseDOM(html2,
                                           'table',
                                           attrs={'id': 'searchResult'})
            except:
                return sources

            results = ''.join(results)

            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return sources

            for entry in rows:
                try:
                    try:
                        url = 'magnet:%s' % (re.findall(
                            'a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        url = str(client.replaceHTMLCodes(url).split('&tr')[0])
                    except:
                        continue

                    try:
                        name = re.findall(
                            'class="detLink" title=".+?">(.+?)</a>', entry,
                            re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        name = unquote_plus(name).replace(' ', '.').lower()

                        t = name.split(hdlr)[0].replace(
                            data['year'],
                            '').replace('(', '').replace(')', '').replace(
                                '&', 'and').replace('.US.',
                                                    '.').replace('.us.',
                                                                 '.').lower()
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue
                    except:
                        continue

                    if hdlr not in name:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            entry)[-1]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''

                    info.insert(0, isize)

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'name': name
                    })
                except:
                    continue

            return sources

        except:
            log_utils.log('tpb_exc', 1)
            return sources
Example 24
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                return sources
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = urljoin(self.base_link, self.search_link % quote_plus(query))
            r = ensure_str(cfScraper.get(url).content, errors='replace')
            #log_utils.log('ultrahd_r ' + str(r))
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
            r = [(dom_parser.parse_dom(i[0], 'a', req='href')) for i in r if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]
            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    data = ensure_text(cfScraper.get(item[0]).content, errors='replace')
                    data = client.parseDOM(data, 'div', attrs={'id': 'r-content'})[0]
                    urls = re.findall(r'\s*<u><a href="(.+?)".+?</a></u>', data, re.S)
                    try: details = client.parseDOM(data, 'div', attrs={'class': 'text_spoiler'})[0]
                    except: details = None
                    if details:
                        _zip = zip([u for u in urls if u.startswith('https://turbobit')], re.findall(r'General : (.+?)<br', details), re.findall(r'Length : (.+?) for', details))
                    else:
                        _zip = zip([u for u in urls if u.startswith('https://turbobit')], re.findall(r'/uploads/0-0-vip-(.+?).jpg', data, re.I|re.S))

                    for z in _zip:
                        try:
                            url = ensure_str(client.replaceHTMLCodes(z[0]))
                            name = ensure_str(client.replaceHTMLCodes(z[1])).replace('dual', ' dual ')
                            if 'dublaj' in name.lower(): continue

                            info = []
                            quality, info = source_utils.get_release_quality(url, name)
                            if quality == 'sd' and 'remux' in name.lower(): quality = '1080p'

                            try:
                                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', z[2])[0]
                                dsize, isize = source_utils._size(size)
                            except:
                                dsize, isize = 0.0, ''
                            info.insert(0, isize)

                            info = ' | '.join(info)
                            if any(x in url for x in ['.rar', '.zip', '.iso']):
                                raise Exception()
                            # if not 'turbobit' in url:
                                # continue
                            sources.append({'source': 'turbobit', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'size': dsize, 'name': name, 'direct': True, 'debridonly': True})
                        except:
                            log_utils.log('ultrahd_exc2', 1)
                            pass
                except:
                    log_utils.log('ultrahd_exc1', 1)
                    pass
            return sources
        except:
            log_utils.log('ultrahd_exc0', 1)
            return sources
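
Almost every sources() method above opens with the same idiom: parse_qs on the encoded url, followed by a comprehension that keeps only the first value of each key. The snippet below exercises that exact pattern on its own; the title/year values are illustrative samples only.

from urllib.parse import urlencode, parse_qs

url = urlencode({'imdb': 'tt0133093', 'title': 'The Matrix', 'year': '1999'})
data = parse_qs(url)   # {'imdb': ['tt0133093'], 'title': ['The Matrix'], 'year': ['1999']}
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print(data['title'], data['year'])   # The Matrix 1999
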
Esempio n. 25
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('%3A+', '+')

            #r = client.request(url)
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')

            posts = client.parseDOM(r, "div", attrs={"class": "postContent"})
            items = []
            for post in posts:
                try:
                    p = client.parseDOM(post, "p", attrs={"dir": "ltr"})[1:]
                    for i in p:
                        items.append(i)
                except:
                    pass

            try:
                for item in items:
                    u = client.parseDOM(item, 'a', ret='href')
                    name = re.findall('<strong>(.*?)</strong>', item,
                                      re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if cleantitle.get(t) != cleantitle.get(title): continue
                    for url in u:
                        if any(x in url for x in ['.rar', '.zip', '.iso']):
                            continue
                        quality, info = source_utils.get_release_quality(
                            name, url)
                        try:
                            size = re.findall(
                                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB|gb|mb))',
                                item, re.DOTALL)[0]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True,
                                'size': dsize,
                                'name': name
                            })
            except:
                pass
            return sources
        except:
            log_utils.log('max_rls Exception', 1)
            return sources
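
The scraper above builds its search query as either "<title> SxxEyy" for episodes or "<title> <year>" for movies, and later compares candidate names with cleantitle.get. cleantitle's source is not shown in these examples, so the normaliser below is only an assumed stand-in that lowercases and strips non-alphanumerics; the query construction mirrors the code above.

import re

def normalise(title):
    # assumed stand-in for cleantitle.get, not the real implementation
    return re.sub(r'[^a-z0-9]', '', title.lower())

def build_query(data):
    # mirrors the ternary used by the scrapers above
    if 'tvshowtitle' in data:
        return '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
    return '%s %s' % (data['title'], data['year'])

print(build_query({'tvshowtitle': 'Dark', 'season': '1', 'episode': '3'}))  # Dark S01E03
print(normalise('The Matrix') == normalise('the.matrix'))  # True: both reduce to 'thematrix'
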
Esempio n. 26
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            ref_url = url = data['url']
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            _headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/72.0'
            }
            r = client.request(url, headers=_headers)
            posts = client.parseDOM(r, 'h2', attrs={'class': 'title'})
            # zip() returns an iterator on Python 3, so materialise it as a list
            # so the emptiness check below works
            posts = list(zip(client.parseDOM(posts, 'a', ret='title'),
                             client.parseDOM(posts, 'a', ret='href')))

            if not posts:
                return sources

            for item in posts:
                try:
                    name = item[0].replace(' ', '.')
                    url = item[1]
                    r = client.request(url, headers=_headers)
                    content = client.parseDOM(r, 'div', attrs={'id': 'content'})

                    if 'tvshowtitle' in data:
                        regex = '(<strong>(.*?)</strong><br />\s?[A-Z,0-9]*?\s\|\s([A-Z,0-9,\s]*)\|\s((\d+\.\d+|\d*)\s?(?:GB|GiB|Gb|MB|MiB|Mb))?</p>(?:\s<p><a href=\".*?\" .*?_blank\">.*?</a></p>)+)'
                    else:
                        regex = '(<strong>Release Name:</strong>\s*(.*?)<br />\s?<strong>Size:</strong>\s?((\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+)\s(?:GB|GiB|Gb|MB|MiB|Mb))?<br />(.*\s)*)'

                    for match in re.finditer(
                            regex,
                            content[0].encode('ascii', errors='ignore').decode(
                                'ascii',
                                errors='ignore').replace('&nbsp;', ' ')):
                        name = str(match.group(2))
                        t = name.split(hdlr)[0].replace(
                            data['year'],
                            '').replace('(',
                                        '').replace(')',
                                                    '').replace('&', 'and')
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue

                        if hdlr not in name:
                            continue

                        if 'tvshowtitle' in data:
                            size = str(match.group(4))
                        else:
                            size = str(match.group(3))

                        links = client.parseDOM(
                            match.group(1),
                            'a',
                            attrs={'class': 'autohyperlink'},
                            ret='href')

                        for url in links:
                            try:
                                if any(x in url for x in
                                       ['.rar', '.zip', '.iso', '.sample.']):
                                    continue

                                if url in str(sources):
                                    continue

                                valid, host = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid:
                                    continue

                                host = client.replaceHTMLCodes(host)

                                quality, info = source_utils.get_release_quality(
                                    name, url)

                                try:
                                    div = 1 if size.endswith(
                                        ('GB', 'GiB', 'Gb')) else 1024
                                    size = float(
                                        re.sub('[^0-9|/.|/,]', '',
                                               size.replace(',', '.'))) / div
                                    size = '[B]%.2f GB[/B]' % size
                                    info.insert(0, size)
                                except:
                                    pass

                                info = ' | '.join(info)

                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': url,
                                    'info': info,
                                    'direct': False,
                                    'debridonly': True
                                })
                            except:
                                pass
                except:
                    pass

            return sources

        except:
            return sources
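
Several of these scrapers gate candidate links through source_utils.is_host_valid(url, hostDict) before appending them. Its implementation is not part of these examples; the sketch below is only an assumed minimal equivalent that matches the link's domain against the known hoster list and returns a (valid, host) pair, as the callers expect.

from urllib.parse import urlparse

def is_host_valid(url, host_list):
    # assumed behaviour: report whether the link's domain belongs to a known hoster
    domain = urlparse(url).netloc.lower()
    for host in host_list:
        if host.lower() in domain:
            return True, host
    return False, ''

print(is_host_valid('https://rapidgator.net/file/abc', ['rapidgator.net', 'uploaded.net']))
# (True, 'rapidgator.net')
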
Esempio n. 27
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                                       if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ',
                           query).lower()

            url = urljoin(self.base_link, self.search_link % query)

            #r = client.request(url)
            #r = requests.get(url).content
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace').replace('&nbsp;', ' ')
            r = client.parseDOM(r, 'div', attrs={'class': 'col s12'})
            posts = client.parseDOM(r, 'div')[1:]
            posts = [i for i in posts if 'magnet/' in i]
            for post in posts:

                links = client.parseDOM(post, 'a', ret='href')[0]
                # split off the 'magnet/' prefix; lstrip() strips a character set,
                # not a prefix, and could eat leading characters of the info-hash
                url = 'magnet:?xt=urn:btih:' + links.split('magnet/', 1)[-1]
                try:
                    name = client.parseDOM(post, 'a', ret='title')[0]
                    if query not in cleantitle.get_title(name): continue
                except:
                    name = ''

                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall(r'<b class="cpill .+?-pill">(.+?)</b>',
                                      post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': name
                })

            return sources
        except:
            log_utils.log('bt4g3 - Exception', 1)
            return sources
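
In the bt4g example above the info-hash is recovered from an href containing 'magnet/<hash>'; that format is inferred from the 'magnet/' filter applied to the posts. A small standalone sketch of building the magnet URI from such an href, using a prefix split rather than lstrip:

def magnet_from_href(href):
    # str.lstrip('magnet/') would strip a set of characters, not a prefix,
    # so split on the 'magnet/' marker and keep what follows
    infohash = href.split('magnet/', 1)[-1]
    return 'magnet:?xt=urn:btih:' + infohash

print(magnet_from_href('/magnet/aabbccddeeff00112233445566778899aabbccdd'))
# magnet:?xt=urn:btih:aabbccddeeff00112233445566778899aabbccdd
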