예제 #1
0
    def sources(self, url, hostDict, hostprDict):
        """Collect torrent sources for a movie/episode from the torrapi API.

        *url* is a urlencoded query string produced upstream. Returns a list
        of source dicts for the resolver. Never raises: any failure is
        logged and the list gathered so far is returned.
        """
        # Defined before the try so the except path can always return it
        # (matches the sibling scraper's layout).
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            # Episodes search by "<title> SxxEyy"; movies search by IMDb id.
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % data['imdb']
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            # The API requires a short-lived token before searching.
            token = client.getHTML(self.token)
            token = json.loads(token)["token"]
            if 'tvshowtitle' in data:
                search_link = self.tvsearch.format(token,
                                                   urllib.quote_plus(query),
                                                   'format=json_extended')
            else:
                search_link = self.msearch.format(token, data['imdb'],
                                                  'format=json_extended')
            # Rate limit: the API rejects searches fired immediately after
            # the token request.
            time.sleep(2)
            rjson = client.getHTML(search_link)
            results = json.loads(rjson)['torrent_results']
            # Loop variable renamed from `file` (shadowed the builtin) and
            # the link no longer clobbers the `url` parameter.
            for result in results:
                name = result["title"]
                quality, info = source_utils.get_release_quality(name, name)
                size = source_utils.convert_size(result["size"])
                info.append(size)
                link = result["download"]
                link = link.split('&tr')[0]  # strip tracker list from magnet
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(link)
                    if checked:
                        sources.append({
                            'source': 'Cached Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': checked,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                else:
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Torrapi Testing - Exception: \n' + str(failure))
            return sources
예제 #2
0
    def imdb_user_list(self, url):
        """Parse an IMDb user-lists page into {'name','url','context'} dicts.

        Appends each parsed list to self.list and returns it sorted by
        title key. Fetch/parse failures contribute no entries instead of
        raising.
        """
        try:
            result = client.getHTML(url)
            items = client.parseDOM(result, 'li', attrs={'class': 'ipl-zebra-list__item user-list'})
        except:
            # Bug fix: the original `pass` left `items` undefined, so the
            # loop below raised NameError on any fetch failure. Degrade to
            # "no items" instead.
            items = []

        for item in items:
            try:
                name = client.parseDOM(item, 'a')[0]
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = url.split('/list/', 1)[-1].strip('/')
                url = self.imdblist_link % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'context': url})
            except:
                # Skip malformed entries rather than aborting the page.
                pass

        self.list = sorted(self.list, key=lambda k: utils.title_key(k['name']))
        return self.list
예제 #3
0
    def imdb_person_list(self, url):
        """Scrape an IMDb people page into {'name','url','image'} dicts.

        Appends to self.list and returns it; returns None if the page
        cannot be fetched or parsed at all.
        """
        try:
            html = client.getHTML(url)
            cells = client.parseDOM(html, 'div', attrs={'class': '.+?etail'})
        except:
            return

        for cell in cells:
            try:
                person = client.parseDOM(cell, 'img', ret='alt')[0]
                person = person.encode('utf-8')

                href = client.parseDOM(cell, 'a', ret='href')[0]
                href = re.findall('(nm\d*)', href, re.I)[0]
                href = client.replaceHTMLCodes(self.person_link % href)
                href = href.encode('utf-8')

                thumb = client.parseDOM(cell, 'img', ret='src')[0]
                # Normalize the thumbnail size suffix to a 500px-wide variant.
                thumb = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', thumb)
                thumb = client.replaceHTMLCodes(thumb)
                thumb = thumb.encode('utf-8')

                self.list.append({'name': person, 'url': href, 'image': thumb})
            except:
                # Skip cells missing any expected field.
                pass

        return self.list
예제 #4
0
 def play(self, url):
     """Resolve the base64-obfuscated stream link on *url* and play it.

     Swallows all errors (returns None) — playback simply does not start.
     """
     try:
         page = client.getHTML(url)
         try:
             # Link is usually inlined as `var mustave = atob(...)`.
             encoded = re.compile('var mustave = atob\((.+?)\)').findall(
                 page)[0]
         except:
             # Otherwise follow the player iframe and look there.
             frame_url = re.compile(
                 '<iframe frameborder=0 height=100% width=100% src="(.+?php)"',
                 re.DOTALL).findall(page)[0]
             frame = client.getHTML(frame_url)
             encoded = re.compile('var mustave = atob\((.+?)\)').findall(
                 frame)[0]
         target = base64.b64decode(encoded)
         if target.startswith('/'):
             # Relative link: anchor on the site's base url.
             target = self.base_link + target
         target = '%s|User-Agent=%s&Referer=%s' % (target, self.uAgent, url)
         control.execute('PlayMedia(%s)' % target)
     except:
         return
예제 #5
0
    def getTVShowTranslation(self, thetvdb, lang):
        """Fetch the localized series name for a TheTVDB id.

        Returns the utf-8 encoded title, or None on any failure.
        """
        try:
            xml = client.getHTML(
                'https://thetvdb.com/api/%s/series/%s/%s.xml' % (
                    '7A9F0F57B6E8132D', thetvdb, lang))
            name = client.parseDOM(xml, 'SeriesName')[0]
            name = client.replaceHTMLCodes(name)
            return name.encode('utf-8')
        except:
            pass
예제 #6
0
    def imdb_list(self, url):
        """Scrape an IMDb list/watchlist page into metadata dicts.

        Substitutes date[N] placeholders in *url*, resolves watchlist urls
        to concrete list urls, then parses every lister item into a dict
        appended to self.list. Each field is scraped defensively: parse
        failures fall back to '0' instead of dropping the item.
        """
        try:
            # date[N] placeholders mean "N days before self.datetime".
            for i in re.findall('date\[(\d+)\]', url):
                url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days=int(i))).strftime('%Y-%m-%d'))

            def imdb_watchlist_id(url):
                # Watchlist pages embed their list id in a pageId meta tag.
                return client.parseDOM(client.request(url), 'meta', ret='content', attrs={'property': 'pageId'})[0]

            if url == self.imdbwatchlist_link:
                # Cache the id lookup (timeout 8640 — units per cache.get)
                # and rewrite to the direct list url.
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist_link % url

            elif url == self.imdbwatchlist2_link:
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist2_link % url

            result = client.getHTML(url)

            result = result.replace('\n', ' ')

            # Two markup generations: newer "lister-item" and older "list_item".
            items = client.parseDOM(result, 'div', attrs={'class': 'lister-item .+?'})
            items += client.parseDOM(result, 'div', attrs={'class': 'list_item.+?'})
        except:
            return

        # Build the next-page url by grafting the next link's query string
        # onto the current url; '' when there is no next page.
        try:
            next = client.parseDOM(result, 'a', ret='href', attrs={'class': '.+?ister-page-nex.+?'})

            if len(next) == 0:
                next = client.parseDOM(result, 'div', attrs={'class': 'pagination'})[0]
                next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
                next = [i[0] for i in next if 'Next' in i[1]]

            next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
            next = client.replaceHTMLCodes(next)
            next = next.encode('utf-8')
        except:
            next = ''

        for item in items:
            try:
                # Second anchor is the title link (the first is the poster).
                title = client.parseDOM(item, 'a')[1]
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                year = client.parseDOM(item, 'span', attrs={'class': 'lister-item-year.+?'})
                year += client.parseDOM(item, 'span', attrs={'class': 'year_type'})
                # NOTE(review): `year` is still a list here; findall on a
                # list raises, falling back to '0' — confirm parseDOM's
                # return type is intended to be joined first.
                try:
                    year = re.compile('(\d{4})').findall(year)[0]
                except:
                    year = '0'
                year = year.encode('utf-8')

                # Skip unreleased (future-year) titles entirely.
                if int(year) > int(self.datetime.strftime('%Y')):
                    raise Exception()

                imdb = client.parseDOM(item, 'a', ret='href')[0]
                imdb = re.findall('(tt\d*)', imdb)[0]
                imdb = imdb.encode('utf-8')

                try:
                    # Lazy-loaded poster; '0' is the sentinel for "none".
                    poster = client.parseDOM(item, 'img', ret='loadlate')[0]
                except:
                    poster = '0'
                if '/nopicture/' in poster:
                    poster = '0'
                # Normalize the size suffix to a 500px-wide variant.
                poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
                poster = client.replaceHTMLCodes(poster)
                poster = poster.encode('utf-8')

                try:
                    genre = client.parseDOM(item, 'span', attrs={'class': 'genre'})[0]
                except:
                    genre = '0'
                genre = ' / '.join([i.strip() for i in genre.split(',')])
                if genre == '':
                    genre = '0'
                genre = client.replaceHTMLCodes(genre)
                genre = genre.encode('utf-8')

                try:
                    # Last "<N> min(s)" occurrence in the raw item markup.
                    duration = re.findall('(\d+?) min(?:s|)', item)[-1]
                except:
                    duration = '0'
                duration = duration.encode('utf-8')

                # Rating: try three markup variants in order; the last
                # (data-value on an imdb-rating div) wins when present.
                rating = '0'
                try:
                    rating = client.parseDOM(item, 'span', attrs={'class': 'rating-rating'})[0]
                except:
                    pass
                try:
                    rating = client.parseDOM(rating, 'span', attrs={'class': 'value'})[0]
                except:
                    rating = '0'
                try:
                    rating = client.parseDOM(item, 'div', ret='data-value', attrs={'class': '.*?imdb-rating'})[0]
                except:
                    pass
                if rating == '' or rating == '-':
                    rating = '0'
                rating = client.replaceHTMLCodes(rating)
                rating = rating.encode('utf-8')

                # Votes live in the rating widget's title attribute,
                # e.g. "... (12,345 votes)".
                try:
                    votes = client.parseDOM(item, 'div', ret='title', attrs={'class': '.*?rating-list'})[0]
                except:
                    votes = '0'
                try:
                    votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
                except:
                    votes = '0'
                if votes == '':
                    votes = '0'
                votes = client.replaceHTMLCodes(votes)
                votes = votes.encode('utf-8')

                try:
                    mpaa = client.parseDOM(item, 'span', attrs={'class': 'certificate'})[0]
                except:
                    mpaa = '0'
                if mpaa == '' or mpaa == 'NOT_RATED':
                    mpaa = '0'
                # IMDb uses underscores (e.g. TV_MA); normalize to dashes.
                mpaa = mpaa.replace('_', '-')
                mpaa = client.replaceHTMLCodes(mpaa)
                mpaa = mpaa.encode('utf-8')

                try:
                    director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
                except:
                    director = '0'
                director = client.parseDOM(director, 'a')
                director = ' / '.join(director)
                if director == '':
                    director = '0'
                director = client.replaceHTMLCodes(director)
                director = director.encode('utf-8')

                try:
                    cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0]
                except:
                    cast = '0'
                cast = client.replaceHTMLCodes(cast)
                cast = cast.encode('utf-8')
                # Cast stays a list of names; '0' sentinel when none found.
                cast = client.parseDOM(cast, 'a')
                if cast == []:
                    cast = '0'

                # Plot: newer "text-muted" paragraph, else older
                # "item_description" div; the second try overwrites the first.
                plot = '0'
                try:
                    plot = client.parseDOM(item, 'p', attrs={'class': 'text-muted'})[0]
                except:
                    pass
                try:
                    plot = client.parseDOM(item, 'div', attrs={'class': 'item_description'})[0]
                except:
                    pass
                # Drop trailing "<span>...</span>" chrome and any tags.
                plot = plot.rsplit('<span>', 1)[0].strip()
                plot = re.sub('<.+?>|</.+?>', '', plot)
                if plot == '':
                    plot = '0'
                plot = client.replaceHTMLCodes(plot)
                plot = plot.encode('utf-8')

                self.list.append(
                    {'title': title, 'originaltitle': title, 'year': year, 'genre': genre, 'duration': duration,
                     'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'cast': cast, 'plot': plot,
                     'tagline': '0', 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'poster': poster, 'next': next})
            except:
                # Any per-item failure drops just that item.
                pass

        return self.list
예제 #7
0
    def resolveA(self, url):
        """Extract the best direct video URL (mp4/flv/m3u8) from a page.

        Tries a sequence of increasingly loose regexes against the page
        body, keeping only candidates whose path ends in a known media
        extension, then probes the survivors' headers in parallel and
        returns the largest playable one. Returns *url* unchanged on any
        failure.
        """
        try:
            r = client.getHTML(url)
            r = r.replace('\\', '')

            def _keep_media(candidates):
                # Keep urls whose last path segment has a media extension.
                # Idempotent, so re-filtering (as the original duplicated
                # code effectively did) changes nothing.
                return [
                    i for i in candidates
                    if (urlparse.urlparse(i).path).strip('/').split(
                        '/')[-1].split('.')[-1] in ['mp4', 'flv', 'm3u8']
                ]

            # Regex pairs (double-/single-quoted variants) tried in order
            # until one yields a usable candidate. This replaces nine
            # copy-pasted findall+filter stanzas with identical behavior.
            patterns = [
                ('\s*:\s*\"(http.+?)\"', '\s*:\s*\'(http.+?)\''),
                ('\s*\(\s*\"(http.+?)\"', '\s*\(\s*\'(http.+?)\''),
                ('\s*=\s*\'(http.+?)\'', '\s*=\s*\"(http.+?)\"'),
                ('\s*:\s*\"(//.+?)\"', '\s*:\s*\'(//.+?)\''),
                ('\:\"(\.+?)\"', '\:\'(\.+?)\''),
                ('\s*\(\s*\"(//.+?)\"', '\s*\(\s*\'(//.+?)\''),
                ('\s*=\s*\'(//.+?)\'', '\s*=\s*\"(//.+?)\"'),
                ('\w*:\s*\"(http.+?)\"', '\w*:\s*\'(http.+?)\''),
                ('\w*=\'([^\']*)', '\w*="([^"]*)'),
            ]
            s = []
            for a, b in patterns:
                if s:
                    break
                s = _keep_media(re.findall(a, r) + re.findall(b, r))
            if not s:
                # Last resort: explicit <source type="video..."> tags
                # (not filtered by extension, matching the original).
                s = client.parseDOM(r,
                                    'source',
                                    ret='src',
                                    attrs={'type': 'video.+?'})

            # Normalize scheme-relative and page-relative candidates.
            s = ['http:' + i if i.startswith('//') else i for i in s]
            s = [
                urlparse.urljoin(url, i) if not i.startswith('http') else i
                for i in s
            ]
            # De-duplicate while preserving first-seen order.
            s = [x for y, x in enumerate(s) if x not in s[:y]]
            self.u = []

            def request(i):
                # Header probe: keep (url, size) when it looks like video.
                try:
                    c = client.request(i, output='headers')
                    checks = ['video', 'mpegurl']
                    if any(f for f in checks if f in c['Content-Type']):
                        self.u.append((i, int(c['Content-Length'])))
                except:
                    pass

            threads = [workers.Thread(request, i) for i in s]
            [i.start() for i in threads]
            [i.join() for i in threads]
            # Largest content first; no survivors means u[0] raises
            # IndexError, which falls to the except and returns *url*.
            u = sorted(self.u, key=lambda x: x[1])[::-1]
            u = client.request(u[0][0], output='geturl', referer=url)
            return u
        except Exception:
            return url
예제 #8
0
    def sources(self, url, hostDict, hostprDict):
        """Collect torrent sources from isohunt2 search results.

        *url* is a urlencoded query string produced upstream. Returns a
        list of source dicts; failures return whatever was gathered so far
        (the outer handler also logs the traceback).
        """
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            # Token every release name must contain: SxxEyy for episodes,
            # the year for movies.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url).replace('++', '+')

            try:
                r = client.getHTML(url)
                posts = client.parseDOM(r, 'tbody')[0]
                posts = client.parseDOM(posts, 'tr')
                for post in posts:
                    links = re.compile('<a href="(/torrent_details/.+?)">\n<span>(.+?)</span>').findall(post)
                    # Loop variable renamed from `data`, which shadowed the
                    # query-string dict above.
                    for link, name in links:
                        if hdlr not in name:
                            continue
                        # Skip releases tagged as non-English.
                        if any(x in name for x in ['FRENCH', 'Ita', 'ITA', 'italian', 'Tamil', 'TRUEFRENCH', '-lat-', 'Dublado', 'Dub', 'Rus', 'Hindi']):
                            continue
                        link = urlparse.urljoin(self.base_link, link)
                        link = client.getHTML(link)
                        getsize = re.findall('Size&nbsp;(.+?)&nbsp', link, re.DOTALL)[0]
                        try:
                            size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', getsize)[0]
                            div = 1 if size.endswith('GB') else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                            size = '%.2f GB' % size
                        except BaseException:
                            size = '0'
                        link = re.findall('<a href="(https:.+?)"', link, re.DOTALL)
                        for url in link:
                            # Magnet is wrapped in a redirect url= param.
                            url = unquote(url).decode('utf8')
                            url = url.split('url=')[1].split('&tr=')[0].replace('%28', '(').replace('%29', ')')

                            quality, info = source_utils.get_release_quality(name)
                            info.append(size)
                            info = ' | '.join(info)
                            if control.setting('torrent.cache_check') == 'true':
                                cached = rd_check.rd_cache_check(url)
                                if not cached:
                                    continue
                                sources.append(
                                    {'source': 'Cached Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                     'info': info, 'direct': False, 'debridonly': True})
                            else:
                                sources.append(
                                    {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                     'direct': False, 'debridonly': True})
            except:
                # Bug fix: was a bare `return` (None); callers expect a
                # list, so return what has been gathered so far.
                return sources
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Isohunt2 Testing - Exception: \n' + str(failure))
            return sources