Example #1
 def matchAlias(self, title, aliases):
     try:
         for alias in aliases:
             if cleantitle.get(title) == cleantitle.get(alias['title']):
                 return True
         return False
     except:
         return False
Example #2
 def searchname(r):
     # NOTE: 'title' is not defined in this function; it must come from the enclosing scope
     r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
     r = [
         i for i in r
         if cleantitle.get(title) == cleantitle.get(i[1])
     ]
     # return the first matching id, or [] when nothing matched
     r = [] if r == [] else [i[0] for i in r][0]
     return r
Example #3
 def _search(self, title, year, aliases, headers):
     try:
         q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
         r = client.request(q)
         r = client.parseDOM(r, 'div', attrs={'class':'ml-img'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'img', ret='alt'))
         url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and year in i[1]][0][0]
         return url
     except:
         pass
Example #4
    def get_sources(self, link):
        try:
            url = link[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', ' ')
            if '/torrent/' not in url:
                return

            name = link[1].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', '.').replace(' ', '.')
            if any(x in url.lower() for x in ['french', 'italian', 'spanish', 'truefrench', 'dublado', 'dubbed']):
                raise Exception()

            t = name.split(self.hdlr)[0].replace(self.year, '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')
            if cleantitle.get(t) != cleantitle.get(self.title):
                return

            if self.hdlr not in name:
                return

            if not url.startswith('http'):
                link = urllib.parse.urljoin(self.base_link, url)
            else:
                link = url

            link = client.request(link)
            if link is None:
                return
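            # pull the info hash from the torrent detail page and build a magnet link from it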
            infohash = re.findall('<b>Infohash</b></td><td valign=top>(.+?)</td>', link, re.DOTALL)[0]
            url = 'magnet:?xt=urn:btih:%s&dn=%s' % (infohash, name)
            if url in str(self.sources):
                return

            try:
                seeders = int(re.findall('<font color=red>(.*?)</font>.+Seeds', link, re.DOTALL)[0].replace(',', ''))
                if self.min_seeders > seeders: 
                    return
            except:
                pass

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', link)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                size = '%.2f GB' % size
                info.insert(0, size)
            except:
                size = '0'

            info = ' | '.join(info)

            self.sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                 'info': info, 'direct': False, 'debridonly': True})

        except:
            source_utils.scraper_error('TORRENTFUNK')
            pass
Example #5
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url is None:
                return sources

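            # 'url' arrives as a urlencoded query string of metadata (title, year, ...)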
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            year = data['year']

            search_id = title.lower()
            url = urljoin(self.base_link, self.search_link % (search_id.replace(' ', '+')))
            headers = {
                'User-Agent': client.agent(),
                'Accept': '*/*',
                'Accept-Encoding': 'identity;q=1, *;q=0',
                'Accept-Language': 'en-US,en;q=0.5',
                'Connection': 'keep-alive',
                'Pragma': 'no-cache',
                'Cache-Control': 'no-cache',
                'DNT': '1'
            }

            response = requests.Session()
            r = response.get(url, headers=headers, timeout=5).text
            r = client.parseDOM(r, 'div', attrs={'class': 'container'})[1]
            items = client.parseDOM(r, 'div', attrs={'class': r'col-xs-12 col-sm-6 col-md-3 '})
            for item in items:
                movie_url = client.parseDOM(item, 'a', ret='href')[0]
                movie_title = re.compile('div class="post-title">(.+?)<', re.DOTALL).findall(item)[0]
                if cleantitle.get(title).lower() == cleantitle.get(movie_title).lower():

                    r = response.get(movie_url, headers=headers, timeout=5).text
                    year_data = re.findall('<h2 style="margin-bottom: 0">(.+?)</h2>', r, re.IGNORECASE)[0]
                    if year == year_data:
                        links = re.findall(r"<a href='(.+?)'>(\d+)p<\/a>", r)

                        for link, quality in links:

                            if not link.startswith('https:'):
                                link = 'https:' + link.replace('http:', '')
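                            # Kodi convention: headers appended after '|' are sent with the playback request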
                            link = link + '|Referer=https://iwaatch.com/movie/' + title

                            quality, info = source_utils.get_release_quality(quality, link)

                            sources.append({'source': 'Direct', 'quality': quality, 'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
            return sources
        except:
            log_utils.log('iWAATCH - Exception', 1)
            return sources
Example #6
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            self.hostDict = hostDict + hostprDict
            if url is None:
                return sources
            if debrid.status() is False: return sources
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
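            # hdlr is the tag a matching release name must contain: SxxExx for episodes, the year for movies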
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote(query))
                url = urljoin(self.base_link, url)
            else:
                url = self.moviesearch.format(quote(query))
                url = urljoin(self.base_link, url)

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                link = client.parseDOM(post, 'a', ret='href')[0]
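                # a 40-character string in the href is treated as the torrent info hash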
                hash = re.findall(r'(\w{40})', link, re.I)
                if hash:
                    url = 'magnet:?xt=urn:btih:' + hash[0]
                    name = link.split('title=')[1]
                    t = name.split(self.hdlr)[0]
                    if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title): continue
                    try:
                        y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                    except:
                        y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                    if not y == self.hdlr: continue
                    quality, info = source_utils.get_release_quality(name, name)
                    try:
                        size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                                    'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            log_utils.log('lime0 - Exception', 1)
            return sources
Example #7
def getAlternativTitle(title):
    try:
        t = cleantitle.get(title)

        r = _getAniList('/anime/search/%s' % title)
        r = [(i.get('title_romaji'), i.get('synonyms', []))
             for i in utils.json_loads_as_str(r)
             if cleantitle.get(i.get('title_english', '')) == t]
        r = [i[1][0] if i[0] == title and len(i[1]) > 0 else i[0] for i in r]
        r = [i for i in r if i and i != title][0]
        return r
    except:
        pass
Example #8
    def _get_items(self, r):
        try:
            size = re.search(r'<size>([\d]+)</size>', r).groups()[0]
            seeders = re.search(r'<seeders>([\d]+)</seeders>', r).groups()[0]
            _hash = re.search(r'<info_hash>([a-zA-Z0-9]+)</info_hash>',
                              r).groups()[0]
            name = re.search(r'<title>(.+?)</title>', r).groups()[0]
            url = 'magnet:?xt=urn:btih:%s&dn=%s' % (_hash.upper(),
                                                    quote_plus(name))
            url = url.split('&tr')[0]
            t = name.split(self.hdlr)[0]

            try:
                y = re.findall(
                    r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                    name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]',
                               name, re.I)[-1].upper()

            quality, info = source_utils.get_release_quality(name, url)

            try:
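                # <size> is given in bytes; convert to GB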
                div = 1000**3
                dsize = float(size) / div
                isize = '%.2f GB' % dsize
            except BaseException:
                dsize, isize = 0.0, ''

            info.insert(0, isize)

            info = ' | '.join(info)

            if cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(self.title):
                if y == self.hdlr:
                    self._sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'name': name
                    })

        except BaseException:
            pass
Example #9
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(':', ' ').replace(' ', '+'))
         search_results = client.request(url)
         match = re.compile('<a href="/watch/(.+?)" title="(.+?)">', re.DOTALL).findall(search_results)
         for row_url, row_title in match:
             row_url = self.base_link + '/watch/%s' % row_url
             if cleantitle.get(title) in cleantitle.get(row_title):
                 return row_url
         return
     except:
         return
Example #10
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search = cleantitle.getsearch(imdb)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search.replace(':', ' ').replace(' ', '+'))
     r = cfScraper.get(url).content
     matches = re.compile(
         '<div class="post_thumb".+?href="(.+?)"><h2 class="thumb_title">(.+?)</h2>',
         re.DOTALL).findall(r)
     for row_url, row_title in matches:
         if cleantitle.get(title) in cleantitle.get(row_title):
             return row_url
     return
     except Exception:
         return
Example #11
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(' ', '+').replace('-', '+').replace('++', '+'), year)
         headers = {'User-Agent': self.User_Agent}
         search_results = requests.get(url, headers=headers, timeout=10).content
         items = re.compile('<item>(.+?)</item>', re.DOTALL).findall(search_results)
         for item in items:
             match = re.compile('<title>(.+?)</title>.+?<link>(.+?)</link>', re.DOTALL).findall(item)
             for row_title, row_url in match:
                 if cleantitle.get(title) in cleantitle.get(row_title):
                     if year in str(row_title):
                         return row_url
         return
     except:
         return
Example #12
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         tvshowtitle = cleantitle.geturl(tvshowtitle)
         url = self.base_link + self.search_link % (tvshowtitle.replace(' ', '+').replace('-', '+').replace('++', '+'))
         page = client.request(url)
         items = client.parseDOM(page, 'div', attrs={'class': 'content-left'})
         for item in items:
             match = re.compile('<a href="(.+?)">', re.DOTALL).findall(item)
             for url in match:
                 if cleantitle.get(tvshowtitle) in cleantitle.get(url):
                     url = self.base_link + url
                     return url
         return
     except:
         failure = traceback.format_exc()
         log_utils.log('watchseriestv - Exception: \n' + str(failure))
         return
Example #13
 def search(self, title, year):
     try:
         url = urljoin(self.base_link,
                       self.search_link % (quote_plus(title)))
         headers = {'User-Agent': client.agent()}
         r = cfScraper.get(url, headers=headers).content
         r = ensure_text(r, errors='replace')
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r: return r[0]
         else: return
     except:
         log_utils.log('RMZ - Exception', 1)
         return
Example #14
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if not url:
             return
         page = client.request(url)
         items = client.parseDOM(page, 'div', attrs={'class': 'season-table-row'})
         for item in items:
             try:
                 url = re.compile('<a href="(.+?)">', re.DOTALL).findall(item)[0]
             except:
                 continue
             sea = client.parseDOM(item, 'div', attrs={'class': 'season-table-season'})[0]
             epi = client.parseDOM(item, 'div', attrs={'class': 'season-table-ep'})[0]
             if cleantitle.get(season) in cleantitle.get(sea) and cleantitle.get(episode) in cleantitle.get(epi):
                 url = self.base_link + url
                 return url
         return
     except:
         failure = traceback.format_exc()
         log_utils.log('watchseriestv - Exception: \n' + str(failure))
         return
Example #15
    def _get_items(self, url):
        items = []
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
            posts = [i for i in posts if 'racker:' not in i]  # drop rows that mention 'Tracker:'/'tracker:'
            for post in posts:
                data = client.parseDOM(post, 'a', ret='href')
                url = [i for i in data if 'magnet:' in i][0]
                name = client.parseDOM(post, 'a', ret='title')[0]
                t = name.split(self.hdlr)[0]

                if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title):
                    continue

                try:
                    y = re.findall(
                        '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                        name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall(
                        '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                        re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''

                items.append((name, url, isize, dsize))
            return items
        except:
            log_utils.log('glodls2_exc', 1)
            return items
Example #16
    def _get_items(self, url):
        try:
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title):
                    continue

                try:
                    y = re.findall(
                        '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                        name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall(
                        '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                        re.I)[-1].upper()
                if not y == self.hdlr:
                    continue

                try:
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''

                self.items.append((name, link, isize, dsize))
            return self.items
        except:
            log_utils.log('1337x_exc0', 1)
            return self.items
Example #17
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None: return self._sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = self.search_link % cleantitle.geturl(query)
            url = urljoin(self.base_link, query)
            r = client.request(url)
            posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
            posts = [
                dom_parser2.parse_dom(i.content, 'a', req='href')
                for i in posts if i
            ]
            posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content))
                     for i in posts if i]
            posts = [
                (i[0], i[1]) for i in posts
                if (cleantitle.get_simple(i[1].split(hdlr)[0]) ==
                    cleantitle.get(title) and hdlr.lower() in i[1].lower())
            ]
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.1)
            return self._sources
        except Exception:
            return self._sources
Example #18
    def get_sources(self, link):
        try:
            url = '%s%s' % (self.base_link, link)
            result = client.request(url)

            info_hash = re.findall('<kbd>(.+?)<', result, re.DOTALL)[0]
            url = 'magnet:?xt=urn:btih:' + info_hash
            name = re.findall('<h3 class="card-title">(.+?)<', result, re.DOTALL)[0]
            name = unquote_plus(name).replace(' ', '.').replace('Original.Name:.', '').lower()
            #url = '%s%s%s' % (url1, '&dn=', str(name))

            t = name.split(self.hdlr)[0].replace(self.year, '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')
            if cleantitle.get(t) != cleantitle.get(self.title):
                return

            if self.hdlr not in name:
                return

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall('<div class="col-3">File size:</div><div class="col">(.+?)<', result, re.DOTALL)[0]
                size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', size)[0]
                dsize, isize = source_utils._size(size)
            except:
                dsize, isize = 0.0, ''

            info.insert(0, isize)

            info = ' | '.join(info)

            self.sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})

        except:
            log_utils.log('YourBT4 - Exception', 1)
            pass
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            items = []
            try:
                url = self.search_link % quote_plus(query)
                url = urljoin(self.base_link, url)

                r = cfScraper.get(url).content
                r = ensure_text(r, errors='replace')

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []

                for post in posts:
                    try:
                        u = client.parseDOM(post,
                                            "div",
                                            attrs={"class": "postContent"})
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            u[0])[0]
                        u = client.parseDOM(u, "h2")
                        u = client.parseDOM(u, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i, size) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): continue

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    try:
                        dsize, isize = source_utils._size(item[2])
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = ensure_text(url)

                    host = re.findall('([\w]+[.][\w]+)$',
                                      urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'name': name
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #20
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            try:
                r = client.request(url)
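                # pair each magnet button with the size cell from the same results row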
                links = zip(client.parseDOM(r, 'a', attrs={'class': 'btn btn-default magnet-button stats-action banner-button'}, ret='href'), client.parseDOM(r, 'td', attrs={'class': 'size'}))

                for link in links:
                    url = link[0].replace('&amp;', '&')
                    url = re.sub(r'(&tr=.+)&dn=', '&dn=', url)  # some bitlord links put &tr= before &dn=
                    url = url.split('&tr=')[0]
                    if 'magnet' not in url:
                        continue

                    if any(x in url.lower() for x in ['french', 'italian', 'spanish', 'truefrench', 'dublado', 'dubbed']):
                        continue

                    name = url.split('&dn=')[1]
                    t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in name:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
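                        # the size cell holds a bare number; assume 1-digit values are GB, longer ones MB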
                        size = link[1]
                        size = str(size) + ' GB' if len(str(size)) == 1 else str(size) + ' MB'
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''

                    info.insert(0, isize)
                    info = ' | '.join(info)

                    sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                    'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
                return sources

            except:
                return sources

        except:
            from prophetscrapers.modules import log_utils
            log_utils.log('bitlord - Exception', 1)
            return sources
Example #21
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            host_dict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            if 'season' in data:
                season = data['season']
            if 'episode' in data:
                episode = data['episode']
            year = data['year']

            r = client.request(self.base_link, output='extended', timeout='10')
            #r = cfScraper.get(self.base_link).content
            cookie = r[3]
            headers = r[2]
            result = r[0]
            headers['Cookie'] = cookie

            query = urljoin(
                self.base_link,
                self.search_link % quote_plus(cleantitle.getsearch(title)))
            query2 = urljoin(self.base_link,
                             self.search_link % quote_plus(title).lower())
            r = client.request(query, headers=headers, XHR=True)
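            # a near-empty response means no hits; retry with the plain lowercase title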
            if len(r) < 20:
                r = client.request(query2, headers=headers, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            if 'tvshowtitle' in data:
                cltitle = cleantitle.get(title + 'season' + season)
                cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                r = [
                    i for i in r if cltitle == cleantitle.get(i[1])
                    or cltitle2 == cleantitle.get(i[1])
                ]
                vurl = '%s%s-episode-%s' % (self.base_link, str(
                    r[0][0]).replace('/info', ''), episode)
                vurl2 = None

            else:
                cltitle = cleantitle.getsearch(title)
                cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                r = [
                    i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                    or cltitle == cleantitle.getsearch(i[1])
                ]
                vurl = '%s%s-episode-0' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))
                vurl2 = '%s%s-episode-1' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))

            r = client.request(vurl, headers=headers)
            headers['Referer'] = vurl

            slinks = client.parseDOM(r,
                                     'div',
                                     attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
            if len(slinks) == 0 and vurl2 is not None:
                r = client.request(vurl2, headers=headers)
                headers['Referer'] = vurl2
                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
            slinks = [
                slink
                if slink.startswith('http') else 'https:{0}'.format(slink)
                for slink in slinks
            ]

            for url in slinks:
                url = client.replaceHTMLCodes(url)
                #url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, host_dict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except:
            log_utils.log('gowatchseries3 - Exception', 1)
            return sources
Example #22
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle']
            title = cleantitle.get_query(title)

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))

            query = '%s S%02dE%02d' % (
                title,
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                title,
                data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)

            url = self.search_link % (quote_plus(query).replace('+', '-'))
            url = urljoin(self.base_link, url)
            html = client.request(url)

            try:
                results = client.parseDOM(html, 'table', attrs={'class': 'forum_header_border'})
                for result in results:
                    if 'magnet:' in result:
                        results = result
                        break
            except Exception:
                return sources
            rows = re.findall('<tr name="hover" class="forum_header_border">(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return sources

            for entry in rows:
                try:
                    try:
                        columns = re.findall('<td\s.+?>(.+?)</td>', entry, re.DOTALL)
                        derka = re.findall('href="magnet:(.+?)" class="magnet" title="(.+?)"', columns[2], re.DOTALL)[0]
                        name = derka[1]
                        link = 'magnet:%s' % (str(client.replaceHTMLCodes(derka[0]).split('&tr')[0]))
                        t = name.split(hdlr)[0]
                        if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(title):
                            continue
                    except Exception:
                        continue
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr:
                        continue

                    quality, info = source_utils.get_release_quality(name, name)

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', name)[-1]
                        dsize, isize = source_utils._size(size)
                    except Exception:
                        dsize, isize = 0.0, ''

                    info.insert(0, isize)

                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                    'url': link, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
                except Exception:
                    continue

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except:
            log_utils.log('eztv_exc', 1)
            return sources
Example #23
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % quote(query)
            url = urljoin(self.base_link, url)

            html = client.request(url)
            html = html.replace('&nbsp;', ' ')

            try:
                results = client.parseDOM(html,
                                          'table',
                                          attrs={'id': 'searchResult'})
            except:
                return sources

            url2 = url.replace('/1/', '/2/')

            html2 = client.request(url2)
            html2 = html2.replace('&nbsp;', ' ')

            try:
                results += client.parseDOM(html2,
                                           'table',
                                           attrs={'id': 'searchResult'})
            except:
                return sources

            results = ''.join(results)

            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return sources

            for entry in rows:
                try:
                    try:
                        url = 'magnet:%s' % (re.findall(
                            'a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        url = str(client.replaceHTMLCodes(url).split('&tr')[0])
                    except:
                        continue

                    try:
                        name = re.findall(
                            'class="detLink" title=".+?">(.+?)</a>', entry,
                            re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        name = unquote_plus(name).replace(' ', '.').lower()

                        t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.').lower()
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue
                    except:
                        continue

                    if hdlr not in name:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            entry)[-1]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''

                    info.insert(0, isize)

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'name': name
                    })
                except:
                    continue

            return sources

        except:
            log_utils.log('tpb_exc', 1)
            return sources
Example #24
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            ref_url = url = data['url']
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            _headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/72.0'
            }
            r = client.request(url, headers=_headers)
            posts = client.parseDOM(r, 'h2', attrs={'class': 'title'})
            posts = zip(client.parseDOM(posts, 'a', ret='title'),
                        client.parseDOM(posts, 'a', ret='href'))

            if posts == []:
                return sources

            for item in posts:
                try:
                    name = item[0].replace(' ', '.')
                    url = item[1]
                    r = client.request(url, headers=_headers)
                    content = client.parseDOM(r, 'div', attrs={'id': 'content'})

                    if 'tvshowtitle' in data:
                        regex = '(<strong>(.*?)</strong><br />\s?[A-Z,0-9]*?\s\|\s([A-Z,0-9,\s]*)\|\s((\d+\.\d+|\d*)\s?(?:GB|GiB|Gb|MB|MiB|Mb))?</p>(?:\s<p><a href=\".*?\" .*?_blank\">.*?</a></p>)+)'
                    else:
                        regex = '(<strong>Release Name:</strong>\s*(.*?)<br />\s?<strong>Size:</strong>\s?((\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+)\s(?:GB|GiB|Gb|MB|MiB|Mb))?<br />(.*\s)*)'

                    for match in re.finditer(
                            regex,
                            content[0].encode('ascii', errors='ignore').decode(
                                'ascii', errors='ignore').replace('&nbsp;', ' ')):
                        name = str(match.group(2))
                        t = name.split(hdlr)[0].replace(
                            data['year'],
                            '').replace('(',
                                        '').replace(')',
                                                    '').replace('&', 'and')
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue

                        if hdlr not in name:
                            continue

                        if 'tvshowtitle' in data:
                            size = str(match.group(4))
                        else:
                            size = str(match.group(3))

                        links = client.parseDOM(
                            match.group(1),
                            'a',
                            attrs={'class': 'autohyperlink'},
                            ret='href')

                        for url in links:
                            try:
                                if any(x in url for x in
                                       ['.rar', '.zip', '.iso', '.sample.']):
                                    continue

                                if url in str(sources):
                                    continue

                                valid, host = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid:
                                    continue

                                host = client.replaceHTMLCodes(host)
                                host = host.encode('utf-8')

                                quality, info = source_utils.get_release_quality(
                                    name, url)

                                try:
                                    div = 1 if size.endswith(
                                        ('GB', 'GiB', 'Gb')) else 1024
                                    size = float(
                                        re.sub('[^0-9|/.|/,]', '',
                                               size.replace(',', '.'))) / div
                                    size = '[B]%.2f GB[/B]' % size
                                    info.insert(0, size)
                                except:
                                    pass

                                info = ' | '.join(info)

                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': url,
                                    'info': info,
                                    'direct': False,
                                    'debridonly': True
                                })
                            except:
                                pass
                except:
                    pass

            return sources

        except:
            return sources
Example #25
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            #r = client.request(self.base_link)
            #search_base = client.parseDOM(r, 'form', ret='action')[0]
            #log_utils.log(search_base)
            #url = urljoin(search_base, self.search_link)
            url = urljoin(self.base_link, self.search_link)
            url = url % quote_plus(query)

            r = client.request(url)

            r = client.parseDOM(r, 'h2')

            z = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            # movies and episodes are handled the same way here: (title, link) pairs
            posts = [(i[1], i[0]) for i in z]

            host_dict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    r = client.request(post[1])
                    r = ensure_text(r, errors='replace')
                    r = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'entry-content cf'})[0]

                    if 'tvshowtitle' in data:
                        z = zip(
                            re.findall(r'<p><b>(%s.+?)</b>' % title, r,
                                       re.I | re.S),
                            re.findall(r'<ul>(.+?)</ul>', r, re.S))
                        for f in z:
                            u = re.findall(r'\'(http.+?)\'',
                                           f[1]) + re.findall(
                                               r'\"(http.+?)\"', f[1])
                            u = [i for i in u if '/embed/' not in i]
                            t = f[0]
                            try:
                                s = re.findall(
                                    r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))',
                                    t)[0]
                            except:
                                s = '0'
                            items += [(t, i, s) for i in u]

                    else:
                        t = ensure_text(post[0], errors='replace')
                        u = re.findall(r'\'(http.+?)\'', r) + re.findall(
                            '\"(http.+?)\"', r)
                        u = [i for i in u if '/embed/' not in i]
                        try:
                            s = re.findall(
                                r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))',
                                r)[0]
                        except:
                            s = '0'
                        items += [(t, i, s) for i in u]

                except:
                    log_utils.log('MYVIDEOLINK ERROR', 1)
                    pass

            for item in items:
                try:
                    url = ensure_text(item[1])
                    url = client.replaceHTMLCodes(url)

                    void = ('.rar', '.zip', '.iso', '.part', '.png', '.jpg',
                            '.bmp', '.gif', 'sub', 'srt')
                    if url.endswith(void):
                        continue

                    name = ensure_text(item[0], errors='replace')
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name, flags=re.I)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        continue

                    y = re.findall(
                        r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr:
                        continue

                    valid, host = source_utils.is_host_valid(url, host_dict)
                    if not valid:
                        continue
                    host = client.replaceHTMLCodes(host)

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = item[2]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False,
                        'size': dsize,
                        'name': name
                    })
                except:
                    log_utils.log('MYVIDEOLINK ERROR', 1)
                    pass

            return sources
        except:
            log_utils.log('MYVIDEOLINK ERROR', 1)
            return sources
Example #26
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            content_type = 'episode' if 'tvshowtitle' in data else 'movie'

            years = (data['year'], str(int(data['year']) + 1),
                     str(int(data['year']) - 1))

            if content_type == 'movie':
                title = cleantitle.get(data['title'])
                localtitle = cleantitle.get(data['localtitle'])
                ids = [data['imdb']]

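                # look the movie up in the local Kodi library via JSON-RPC, matching the year +/- 1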
                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle", "file"]}, "id": 1}'
                    % years)
                r = ensure_text(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['movies']

                r = [
                    i for i in r if str(i['imdbnumber']) in ids or title in [
                        cleantitle.get(ensure_str(i['title'])),
                        cleantitle.get(ensure_str(i['originaltitle']))
                    ]
                ]
                r = [
                    i for i in r if not ensure_str(i['file']).endswith('.strm')
                ][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails", "file"], "movieid": %s }, "id": 1}'
                    % str(r['movieid']))
                r = ensure_text(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['moviedetails']
            elif content_type == 'episode':
                title = data['tvshowtitle']
                localtitle = data['localtvshowtitle']
                season, episode = data['season'], data['episode']

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title"]}, "id": 1}'
                    % years)
                r = ensure_text(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['tvshows']

                r = [i for i in r if title in (ensure_str(i['title']))][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["file"], "tvshowid": %s }, "id": 1}'
                    % (str(season), str(episode), str(r['tvshowid'])))
                r = ensure_text(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodes']

                r = [
                    i for i in r if not ensure_str(i['file']).endswith('.strm')
                ][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails", "file"], "episodeid": %s }, "id": 1}'
                    % str(r['episodeid']))
                r = ensure_text(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodedetails']

            url = ensure_str(r['file'])

            try:
                qual = int(r['streamdetails']['video'][0]['width'])
            except:
                qual = -1

            if qual >= 2160: quality = '4K'
            elif qual >= 1920: quality = '1080p'
            elif qual >= 1280: quality = '720p'
            else: quality = 'SD'

            info = []

            try:
                f = control.openFile(url)
                s = f.size()
                f.close()
                s = '%.2f GB' % (float(s) / 1024 / 1024 / 1024)
                info.append(s)
            except:
                pass

            try:
                c = r['streamdetails']['video'][0]['codec']
                if c == 'avc1': c = 'h264'
                info.append(c)
            except:
                pass

            try:
                ac = r['streamdetails']['audio'][0]['codec']
                if ac == 'dca': ac = 'dts'
                if ac == 'dtshd_ma': ac = 'dts-hd ma'
                info.append(ac)
            except:
                pass

            try:
                ach = r['streamdetails']['audio'][0]['channels']
                # map common channel counts to labels; fall back to str()
                # so the join below never receives an int
                ach = {1: 'mono', 2: '2.0', 6: '5.1', 8: '7.1'}.get(ach, str(ach))
                info.append(ach)
            except:
                pass

            info = ' | '.join(info)

            sources.append({
                'source': '',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'local': True,
                'direct': True,
                'debridonly': False
            })

            return sources
        except:
            log_utils.log('lib_scraper_fail0', 1)
            return sources
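As a standalone illustration of the width-to-quality mapping the library scraper above relies on, here is a minimal sketch; the helper name width_to_quality and the treatment of unknown widths are assumptions, not part of the scraper:

import re  # not needed here; the mapping is pure integer comparison

def width_to_quality(width):
    # Map a video stream's width in pixels to the quality labels
    # used throughout these scrapers; -1 (unknown) falls through to SD.
    if width >= 2160:
        return '4K'
    if width >= 1920:
        return '1080p'
    if width >= 1280:
        return '720p'
    return 'SD'

# width_to_quality(3840) -> '4K'; width_to_quality(1920) -> '1080p'
# width_to_quality(-1)   -> 'SD'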
Example #27
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            try:
                links = []

                f = [
                    'S%02dE%02d' % (int(data['season']), int(data['episode']))
                ]
                t = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                           data['tvshowtitle'])
                t = t.replace("&", "")

                q = self.search_link + urllib.quote_plus('%s %s' % (t, f[0]))

                q = urlparse.urljoin(self.base_link, q)
                result = client.request(q)
                result = json.loads(result)

                result = result['results']
            except:
                links = result = []

            for i in result:
                try:
                    if not cleantitle.get(t) == cleantitle.get(i['showName']):
                        raise Exception()

                    y = i['release']
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]', y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f):
                        raise Exception()

                    quality = i['quality']

                    quality = quality.upper()

                    size = i['size']
                    size = float(size) / 1024
                    size = '[B]%.2f GB[/B]' % size

                    info = size

                    if '4K' in quality:
                        quality = '4K'
                    elif '1080P' in quality:
                        quality = '1080p'
                    elif '720P' in quality:
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = i['links']

                    links = []

                    for x in url.keys():
                        links.append({'url': url[x], 'quality': quality})

                    for link in links:
                        try:
                            url = link['url']
                            quality2 = link['quality']
                            if len(url) > 1:
                                raise Exception()
                            url = url[0].encode('utf-8')

                            host = re.findall(
                                '([\w]+[.][\w]+)$',
                                urlparse.urlparse(
                                    url.strip().lower()).netloc)[0]
                            if host not in hostprDict:
                                raise Exception()
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality2,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                        except:
                            pass

                except:
                    pass

            return sources
        except:
            return sources
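The query built above strips filesystem- and URL-hostile characters from the show title before quoting it into the search endpoint; a small self-contained sketch of that sanitisation step (the function name is illustrative):

import re

def sanitize_query(title):
    # Same character-stripping pattern the scrapers above apply
    # before quoting the title into a search URL.
    title = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', title)
    return title.replace('&', '')

# sanitize_query("Mr. Robot: Part 1") -> "Mr. Robot Part 1"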
Example #28
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = cleantitle.get_query(data['title'])

            query = '%s %s' % (title, data['year'])


            url = self.search_link % quote(query)
            url = urljoin(self.base_link, url)
            html = client.request(url)
            try:
                results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
            except Exception:
                return sources

            items = re.findall(
                'class="browse-movie-bottom">(.+?)</div>\s</div>', results,
                re.DOTALL)
            if not items:
                return sources

            for entry in items:
                try:
                    try:
                        link, name = re.findall(
                            '<a href="(.+?)" class="browse-movie-title">(.+?)</a>',
                            entry, re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        if not cleantitle.get(title) in cleantitle.get(name):
                            continue
                    except Exception:
                        continue
                    y = entry[-4:]
                    if not y == data['year']:
                        continue

                    response = client.request(link)
                    try:
                        entries = client.parseDOM(
                            response, 'div', attrs={'class': 'modal-torrent'})
                        for torrent in entries:
                            link, name = re.findall(
                                'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                                torrent, re.DOTALL)[0]
                            try:
                                _name = name.lower().replace('download', '').replace('magnet', '')
                            except:
                                _name = name
                            link = 'magnet:%s' % link
                            link = str(
                                client.replaceHTMLCodes(link).split('&tr')[0])
                            quality, info = source_utils.get_release_quality(
                                name, link)
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                                    torrent)[-1]
                                dsize, isize = source_utils._size(size)
                            except Exception:
                                dsize, isize = 0.0, ''
                            info.insert(0, isize)
                            info = ' | '.join(info)

                            sources.append({
                                'source': 'Torrent',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'info': info,
                                'direct': False,
                                'debridonly': True,
                                'size': dsize,
                                'name': _name
                            })
                    except Exception:
                        continue
                except Exception:
                    continue

            return sources
        except:
            from prophetscrapers.modules import log_utils
            log_utils.log('Ytsam - Exception', 1)
            return sources
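source_utils._size above is expected to turn a human-readable size like '1.4 GB' into a sortable float plus a display string; a hedged sketch of that behaviour (the real helper may differ in detail):

import re

def _size(size_text):
    # Parse '1.4 GB' / '700 MiB' style strings into (GB as float, label).
    match = re.search('(\d+(?:[.,]\d+)?)\s*(GiB|MiB|GB|MB)', size_text)
    if not match:
        return 0.0, ''
    value = float(match.group(1).replace(',', '.'))
    if match.group(2) in ('MiB', 'MB'):
        value = value / 1024
    return value, '%.2f GB' % value

# _size('700 MiB') -> (0.68..., '0.68 GB')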
Example #29
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']

            hdlr = data['year']

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', title)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'video_title'})

            items = []

            for post in posts:
                try:
                    data = dom_parser2.parse_dom(post,
                                                 'a',
                                                 req=['href', 'title'])[0]
                    t = data.content
                    y = re.findall('\((\d{4})\)', data.attrs['title'])[0]
                    qual = data.attrs['title'].split('-')[1]
                    link = data.attrs['href']

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    if not y == hdlr:
                        raise Exception()

                    items += [(link, qual)]

                except Exception:
                    pass
            for item in items:
                try:
                    r = client.request(item[0]) if item[0].startswith(
                        'http') else client.request(
                            urlparse.urljoin(self.base_link, item[0]))

                    qual = client.parseDOM(r, 'h1')[0]

                    url = re.findall('''frame_url\s*=\s*["']([^']+)['"]\;''',
                                     r, re.DOTALL)[0]
                    url = url if url.startswith('http') else urlparse.urljoin(
                        'https://', url)

                    ua = {
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
                    }

                    postID = url.split('/embed/')[1]
                    post_link = 'https://vidlink.org/embed/update_views'
                    payload = {'postID': postID}
                    headers = ua
                    headers['X-Requested-With'] = 'XMLHttpRequest'
                    headers['Referer'] = url

                    ihtml = client.request(post_link,
                                           post=payload,
                                           headers=headers)
                    linkcode = jsunpack.unpack(ihtml).replace('\\', '')
                    try:
                        extra_link = re.findall(r'var oploadID="(.+?)"',
                                                linkcode)[0]
                        oload = 'https://openload.co/embed/' + extra_link
                        sources.append({
                            'source': 'openload.co',
                            'quality': '1080p',
                            'language': 'en',
                            'url': oload,
                            'direct': False,
                            'debridonly': False
                        })

                    except Exception:
                        pass

                    give_me = re.findall(r'var file1="(.+?)"', linkcode)[0]
                    stream_link = give_me.split('/pl/')[0]
                    headers = {
                        'Referer':
                        'https://vidlink.org/',
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
                    }
                    r = client.request(give_me, headers=headers)
                    my_links = re.findall(
                        r'[A-Z]{10}=\d+x(\d+)\W[A-Z]+=\"\w+\"\s+\/(.+?)\.', r)
                    for height, link in my_links:
                        if '1080' in height:
                            quality = '1080p'
                        elif '720' in height:
                            quality = '720p'
                        else:
                            quality = 'SD'

                        final = stream_link + '/' + link + '.m3u8'
                        sources.append({
                            'source': 'GVIDEO',
                            'quality': quality,
                            'language': 'en',
                            'url': final,
                            'direct': True,
                            'debridonly': False
                        })

                except Exception:
                    pass

            return sources
        except Exception:
            return sources
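The update_views call above is an AJAX-style POST identified by the X-Requested-With and Referer headers; a minimal standard-library sketch of an equivalent request (the helper name and header values are illustrative, not part of the scraper):

import urllib.parse
import urllib.request

def post_xhr(url, payload, referer):
    # Send a form-encoded POST that looks like the browser's XMLHttpRequest.
    data = urllib.parse.urlencode(payload).encode('utf-8')
    req = urllib.request.Request(url, data=data, headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': referer,
    })
    with urllib.request.urlopen(req) as response:
        return response.read().decode('utf-8', errors='ignore')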
Example #30
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urllib.parse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            imdb_id = data['imdb']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urllib.parse.urljoin(self.base_link, self.search_link % urllib.parse.quote_plus(query))

            r = client.request(url)

            result = json.loads(r)
            result = result['results']

            items = []

            for item in result:
                try:
                    name = item['title']
                    url = item['magnet']

                    size = ''
                    try:
                        size = item['size']
                        size = float(size)/(1024**3)
                        size = '%.2f GB' % size
                    except:
                        pass

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if cleantitle.get(t) != cleantitle.get(title):
                        raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|(?:S|s)\d*(?:E|e)\d*|(?:S|s)\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if y != hdlr:
                        raise Exception()

                    quality, info = source_utils.get_release_quality(name, name)
                    info.append(size)
                    info = ' | '.join(info)

                    sources.append(
                        {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                         'direct': False, 'debridonly': True})
                    
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('---SolidTorrents Testing - Exception: \n' + str(failure))
            return sources
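Most of these scrapers repeat the same guard: pull the last year or SxxEyy token out of the release name and compare it against hdlr. A compact sketch of that check (the function name is assumed):

import re

def release_matches(name, hdlr):
    # hdlr is either a year ('2019') or an episode tag ('S02E05').
    tags = re.findall('[\.\(\[\s](\d{4}|[Ss]\d+[Ee]\d+)[\.\)\]\s]', name)
    return bool(tags) and tags[-1].upper() == hdlr.upper()

# release_matches('Show.Name.S02E05.1080p.WEB.x264', 'S02E05') -> True
# release_matches('Movie.2019.720p.BluRay', '2019') -> True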