Example #1
    def searchMovie(self, title, year, aliases, headers):
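        # Probe each alias as a /full-movie/ URL, then retry with the year appended.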
        try:
            url = None
            for alias in aliases:
                url = '%s/full-movie/%s' % (self.base_link,
                                            cleantitle.geturl(alias['title']))
                url = client.request(url,
                                     headers=headers,
                                     output='geturl',
                                     timeout='10')
                if url is not None and url != self.base_link:
                    break
            if url is None:
                for alias in aliases:
                    url = '%s/full-movie/%s-%s' % (self.base_link,
                                                   cleantitle.geturl(
                                                       alias['title']), year)
                    url = client.request(url,
                                         headers=headers,
                                         output='geturl',
                                         timeout='10')
                    if url is not None and url != self.base_link:
                        break

            return url
        except:
            log_utils.log('cartoonhd - Exception', 1)
            return
Example #2
    def searchMovie(self, title, year, aliases):
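        # Query the site's search page, then match results by alias and year.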
        try:
            #title = cleantitle.normalize(title)
            url = urljoin(self.base_link,
                          self.search_link % cleantitle.geturl(title))
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None

            if url is None:
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]

            url = urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            log_utils.log('123movies2 exception', 1)
            return
Example #3
    def _get_sources(self, item):
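        # Scrape the torrent detail page for its magnet link and queue a source entry.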
        try:
            name = item[0]
            quality, info = source_utils.get_release_quality(name, item[1])
            info.insert(0, item[2])
            data = cfScraper.get(item[1]).content
            data = ensure_text(data, errors='replace')
            data = client.parseDOM(data, 'a', ret='href')
            url = [i for i in data if 'magnet:' in i][0]
            url = url.split('&tr')[0]
            info = ' | '.join(info)

            self._sources.append({
                'source': 'Torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': item[3],
                'name': name
            })
        except:
            log_utils.log('1337x_exc1', 1)
Example #4
 def sources(self, url, hostDict, hostprDict):
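     # Extract the watch-button links and keep those on recognized hosts.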
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         r = client.request(url)
         r = re.compile(
             'class="watch-button" data-actuallink="(.+?)"').findall(r)
         for url in r:
             if url in str(sources):
                 continue
             quality, info = source_utils.get_release_quality(url, url)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         log_utils.log('Watchepisodes4 Exception', 1)
         return sources
Example #5
 def sources(self, url, hostDict, hostprDict):
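     # Search the site, match "<title> (<year>)", then collect host links from the download page.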
     sources = []
     try:
         if url is None:
             return sources
         data = parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         q = cleantitle.get_gan_url(data['title'])
         url = self.base_link + self.search_link % q
         r = cfScraper.get(url).content
         r = ensure_text(r)
         v = re.compile(r'<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>').findall(r)
         for url, check, qual in v:
             t = '%s (%s)' % (data['title'], data['year'])
             if t in check:
                 key = url.split('-hd')[1]
                 url = 'https://fmovies.tw/moviedownload.php?q=%s' % key
                 r = cfScraper.get(url).content
                 r = ensure_text(r)
                 r = re.compile(r'<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
                 for url in r:
                     if any(x in url for x in ['.rar']): continue
                     #quality, _ = source_utils.get_release_quality(qual, url)
                     valid, host = source_utils.is_host_valid(url, hostDict)
                     if valid:
                         #info = ' | '.join(info)
                         sources.append(
                             {'source': host, 'quality': '720p', 'language': 'en', 'url': url,
                              'direct': False, 'debridonly': False})
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('Ganool Testing - Exception: \n' + str(failure))
         return sources
Example #6
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
         url = urlencode(url)
         return url
     except:
         log_utils.log('plockers0 Exception', 1)
         return
Example #7
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
         url = urlencode(url)
         return url
     except:
         log_utils.log('plockers1 Exception', 1)
         return
Example #8
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         url = urlencode(url)
         return url
     except:
         log_utils.log('gowatchseries0 - Exception', 1)
         return
Example #9
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         url = urlencode(url)
         return url
     except:
         log_utils.log('filmxy', 1)
         return
Example #10
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         url = urlencode(url)
         return url
     except BaseException:
         failure = traceback.format_exc()
         log_utils.log('Ganool Testing - Exception: \n' + str(failure))
         return
Example #11
    def _get_sources(self, name, url):
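        # Collect every http/ftp/https link from the page's <pre class="links"> blocks.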
        try:
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            name = client.replaceHTMLCodes(name)
            try:
                _name = name.lower().replace('rr', '').replace('nf', '').replace('ul', '').replace('cu', '')
            except:
                _name = name
            l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
            s = ''
            for i in l:
                s += i.content
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not i.endswith(('.rar', '.zip', '.iso',
                                                   '.idx', '.sub', '.srt'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': _name
                })
        except:
            log_utils.log('RMZ - Exception', 1)
Example #12
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         return urlencode({
             'imdb': imdb,
             'title': title,
             'localtitle': localtitle,
             'year': year
         })
     except:
         log_utils.log('lib_scraper_fail1', 1)
         return
Example #13
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        if debrid.status() is False:
            return

        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urlencode(url)
            return url
        except:
            log_utils.log('ZOOGLE - Exception', 1)
            return
Example #14
    def movie(self, imdb, title, localtitle, aliases, year):
        if debrid.status() is False:
            return

        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urlencode(url)
            return url
        except:
            log_utils.log('1337x - Exception', 1)
            return
Example #15
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
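     # Add episode details to the url dict parsed from the query string.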
     try:
         if not url: return
         url = parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
         url = urlencode(url)
         return url
     except:
         log_utils.log('nyaa2 - Exception', 1)
         return
Example #16
    def sources(self, url, hostDict, hostprDict):
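        # Fetch release/episode rows, then resolve each one on a worker thread.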

        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.get_query(title)

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            for i in threads:
                i.start()

            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.1)
            return self.sources
        except:
            log_utils.log('RMZ - Exception', 1)
            return self.sources
Example #17
    def sources(self, url, hostDict, hostprDict):
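        # Search the site, verify title and year, then emit direct links per quality.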
        sources = []
        try:

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            year = data['year']

            search_id = title.lower()
            url = urljoin(self.base_link, self.search_link % (search_id.replace(' ', '+')))
            headers = {
                'User-Agent': client.agent(),
                'Accept': '*/*',
                'Accept-Encoding': 'identity;q=1, *;q=0',
                'Accept-Language': 'en-US,en;q=0.5',
                'Connection': 'keep-alive',
                'Pragma': 'no-cache',
                'Cache-Control': 'no-cache',
                'DNT': '1'
            }

            session = requests.Session()
            r = session.get(url, headers=headers, timeout=5).text
            r = client.parseDOM(r, 'div', attrs={'class': 'container'})[1]
            items = client.parseDOM(r, 'div', attrs={'class': r'col-xs-12 col-sm-6 col-md-3 '})
            for item in items:
                movie_url = client.parseDOM(item, 'a', ret='href')[0]
                movie_title = re.compile('div class="post-title">(.+?)<', re.DOTALL).findall(item)[0]
                if cleantitle.get(title).lower() == cleantitle.get(movie_title).lower():

                    r = session.get(movie_url, headers=headers, timeout=5).text
                    year_data = re.findall('<h2 style="margin-bottom: 0">(.+?)</h2>', r, re.IGNORECASE)[0]
                    if year == year_data:
                        links = re.findall(r"<a href='(.+?)'>(\d+)p<\/a>", r)

                        for link, quality in links:

                            if not link.startswith('https:'):
                                link = 'https:' + link.replace('http:', '')
                            link = link + '|Referer=https://iwaatch.com/movie/' + title

                            quality, info = source_utils.get_release_quality(quality, link)

                            sources.append({'source': 'Direct', 'quality': quality, 'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
            return sources
        except:
            log_utils.log('iWAATCH - Exception', 1)
            return sources
Example #18
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         return urlencode({
             'imdb': imdb,
             'tvdb': tvdb,
             'tvshowtitle': tvshowtitle,
             'localtvshowtitle': localtvshowtitle,
             'year': year
         })
     except:
         log_utils.log('lib_scraper_fail2', 1)
         return
Example #19
    def sources(self, url, hostDict, hostprDict):
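        # Build a search query, then turn matching torrent rows into magnet sources.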
        sources = []
        try:
            self.hostDict = hostDict + hostprDict
            if url is None:
                return sources
            if debrid.status() is False: return sources
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote(query))
                url = urljoin(self.base_link, url)
            else:
                url = self.moviesearch.format(quote(query))
                url = urljoin(self.base_link, url)

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                link = client.parseDOM(post, 'a', ret='href')[0]
                hash = re.findall(r'(\w{40})', link, re.I)
                if hash:
                    url = 'magnet:?xt=urn:btih:' + hash[0]
                    name = link.split('title=')[1]
                    t = name.split(self.hdlr)[0]
                    if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title): continue
                    try:
                        y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                    except:
                        y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                    if y != self.hdlr: continue
                    quality, info = source_utils.get_release_quality(name, name)
                    try:
                        size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                                    'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            log_utils.log('lime0 - Exception', 1)
            return sources
Example #20
 def get(self, netloc, ua, timeout):
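     # Collect anti-bot tokens for netloc; self.cookie stays None on failure.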
     try:
         self.netloc = netloc
         self.ua = ua
         self.timeout = timeout
         self.cookie = None
         self._get_cookie(netloc, ua, timeout)
         if self.cookie is None:
             log_utils.log('%s returned an error. Could not collect tokens.' % netloc)
         return self.cookie
     except Exception as e:
         log_utils.log('%s returned an error. Could not collect tokens - Error: %s.' % (netloc, str(e)))
         return self.cookie
Example #21
def resolver(url, debrid):
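    # Pick the debrid resolver whose name matches 'debrid', then resolve the url to a stream link.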
    try:
        debrid_resolver = [
            resolver for resolver in debrid_resolvers
            if resolver.name == debrid
        ][0]
        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)
        return stream_url
    except:
        log_utils.log('%s Resolve Failure' % debrid, 1)
        return None
Example #22
    def sources(self, url, hostDict, hostprDict):
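        # Search with a hyphenated episode slug and validate each linked host.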
        sources = []
        try:
            if url is None:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s-s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('+', '-')

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            try:
                data = re.compile(
                    '<a href="(.+?)" target="_blank" rel="nofollow" title.+?'
                ).findall(r)
                for url in data:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        quality, info = source_utils.get_release_quality(
                            url, url)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
            except:
                log_utils.log('projectfree2 - Exception', 1)
            return sources
        except:
            log_utils.log('projectfree3 - Exception', 1)
            return sources
Example #23
    def sources(self, url, hostDict, hostprDict):
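        # Locate the watch page, then read player-data links from the 'les-content' blocks.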
        sources = []
        try:

            if url is None: return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])

            if 'tvshowtitle' in data:
                ep = data['episode']
                url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
                url = client.request(url, timeout='10', output='geturl')

                if url is None:
                    url = self.searchShow(data['tvshowtitle'], data['season'], aliases)

            else:
                url = self.searchMovie(data['title'], data['year'], aliases)

            if url is None: raise Exception()

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                try:
                    if link.startswith('//'):
                        link = 'https:' + link
                    host = re.findall(r'([\w]+[.][\w]+)$', urlparse(link.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)

                    if 'load.php' not in link:
                        sources.append({'source': host, 'quality': '720p', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            log_utils.log('123movies0 exception', 1)
            return sources
Example #24
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = {
             'imdb': imdb,
             'tvdb': tvdb,
             'tvshowtitle': tvshowtitle,
             'year': year
         }
         url = urlencode(url)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('SwatchSeries - Exception: \n' + str(failure))
         return
Example #25
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         aliases.append({'country': 'us', 'title': title})
         url = {
             'imdb': imdb,
             'title': title,
             'year': year,
             'aliases': aliases
         }
         url = urlencode(url)
         return url
     except:
         log_utils.log('cartoonhd - Exception', 1)
         return
Example #26
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         url = parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
         url = urlencode(url)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ExtraMovie - Exception: \n' + str(failure))
         return
Example #27
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         url = parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         tit = cleantitle.get_query_(url['tvshowtitle'])
         tit = re.sub('[^A-Za-z0-9]+', '_', tit)
         url = '%s/episode/%s_s%s_e%s.html' % (self.base_link, tit, season,
                                               episode)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('SwatchSeries - Exception: \n' + str(failure))
         return
Example #28
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         aliases.append({'country': 'us', 'title': title})
         url = {
             'imdb': imdb,
             'title': title,
             'year': year,
             'aliases': aliases
         }
         url = urlencode(url)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ExtraMovie - Exception: \n' + str(failure))
         return
Example #29
 def searchShow(self, title, season, episode, aliases, headers):
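     # Try each alias until the season/episode URL resolves somewhere other than the base link.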
     try:
         url = None
         for alias in aliases:
             url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                 self.base_link, cleantitle.geturl(alias['title']),
                 int(season), int(episode))
             url = client.request(url,
                                  headers=headers,
                                  output='geturl',
                                  timeout='10')
             if url is not None and url != self.base_link:
                 break
         return url
     except:
         log_utils.log('cartoonhd - Exception', 1)
         return
Example #30
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None:
                return

            url = parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url.update({
                'premiered': premiered,
                'season': season,
                'episode': episode
            })
            return urlencode(url)
        except:
            log_utils.log('lib_scraper_fail3', 1)
            return