Example #1
    def searchMovie(self, title, year):
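        # Search the site's movie index, match results by cleaned title and year
        # (falling back to a title-only match), and return the watch-page URL.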
        title = cleantitle.normalize(title)
        url = self.search_link % cleantitle.geturl(title)
        r = self.scraper.get(url, params={'link_web': self.base_link}).content
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [
                i[0] for i in r
                if cleantitle.get(i[1]) == cleantitle.get(title) and (
                    year == i[2])
            ][0]
        except Exception:
            url = None
            log_utils.log('series9 - Exception: \n' +
                          str(traceback.format_exc()))

        if url is None:
            url = [
                i[0] for i in results
                if cleantitle.get(i[1]) == cleantitle.get(title)
            ][0]

        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
Example #2
 def sources(self, url, hostDict, hostprDict):
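     # Pull the odbIframe embed URL out of the page and report its host as a single HD source.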
     try:
         sources = []
         r = self.scraper.get(url).content
         try:
             match = re.compile('iframe id="odbIframe" src="(.+?)"').findall(r)
             for url in match:
                 host = url.split('//')[1].replace('www.', '')
                 host = host.split('/')[0].lower()
                 sources.append({
                     'source': host,
                     'quality': 'HD',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except Exception:
             failure = traceback.format_exc()
             log_utils.log('ODB - Exception: \n' + str(failure))
             return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return sources
     return sources
Example #3
 def sources(self, url, hostDict, hostprDict):
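     # Collect every <iframe> embed on the page and keep the ones whose host appears in hostDict.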
     try:
         sources = []
         if url is None:
             return sources
         r = self.scraper.get(url).content
         try:
             match = re.compile('<iframe src="(.+?)"').findall(r)
             for url in match:
                 quality = source_utils.check_url(url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         except Exception:
             return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('1putlocker - Exception: \n' + str(failure))
         return sources
     return sources
Example #4
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         return urllib.urlencode({'imdb': imdb, 'title': title, 'localtitle': localtitle, 'year': year})
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('Library - Exception: \n' + str(failure))
         return
Example #5
 def sources(self, url, hostDict, hostprDict):
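     # Read the page's quality label, then gather the 'var link_...' embed URLs and keep the ones with valid hosts.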
     try:
         sources = []
         if url is None:
             return sources
         html = client.request(url)
         # Defaults in case the page has no quality label (assumption: fall back to 'SD').
         quality = 'SD'
         info = ''
         quality_match = re.compile(
             '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
             re.DOTALL).findall(html)
         for qual in quality_match:
             quality = source_utils.check_url(qual)
             info = qual
         links = re.compile('var link_.+? = "(.+?)"',
                            re.DOTALL).findall(html)
         for url in links:
             if not url.startswith('http'):
                 url = "https:" + url
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('FmoviesIO - Exception: \n' + str(failure))
         return sources
Example #6
    def sources(self, url, hostDict, hostprDict):
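        # Search the site; for shows, follow matching posts to the SxxExx episode link,
        # for movies decode the base64 download.php links and keep allowed hosts.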
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query(title)))

            if 'tvshowtitle' in data:
                html = self.scraper.get(url).content

                match = re.compile('class="post-item.+?href="(.+?)" title="(.+?)"', re.DOTALL).findall(html)
                for url, item_name in match:
                    if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                        season_url = '%02d' % int(data['season'])
                        episode_url = '%02d' % int(data['episode'])
                        sea_epi = 'S%sE%s' % (season_url, episode_url)

                        result = self.scraper.get(url).content
                        regex = re.compile('href="(.+?)"', re.DOTALL).findall(result)
                        for ep_url in regex:
                            if sea_epi in ep_url:
                                quality, info = source_utils.get_release_quality(url)
                                sources.append({'source': 'CDN', 'quality': quality, 'language': 'en',
                                                'url': ep_url, 'direct': False, 'debridonly': False})
            else:
                html = self.scraper.get(url).content
                match = re.compile('<div class="thumbnail".+?href="(.+?)" title="(.+?)"', re.DOTALL).findall(html)

                for url, item_name in match:
                    if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                        quality, info = source_utils.get_release_quality(url)
                        result = self.scraper.get(url).content
                        regex = re.compile('href="/download.php.+?link=(.+?)"', re.DOTALL).findall(result)

                        for link in regex:
                            if 'server=' not in link:
                                try:
                                    link = base64.b64decode(link)
                                except Exception:
                                    pass
                                try:
                                    host = link.split('//')[1].replace('www.', '')
                                    host = host.split('/')[0].lower()
                                except Exception:
                                    continue
                                if not self.filter_host(host):
                                    continue
                                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                                'url': link, 'direct': False, 'debridonly': False})

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('ExtraMovie - Exception: \n' + str(failure))
            return sources
Example #7
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = imdb
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return
Example #8
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = cleantitle.geturl(tvshowtitle)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('1putlocker - Exception: \n' + str(failure))
         return
Example #9
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = self.base_link + self.movie_link % imdb
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return
Example #10
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         title = cleantitle.geturl(title)
         url = self.base_link + '/%s/' % title
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('1putlocker - Exception: \n' + str(failure))
         return
Example #11
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         url = urllib.urlencode(url)
         return url
     except Exception:
         log_utils.log('filmrls.com - Exception: \n' +
                       str(traceback.format_exc()))
         return
Example #12
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         url = urllib.urlencode(url)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('YIFYDLL - Exception: \n' + str(failure))
         return
Example #13
 def matchAlias(self, title, aliases):
     try:
         for alias in aliases:
             if cleantitle.get(title) == cleantitle.get(alias['title']):
                 return True
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('YMovies - Exception: \n' + str(failure))
         return
Example #14
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         aliases.append({'country': 'us', 'title': tvshowtitle})
         url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
         url = urllib.urlencode(url)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ExtraMovie - Exception: \n' + str(failure))
         return
Example #15
def resolver(url, debrid):
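    # Resolve a hoster link to a direct stream URL through the named debrid service.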
    try:
        debrid_resolver = [resolver for resolver in debrid_resolvers if resolver.name == debrid][0]
        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)
        return stream_url
    except Exception as e:
        log_utils.log('%s Resolve Failure: %s' % (debrid, e), log_utils.LOGWARNING)
        return None
Example #16
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if not url:
             return
         imdb = url
         url = self.base_link + self.tv_link % (imdb, season, episode)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return
Example #17
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        if debrid.status() is False:
            return

        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TPB - Exception: \n' + str(failure))
            return
Example #18
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         tvshowtitle = url
         url = self.base_link + '/episode/%s-season-%s-episode-%s/' % (
             tvshowtitle, season, episode)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('1putlocker - Exception: \n' + str(failure))
         return
Example #19
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
         url = urllib.urlencode(url)
         return url
     except Exception:
         log_utils.log('filmrls.com - Exception: \n' +
                       str(traceback.format_exc()))
         return
Example #20
    def sources(self, url, hostDict, hostprDict):
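        # Debrid-only torrent scraper: requires an active debrid account, searches for
        # '<title> <year>', and returns magnet links tagged with release quality and size.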
        try:
            sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            query = '%s %s' % (data['title'], data['year'])
            url = self.search_link % urllib.quote(query)
            url = urlparse.urljoin(self.base_link, url).replace('%20', '-')
            html = client.request(url)
            try:
                results = client.parseDOM(html, 'div', attrs={'class': 'ava1'})
            except Exception:
                failure = traceback.format_exc()
                log_utils.log('YIFYDLL - Exception: \n' + str(failure))
                return sources
            for torrent in results:
                links = re.findall(
                    'a data-torrent-id=".+?" href="(magnet:.+?)" class=".+?" title="(.+?)"',
                    torrent, re.DOTALL)
                for link, name in links:
                    link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                    quality, info = source_utils.get_release_quality(
                        name, name)
                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            torrent)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass
                    info = ' | '.join(info)
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('YIFYDLL - Exception: \n' + str(failure))
            return sources
Example #21
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
         url = urllib.urlencode(url)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ExtraMovie - Exception: \n' + str(failure))
         return
Example #22
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None:
                return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url.update({'premiered': premiered, 'season': season, 'episode': episode})
            return urllib.urlencode(url)
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Library - Exception: \n' + str(failure))
            return
Example #23
 def get(self, netloc, ua, timeout):
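     # Fetch and cache the site's anti-bot cookie/tokens for this netloc, logging a debug message when nothing is returned.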
     try:
         self.netloc = netloc
         self.ua = ua
         self.timeout = timeout
         self.cookie = None
         self._get_cookie(netloc, ua, timeout)
         if self.cookie is None:
             log_utils.log('%s returned an error. Could not collect tokens.' % netloc, log_utils.LOGDEBUG)
         return self.cookie
     except Exception as e:
         log_utils.log('%s returned an error. Could not collect tokens - Error: %s.' % (netloc, str(e)),
                       log_utils.LOGDEBUG)
         return self.cookie
Example #24
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = {
             'imdb': imdb,
             'tvdb': tvdb,
             'tvshowtitle': tvshowtitle,
             'year': year
         }
         url = urllib.urlencode(url)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('StreamLord - Exception: \n' + str(failure))
         return
Example #25
def unpacked(url):
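    # Fetch the page and, if it contains packed (p,a,c,k,e,d) JavaScript, return the unpacked script.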
    try:
        from exoscrapers.modules import client
        from exoscrapers.modules import jsunpack
        from exoscrapers.modules import log_utils
        unpacked = ''
        html = client.request(url)
        if jsunpack.detect(html):
            unpacked = jsunpack.unpack(html)
            # log_utils.log('WatchWrestling - unpacked: \n' + str(unpacked))
        else:
            log_utils.log('getSum - unpacked - Failed.')
        return unpacked
    except Exception:
        return
Example #26
    def sources(self, url, hostDict, hostprDict):
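        # Each link hides the hoster URL in a base64-encoded 'r' query parameter;
        # decode it and keep known hosts as SD sources.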
        try:
            sources = []

            if url is None:
                return sources

            r = self.scraper.get(url, headers={
                'referer': self.base_link
            }).content
            links = client.parseDOM(r,
                                    'a',
                                    ret='href',
                                    attrs={'target': '.+?'})
            links = [x for y, x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        continue
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except Exception:
                    pass

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return sources
Example #27
 def movie(self, imdb, title, localtitle, aliases, year):
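     # Search the site and return the /watch/ URL of the first result whose title matches the query.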
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(':', ' ').replace(' ', '+'))
         search_results = client.request(url)
         match = re.compile('<a href="/watch/(.+?)" title="(.+?)">',
                            re.DOTALL).findall(search_results)
         for row_url, row_title in match:
             row_url = 'https://fmoviesto.to/watch/%s' % row_url
             if cleantitle.get(title) in cleantitle.get(row_title):
                 return row_url
         return
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('FmoviesIO - Exception: \n' + str(failure))
         return
Example #28
def get(url, Type=None):
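    # Fetch a URL with the requested backend: plain client request, cfscrape for
    # Cloudflare-protected pages, or just resolve the redirect target.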
    if not url:
        return
    content = None  # guard against unrecognized Type values leaving content unbound
    if Type == 'client' or Type is None:
        from exoscrapers.modules import client
        content = client.request(url, headers=headers)
    if Type == 'cfscrape':
        from exoscrapers.modules import cfscrape
        cfscraper = cfscrape.create_scraper()
        content = cfscraper.get(url, headers=headers).content
    if Type == 'redirect':
        import requests
        content = requests.get(url, headers=headers).url
    if content is None:
        log_utils.log('getSum - Get ERROR:  No Content Got for:  ' + str(url))
        raise Exception()
    return content
Example #29
    def movie(self, imdb, title, localtitle, aliases, year):
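        # Search the site, split each result title into name and year, and return
        # the first result whose cleaned title and year match.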
        try:
            search_id = title.replace(':', ' ').replace(' ', '+').lower()
            start_url = urlparse.urljoin(self.base_link,
                                         self.search_link % (search_id))

            search_results = self.scraper.get(start_url).content
            match = re.compile('<header>.+?href="(.+?)" title="(.+?)"',
                               re.DOTALL).findall(search_results)
            for item_url, item_title in match:
                movie_name, movie_year = re.findall("(.*?)(\d+)",
                                                    item_title)[0]
                if cleantitle.get(title) != cleantitle.get(movie_name):
                    continue
                if year not in movie_year:
                    continue
                return item_url
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('HDPopcorn - Exception: \n' + str(failure))
            return
Example #30
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
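        # Parse the show page's episode list and pick the entry matching the episode title and/or premiere date.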
        try:
            if url is None:
                return

            r = self.scraper.get(url, headers={
                'referer': self.base_link
            }).content

            r = client.parseDOM(r, 'li', attrs={'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'span', attrs={'itemprop': 'name'}),
                  re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2])
                 for i in r if i[1]] + [(i[0], None, i[2])
                                        for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0])
                 for i in r if i[2]] + [(i[0], i[1], None)
                                        for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [
                i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]
            ][:1]
            if not url:
                url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url:
                url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url:
                raise Exception()

            return url[0][0]
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return