Example #1
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         r = cfScraper.get(url).content
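         # The 'Report Broken' links embed the item id; reuse it to build the /e/ (show) or /m/ (movie) embed URL on tvmovieflix.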
         match = re.compile(
             '<a href="http://www.tvmovieflix.com/report-.+?/(.+?)" target="_blank"><span class="a">Report Broken</span></a></li>',
             re.DOTALL | re.M).findall(r)
         for link in match:
             if "/show/" in url:
                 surl = "http://www.tvmovieflix.com/e/" + link
             else:
                 surl = "http://www.tvmovieflix.com/m/" + link
             i = cfScraper.get(surl).content
             match = re.compile('<IFRAME.+?SRC="(.+?)"',
                                re.DOTALL | re.IGNORECASE).findall(i)
             for link in match:
                 if "realtalksociety.com" in link:
                     r = requests.get(link).content
                     match = re.compile(
                         '<source src="(.+?)" type="video/mp4">',
                         re.DOTALL | re.IGNORECASE).findall(r)
                     for vurl in match:  # renamed so the outer 'url' check above is not clobbered
                         valid, host = source_utils.is_host_valid(
                             vurl, hostDict)
                         quality, info = source_utils.get_release_quality(
                             vurl, vurl)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': vurl,
                             'direct': True,
                             'debridonly': False
                         })
                 else:
                     valid, host = source_utils.is_host_valid(
                         link, hostDict)
                     quality, info = source_utils.get_release_quality(
                         link, link)
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': link,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example #2
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         aliases = eval(data['aliases'])
         headers = {}
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         year = data['year']
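         # For episodes, rewrite the matched page URL to its season/episode variant before scraping.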
         if 'tvshowtitle' in data:    
             episode = data['episode']
             season = data['season']
             url = self._search(title, year, aliases, headers)
             url = url.replace('online-free', 'season-%s-episode-%s-online-free' % (season, episode))
         else:
             episode = None
             year = data['year']
             url = self._search(data['title'], data['year'], aliases, headers)
         url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
         result = client.request(url)
         result = client.parseDOM(result, 'li', attrs={'class':'link-button'})
         links = client.parseDOM(result, 'a', ret='href')
         i = 0
         for l in links:
             #if i == 15:
                 #break
             try:
                 l = l.split('=')[1]
                 l = urlparse.urljoin(self.base_link, self.video_link % l)
                 result = client.request(l, post={}, headers={'Referer':url})
                 u = result if 'http' in result else 'http:' + result
                 if ' href' in u: u = 'http:' + re.compile(r" href='(.+?)'").findall(u)[0]
                 if 'google' in u:
                     valid, hoster = source_utils.is_host_valid(u, hostDict)
                     urls, host, direct = source_utils.check_directstreams(u, hoster)
                     for x in urls:
                         sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                 else:
                     valid, hoster = source_utils.is_host_valid(u, hostDict)
                     if not valid:
                         continue
                     try:
                         u.decode('utf-8')
                         sources.append({'source': hoster, 'quality': 'sd', 'language': 'en', 'url': u, 'direct': False, 'debridonly': False})
                         i+=1
                     except:
                         pass
             except:
                 pass
         return sources
     except:
         return sources
Example #3
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         r = cfScraper.get(url).content
         qual = re.compile('class="quality">(.+?)</span>').findall(r)
         quality, info = 'SD', ''  # defaults in case no quality tag is found
         for i in qual:
             info = i
             if '1080' in i:
                 quality = '1080p'
             elif '720' in i:
                 quality = '720p'
             else:
                 quality = 'SD'
         u = re.compile('data-video="(.+?)"').findall(r)
         for url in u:
             if not url.startswith('http'):
                 url = "https:" + url
             if 'vidcloud' in url:
                 r = cfScraper.get(url).content
                 t = re.compile('data-video="(.+?)"').findall(r)
                 for vurl in t:  # renamed so the outer 'url' is not clobbered
                     if not vurl.startswith('http'):
                         vurl = "https:" + vurl
                     valid, host = source_utils.is_host_valid(vurl, hostDict)
                     if valid and 'vidcloud' not in vurl:
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': vurl,
                             'direct': False,
                             'debridonly': False
                         })
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Example #4
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         #headers = {'Referer': url}
         r = cfScraper.get(url).content
         u = client.parseDOM(r,
                             "span",
                             attrs={"class": "movie_version_link"})
         for t in u:
             match = client.parseDOM(t, 'a', ret='data-href')
             for url in match:
                 if url in str(sources):
                     continue
                 quality, info = source_utils.get_release_quality(url, url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example #5
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         q = '%s' % cleantitle.get_gan_url(data['title'])
         url = self.base_link + self.search_link % q
         r = cfScraper.get(url).content
         r = ensure_text(r)
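         # Each search result row yields (link, title, quality); keep only rows whose title contains 'Title (Year)'.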
         v = re.compile('<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>').findall(r)
         for url, check, qual in v:
             t = '%s (%s)' % (data['title'], data['year'])
             if t in check:
                 key = url.split('-hd')[1]
                 url = 'https://fmovies.tw/moviedownload.php?q=%s' % key
                 r = cfScraper.get(url).content
                 r = ensure_text(r)
                 r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
                 for url in r:
                     if any(x in url for x in ['.rar']): continue
                     #quality, _ = source_utils.get_release_quality(qual, url)
                     valid, host = source_utils.is_host_valid(url, hostDict)
                     if valid:
                         #info = ' | '.join(info)
                         sources.append(
                             {'source': host, 'quality': '720p', 'language': 'en', 'url': url,
                              'direct': False, 'debridonly': False})
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('Ganool Testing - Exception: \n' + str(failure))
         return sources
Example #6
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            hostDict = hostprDict + hostDict

            if url is None: return sources

            r = cfScraper.get(url).content
            quality = re.findall(">(\w+)<\/p", r)
            if quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'],
                       'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                valid, host = source_utils.is_host_valid(i.content, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         r = client.request(url)
         r = re.compile(
             'class="watch-button" data-actuallink="(.+?)"').findall(r)
         for url in r:
             if url in str(sources):
                 continue
             quality, info = source_utils.get_release_quality(url, url)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         log_utils.log('Watchepisodes4 Exception', 1)
         return sources
Example #8
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            name = client.replaceHTMLCodes(name)
            try:
                _name = name.lower().replace('rr',
                                             '').replace('nf', '').replace(
                                                 'ul', '').replace('cu', '')
            except:
                _name = name
            l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
            s = ''
            for i in l:
                s += i.content
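            # Pull every http/ftp/https URL out of the collected link text, then drop archive and subtitle files.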
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not i.endswith(('.rar', '.zip', '.iso',
                                                   '.idx', '.sub', '.srt'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': _name
                })
        except:
            log_utils.log('RMZ - Exception', 1)
            pass
Example #9
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s-s%02de%02d' % (data['tvshowtitle'], int(
                data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('+', '-')

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            try:
                data = re.compile(
                    '<a href="(.+?)" target="_blank" rel="nofollow" title.+?'
                ).findall(r)
                for url in data:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        quality, info = source_utils.get_release_quality(
                            url, url)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
            except:
                log_utils.log('projectfree2 - Exception', 1)
                pass
            return sources
        except:
            log_utils.log('projectfree3 - Exception', 1)
            return sources
Example #10
    def _get_sources(self, url):
        try:
            item = client.request(url[0])
            title = url[1]
            links = dom_parser2.parse_dom(item, 'a', req='href')
            links = [i.attrs['href'] for i in links]
            info = []
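            # Try to read a release size from the page and normalise it to GB for the info string.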
            try:
                size = re.findall(
                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                size = '%.2f GB' % size
                info.insert(0, size)
            except Exception:
                pass
            info = ' | '.join(info)
            for url in links:
                if 'youtube' in url: continue
                if any(x in url.lower()
                       for x in ['.rar.', '.zip.', '.iso.']) or any(
                           url.lower().endswith(x)
                           for x in ['.rar', '.zip', '.iso']):
                    raise Exception()

                if any(x in url.lower()
                       for x in ['youtube', 'sample', 'trailer']):
                    raise Exception()
                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue

                host = client.replaceHTMLCodes(host)
                quality, info2 = source_utils.get_release_quality(title, url)

                self._sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
        except Exception:
            pass
Example #11
 def sources(self, url, hostDict, hostprDict):
     try:
         hostDict = hostprDict + hostDict
         sources = []
         if url is None:
             return sources
         page = client.request(url)
         links = re.compile('<a rel="nofollow" target="blank" href="(.+?)"', re.DOTALL).findall(page)
         for link in links:
             link = "https:" + link if not link.startswith('http') else link
             valid, host = source_utils.is_host_valid(link, hostDict)
             if valid:
                 quality, info = source_utils.get_release_quality(link, link)
                 sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': False, 'debridonly': False})
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('watchseriestv - Exception: \n' + str(failure))
         return sources
Example #12
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         sourcePage = ensure_text(cfScraper.get(url).content, errors='replace')
         thesources = re.compile('<tbody>(.+?)</tbody>', re.DOTALL).findall(sourcePage)[0]
         links = re.compile("<a href=\'(.+?)\' target=\'_blank\'>Download</a>", re.DOTALL).findall(thesources)
         for link in links:
             linkPage = ensure_text(cfScraper.get(link).content, errors='replace')
             vlink = re.compile('<a id="link" rel="nofollow" href="(.+?)" class="btn"', re.DOTALL).findall(linkPage)
             for zlink in vlink:
                 valid, host = source_utils.is_host_valid(zlink, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(zlink, zlink)
                     sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': zlink, 'info': info, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example #13
 def sources(self, url, hostDict, hostprDict):
     try:
         hostDict = hostprDict + hostDict
         sources = []
         if url is None:
             return sources
         headers = {'User-Agent': self.User_Agent}
         html = requests.get(url, headers=headers, timeout=10).text  # .text gives a str so the regexes below work on both Python 2 and 3
         qual = re.compile('<div class="cf">.+?class="quality">(.+?)</td>', re.DOTALL).findall(html)
         quality = 'SD'  # default in case no quality tag is found
         for i in qual:
             quality = source_utils.check_url(i)
         links = re.compile('data-href="(.+?)"', re.DOTALL).findall(html)
         for link in links:
             if 'http' not in link:
                 link = 'https://' + link
             valid, host = source_utils.is_host_valid(link, hostDict)
             if valid:
                 sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example #14
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         html = client.request(url)
         qual = re.compile('<div>Quanlity: <span class="quanlity">(.+?)</span></div>', re.DOTALL).findall(html)
         quality, info = 'SD', ''  # defaults in case no quality tag is found
         for q in qual:
             quality = source_utils.check_url(q)
             info = q
         links = re.compile('var link_.+? = "(.+?)"', re.DOTALL).findall(html)
         for url in links:
             if not url.startswith('http'):
                 url = "https:" + url
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url,
                                 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example #15
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources
            data = parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            title = data['title']
            year = data['year']

            tit = cleantitle.geturl(title + ' ' + year)
            query = urljoin(self.base_link, tit)

            r = client.request(query, referer=self.base_link, redirect=True)
            if not data['imdb'] in r:
                return sources

            links = []
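            # Collect candidate links from both the download tab and the stream tab.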

            try:
                down = client.parseDOM(r, 'div', attrs={'id':
                                                        'tab-download'})[0]
                down = client.parseDOM(down, 'a', ret='href')[0]
                data = client.request(down)
                frames = client.parseDOM(data,
                                         'div',
                                         attrs={'class': 'single-link'})
                frames = [
                    client.parseDOM(i, 'a', ret='href')[0] for i in frames if i
                ]
                for i in frames:
                    links.append(i)

            except Exception:
                pass
            try:
                streams = client.parseDOM(r, 'div', attrs={'id':
                                                           'tab-stream'})[0]
                streams = re.findall(r'''iframe src=(.+?) frameborder''',
                                     streams.replace('&quot;', ''),
                                     re.I | re.DOTALL)
                for i in streams:
                    links.append(i)
            except Exception:
                pass

            for url in links:
                try:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        valid, host = source_utils.is_host_valid(
                            url, hostprDict)
                        if not valid:
                            continue
                        else:
                            rd = True
                    else:
                        rd = False
                    #quality, _ = source_utils.get_release_quality(url, url)
                    quality = '720p'
                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)
                    if rd:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': True
                        })
                    else:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                except Exception:
                    pass
            return sources
        except:
            log_utils.log('filmxy', 1)
            return sources
Example #16
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            ref_url = url = data['url']
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            _headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/72.0'
            }
            r = client.request(url, headers=_headers)
            posts = client.parseDOM(r, 'h2', attrs={'class': 'title'})
            posts = zip(client.parseDOM(posts, 'a', ret='title'),
                        client.parseDOM(posts, 'a', ret='href'))

            if posts == []:
                return sources

            for item in posts:
                try:
                    name = item[0].replace(' ', '.')
                    url = item[1]
                    r = client.request(url, headers=_headers)
                    list = client.parseDOM(r, 'div', attrs={'id': 'content'})

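                    # Episode posts and movie posts use different markup; pick the matching release-name/size regex.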
                    if 'tvshowtitle' in data:
                        regex = '(<strong>(.*?)</strong><br />\s?[A-Z,0-9]*?\s\|\s([A-Z,0-9,\s]*)\|\s((\d+\.\d+|\d*)\s?(?:GB|GiB|Gb|MB|MiB|Mb))?</p>(?:\s<p><a href=\".*?\" .*?_blank\">.*?</a></p>)+)'
                    else:
                        regex = '(<strong>Release Name:</strong>\s*(.*?)<br />\s?<strong>Size:</strong>\s?((\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+)\s(?:GB|GiB|Gb|MB|MiB|Mb))?<br />(.*\s)*)'

                    for match in re.finditer(
                            regex,
                            list[0].encode('ascii', errors='ignore').decode(
                                'ascii',
                                errors='ignore').replace('&nbsp;', ' ')):
                        name = str(match.group(2))
                        t = name.split(hdlr)[0].replace(
                            data['year'],
                            '').replace('(',
                                        '').replace(')',
                                                    '').replace('&', 'and')
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue

                        if hdlr not in name:
                            continue

                        if 'tvshowtitle' in data:
                            size = str(match.group(4))
                        else:
                            size = str(match.group(3))

                        links = client.parseDOM(
                            match.group(1),
                            'a',
                            attrs={'class': 'autohyperlink'},
                            ret='href')

                        for url in links:
                            try:
                                if any(x in url for x in
                                       ['.rar', '.zip', '.iso', '.sample.']):
                                    continue

                                if url in str(sources):
                                    continue

                                valid, host = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid:
                                    continue

                                host = client.replaceHTMLCodes(host)
                                host = host.encode('utf-8')

                                quality, info = source_utils.get_release_quality(
                                    name, url)

                                try:
                                    div = 1 if size.endswith(
                                        ('GB', 'GiB', 'Gb')) else 1024
                                    size = float(
                                        re.sub('[^0-9|/.|/,]', '',
                                               size.replace(',', '.'))) / div
                                    size = '[B]%.2f GB[/B]' % size
                                    info.insert(0, size)
                                except:
                                    pass

                                info = ' | '.join(info)

                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': url,
                                    'info': info,
                                    'direct': False,
                                    'debridonly': True
                                })
                            except:
                                pass
                except:
                    pass

            return sources

        except:
            return sources
Example #17
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            url = urljoin(
                self.base_link,
                self.search_link % quote_plus(cleantitle.query(title)))
            headers = {'User-Agent': self.User_Agent}

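            # Episode searches go through cfScraper (Cloudflare); movie searches use plain requests.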
            if 'tvshowtitle' in data:
                html = cfScraper.get(url, headers=headers).content
                html = ensure_str(html)

                match = re.compile(
                    'class="post-item.+?href="(.+?)" title="(.+?)"',
                    re.DOTALL).findall(html)
                for url, item_name in match:
                    if cleantitle.getsearch(title).lower(
                    ) in cleantitle.getsearch(item_name).lower():
                        season_url = '%02d' % int(data['season'])
                        episode_url = '%02d' % int(data['episode'])
                        sea_epi = 'S%sE%s' % (season_url, episode_url)

                        result = cfScraper.get(url,
                                               headers=headers,
                                               timeout=10).content
                        Regex = re.compile('href="(.+?)"',
                                           re.DOTALL).findall(result)
                        for ep_url in Regex:
                            if sea_epi in ep_url:
                                if '1080p' in ep_url:
                                    qual = '1080p'
                                elif '720p' in ep_url:
                                    qual = '720p'
                                elif '480p' in ep_url:
                                    qual = '480p'
                                else:
                                    qual = 'SD'

                                sources.append({
                                    'source': 'CDN',
                                    'quality': qual,
                                    'language': 'en',
                                    'url': ep_url,
                                    'direct': False,
                                    'debridonly': False
                                })
            else:
                html = requests.get(url, headers=headers).text
                match = re.compile(
                    '<div class="thumbnail".+?href="(.+?)" title="(.+?)"',
                    re.DOTALL).findall(html)

                for url, item_name in match:
                    if cleantitle.getsearch(title).lower(
                    ) in cleantitle.getsearch(item_name).lower():
                        if '1080' in url:
                            quality = '1080p'
                        elif '720' in url:
                            quality = '720p'
                        else:
                            quality = 'SD'

                        result = requests.get(url, headers=headers,
                                              timeout=10).text
                        Regex = re.compile('href="/download.php.+?link=(.+?)"',
                                           re.DOTALL).findall(result)

                        for link in Regex:
                            if 'server=' not in link:
                                try:
                                    link = base64.b64decode(link)
                                    link = ensure_str(link)
                                except Exception:
                                    pass
                                try:
                                    host = link.split('//')[1].replace(
                                        'www.', '')
                                    host = host.split('/')[0].lower()
                                except Exception:
                                    pass
                                _hostDict = hostDict + hostprDict
                                valid, host = source_utils.is_host_valid(
                                    host, _hostDict)
                                if not valid:
                                    continue
                                # if not self.filter_host(host):
                                # continue
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'direct': False,
                                    'debridonly': False
                                })

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('ExtraMovies - Exception: \n' + str(failure))
            return sources
Example #18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']),
                                      int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            r = client.request(url,
                               headers=headers,
                               output='extended',
                               timeout='10')

            #if imdb not in r[0]:
            #raise Exception()

            try:
                cookie = r[4]
                headers = r[3]
            except:
                cookie = r[3]
                headers = r[2]
            result = r[0]

            try:
                r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
                for i in r:
                    try:
                        sources.append({
                            'source':
                            'gvideo',
                            'quality':
                            directstream.googletag(i)[0]['quality'],
                            'language':
                            'en',
                            'url':
                            i,
                            'direct':
                            True,
                            'debridonly':
                            False
                        })
                    except:
                        pass
            except:
                pass

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % unquote_plus(auth)
            headers['Authorization'] = auth
            headers['Referer'] = url
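            # With the bearer token and referer set, POST the page token and element id to the site's AJAX endpoint to get the embed links.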

            u = '/ajax/vsozrflxcw.php'
            self.base_link = client.request(
                self.base_link,
                headers={'User-Agent': client.agent()},
                output='geturl')
            u = urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            tim = str(int(time.time())) if six.PY2 else six.ensure_binary(
                str(int(time.time())))
            elid = quote(base64.encodestring(tim)).strip()

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'nopop': '',
                'elid': elid
            }
            post = urlencode(post)
            cookie += ';%s=%s' % (idEl, elid)
            headers['Cookie'] = cookie

            r = client.request(u,
                               post=post,
                               headers=headers,
                               cookie=cookie,
                               XHR=True)
            r = str(json.loads(r))

            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try:
                    if 'google' in i:
                        quality = 'SD'

                        if 'googleapis' in i:
                            try:
                                quality = source_utils.check_sd_url(i)
                            except Exception:
                                pass

                        if 'googleusercontent' in i:
                            i = directstream.googleproxy(i)
                            try:
                                quality = directstream.googletag(
                                    i)[0]['quality']
                            except Exception:
                                pass

                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })

                    elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })

                        except Exception:
                            pass
                    else:
                        valid, hoster = source_utils.is_host_valid(i, hostDict)
                        if valid:
                            if 'vidnode.net' in i:
                                i = i.replace('vidnode.net', 'vidcloud9.com')
                                hoster = 'vidcloud9'
                            sources.append({
                                'source': hoster,
                                'quality': '720p',
                                'language': 'en',
                                'url': i,
                                'direct': False,
                                'debridonly': False
                            })
                except Exception:
                    pass
            return sources
        except:
            log_utils.log('cartoonhd - Exception', 1)
            return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            host_dict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            if 'season' in data:
                season = data['season']
            if 'episode' in data:
                episode = data['episode']
            year = data['year']

            r = client.request(self.base_link, output='extended', timeout='10')
            #r = cfScraper.get(self.base_link).content
            cookie = r[3]
            headers = r[2]
            result = r[0]
            headers['Cookie'] = cookie
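            # Search via XHR with the session cookie; matching links come back as HTML inside the JSON 'content' field.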

            query = urljoin(
                self.base_link,
                self.search_link % quote_plus(cleantitle.getsearch(title)))
            query2 = urljoin(self.base_link,
                             self.search_link % quote_plus(title).lower())
            r = client.request(query, headers=headers, XHR=True)
            if len(r) < 20:
                r = client.request(query2, headers=headers, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            if 'tvshowtitle' in data:
                cltitle = cleantitle.get(title + 'season' + season)
                cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                r = [
                    i for i in r if cltitle == cleantitle.get(i[1])
                    or cltitle2 == cleantitle.get(i[1])
                ]
                vurl = '%s%s-episode-%s' % (self.base_link, str(
                    r[0][0]).replace('/info', ''), episode)
                vurl2 = None

            else:
                cltitle = cleantitle.getsearch(title)
                cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                r = [
                    i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                    or cltitle == cleantitle.getsearch(i[1])
                ]
                vurl = '%s%s-episode-0' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))
                vurl2 = '%s%s-episode-1' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))

            r = client.request(vurl, headers=headers)
            headers['Referer'] = vurl

            slinks = client.parseDOM(r,
                                     'div',
                                     attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
            if len(slinks) == 0 and vurl2 is not None:
                r = client.request(vurl2, headers=headers)
                headers['Referer'] = vurl2
                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
            slinks = [
                slink
                if slink.startswith('http') else 'https:{0}'.format(slink)
                for slink in slinks
            ]

            for url in slinks:
                url = client.replaceHTMLCodes(url)
                #url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, host_dict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except:
            log_utils.log('gowatchseries3 - Exception', 1)
            return sources
Example #20
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            #r = client.request(self.base_link)
            #search_base = client.parseDOM(r, 'form', ret='action')[0]
            #log_utils.log(search_base)
            #url = urljoin(search_base, self.search_link)
            url = urljoin(self.base_link, self.search_link)
            url = url % quote_plus(query)

            r = client.request(url)

            r = client.parseDOM(r, 'h2')

            z = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            posts = [(i[1], i[0]) for i in z]

            host_dict = hostprDict + hostDict

            items = []
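            # Build (name, link, size) tuples from each post; episode posts list their links inside per-episode <ul> blocks.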

            for post in posts:
                try:
                    r = client.request(post[1])
                    r = ensure_text(r, errors='replace')
                    r = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'entry-content cf'})[0]

                    if 'tvshowtitle' in data:
                        z = zip(
                            re.findall(r'<p><b>(%s.+?)</b>' % title, r,
                                       re.I | re.S),
                            re.findall(r'<ul>(.+?)</ul>', r, re.S))
                        for f in z:
                            u = re.findall(r'\'(http.+?)\'',
                                           f[1]) + re.findall(
                                               r'\"(http.+?)\"', f[1])
                            u = [i for i in u if '/embed/' not in i]
                            t = f[0]
                            try:
                                s = re.findall(
                                    r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))',
                                    t)[0]
                            except:
                                s = '0'
                            items += [(t, i, s) for i in u]

                    else:
                        t = ensure_text(post[0], errors='replace')
                        u = re.findall(r'\'(http.+?)\'', r) + re.findall(
                            '\"(http.+?)\"', r)
                        u = [i for i in u if '/embed/' not in i]
                        try:
                            s = re.findall(
                                r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))',
                                r)[0]
                        except:
                            s = '0'
                        items += [(t, i, s) for i in u]

                except:
                    log_utils.log('MYVIDEOLINK ERROR', 1)
                    pass

            for item in items:
                try:
                    url = ensure_text(item[1])
                    url = client.replaceHTMLCodes(url)

                    void = ('.rar', '.zip', '.iso', '.part', '.png', '.jpg',
                            '.bmp', '.gif', 'sub', 'srt')
                    if url.endswith(void):
                        continue

                    name = ensure_text(item[0], errors='replace')
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name, re.I)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        continue

                    y = re.findall(
                        r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr:
                        continue

                    valid, host = source_utils.is_host_valid(url, host_dict)
                    if not valid:
                        continue
                    host = client.replaceHTMLCodes(host)

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = item[2]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False,
                        'size': dsize,
                        'name': name
                    })
                except:
                    log_utils.log('MYVIDEOLINK ERROR', 1)
                    pass

            return sources
        except:
            log_utils.log('MYVIDEOLINK ERROR', 1)
            return sources
Example #21
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url is None: return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = data['year']
            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year
            query = '%s %s' % (title, year)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = quote_plus(query)

            url = urljoin(self.base_link, self.search_link % query)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')
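            # Results are RSS <item> entries; verify the parsed title and year before collecting host links.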

            for post in posts:
                try:
                    name = client.parseDOM(post, 'title')[0]
                    name = client.replaceHTMLCodes(name)
                    name = ensure_str(name, errors='ignore')

                    y = re.findall('(\d{4}|S\d+E\d+|S\d+)', name, re.I)[0]

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name, re.I)

                    if not (re.findall('\w+', cleantitle.get(t))[0]
                            == cleantitle.get(title) and year == y):
                        raise Exception()

                    if not 'tvshowtitle' in data:
                        links = client.parseDOM(post, 'a', ret='href')
                    else:
                        ep = '%02d' % int(data['episode'])
                        pattern = '>Season[\s|\:]%d<(.+?)(?:<b>Season|</content)' % int(
                            data['season'])
                        data = re.findall(pattern, post, re.S | re.I)
                        data = dom_parser2.parse_dom(data, 'a', req='href')
                        links = [(i.attrs['href'], i.content.lower())
                                 for i in data]
                        links = [
                            i[0] for i in links
                            if (hdlr in i[0] or hdlr in i[1] or ep == i[1])
                        ]

                    for url in links:
                        try:
                            if any(x in url for x in [
                                    '.online', 'xrysoi.', 'filmer', '.bp',
                                    '.blogger'
                            ]):
                                continue

                            url = client.replaceHTMLCodes(url)
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            #if 'hdvid' in host: valid = True
                            if not valid: continue
                            # try: dub = re.findall('ΜΕΤΑΓΛΩΤΙΣΜΕΝΟ', post, re.S|re.I)[0]
                            # except: dub = None
                            # info = ' / '.join((name, 'DUB')) if dub else name

                            sources.append({
                                'source': host,
                                'quality': 'sd',
                                'language': 'gr',
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })
                        except:
                            pass

                except:
                    log_utils.log('xrysoi_exc', 1)
                    pass

            return sources
        except:
            return sources
Example #22
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.get_query(title)
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s season %d' % (title, int(data['season'])) if 'tvshowtitle' in data else title
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = quote_plus(query)

            url = urljoin(self.base_link, self.search_link % query)

            ua = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=ua).content
            r = six.ensure_text(r, errors='replace')
            _posts = client.parseDOM(r, 'div', attrs={'class': 'item'})
            posts = []
            for p in _posts:
                try:
                    post = (client.parseDOM(p, 'a', ret='href')[1],
                              client.parseDOM(p, 'a')[1],
                              re.findall(r'Release:\s*?(\d{4})</', p, re.I|re.S)[1])
                    posts.append(post)
                except:
                    pass
            posts = [(i[0], client.parseDOM(i[1], 'i')[0], i[2]) for i in posts if i]

            if 'tvshowtitle' in data:
                sep = 'season %d' % int(data['season'])
                sepi = 'season-%1d/episode-%1d.html' % (int(data['season']), int(data['episode']))
                post = [i[0] for i in posts if sep in i[1].lower()][0]
                data = cfScraper.get(post, headers=ua).content
                data = six.ensure_text(data, errors='replace')
                link = client.parseDOM(data, 'a', ret='href')
                link = [i for i in link if sepi in i][0]
            else:
                link = [i[0] for i in posts if cleantitle.get_title(title) in cleantitle.get_title(i[1]) and hdlr == i[2]][0]

            r = cfScraper.get(link, headers=ua).content
            r = six.ensure_text(r, errors='replace')
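            # The first source may be hidden in a Base64 blob written via document.write(); the plain server_line list is parsed separately below.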
            try:
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
                v = v.encode('utf-8')
                b64 = base64.b64decode(v)
                b64 = six.ensure_text(b64, errors='ignore')
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = six.ensure_str(host)
                    valid, hoster = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append({
                            'source': hoster,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    log_utils.log('plockers4 Exception', 1)
                    pass
            except:
                log_utils.log('plockers3 Exception', 1)
                pass
            r = client.parseDOM(r, 'div', {'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0],
                  client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0].replace('\/', '/')
                        host = client.replaceHTMLCodes(host)
                        host = six.ensure_str(host)
                        if 'other' in host: continue
                        valid, hoster = source_utils.is_host_valid(host, hostDict)
                        if valid:
                            sources.append({
                                'source': hoster,
                                'quality': 'SD',
                                'language': 'en',
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })
                    except:
                        log_utils.log('plockers5 Exception', 1)
                        pass
            return sources
        except:
            log_utils.log('plockers Exception', 1)
            return sources
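The key step above is recovering the hoster iframe from a Base64 payload written via document.write. A self-contained sketch of that decode, using a made-up embed URL:

import base64
import re

# Hypothetical page fragment shaped like the one decoded above.
embed = '<iframe src="https://example.host/embed/1"></iframe>'
html = 'document.write(Base64.decode("%s"))' % base64.b64encode(embed.encode()).decode()

payload = re.findall(r'document.write\(Base64.decode\("(.+?)"\)', html)[0]
decoded = base64.b64decode(payload).decode('utf-8', 'ignore')
iframe_src = re.findall(r'<iframe[^>]+src="([^"]+)"', decoded, re.I)[0]
print(iframe_src)  # https://example.host/embed/1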
Ejemplo n.º 23
0
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             ep = data['episode']
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                 self.base_link, cleantitle.geturl(
                     data['tvshowtitle']), int(data['season']), ep)
             # r = client.request(url, headers=headers, timeout='10', output='geturl')
             r = cfScraper.get(url).content
             if not r:  # fall back to the search page when the direct episode URL cannot be fetched
                 url = self.searchShow(data['tvshowtitle'], data['season'],
                                       aliases, headers)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases,
                                    headers)
             if url is None:
                 url = '%s/film/%s/watching.html?ep=0' % (
                     self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         # r = client.request(url, headers=headers, timeout='10')
         r = cfScraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
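         # Each server button keeps its embed URL in a 'player-data' attribute; episode buttons also carry 'episode-data', matched against the requested episode below.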
         if 'tvshowtitle' in data:
             ep = data['episode']
             links = client.parseDOM(r,
                                     'a',
                                     attrs={'episode-data': ep},
                                     ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             link = "https:" + link if not link.startswith('http') else link
             if '123movieshd' in link or 'seriesonline' in link:
                 # r = client.request(link, headers=headers, timeout='10')
                 r = cfScraper.get(link).content
                 r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                 for i in r:
                     sources.append({
                         'source':
                         'gvideo',
                         'quality':
                         directstream.googletag(i)[0]['quality'],
                         'language':
                         'en',
                         'url':
                         i,
                         'direct':
                         True,
                         'debridonly':
                         False
                     })
             else:
                 valid, host = source_utils.is_host_valid(link, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(
                         link, link)
                     if 'load.php' not in link:
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': link,
                             'direct': False,
                             'debridonly': False
                         })
         return sources
     except:
         return sources
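All of these providers receive their metadata as a URL-encoded query string and flatten parse_qs's list values into a plain dict before use. A small stand-alone illustration of that idiom and of the episode path built above; the base URL and the slug normalisation are simplified assumptions (the real code uses cleantitle.geturl and the provider's base_link):

from urllib.parse import parse_qs, urlencode

url = urlencode({'tvshowtitle': 'some show', 'year': '2019', 'season': '1', 'episode': '3'})
data = parse_qs(url)                                               # values arrive as lists
data = dict((i, data[i][0]) if data[i] else (i, '') for i in data)

# Illustrative episode path in the same shape as the one built above.
ep_url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
    'https://example-site.example', data['tvshowtitle'].replace(' ', '-'),
    int(data['season']), data['episode'])
print(ep_url)  # https://example-site.example/film/some-show-season-1/watching.html?ep=3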
Ejemplo n.º 24
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])

            url = self.search_link % quote_plus(query)
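            # quote_plus() leaves '%3A' behind when the title contains a colon; the replace below collapses that back to a single '+'.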
            url = urljoin(self.base_link, url).replace('%3A+', '+')

            #r = client.request(url)
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')

            posts = client.parseDOM(r, "div", attrs={"class": "postContent"})
            items = []
            for post in posts:
                try:
                    p = client.parseDOM(post, "p", attrs={"dir": "ltr"})[1:]
                    for i in p:
                        items.append(i)
                except:
                    pass

            try:
                for item in items:
                    u = client.parseDOM(item, 'a', ret='href')
                    name = re.findall('<strong>(.*?)</strong>', item,
                                      re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): continue
                    for url in u:
                        if any(x in url for x in ['.rar', '.zip', '.iso']):
                            continue
                        quality, info = source_utils.get_release_quality(
                            name, url)
                        try:
                            size = re.findall(
                                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB|gb|mb))',
                                item, re.DOTALL)[0]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True,
                                'size': dsize,
                                'name': name
                            })
            except:
                pass
            return sources
        except:
            log_utils.log('max_rls Exception', 1)
            return sources
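The size handling above relies on source_utils._size, whose internals are not shown here. A rough stand-in illustrating the same idea (pull the first '4.37 GB' / '700 MB' style token out of the post text and normalise it to gigabytes); this helper is an approximation for illustration, not the add-on's actual implementation:

import re

def approx_size(text):
    # Rough stand-in for source_utils._size: return (size_in_gb, display_string).
    m = re.findall(r'((?:\d+,\d+\.\d+|\d+\.\d+|\d+,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', text, re.I)
    if not m:
        return 0.0, ''
    raw = m[0]
    value = float(re.sub(r'[^\d.]', '', raw.replace(',', '')))
    gb = value / 1024.0 if raw.lower().rstrip('ib').endswith('m') else value
    return round(gb, 2), '%.2f GB' % gb

print(approx_size('Some.Movie.2020.1080p.WEB-DL [4.37 GB]'))  # (4.37, '4.37 GB')
print(approx_size('Some.Show.S01E02.720p 700 MB'))            # (0.68, '0.68 GB')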
Ejemplo n.º 25
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            r = client.request(url)

            posts = re.findall('<h2 class="title">(.+?)</h2>', r,
                               re.IGNORECASE)

            hostDict = hostprDict + hostDict

            urls = []
            for item in posts:

                try:
                    link, name = re.findall('href="(.+?)" title="(.+?)"', item,
                                            re.IGNORECASE)[0]
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                    name = client.replaceHTMLCodes(name)
                    try:
                        _name = name.lower().replace('permalink to', '')
                    except:
                        _name = name

                    quality, info = source_utils.get_release_quality(
                        name, link)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            name)[-1]
                        dsize, isize = source_utils._size(size)
                    except Exception:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    links = self.links(link)
                    # carry the matching size/name with each link so the loop below
                    # does not pick up values left over from a different post
                    urls += [(i, quality, info, dsize, _name) for i in links]
                except Exception:
                    pass

            for item in urls:
                if 'earn-money' in item[0]:
                    continue

                if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                    continue
                url = client.replaceHTMLCodes(item[0])
                #url = url.encode('utf-8')
                url = ensure_text(url)

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                host = ensure_text(host)

                sources.append({
                    'source': host,
                    'quality': item[1],
                    'language': 'en',
                    'url': url,
                    'info': item[2],
                    'direct': False,
                    'debridonly': True,
                    'size': item[3],
                    'name': item[4]
                })
            return sources
        except Exception:
            return sources
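The search URL above is built by sanitising the query and percent-encoding it. A stand-alone sketch using the same character filter; base_link and search_link are made-up placeholders for the provider's own attributes:

import re
from urllib.parse import quote_plus, urljoin

base_link = 'https://example-releases.example'    # placeholder for self.base_link
search_link = '/?s=%s'                             # placeholder for self.search_link

title, season, episode = "Marvel's Some Show", 1, 2
query = '%s S%02dE%02d' % (title, season, episode)
query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)   # drop characters the search chokes on
url = urljoin(base_link, search_link % quote_plus(query))
print(url)  # https://example-releases.example/?s=Marvel+s+Some+Show+S01E02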
Ejemplo n.º 26
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources
            hostDict = hostprDict + hostDict

            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
            }
            first_url = url
            r = cfScraper.get(first_url).content
            links = re.compile('<iframe.+?src="(.+?)://(.+?)/(.+?)"',
                               re.DOTALL).findall(r)
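            # The iframe URL is captured as (scheme, host, path) so 'www.' can be stripped before it is reassembled below.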
            for http, host, url in links:
                host = host.replace('www.', '')
                url = '%s://%s/%s' % (http, host, url)
                if 'seehd' in url:
                    r = cfScraper.get(url).content
                    extra_link = re.compile('<center><iframe.+?src="(.+?)"',
                                            re.DOTALL).findall(r)[0]
                    valid, host = source_utils.is_host_valid(
                        extra_link, hostDict)
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': extra_link,
                        'direct': False,
                        'debridonly': False
                    })
                elif '24hd' in url:
                    url = url.split('v/')[1]
                    post_link = urlparse.urljoin(self.hdclub_link, url)
                    payload = {'r': first_url, 'd': 'www.24hd.club'}
                    post_data = requests.post(post_link,
                                              headers=headers,
                                              data=payload)
                    response = post_data.text  # .text yields str, so the regex below also works on Python 3

                    link = re.compile('"file":"(.+?)","label":"(.+?)"',
                                      re.DOTALL).findall(response)
                    for link, quality in link:
                        link = link.replace('\/', '/')

                        if '1080p' in quality:
                            quality = '1080p'
                        elif '720p' in quality:
                            quality = '720p'
                        else:
                            quality = 'SD'

                        sources.append({
                            'source': 'Direct',
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'direct': True,
                            'debridonly': False
                        })
                else:
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except Exception:
            return sources
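The 24hd branch above parses JW-player style "file"/"label" pairs and maps each label onto the add-on's quality tags. A self-contained sketch with a made-up player response:

import re

def label_to_quality(label):
    # Collapse a player label such as '1080p HD' or '480' into the quality tags used above.
    if '1080' in label:
        return '1080p'
    if '720' in label:
        return '720p'
    return 'SD'

# Hypothetical player response in the same '"file":"...","label":"..."' shape parsed above.
response = '{"file":"https:\\/\\/cdn.example\\/v\\/a.mp4","label":"720p"},{"file":"https:\\/\\/cdn.example\\/v\\/b.mp4","label":"480p"}'

for link, label in re.findall(r'"file":"(.+?)","label":"(.+?)"', response):
    print(label_to_quality(label), link.replace('\\/', '/'))
# 720p https://cdn.example/v/a.mp4
# SD https://cdn.example/v/b.mp4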