Example #1
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources
            for link in url:
                try:
                    lang = link[u'quality']
                    video_link = link[u'url']
                    lang, info = self.get_lang_by_type(lang)
                    q = source_utils.check_sd_url(video_link)
                    valid, host = source_utils.is_host_valid(video_link, hostDict)
                    if 'rapidvideo' in video_link:
                        content = requests.get(video_link, timeout=3, allow_redirects=True).content
                        q = re.findall("""data-res=\"(.*?)\"""", content)[0]
                        if int(q) == 720:
                            q = 'HD'
                        elif int(q) > 720:
                            q = '1080'
                        else:
                            q = 'SD'
                    if 'streamango' in video_link or 'openload' in video_link:
                        content = requests.get(video_link, timeout=3, allow_redirects=True).content
                        q = re.findall("""og:title\" content=\"(.*?)\"""", content)[0]
                        q = source_utils.get_release_quality('', q)[0]
                    if valid:
                        if 'ebd' in host.lower():
                            host = 'CDA'
                        sources.append({'source': host, 'quality': q, 'language': lang, 'url': video_link,
                                        'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources
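
Note: every sources() implementation collected here returns records of the same shape. A minimal sketch of a validity check for that shape (the helper name is ours; the key list is copied from the dictionaries the examples append, with the optional 'info' field left out):

REQUIRED_KEYS = ('source', 'quality', 'language', 'url', 'direct', 'debridonly')

def is_valid_source(item):
    # True when a scraper result carries every field the examples emit.
    return all(key in item for key in REQUIRED_KEYS)
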
Example #2
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            html = client.request(url)
            try:
                iframe = client.parseDOM(html, 'iframe', attrs={'class': 'embed-responsive-item'}, ret='src')[0]
                host = iframe.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': iframe, 'direct': False,
                                'debridonly': False})
            except:
                flashvar = client.parseDOM(html, 'param', attrs={'name': 'flashvars'}, ret='value')[0]
                link = flashvar.split('file=')[1]
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
                                'debridonly': False})

            containers = client.parseDOM(html, 'div', attrs={'class': 'dwn-box'})

            for container in containers:
                link = client.parseDOM(container, 'a', attrs={'rel': 'nofollow'}, ret='href')[0]
                redirect = client.request(link, output='geturl')
                quality, info = source_utils.get_release_quality(redirect)
                sources.append(
                    {'source': 'DirectLink', 'quality': quality, 'language': 'en', 'url': redirect, 'info': info,
                     'direct': True, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            print('CoolTV - Exception: \n' + str(failure))
            return sources
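
Note: Examples #2, #4, #6 and #8 derive a display name for the hoster with the same split('//') chain. A standalone sketch of that idiom (the function name is ours; it assumes the link carries a scheme):

def host_from_url(link):
    # 'https://www.openload.co/embed/abc' -> 'Openload'
    host = link.split('//')[1].replace('www.', '')
    return host.split('/')[0].split('.')[0].title()

print(host_from_url('https://www.openload.co/embed/abc'))  # Openload
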
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            r = client.request(url)
            try:
                data = client.parseDOM(r, 'div', attrs={'class': 'playex'})
                data = [client.parseDOM(i, 'iframe', ret='src') for i in data if i]
                try:
                    for url in data[0]:
                        quality, info = source_utils.get_release_quality(url, None)
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        host = host.encode('utf-8')
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    pass
            except:
                pass

            return sources
        except Exception:
            return sources
Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            headers = {'User-Agent': client.randomagent()}
            html = client.request(url, headers=headers)

            Links = re.compile('id="link_.+?target="_blank" id="(.+?)"', re.DOTALL).findall(html)
            for vid_url in Links:
                if 'openload' in vid_url or 'streamango' in vid_url:
                    # Both hosters are scraped identically; keep one branch.
                    source_name = 'Openload' if 'openload' in vid_url else 'Streamango'
                    try:
                        source_html = client.request(vid_url, headers=headers)
                        source_string = re.compile('description" content="(.+?)"', re.DOTALL).findall(source_html)[0]
                        quality, info = source_utils.get_release_quality(source_string, vid_url)
                    except:
                        quality = 'DVD'
                        info = []
                    sources.append(
                        {'source': source_name, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info,
                         'direct': False, 'debridonly': False})
                else:
                    if resolveurl.HostedMediaFile(vid_url):
                        quality, info = source_utils.get_release_quality(vid_url, vid_url)
                        host = vid_url.split('//')[1].replace('www.', '')
                        host = host.split('/')[0].split('.')[0].title()
                        sources.append(
                            {'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info,
                             'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return sources
Example #5
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(
                data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r)
            r = client.parseDOM(r, 'item')
            title = client.parseDOM(r, 'title')[0]
            if hdlr in title:
                r = re.findall(
                    '<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3',
                    r[0], re.DOTALL)
                for name, size, url in r:
                    quality, info = source_utils.get_release_quality(name, url)
                    try:
                        size = re.sub('i', '', size)
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

            return sources
        except:
            return sources
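
Note: the size-normalisation block in Example #5 recurs in Examples #11, #13 and #17 through #19. A simplified standalone sketch (the name is ours; it assumes inputs like '700 MB' or '1.37 GiB' and maps a decimal comma to a dot):

import re

def size_in_gb(size_text):
    # Drop the binary 'i' ('GiB' -> 'GB'), convert MB to GB, format.
    size = size_text.replace('i', '')
    div = 1 if size.endswith('GB') else 1024
    value = float(re.sub('[^0-9.]', '', size.replace(',', '.')))
    return '%.2f GB' % (value / div)

print(size_in_gb('700 MB'))    # 0.68 GB
print(size_in_gb('1.37 GiB'))  # 1.37 GB
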
Example #6
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            result = client.request(url)
            res_chk = re.compile('<title>(.+?)</title>',
                                 re.DOTALL).findall(result)[0]
            r = client.parseDOM(result, 'tbody')
            r = client.parseDOM(r, 'tr')
            r = [(re.findall('<td>(.+?)</td>',
                             i)[0], client.parseDOM(i, 'a', ret='href')[0])
                 for i in r]

            if r:
                for i in r:
                    try:
                        hostchk = i[0]
                        if 'other' in hostchk: continue

                        vid_page = urlparse.urljoin(self.base_link, i[1])
                        html = client.request(vid_page)
                        vid_div = re.compile('<div class="wrap">(.+?)</div>',
                                             re.DOTALL).findall(html)[0]
                        vid_url = re.compile('href="(.+?)"',
                                             re.DOTALL).findall(vid_div)[0]
                        quality, info = source_utils.get_release_quality(
                            res_chk, vid_url)
                        host = vid_url.split('//')[1].replace('www.', '')
                        host = host.split('/')[0].lower()
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': vid_url,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass
            return sources
        except:
            failure = traceback.format_exc()
            print('Vodly - Exception: \n' + str(failure))
            return sources
Example #7
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
            r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
                  dom_parser2.parse_dom(i, 'div', {'class': 'notes'})) \
                 for i in r if i]
            r = [(i[0][0].attrs['href'], i[0][0].content,
                  i[1][0].content if i[1] else 'None') for i in r]
            for i in r:
                try:
                    url = i[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    valid, host = source_utils.is_host_valid(i[1], hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    quality, info = source_utils.get_release_quality(
                        i[2], i[2])

                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #8
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')

            for post in posts:
                Links = client.parseDOM(post, 'enclosure', ret='url')
                if Links:
                    for vid_url in Links:
                        quality, info = source_utils.get_release_quality(url, vid_url)
                        host = vid_url.split('//')[1].replace('www.', '')
                        host = host.split('/')[0].lower()
                        sources.append(
                            {'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info,
                             'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return sources
Example #9
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            if debrid.status() is False: raise Exception()

            hostDict = hostprDict + hostDict
            pages = url
            for page_url in pages:
                r = client.request(page_url)
                urls = client.parseDOM(r, 'a', ret='href')
                for url in urls:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                        if host not in hostDict: raise Exception()

                        if any(x in url for x in ['.rar', '.zip', '.iso']): continue

                        quality, _ = source_utils.get_release_quality(url)

                        info = []

                        if any(x in url.upper() for x in ['HEVC', 'X265', 'H265']): info.append('HEVC')

                        info = ' | '.join(info)

                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                        'direct': False, 'debridonly': True})
                    except:
                        pass
            return sources
        except:
            failure = traceback.format_exc()
            print('ALLRLS - Exception: \n' + str(failure))
            return sources
Example #10
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            # if no link returned in movie and tvshow searches, nothing to do here, return out.
            if url is None or len(url) == 0:
                return sources

            # Time to get scraping
            title = url['title']
            year = url['year']
            # Build up the search url
            searchLink = self.search_link + title + ' ' + year
            url = urlparse.urljoin(self.base_link, searchLink)

            # Site uses cf, so we need cfscrape
            scraper = cfscrape.create_scraper()
            html = scraper.get(url).content
            result_soup = BeautifulSoup(html, "html.parser")

            # Parse the table of results
            table = result_soup.find("table")
            table_body = table.find("tbody")
            rows = table_body.findAll("tr")
            fileLinks = []
            for row in rows:
                cols = row.findAll("td")
                for col in cols:
                    links = col.findAll("a", href=True)
                    for link in links:
                        if "/file/" in link['href']:
                            # Use this one
                            fileLinks.append(link['href'])
                            break
            # Retrieve actual links from result pages
            actualLinks = []
            for fileLink in fileLinks:
                actual_url = urlparse.urljoin(self.base_link, fileLink)
                html = scraper.get(actual_url.encode('ascii')).content
                linkSoup = BeautifulSoup(html, "html.parser")
                link = str(
                    linkSoup.find(
                        "button",
                        {"title": "Copy Link"})['data-clipboard-text'])
                # Exclude zip and rar files
                if link.lower().endswith('rar') or link.lower().endswith(
                        'zip'):
                    continue
                else:
                    actualLinks.append(link)

            for link in actualLinks:
                quality, info = source_utils.get_release_quality(link, url)
                sources.append({
                    'source': 'DIRECT',
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': info,
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except Exception as e:
            # log_utils.log('EXCEPTION MSG: '+str(e))
            return sources
Example #11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if not debrid.status(): raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'h2')
            r = [re.findall('''<a.+?href=["']([^"']+)["']>(.+?)</a>''', i, re.DOTALL) for i in r]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[0][1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    t1 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(urlparse.urljoin(self.base_link, item[0][0]))
                    data = dom_parser2.parse_dom(data, 'a', attrs={'target': '_blank'})
                    u = [(t, i.content) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(name, item[1])

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if not url.startswith('http'): continue
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            print('TVRelease - Exception: \n' + str(failure))
            return sources
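
Note: Examples #5, #8, #11, #12, #18 and #19 build their search query the same way: an S01E02 tag (hdlr) for episodes, the year for movies, then a punctuation strip before quote_plus. A standalone sketch of that shared pattern (the function name and return shape are ours):

import re

def build_query(data):
    if 'tvshowtitle' in data:
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s %s' % (data['tvshowtitle'], hdlr)
    else:
        hdlr = data['year']
        query = '%s %s' % (data['title'], data['year'])
    # The same character strip the scrapers apply before quoting.
    query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
    return query, hdlr

print(build_query({'tvshowtitle': 'Show', 'season': '1', 'episode': '2'}))
# ('Show S01E02', 'S01E02')
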
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = '%sS%02dE%02d' % (
                data['year'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']

            query = '%s %s S%02dE%02d' % (
                data['tvshowtitle'], data['year'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            try:
                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []
                dupes = []

                for post in posts:
                    try:
                        t = client.parseDOM(post, 'a')[0]
                        t = re.sub('<.+?>|</.+?>', '', t)

                        x = re.sub(
                            '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                            '', t)
                        if not cleantitle.get(title) in cleantitle.get(x):
                            raise Exception()
                        y = re.findall(
                            '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                            t)[-1].upper()
                        if not y == hdlr: raise Exception()

                        fmt = re.sub(
                            '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                            '', t.upper())
                        fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                        fmt = [i.lower() for i in fmt]
                        # if not any(i in ['1080p', '720p'] for i in fmt): raise Exception()

                        if len(dupes) > 2: raise Exception()
                        dupes += [x]

                        u = client.parseDOM(post, 'a', ret='href')[0]

                        r = client.request(u)
                        u = client.parseDOM(r, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('[\\\\:;*?"<>|/ \+\']+', '-', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            r = client.parseDOM(r, "div", attrs={'class': 'entry-content'})[0]
            r = re.sub('shareaholic-canvas.+', '', r, flags=re.DOTALL)

            a_txt = client.parseDOM(r, "a", attrs={'href': '.+?'})
            a_url = client.parseDOM(r, "a", ret="href")
            r = re.sub('<a .+?</a>', '', r, flags=re.DOTALL)
            r = re.sub('<img .+?>', '', r, flags=re.DOTALL)

            size = ''
            pre_txt = []
            pre_url = []
            pres = client.parseDOM(r, "pre", attrs={'style': '.+?'})
            for pre in pres:
                try:
                    size = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', pre)[0]
                except:
                    pass

                url0 = re.findall('https?://[^ <"\'\s]+', pre, re.DOTALL)
                txt0 = [size] * len(url0)
                pre_url = pre_url + url0
                pre_txt = pre_txt + txt0

            r = re.sub('<pre .+?</pre>', '', r, flags=re.DOTALL)

            size = ''
            if 'tvshowtitle' not in data:
                try:
                    size = " " + re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))',
                                            r)[0]
                except:
                    pass

            raw_url = re.findall('https?://[^ <"\'\s]+', r, re.DOTALL)
            raw_txt = [size] * len(raw_url)

            pairs = zip(a_url + pre_url + raw_url, a_txt + pre_txt + raw_txt)

            hostDict = hostDict + hostprDict

            for pair in pairs:
                try:
                    url = str(pair[0])
                    info = re.sub('<.+?>', '', pair[1])

                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    if not query.lower() in re.sub('[\\\\:;*?"<>|/ \+\'\.]+',
                                                   '-', url + info).lower():
                        raise Exception()

                    size0 = info + " " + size

                    try:
                        size0 = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))',
                                           size0)[0]
                        div = 1 if size0.endswith(('GB', 'GiB')) else 1024
                        size0 = float(re.sub('[^0-9\.]', '', size0)) / div
                        size0 = '%.2f GB' % size0
                    except:
                        size0 = ''

                    quality, info = source_utils.get_release_quality(url, info)
                    info.append(size0)
                    info = ' | '.join(info)

                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

                except:
                    pass

            return sources
        except:
            return sources
Example #14
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            t = data['tvshowtitle']
            season = '%01d' % int(data['season'])
            episode = '%02d' % int(data['episode'])

            query = cleantitle.getsearch(t)
            r = urlparse.urljoin(self.base_link, self.search_link2)
            post = {'query': query}
            r = client.request(r, post=post)
            r = dom_parser2.parse_dom(r, 'a')
            r = [(i.attrs['href'], dom_parser2.parse_dom(i.content, 'span', attrs={'class': 'searchheading'})) for i in
                 r]
            try:
                url = []
                for i in r:
                    t1 = i[1][0].content
                    t2 = re.sub('[Ss]eason\s*\d+', '', t1)
                    if str(int(season)) not in t1: continue
                    if cleantitle.get(t) == cleantitle.get(t2) and 'pack' not in i[0]:
                        url.append(i[0])
                if len(url) > 1:
                    url = [i for i in url if 'hd' in i][0]
                else:
                    url = url[0]

            except:
                pass
            if len(url) == 0:
                try:
                    r = urlparse.urljoin(self.base_link, self.search_link)
                    t = '%s season %s' % (t, season)
                    post = 'do=search&subaction=search&story=%s' % urllib.quote_plus(cleantitle.getsearch(t))
                    r = client.request(r, post=post)
                    r = dom_parser2.parse_dom(r, 'h4')
                    r = [dom_parser2.parse_dom(i.content, 'a', req=['href']) for i in r if i]
                    r = [(i[0].attrs['href'], i[0].content) for i in r if i]
                    r = [(i[0], i[1]) for i in r if t.lower() == i[1].replace(' -', '').lower()]
                    r = [i[0] for i in r if 'pack' not in i[0]]
                    url = r[0][0]

                except:
                    pass

            links = []

            r = client.request(url)
            name = re.findall('<b>Release Name :.+?">(.+?)</span>', r, re.DOTALL)[0]
            link = client.parseDOM(r, 'span', attrs={'class': 'downloads nobr'})
            link = [(re.findall('<a href="(.+?)"\s*target="_blank">[Ee]pisode\s*(\d+)</a>', i, re.DOTALL)) for i in
                    link]
            for item in link:
                link = [(i[0], i[1]) for i in item if i[1] == str(episode)]
                links.append(link[0][0])

            quality, info = source_utils.get_release_quality(name, None)

            for url in links:
                try:
                    if "protect" in url:
                        redirect = client.request(url)
                        url = re.findall('<a href="(.*?)" target="_blank">', redirect)
                        url = url[0]

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
                                    'debridonly': True})
                except:
                    pass

            return sources
        except:
            return sources
Example #15
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                raise Exception()

            if not self.api:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = int(
                data['year']
            ) if 'year' in data and not data['year'] == None else None
            season = int(
                data['season']
            ) if 'season' in data and not data['season'] == None else None
            episode = int(
                data['episode']
            ) if 'episode' in data and not data['episode'] == None else None
            query = '%s S%02dE%02d' % (
                title, season,
                episode) if 'tvshowtitle' in data else '%s %d' % (title, year)

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query += ' lang:%s' % self.language[0]
            query = urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, self.search_link)

            hostDict = hostprDict + hostDict

            iterations = self.streamLimit / self.streamIncrease
            last = self.streamLimit - (iterations * self.streamIncrease)
            if not last:
                iterations = iterations - 1
                last = self.streamIncrease
            iterations = iterations + 1

            seen_urls = set()
            for stream_type in self.types:
                searchFrom = 0
                searchCount = self.streamIncrease
                for offset in range(iterations):
                    if iterations == offset + 1: searchCount = last
                    urlNew = url % (stream_type, self.api, query, searchCount,
                                    searchFrom)
                    searchFrom = searchFrom + self.streamIncrease

                    results = client.request(urlNew)
                    results = json.loads(results)

                    apistatus = results['status']
                    if apistatus != 'success': break

                    results = results['result']

                    added = False
                    for result in results:
                        jsonName = result['title']
                        jsonSize = result['sizeinternal']
                        jsonExtension = result['extension']
                        jsonLanguage = result['lang']
                        jsonHoster = result['hostername'].lower()
                        jsonLink = result['hosterurls'][0]['url']

                        if jsonLink in seen_urls: continue
                        seen_urls.add(jsonLink)

                        if hdlr not in jsonName.upper(): continue

                        if not self.releaseValid(title, jsonName):
                            continue  # filter non en releases

                        if jsonHoster not in hostDict: continue

                        if jsonExtension == 'rar': continue

                        quality, info = source_utils.get_release_quality(
                            jsonName)
                        info.append(self.formatSize(jsonSize))
                        info.append(jsonName)
                        info = '|'.join(info)

                        sources.append({
                            'source': jsonHoster,
                            'quality': quality,
                            'language': jsonLanguage,
                            'url': jsonLink,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
                        added = True

                    if not added:
                        break

            return sources
        except:
            return sources
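
Note: the iterations/last arithmetic at the top of Example #15 plans the paging: full pages of streamIncrease results, plus one final page that tops the total up to streamLimit. The same arithmetic as a standalone sketch (the name is ours; integer division is made explicit for Python 3):

def page_plan(stream_limit, stream_increase):
    iterations = stream_limit // stream_increase
    last = stream_limit - iterations * stream_increase
    if not last:
        iterations -= 1
        last = stream_increase
    return iterations + 1, last

print(page_plan(100, 30))  # (4, 10): three pages of 30, one of 10
print(page_plan(90, 30))   # (3, 30): three full pages
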
Example #16
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # NOTE: eval trusts the url payload; ast.literal_eval would be safer.
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            headers['Referer'] = url
            ref_url = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.s.post(url, headers=headers)
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append(
                                    {'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False,
                                     'debridonly': False})
                                continue
                            else:
                                url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                            script = self.s.get(url, headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append(
                                        {'source': 'gvideo', 'quality': q, 'language': 'en', 'url': url, 'direct': True,
                                         'debridonly': False})
                                    continue

                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url, ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append(
                                        {'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': direct,
                                         'debridonly': False})
                                else:
                                    sources.append(
                                        {'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True,
                                         'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
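
Note: Example #16 polls the source endpoint until it returns a non-empty body (while length == 0 and count < 11). The same retry loop factored into a reusable sketch (fetch is any zero-argument callable returning a string; the name is ours):

def fetch_until_nonempty(fetch, max_retries=11):
    body = ''
    count = 0
    while len(body) == 0 and count < max_retries:
        body = fetch()
        if len(body) == 0:
            count += 1
    return body
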
Example #17
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            imdb = data['imdb']

            link = []
            try:
                # All three mirrors share one result format; stop at the
                # first page that yields a match for this imdb id.
                for search in (self.search_link, self.search_link2, self.search_link3):
                    query = urlparse.urljoin(self.base_link, search)
                    result = client.request(query)
                    m = re.findall(
                        'Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse',
                        result, re.DOTALL)
                    m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
                    if m:
                        link = m
                        break
            except:
                return sources

            for item in link:
                try:

                    quality, info = source_utils.get_release_quality(
                        item[2], None)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[0])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[2]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': 'DL',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'link')[0]
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', t)
                    s = s[0] if s else '0'

                    items += [(t, u, s)]

                except:
                    pass

            urls = []
            for item in items:

                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, item[1])
                    if any(x in quality for x in ['CAM', 'SD']): continue

                    try:
                        size = re.sub('i', '', item[2])
                        div = 1 if size.endswith('GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    links = self.links(url)
                    urls += [(i, quality, info) for i in links]

                except:
                    pass

            for item in urls:

                if 'earn-money' in item[0]: continue
                if any(x in item[0] for x in ['.rar', '.zip', '.iso']): continue
                url = client.replaceHTMLCodes(item[0])
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append(
                    {'source': host, 'quality': item[1], 'language': 'en', 'url': url, 'info': item[2], 'direct': False,
                     'debridonly': True})

            return sources
        except:
            return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            html = client.request(url)
            posts = client.parseDOM(html, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'a', ret='href')
                    s = re.search(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        post)
                    s = s.groups()[0] if s else '0'
                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '',
                        name,
                        flags=re.I)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('2DDL - Exception: \n' + str(failure))
            return sources
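
Note: Examples #11 and #19 end with the same CAM fallback: CAM rips are dropped unless they are the only sources found. As a standalone helper (a sketch; the name is ours):

def prefer_non_cam(sources):
    check = [i for i in sources if i['quality'] != 'CAM']
    return check or sources
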
Example #20
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            try:
                if 'tvshowtitle' in data:
                    epi = 'EP%d' % int(data['episode'])
                    links = self.searchShow(data['tvshowtitle'],
                                            data['season'])
                    url = [i[1] for i in links if epi.lower() == i[0].lower()]

                else:
                    url = self.searchMovie(data['title'], data['year'])
                    try:
                        url = client.parseDOM(url,
                                              'iframe',
                                              ret='src',
                                              attrs={'id': 'advanced_iframe'})
                    except:
                        url = re.findall(
                            '''<h4>server\d+</h4>.+?src=['"]([^'"]+)''', url,
                            re.I | re.DOTALL)

            except:
                pass

            for u in url:
                if 'entervideo' in u:
                    r = client.request(u)
                    url = client.parseDOM(r, 'source', ret='src')[0]
                    quality, info = source_utils.get_release_quality(url, url)
                    sources.append({
                        'source': 'CDN',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })
                elif 'vidnode' in u:
                    headers = {
                        'Host': 'vidnode.net',
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
                        'Upgrade-Insecure-Requests': '1',
                        'Accept-Language': 'en-US,en;q=0.9'
                    }
                    r = client.request(u, headers=headers)
                    links = re.findall(
                        '''\{file:\s*['"]([^'"]+).*?label:\s*['"](\d+\s*P)['"]''',
                        r, re.DOTALL | re.I)
                    for u, qual in links:
                        quality, info = source_utils.get_release_quality(
                            qual, u)
                        url = u
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })

            return sources
        except:
            return sources