Example #1
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape hoster links from the embedded iframes of the page at *url*.

     Each ``<div><iframe src=...>`` on the page points at a hoster page,
     which is fetched in turn and mined for ``url: '...'`` / ``file: "..."``
     entries.  Returns a list of source dicts; on any error the (possibly
     empty) list gathered so far is returned instead of ``None`` so callers
     can always iterate the result.
     """
     sources = []
     try:
         page = self.scraper.get(url).content
         iframes = re.compile('<div><iframe src="(.+?)"').findall(page)
         for link in iframes:  # renamed: original shadowed the `url` param
             # Derive a display name from the hoster's domain,
             # e.g. "http://www.foo.com/x" -> "Foo".
             host = link.split('//')[1].replace('www.', '')
             host = host.split('/')[0].split('.')[0].title()
             quality = source_utils.check_sd_url(link)
             hoster_page = self.scraper.get(link).content
             # Absolute links embed the stream as url: '...'; otherwise
             # the player config uses file: "...".
             if 'http' in link:
                 files = re.compile("url: '(.+?)',").findall(hoster_page)
             else:
                 files = re.compile('file: "(.+?)",').findall(hoster_page)
             for file_url in files:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': file_url,
                     'direct': False,
                     'debridonly': False
                 })
     except Exception:
         # Best-effort scraper: swallow failures but return what we have.
         # (The original bare `except:` returned None, which broke callers
         # that iterate the result.)
         return sources
     return sources
Example #2
0
    def sources(self, url, hostDict, hostprDict):
        """Search BitLord for cached torrents matching *url*'s metadata.

        ``url`` is treated as a dict carrying either movie keys
        ('title', 'year') or episode keys ('tvshowtitle', 'season',
        'episode') -- presumably built by the calling resolver; verify
        against caller.  Each result's infohash is checked against the
        debrid cache and only 'finished' hashes are emitted.  Returns a
        list of source dicts; on error the list gathered so far is
        returned after logging.
        """
        sources = []
        try:
            with requests.Session() as s:
                # NOTE(review): this header set is built but never passed to
                # s.post()/s.get() below, so it currently has no effect.
                # Left unchanged to preserve observable behavior; confirm
                # whether the requests were meant to send it.
                headers = {
                    "Referer": self.domain,
                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                    "Host": "www.BitLord.com",
                    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0",
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                    "Accept-Encoding": "gzip, deflate, br",
                    "Accept-Language": "en-US,en;q=0.5",
                    "Connection": "keep-alive",
                    "DNT": "1",
                }
                if 'episode' in url:
                    # Build an "sNNeNN" tag (e.g. s01e02) and search
                    # "<show.title>.sNNeNN" in the TV category.
                    iep = url['episode'].zfill(2)
                    ise = url['season'].zfill(2)
                    se = 's' + ise + 'e' + iep
                    sel = url['tvshowtitle'].replace(' ', '.') + '.' + se
                    cate = '4'
                else:
                    # Movies: search "<title>.<year>" in the movie category.
                    sel = url['title'].replace(' ', '.') + '.' + url['year']
                    cate = '3'

                sel = sel.lower()
                bdata = {'filters[adult]': 'false',
                         'filters[category]': cate,
                         'filters[field]': 'category',
                         'filters[sort]': 'asc',
                         'filters[time]': '4',
                         'limit': '25',
                         'offset': '0',
                         'query': sel}

                gs = s.post(self.search_link, data=bdata).text

                # Extract (name, infohash) pairs from the search response.
                gl = re.compile('me\W+(.*?)[\'"].*?tih:(.*?)\W',
                                re.I).findall(gs)
                for nam, haas in gl:
                    # Ask the debrid service whether this hash is cached.
                    # (A leftover debug print of the raw hash was removed.)
                    checkca = s.get(self.checkc %
                                    (self.api_key, haas, self.api_key)).text
                    quality = source_utils.check_sd_url(nam)
                    if 'finished' in checkca:
                        url = self.pr_link % (self.api_key, haas)
                        sources.append({
                            'source': 'cached',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False,
                            'info': nam,
                        })
            return sources
        except Exception:  # was a bare except: -- narrow to Exception
            print("Unexpected error in BitLord Script: Sources",
                  sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #3
0
 def sources(self, url, hostDict, hostprDict):
     """Collect hoster links from the anchor table on the page at *url*.

     Returns a list of source dicts; on error the (possibly empty) list
     gathered so far is returned instead of ``None`` so callers can
     always iterate the result.
     """
     sources = []
     try:
         page = self.scraper.get(url).content
         links = re.compile(
             '<td align="center"><strong><a href="(.+?)"').findall(page)
         for link in links:  # renamed: original shadowed the `url` param
             # Hoster display name, derived from the link's domain,
             # e.g. "http://www.foo.com/x" -> "Foo".
             host = link.split('//')[1].replace('www.', '')
             host = host.split('/')[0].split('.')[0].title()
             quality = source_utils.check_sd_url(link)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
     except Exception:
         # Original returned bare `return` (None) here, breaking callers
         # that iterate the result; return the list instead.
         return sources
     return sources
Example #4
0
    def sources(self, url, hostDict, hostprDict):
        """Search a debrid-backed index for links matching *url*'s metadata.

        ``url`` is a urlencoded query string that decodes to a dict with
        either 'title'/'year' (movies) or 'tvshowtitle'/'season'/'episode'
        (episodes).  Builds (query_base, option) search combinations,
        scrapes the result pages for download links, and returns a list of
        source dicts.  Returns as soon as any option produces hits.
        """
        # Initialize before the try so the except handler can never hit a
        # NameError if an early statement raises.
        sources = []
        try:
            hostDict = hostDict + hostprDict

            query_bases = []
            options = []

            if url is None:
                return sources

            # Debrid-only scraper: bail out when no debrid account is set up.
            if not debrid.status():
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = (data['tvshowtitle'] if 'tvshowtitle' in data else data['title'])
            # Marker that must appear in a candidate href: SxxEyy or year.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            if 'tvshowtitle' in data:
                # Episodes: bare show title (and title+year), each combined
                # with an episode tag or a whole-season tag.
                query_bases.append('%s ' % (data['tvshowtitle'].replace("-", "")))
                if 'year' in data:
                    query_bases.append('%s %s ' % (data['tvshowtitle'], data['year']))
                options.append('S%02dE%02d' % (int(data['season']), int(data['episode'])))
                options.append('S%02d' % (int(data['season'])))
            else:
                query_bases.append('%s %s ' % (data['title'], data['year']))
                query_bases.append('%s ' % (data['title']))
                # BUG FIX: these two entries were appended to query_bases,
                # leaving `options` empty for movies -- the search loop
                # below then never executed and movies always returned no
                # sources.  They mirror the episode branch's qualifiers,
                # so they belong in `options`.
                options.append('2160p')
                options.append('')

            for option in options:
                for query_base in query_bases:
                    # Strip characters the search backend rejects.
                    q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query_base + option)
                    q = q.replace("  ", " ").replace(" ", "+")
                    url = self.base_link + self.search_link % q
                    html = self.scraper.get(url)
                    if html.status_code == 200:
                        posts = client.parseDOM(html.content, "div", attrs={"class": "title"})
                        for post in posts:
                            url = client.parseDOM(post, "a", ret='href')
                            if len(url) > 0:
                                # Follow the result into its detail page.
                                html = self.scraper.get(url[0])
                                if html.status_code == 200:
                                    quotes = client.parseDOM(html.content, "div", attrs={"class": "dlinks"})
                                    for quote in quotes:
                                        hrefs = client.parseDOM(quote, "a", ret='href')
                                        if not hrefs:
                                            continue
                                        for href in hrefs:
                                            quality = source_utils.check_sd_url(href)
                                            href = href.encode('utf-8')
                                            valid, host = source_utils.is_host_valid(href, hostDict)
                                            # Skip archives/images, unknown hosts,
                                            # and links not matching this title.
                                            if any(x in href for x in ['.rar', '.zip', '.iso']):
                                                continue
                                            if not valid:
                                                continue
                                            if hdlr in href.upper() and cleantitle.get(title) in cleantitle.get(href):
                                                sources.append(
                                                    {'source': host, 'quality': quality, 'language': 'en', 'url': href,
                                                     'direct': False, 'debridonly': False})
                # Stop at the first option that produced results.
                if len(sources) > 0:
                    return sources
            return sources
        except Exception:  # was a bare except: -- narrow to Exception
            return sources
Example #5
0
 def sources(self, url, hostDict, hostprDict):
     """Resolve playable links for *url* (a urlencoded metadata query).

     Locates the movie/episode page on the site, scrapes auth tokens
     from it, posts to the site's AJAX endpoint to obtain embed links,
     and classifies them as gvideo / CDN / generic hoster sources.
     Returns a list of source dicts; on any error, whatever was
     collected so far is returned.
     """
     try:
         sources = []
         if url is None: return sources
         # `url` arrives urlencoded; decode to a flat str->str dict.
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         imdb = data['imdb']
         # NOTE(review): eval() on caller-supplied text -- only safe if
         # the caller builds 'aliases' itself; never feed external input
         # through this path.
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             url = self.searchShow(title, int(data['season']),
                                   int(data['episode']), aliases, headers)
         else:
             url = self.searchMovie(title, data['year'], aliases, headers)
         # output='extended' -- r unpacks as body/headers/cookie tuple;
         # indices used below: [0]=body, [3]=headers, [4]=cookie.
         r = client.request(url,
                            headers=headers,
                            output='extended',
                            timeout='10')
         # Sanity check: the fetched page must mention the IMDB id,
         # otherwise the search landed on the wrong title.
         if not imdb in r[0]: raise Exception()
         cookie = r[4]
         headers = r[3]
         result = r[0]
         # Pass 1: direct Google redirector links embedded in the page.
         try:
             r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
             for i in r:
                 try:
                     sources.append({
                         'source':
                         'gvideo',
                         'quality':
                         directstream.googletag(i)[0]['quality'],
                         'language':
                         'en',
                         'url':
                         i,
                         'direct':
                         True,
                         'debridonly':
                         False
                     })
                 except:
                     pass
         except:
             pass
         # Build a Bearer token from the __utmx cookie (falls back to
         # the literal string 'false' when absent).
         try:
             auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
         except:
             auth = 'false'
         auth = 'Bearer %s' % urllib.unquote_plus(auth)
         headers['Authorization'] = auth
         headers['Referer'] = url
         u = '/ajax/vsozrflxcw.php'
         # Resolve redirects so the AJAX path is joined onto the final
         # host, not the original one.
         self.base_link = client.request(self.base_link,
                                         headers=headers,
                                         output='geturl')
         u = urlparse.urljoin(self.base_link, u)
         action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
         # elid: base64 of the current unix time, used as a nonce.
         elid = urllib.quote(
             base64.encodestring(str(int(time.time()))).strip())
         # Anti-scrape tokens scraped from inline JS on the page.
         token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
         idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
         post = {
             'action': action,
             'idEl': idEl,
             'token': token,
             'nopop': '',
             'elid': elid
         }
         post = urllib.urlencode(post)
         # The endpoint expects the idEl/elid pair echoed as a cookie.
         cookie += ';%s=%s' % (idEl, elid)
         headers['Cookie'] = cookie
         r = client.request(u,
                            post=post,
                            headers=headers,
                            cookie=cookie,
                            XHR=True)
         # Pass 2: harvest every http(s) URL out of the JSON response
         # (stringified, then regex-scanned for quoted links).
         r = str(json.loads(r))
         r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
         for i in r:
             try:
                 if 'google' in i:
                     # Google-hosted streams are direct; quality is probed
                     # per host flavor, defaulting to SD.
                     quality = 'SD'
                     if 'googleapis' in i:
                         try:
                             quality = source_utils.check_sd_url(i)
                         except:
                             pass
                     if 'googleusercontent' in i:
                         i = directstream.googleproxy(i)
                         try:
                             quality = directstream.googletag(
                                 i)[0]['quality']
                         except:
                             pass
                     sources.append({
                         'source': 'gvideo',
                         'quality': quality,
                         'language': 'en',
                         'url': i,
                         'direct': True,
                         'debridonly': False
                     })
                 elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                     # Known CDN hosts: also direct streams.
                     try:
                         quality = source_utils.check_sd_url(i)
                         sources.append({
                             'source': 'CDN',
                             'quality': quality,
                             'language': 'en',
                             'url': i,
                             'direct': True,
                             'debridonly': False
                         })
                     except:
                         pass
                 else:
                     # Anything else: keep only recognized hosters.
                     # NOTE(review): quality is hard-coded to '720p' here --
                     # presumably a default; confirm intent.
                     valid, hoster = source_utils.is_host_valid(i, hostDict)
                     if not valid: continue
                     sources.append({
                         'source': hoster,
                         'quality': '720p',
                         'language': 'en',
                         'url': i,
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources