Example #1
    def sources(self, url, hostDict, hostprDict):
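        # Two-phase scraper: resolve the POSTed player URL first, then query
        # base_link2 for extra mirrors; the 'wersja' divs carry the
        # Lektor/Napisy (voice-over/subtitles) info for each link.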
        try:
            sources = []
            try:
                search_url = url['url']
                post = url['post']
                referer = urlparse.urljoin(self.film_web, post['urlstrony'])
                result = client.request(search_url, post=post, referer=referer)
                if not result.startswith('http'):
                    return sources

                valid, host = source_utils.is_host_valid(result, hostDict)
                q = source_utils.check_sd_url(result)
                info = ''
                if 'lektor' in result:
                    info = 'Lektor'
                if 'napisy' in result:
                    info = 'Napisy'
                first_found = {
                    'source': host,
                    'quality': '720p',
                    'language': 'pl',
                    'url': result,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                }
                first_found['info'] = self.get_info_from_others(sources)
                sources.append(first_found)
            except:
                pass
            search_more_post = url['more']
            #search_url = urlparse.urljoin(self.base_link, self.search_more)
            result = client.request(self.base_link2, post=search_more_post)
            provider = client.parseDOM(result, 'option', ret='value')
            links = client.parseDOM(result, 'div', ret='data')
            wersja = client.parseDOM(result, 'div', attrs={'class': 'wersja'})
            #result = dom_parser.parse_dom(result, 'a')
            for counter, link in enumerate(links):
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: continue
                q = source_utils.check_sd_url(link)
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': 'pl',
                    'url': link,
                    'info': wersja[counter],
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
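        # Single-embed page: the player iframe sits inside the
        # '#video_player' section; a 'Z lektorem' span marks voice-over.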

        sources = []
        try:

            if url == None: return sources
            result = client.request(urlparse.urljoin(self.base_link, url),
                                    redirect=False)

            section = client.parseDOM(result,
                                      'section',
                                      attrs={'id': 'video_player'})[0]
            link = client.parseDOM(section, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            spans = client.parseDOM(section, 'span')
            info = None
            for span in spans:
                if span == 'Z lektorem':
                    info = 'Lektor'

            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
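        # The hoster iframe is hidden in an obfuscated inline script that
        # self.shwp() decodes; language info is read from the page title.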

        sources = []
        try:
            if url == None: return sources
            r = client.request(urlparse.urljoin(self.base_link, url),
                               redirect=False)
            info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])
            r = client.parseDOM(r, 'div', attrs={'class': 'tab-pane active'})[0]
            r = client.parseDOM(r, 'script')[0]
            script = r.split('"')[1]
            decoded = self.shwp(script)

            link = client.parseDOM(decoded, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })

            return sources
        except:
            return sources
def more_rapidvideo(link, hostDict, lang, info):
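    # Expands a rapidvideo.com embed into all /e/ URLs found on the page
    # (the loop starts at index 1, so the first match is skipped).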
    if "rapidvideo.com" in link:
        sources = []
        try:
            headers = {
                'User-Agent':
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36"
            }
            response = requests.get(link, headers=headers).content
            test = re.findall("""(https:\/\/www.rapidvideo.com\/e\/.*)">""",
                              response)
            numGroups = len(test)
            for i in range(1, numGroups):
                url = test[i]
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_sd_url(url)
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources
    else:
        return []
def more_cdapl(link, hostDict, lang, info):
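    # Expands a cda.pl link: one direct file URL per quality button in the
    # 'wrapqualitybtn' block, extracted from the player JSON.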
    if "cda.pl" in link:
        sources = []
        try:
            headers = {
                'User-Agent':
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36"
            }
            response = requests.get(link, headers=headers).content
            test = client.parseDOM(response,
                                   'div',
                                   attrs={'class': 'wrapqualitybtn'})
            urls = client.parseDOM(test, 'a', ret='href')
            if urls:
                for url in urls:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    q = source_utils.check_sd_url(url)
                    direct = re.findall(
                        """file":"(.*)","file_cast""",
                        requests.get(url, headers=headers).content)[0].replace(
                            "\\/", "/")
                    sources.append({
                        'source': 'CDA',
                        'quality': q,
                        'language': lang,
                        'url': direct,
                        'info': info,
                        'direct': True,
                        'debridonly': False
                    })
            return sources
        except:
            return sources
    else:
        return []
Example #6
 def work(self, link, testDict):
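     # Resolves a link and returns a (host, quality, link) tuple, or 0 when
     # the hoster is not in testDict (falls through to None for non-http links).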
     if str(link).startswith("http"):
         link = self.getlink(link)
         q = source_utils.check_sd_url(link)
         valid, host = source_utils.is_host_valid(link, testDict)
         if not valid: return 0
         return host, q, link
 def sources(self, url, hostDict, hostprDict):
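     # Collects iframe embeds, then fetches each embed page and pulls the
     # final stream URL out of its player script.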
     sources = []
     try:
         r = self.scraper.get(url).content
         match = re.compile('<div><iframe src="(.+?)"').findall(r)
         for url in match:
             host = url.split('//')[1].replace('www.', '')
             host = host.split('/')[0].split('.')[0].title()
             quality = source_utils.check_sd_url(url)
             r = self.scraper.get(url).content
             if 'http' in url:
                 match = re.compile("url: '(.+?)',").findall(r)
             else:
                 match = re.compile('file: "(.+?)",').findall(r)
             for url in match:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
     except:
         return sources
     return sources
Example #8
 def sources(self, url, hostDict, hostprDict):
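     # iitv.pl: the 'lecPL', 'subPL' and 'org' tabs hold Lektor, Napisy
     # and English links respectively; each is resolved via self.getlink().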
     
     sources = []
     try:
         if url == None: return sources
         headers = {
                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                 'Referer': 'http://iitv.pl/'
         }
         result = client.request(url)
         result = client.parseDOM(result, 'div', attrs={'class':'tab-wrapper'})[0]
         lektor = client.parseDOM(result, 'ul', attrs={'id':'lecPL'})
         if len(lektor) > 0:
             links = client.parseDOM(lektor, 'a', ret='href')
             for link in links:
                 if str(link).startswith("http://"):
                     link = self.getlink(link)
                     q = source_utils.check_sd_url(link)
                     valid, host = source_utils.is_host_valid(link, hostDict)
                     if not valid: continue
                     sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': 'Lektor', 'direct': False, 'debridonly': False})
         napisy = client.parseDOM(result, 'ul', attrs={'id':'subPL'})
         if len(napisy) > 0:
             links = client.parseDOM(napisy, 'a', ret='href')
             for link in links:
                 if str(link).startswith("http://"):
                     link = self.getlink(link)
                     q = source_utils.check_sd_url(link)
                     valid, host = source_utils.is_host_valid(link, hostDict)
                     if not valid: continue
                     sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': 'Napisy', 'direct': False, 'debridonly': False})
         eng = client.parseDOM(result, 'ul', attrs={'id':'org'})
         if len(eng) > 0:
             links = client.parseDOM(eng, 'a', ret='href')
             for link in links:
                 if str(link).startswith("http://"):
                     link = self.getlink(link)
                     q = source_utils.check_sd_url(link)
                     valid, host = source_utils.is_host_valid(link, hostDict)
                     if not valid: continue
                     sources.append({'source': host, 'quality': q, 'language': 'en', 'url': link, 'info': 'ENG', 'direct': False, 'debridonly': False})
         return sources
     except Exception, e:
         print str(e)
         return sources
Example #9
    def sources(self, url, hostDict, hostprDict):
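        # Searches by title/episode, reads the page's quality badge once,
        # then walks the 'player-data' links; Google redirector URLs are
        # appended as direct gvideo sources.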
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = self.searchShow(data['tvshowtitle'], data['season'])
            else:
                url = self.searchMovie(data['title'], data['year'])

            if url is None:
                return sources

            r = self.scraper.get(url, params={'link_web': self.base_link}).content
            quality = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
            quality = source_utils.check_sd_url(quality)
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})

            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = self.scraper.get(url, data={'link_web': self.base_link}).content
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try:
                            sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
                                            'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except:
                            traceback.print_exc()
                            pass
                else:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if host not in hostDict:
                            continue
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': [],
                                        'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            traceback.print_exc()
            return sources
Example #10
    def sources(self, url, hostDict, hostprDict):
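        # BitLord search API: posts a dotted title query, then checks every
        # returned info-hash against the debrid cache endpoint and keeps
        # only torrents reported as 'finished'.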
        sources = []
        try:
            with requests.Session() as s:
                headers = {"Referer": self.domain,\
                           "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",\
                           "Host": "www.BitLord.com","User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0",\
                           "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",\
                           "Accept-Encoding": "gzip, deflate, br","Accept-Language": "en-US,en;q=0.5",\
                           "Connection": "keep-alive","DNT":"1"}
                if 'episode' in url:
                    iep = url['episode'].zfill(2)
                    ise = url['season'].zfill(2)
                    se = 's' + ise + 'e' + iep
                    sel = url['tvshowtitle'].replace(' ', '.') + '.' + se
                    cate = '4'

                else:
                    sel = url['title'].replace(' ', '.') + '.' + url['year']
                    cate = '3'

                sel = sel.lower()
                bdata = {'filters[adult]': 'false', 'filters[category]': cate,
                         'filters[field]': 'category', 'filters[sort]': 'asc',
                         'filters[time]': '4', 'limit': '25', 'offset': '0',
                         'query': sel}

                gs = s.post(self.search_link, data=bdata).text

                gl = re.compile('me\W+(.*?)[\'"].*?tih:(.*?)\W',
                                re.I).findall(gs)
                for nam, haas in gl:
                    checkca = s.get(self.checkc %
                                    (self.api_key, haas, self.api_key)).text
                    quality = source_utils.check_sd_url(nam)
                    if 'finished' in checkca:
                        url = self.pr_link % (self.api_key, haas)
                        sources.append({
                            'source': 'cached',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False,
                            'info': nam,
                        })
            return sources
        except:
            print("Unexpected error in BitLord Script: Sources",
                  sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #11
 def sources(self, url, hostDict, hostprDict):
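     # Each 'tabela_wiersz' row yields three 'tabela_text' spans; index
     # 3*n+1 is the quality and 3*n+2 the language string, while the hoster
     # id comes from the ShowMovie()/ShowSer() onclick handlers.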
     try:
         # import pydevd
         # pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
         sources = []
         result = self.session.get(url).content
         result = result.decode('utf-8')
         h = HTMLParser()
         result = h.unescape(result)
         result = client.parseDOM(result,
                                  'div',
                                  attrs={'class': 'tabela_wiersz mb-1'})
         for counter, item in enumerate(result, 0):
             try:
                 test = client.parseDOM(result,
                                        'span',
                                        attrs={'class': 'tabela_text'})
                 info = test[(2 + (3 * counter))]
                 info = self.get_lang_by_type(info)
                 quality = test[(1 + (3 * counter))]
                 quality = source_utils.check_sd_url(quality)
                 try:
                     id = re.findall("""ShowMovie\('(.*?)'\)""", item)[0]
                 except:
                     id = re.findall("""ShowSer\('(.*?)'\)""", item)[0]
                 try:
                     host = re.findall("""<\/i> (.*?)<\/span>""", item)[0]
                     if 'serial' in url:
                         id = id + '/s'
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': info[0],
                         'url': id,
                         'info': info[1],
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     continue
             except:
                 continue
         return sources
     except:
         return sources
Example #12
 def sources(self, url, hostDict, hostprDict):
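     # Torrentapi: fetches an API token (the sleep spaces out requests),
     # searches by dotted title and keeps only hashes the debrid cache
     # reports as 'finished'.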
     sources = []
     try:
         with requests.Session() as s:
             gettoken = s.get(self.tokenta).text
             time.sleep(2)
             tokenapi = re.compile('n\W+(.*?)[\'"]',
                                   re.I).findall(gettoken)[0]
             if 'episode' in url:
                 iep = url['episode'].zfill(2)
                 ise = url['season'].zfill(2)
                 se = 's' + ise + 'e' + iep
                 sel = url['tvshowtitle'].replace(' ', '.') + '.' + se
                 search_link = self.tvsearch
             else:
                 sel = url['title'].replace(' ', '.') + '.' + url['year']
                 search_link = self.msearch
             gs = s.get(search_link % (sel, tokenapi)).text
             gl = re.compile('ame\W+(.*?)[\'"].*?ih:(.*?)\W',
                             re.I).findall(gs)
             for nam, hass in gl:
                 checkca = s.get(self.checkc %
                                 (self.api_key, hass, self.api_key)).text
                 quality = source_utils.check_sd_url(nam)
                 if 'finished' in checkca:
                     url = self.pr_link % (self.api_key, hass)
                     sources.append({
                         'source': 'cached',
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False,
                         'info': nam,
                     })
         return sources
     except:
         print("Unexpected error in Torrentapi Script: Sources",
               sys.exc_info()[0])
         exc_type, exc_obj, exc_tb = sys.exc_info()
         print(exc_type, exc_tb.tb_lineno)
         return sources
Example #13
    def sources(self, url, hostDict, hostprDict):
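        # Player URLs are HTML-escaped inside 'data-player' attributes;
        # anything detected as SD is reported as 720p instead.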
        try:
            sources = []

            if url is None:
                return sources
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
            }
            result = self.scraper.get(url, headers=headers).content
            streams = re.compile(
                'data-player="&lt;[A-Za-z]{6}\s[A-Za-z]{3}=&quot;(.+?)&quot;',
                re.DOTALL).findall(result)

            for link in streams:
                quality = source_utils.check_sd_url(link)
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].lower()

                if quality == 'SD':
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
                else:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except Exception:
            return sources
 def sources(self, url, hostDict, hostprDict):
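     # Simple table scraper: one source per link in the centered cells.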
     try:
         sources = []
         r = client.request(url)
         match = re.compile(
             '<td align="center"><strong><a href="(.+?)"').findall(r)
         for url in match:
             host = url.split('//')[1].replace('www.', '')
             host = host.split('/')[0].split('.')[0].title()
             quality = source_utils.check_sd_url(url)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'direct': False,
                 'debridonly': False
             })
     except Exception:
         return sources
     return sources
Example #15
 def sources(self, url, hostDict, hostprDict):
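     # Same iframe-then-player-script pattern, fetched with a desktop
     # User-Agent and redirects disabled.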
     try:
         sources = []
         User_Agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
         headers = {'User-Agent':User_Agent}
         r = requests.get(url,headers=headers,allow_redirects=False).content
         try:
             match = re.compile('<div><iframe src="(.+?)"').findall(r)
             for url in match:
                 host = url.split('//')[1].replace('www.','')
                 host = host.split('/')[0].split('.')[0].title()
                 quality = source_utils.check_sd_url(url)
                 r = requests.get(url,headers=headers,allow_redirects=False).content
                 if 'http' in url:
                     match = re.compile("url: '(.+?)',").findall(r)
                 else:
                     match = re.compile('file: "(.+?)",').findall(r)
                 for url in match:
                     sources.append({'source': host, 'quality': quality, 'language': 'en','url': url,'direct': False,'debridonly': False}) 
         except:
              return sources
     except Exception:
         return
     return sources
 def sources(self, url, hostDict, hostprDict):
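     # Token-protected AJAX endpoint: rebuilds the elid cookie, posts to
     # /ajax/vsozrflxcw.php and classifies the returned URLs as gvideo,
     # CDN or generic hoster sources.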
     try:
         sources = []
         if url is None: return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
          title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         imdb = data['imdb']
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             url = self.searchShow(title, int(data['season']),
                                   int(data['episode']), aliases, headers)
         else:
             url = self.searchMovie(title, data['year'], aliases, headers)
         r = client.request(url,
                            headers=headers,
                            output='extended',
                            timeout='10')
         if not imdb in r[0]: raise Exception()
         cookie = r[4]
         headers = r[3]
         result = r[0]
         try:
             r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
             for i in r:
                 try:
                      sources.append({
                          'source': 'gvideo',
                          'quality': directstream.googletag(i)[0]['quality'],
                          'language': 'en',
                          'url': i,
                          'direct': True,
                          'debridonly': False
                      })
                 except:
                     pass
         except:
             pass
         try:
             auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
         except:
             auth = 'false'
         auth = 'Bearer %s' % urllib.unquote_plus(auth)
         headers['Authorization'] = auth
         headers['Referer'] = url
         u = '/ajax/vsozrflxcw.php'
         self.base_link = client.request(self.base_link,
                                         headers=headers,
                                         output='geturl')
         u = urlparse.urljoin(self.base_link, u)
         action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
         elid = urllib.quote(
             base64.encodestring(str(int(time.time()))).strip())
         token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
         idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
         post = {
             'action': action,
             'idEl': idEl,
             'token': token,
             'nopop': '',
             'elid': elid
         }
         post = urllib.urlencode(post)
         cookie += ';%s=%s' % (idEl, elid)
         headers['Cookie'] = cookie
         r = client.request(u,
                            post=post,
                            headers=headers,
                            cookie=cookie,
                            XHR=True)
         r = str(json.loads(r))
         r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
         for i in r:
             try:
                 if 'google' in i:
                     quality = 'SD'
                     if 'googleapis' in i:
                         try:
                             quality = source_utils.check_sd_url(i)
                         except:
                             pass
                     if 'googleusercontent' in i:
                         i = directstream.googleproxy(i)
                         try:
                             quality = directstream.googletag(
                                 i)[0]['quality']
                         except:
                             pass
                     sources.append({
                         'source': 'gvideo',
                         'quality': quality,
                         'language': 'en',
                         'url': i,
                         'direct': True,
                         'debridonly': False
                     })
                 elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                     try:
                         quality = source_utils.check_sd_url(i)
                         sources.append({
                             'source': 'CDN',
                             'quality': quality,
                             'language': 'en',
                             'url': i,
                             'direct': True,
                             'debridonly': False
                         })
                     except:
                         pass
                 else:
                     valid, hoster = source_utils.is_host_valid(i, hostDict)
                     if not valid: continue
                     sources.append({
                         'source': hoster,
                         'quality': '720p',
                         'language': 'en',
                         'url': i,
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources
Example #17
    def sources(self, url, hostDict, hostprDict):
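        # The server list arrives as JSON-wrapped HTML; 'embed' servers
        # expose the iframe src directly, the rest need the x/y token pair
        # scraped from an inline script before the playlist can be fetched.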
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent':
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
            }
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            headers['Referer'] = url
            ref_url = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.scraper.post(url, headers=headers).content
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.scraper.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'),
                          client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % (eid[0]))
                                xml = self.scraper.get(
                                    url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append({
                                    'source': hoster,
                                    'quality': q,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                                continue
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid, t))
                            script = self.scraper.get(url,
                                                      headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith(
                                    '()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''',
                                              script).group(1)
                                y = re.search('''_y=['"]([^"']+)''',
                                              script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(
                                self.base_link, self.source_link %
                                (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.scraper.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                    continue

                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(
                                                url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url,
                                                                  ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append({
                                        'source': hoster,
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': direct,
                                        'debridonly': False
                                    })
                                else:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example #18
    def sources(self, url, hostDict, hostprDict):
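        # Debrid-only: builds title/year query permutations, walks the
        # result pages' 'quote' blocks and keeps hoster links that match
        # the SxxExx (or year) tag and the cleaned title.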
        try:
            hostDict = hostDict + hostprDict

            sources = []
            query_bases = []
            options = []

            if url is None:
                return sources

            if not debrid.status():
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = (data['tvshowtitle']
                     if 'tvshowtitle' in data else data['title'])
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            # tvshowtitle
            if 'tvshowtitle' in data:
                query_bases.append('%s ' %
                                   (data['tvshowtitle'].replace("-", "")))
                if 'year' in data:
                    query_bases.append('%s %s ' %
                                       (data['tvshowtitle'], data['year']))
                options.append('S%02dE%02d' %
                               (int(data['season']), int(data['episode'])))
                options.append('S%02d' % (int(data['season'])))
            else:
                query_bases.append('%s %s ' % (data['title'], data['year']))
                query_bases.append('%s ' % (data['title']))
                options.append('2160p')
                options.append('')

            for option in options:
                for query_base in query_bases:
                    q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                               query_base + option)
                    q = q.replace("  ", " ").replace(" ", "+")
                    url = self.search_link % q
                    url = self.base_link + url + self.search_options
                    html = self.scraper.get(url)
                    if html.status_code == 200:
                        posts = client.parseDOM(html.content,
                                                "div",
                                                attrs={"class": "shd"})
                        for post in posts:
                            url = client.parseDOM(post, "a", ret='href')
                            if len(url) > 0:
                                html = self.scraper.get(url[0])
                                if html.status_code == 200:
                                    quotes = client.parseDOM(
                                        html.content,
                                        "div",
                                        attrs={"class": "quote"})
                                    for quote in quotes:
                                        hrefs = client.parseDOM(quote,
                                                                "a",
                                                                ret='href')
                                        if not hrefs:
                                            continue
                                        for href in hrefs:
                                            quality = source_utils.check_sd_url(
                                                href)
                                            href = href.encode('utf-8')
                                            valid, host = source_utils.is_host_valid(
                                                href, hostDict)
                                            if any(x in href for x in
                                                   ['.rar', '.zip', '.iso']):
                                                continue
                                            if not valid:
                                                continue
                                            if (hdlr in href.upper() and
                                                    cleantitle.get(title) in cleantitle.get(href)):
                                                sources.append({
                                                    'source': host,
                                                    'quality': quality,
                                                    'language': 'en',
                                                    'url': href,
                                                    'direct': False,
                                                    'debridonly': False
                                                })
                if len(sources) > 0:
                    return sources
            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
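        # Matches a search result by cleaned title, optionally checks the
        # release year, then turns each 'list-downloads' link into a
        # direct source.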
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s Season %d Episode %d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            year = data['year']
            search = cleantitle.getsearch(query.lower())
            url = urlparse.urljoin(
                self.base_link, self.search_link % (search.replace(' ', '+')))

            headers = {
                'Referer':
                self.base_link,
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }
            r = client.request(url, headers=headers)

            scrape = re.compile(
                '<div data-movie-id=.+?class="ml-item">\s+<a href="(.+?)" data-url="" class="ml-mask jt".+?oldtitle="(.+?)"'
            ).findall(r)

            for url, title_data in scrape:
                if cleantitle.getsearch(query).lower() == cleantitle.getsearch(
                        title_data).lower():
                    r = client.request(url, headers=headers)
                    year_data = re.compile(
                        '<strong>Release:\s+</strong>\s+<a href=.+?rel="tag">(.+?)</a>'
                    ).findall(r)
                    if year in year_data:
                        if 'tvshowtitle' in data:
                            year = None

                    download_blocks = client.parseDOM(
                        r, 'div', attrs={'id': 'lnk list-downloads'})
                    for url in download_blocks:
                        gold_links = client.parseDOM(url, 'a', ret='href')

                    for url in gold_links:
                        quality = source_utils.check_sd_url(url)
                        url = url.split('php?')[1]
                        sources.append({
                            'source': 'Direct',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })

            return sources
        except Exception:
            return sources
Example #20
    def sources(self, url, hostDict, hostprDict):
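        # Debrid-gated search: posts the DLE-style search form, filters
        # results by IMDB id and keeps only turbobit links, reading the
        # quality from each file title.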
        try:
            sources = []

            if url == None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [dom_parser.parse_dom(i, 'div', attrs={'class': 'news-title'})
                 for i in r if data['imdb'] in i]
            r = [(dom_parser.parse_dom(i[0], 'a', req='href')) for i in r if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[0].replace(' ', '.')

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'

                    data = client.request(item[0])
                    data = dom_parser.parse_dom(data,
                                                'div',
                                                attrs={'id': 'r-content'})
                    data = re.findall('\s*<b><a href="(.+?)".+?</a></b>',
                                      data[0].content, re.DOTALL)

                    for url in data:
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        if 'turbobit' not in url:
                            continue

                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid:
                            continue

                        try:
                            qual = client.request(url)
                            quals = re.findall(
                                'span class="file-title" id="file-title">(.+?)</span',
                                qual)
                            for qual_name in quals:
                                quality = source_utils.check_sd_url(qual_name)

                            info = []
                            if '3D' in name or '.3D.' in qual_name:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in qual_name
                                   for i in ['hevc', 'h265', 'x265']):
                                info.append('HEVC')

                            info = ' | '.join(info)

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            source_utils.scraper_error('ULTRAHDINDIR')
                            pass

                except:
                    source_utils.scraper_error('ULTRAHDINDIR')
                    pass

            return sources

        except Exception:
            source_utils.scraper_error('ULTRAHDINDIR')
            return sources
Example #21
    def sources(self, url, hostDict, hostprDict):
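        # fboost.pl API: posts the movie/series metadata as JSON, resolves
        # each returned player link, and special-cases cda.pl and
        # rapidvideo.com embeds before falling back to the generic hoster.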

        sources = []
        try:
            if url is None:
                return sources

            typ = url[4]

            headers = {
                "User-Agent":
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0",
                "http.content_type":
                "application/x-www-form-urlencoded; charset=UTF-8"
            }
            data = ''
            if typ == "SERIAL":
                title = url[0]
                id = url[1]
                year = url[2]
                orgtitle = url[3]
                sezon = url[5]
                epizod = url[6]
                if orgtitle != "0":
                    data = {
                        "id": int(id),
                        "type": typ,
                        "title": title,
                        "year": int(year),
                        "sezon": str(sezon),
                        "odcinek": str(epizod),
                        "site": "filmdb",
                        "browser": "chrome"
                    }
                else:
                    data = {
                        "id": int(id),
                        "type": typ,
                        "title": title,
                        "originalTitle": str(orgtitle),
                        "year": int(year),
                        "sezon": str(sezon),
                        "odcinek": str(epizod),
                        "site": "filmdb",
                        "browser": "chrome"
                    }
            if typ == "FILM":
                title = url[0]
                id = url[1]
                year = url[2]
                orgtitle = url[3]
                if orgtitle != "0":
                    data = {
                        "id": int(id),
                        "type": typ,
                        "title": str(title),
                        "originalTitle": str(orgtitle),
                        "year": int(year),
                        "site": "filmdb",
                        "browser": "chrome"
                    }
                else:
                    data = {
                        "id": int(id),
                        "type": typ,
                        "title": str(title),
                        "year": int(year),
                        "site": "filmdb",
                        "browser": "chrome"
                    }
            data = {"json": json.dumps(data, ensure_ascii=False)}
            response = requests.post("http://fboost.pl/api/api.php",
                                     data=data,
                                     headers=headers)
            content = json.loads(response.content)
            for code in zip(content[u'link'], content[u'wersja']):
                wersja = str(code[1])
                lang, info = self.get_lang_by_type(wersja)
                test = requests.post("http://fboost.pl/api/player.php?src=%s" %
                                     code[0]).content
                link = re.search("""iframe src="(.*)" style""", test)
                link = link.group(1)
                if len(link) < 2:
                    continue
                if "cda.pl" in link:
                    try:
                        response = requests.get(link).content
                        test = client.parseDOM(
                            response, 'div', attrs={'class': 'wrapqualitybtn'})
                        urls = client.parseDOM(test, 'a', ret='href')
                        for url in urls:
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            q = source_utils.check_sd_url(url)
                            sources.append({
                                'source': host,
                                'quality': q,
                                'language': lang,
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': False
                            })
                        continue
                    except:
                        pass
                if "rapidvideo.com" in link:
                    try:
                        response = requests.get(link).content
                        test = re.findall(
                            """(https:\/\/www.rapidvideo.com\/e\/.*)">""",
                            response)
                        numGroups = len(test)
                        for i in range(1, numGroups):
                            url = test[i]
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            q = source_utils.check_sd_url(url)
                            sources.append({
                                'source': host,
                                'quality': q,
                                'language': lang,
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': False
                            })
                        continue
                    except:
                        pass
                valid, host = source_utils.is_host_valid(link, hostDict)
                q = source_utils.check_sd_url(link)
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': lang,
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources