def more_rapidvideo(link, hostDict, lang, info):
    # Collect extra rapidvideo.com mirror links found on an embed page.
    sources = []
    if "rapidvideo.com" in link:
        try:
            response = requests.get(link).content
            test = re.findall(r"""(https:\/\/www.rapidvideo.com\/e\/.*)">""", response)
            # starts at 1, skipping the first captured link (behaviour preserved)
            for i in range(1, len(test)):
                url = test[i]
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_sd_url(url)
                sources.append({'source': host, 'quality': q, 'language': lang,
                                'url': url, 'info': info, 'direct': False,
                                'debridonly': False})
            return sources
        except Exception as e:
            print(e)
    return []
def more_cdapl(link, hostDict, lang, info):
    # Collect direct CDA links for every quality variant offered on a cda.pl page.
    sources = []
    if "cda.pl" in link:
        try:
            headers = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36"}
            response = requests.get(link, headers=headers).content
            test = client.parseDOM(response, 'div', attrs={'class': 'wrapqualitybtn'})
            urls = client.parseDOM(test, 'a', ret='href')
            for url in urls:
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_sd_url(url)
                # extract the direct file URL from the player config
                direct = re.findall(r"""file":"(.*)","file_cast""",
                                    requests.get(url, headers=headers).content)[0].replace("\\/", "/")
                sources.append({'source': 'CDA', 'quality': q, 'language': lang,
                                'url': direct, 'info': info, 'direct': True,
                                'debridonly': False})
            return sources
        except Exception as e:
            print(e)
    return []
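# --- Hedged usage sketch (not part of the original module) ---
# Shows how the two mirror-expander helpers above might be combined by a
# caller. `hostDict` is the usual resolver host list; the lang/info values
# below are assumed examples, not taken from the source.
def expand_mirrors(link, hostDict):
    lang, info = 'pl', 'Lektor'  # assumed example values
    found = []
    found += more_rapidvideo(link, hostDict, lang, info)
    found += more_cdapl(link, hostDict, lang, info)
    return found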
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        hostDict = hostprDict + hostDict
        r = client.request(url)
        u = client.parseDOM(r, "ul", attrs={"id": "serverul"})
        for t in u:
            try:
                links = client.parseDOM(t, 'a', ret='href')
                for url in links:
                    if 'getlink' in url:
                        continue
                    quality = source_utils.check_sd_url(url)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'url': url,
                                        'direct': False, 'debridonly': False})
            except:
                continue
        # return after the loop so every server list is processed
        return sources
    except:
        return sources
def work(self, link, testDict):
    # Returns 0 for an invalid host, otherwise a (host, quality, link) tuple.
    if str(link).startswith("http"):
        link = self.getlink(link)
    q = source_utils.check_sd_url(link)
    valid, host = source_utils.is_host_valid(link, testDict)
    if not valid:
        return 0
    return host, q, link
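# --- Hedged caller example (assumed, not in the source) ---
# `work` mixes return types: 0 for an invalid host, a (host, quality, link)
# tuple otherwise, so callers must test the sentinel before unpacking.
def try_work(scraper, link, testDict):
    result = scraper.work(link, testDict)
    if result == 0:
        return None  # invalid host
    host, quality, link = result
    return {'source': host, 'quality': quality, 'url': link}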
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        cookies = cache.cache_get('naszekino_cookie')
        result = client.request(url, cookie=cookies)
        result = client.parseDOM(result, 'table', attrs={'class': 'table table-bordered'})
        result = client.parseDOM(result, 'tr')
        for item in result:
            try:
                link = client.parseDOM(item, 'td', attrs={'class': 'link-to-video'})
                link = str(client.parseDOM(link, 'a', ret='href')[0])
                temp = client.parseDOM(item, 'td')
                wersja = str(temp[1])  # language/version column
                lang, info = self.get_lang_by_type(wersja)
                valid, host = source_utils.is_host_valid(link, hostDict)
                jakosc = str(temp[2]).lower()  # quality column
                if "wysoka" in jakosc:  # "wysoka" = high
                    q = "HD"
                else:
                    q = source_utils.check_sd_url(link)
                sources.append({'source': host, 'quality': q, 'language': lang,
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': False})
            except:
                continue
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        for link in url:
            try:
                lang = link[u'quality']
                video_link = link[u'url']
                lang, info = self.get_lang_by_type(lang)
                q = source_utils.check_sd_url(video_link)
                valid, host = source_utils.is_host_valid(video_link, hostDict)
                if 'rapidvideo' in video_link:
                    content = requests.get(video_link, timeout=3, allow_redirects=True).content
                    q = re.findall(r"""data-res=\"(.*?)\"""", content)[0]
                    if int(q) == 720:
                        q = 'HD'
                    elif int(q) > 720:
                        q = '1080p'  # normalized to the label used elsewhere
                    else:
                        q = 'SD'
                if 'streamango' in video_link or 'openload' in video_link:
                    content = requests.get(video_link, timeout=3, allow_redirects=True).content
                    q = re.findall(r"""og:title\" content=\"(.*?)\"""", content)[0]
                    q = source_utils.get_release_quality('', q)[0]
                if valid:
                    if 'ebd' in host.lower():
                        host = 'CDA'
                    sources.append({'source': host, 'quality': q, 'language': lang,
                                    'url': video_link, 'info': info, 'direct': False,
                                    'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
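# --- Hedged helper sketch (assumed refactor, not in the source) ---
# The data-res bucketing used above, extracted for clarity: rapidvideo
# reports a numeric vertical resolution that is mapped to quality labels.
def bucket_rapidvideo_quality(res):
    res = int(res)
    if res == 720:
        return 'HD'
    if res > 720:
        return '1080p'
    return 'SD'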
def sources(self, rows, hostDict, hostprDict):
    sources = []
    try:
        if type(rows) == tuple:
            quality = source_utils.check_sd_url(rows[0])
            info = self.get_lang_by_type(rows[0])
            sources.append({'source': 'tb7', 'quality': quality, 'language': info[0],
                            'url': rows[0], 'info': 'Moje Pliki', 'direct': True,
                            'debridonly': False})
            return sources
        for row in rows:
            try:
                nazwa = client.parseDOM(row, 'label')[0]  # file name
                # skip rows that are not video files
                if not ('.mkv' in nazwa or '.avi' in nazwa or '.mp4' in nazwa):
                    continue
                link = client.parseDOM(row, 'input', ret='value')[0]
                size = client.parseDOM(row, 'td')[3]
                # skip entries that duplicate an already-listed file size
                if any(size in s['info'] for s in sources):
                    continue
                quality = source_utils.check_sd_url(nazwa)
                info = self.get_lang_by_type(nazwa)
                info2 = info[1] if info[1] else ''
                sources.append({'source': 'tb7', 'quality': quality, 'language': info[0],
                                'url': link, 'info': size + ' ' + info2, 'direct': True,
                                'debridonly': False})
            except:
                continue
        return sources
    except:
        log_exception()
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        result = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
        cookies = client.request(urlparse.urljoin(self.base_link, url), output='cookie')
        headers = {
            'cookie': cookies,
            'dnt': '1',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.91 Safari/537.36',
            'accept': 'text/html, */*; q=0.01',
            'referer': self.base_link + url,
            'authority': 'www.boxfilm.pl',
            'x-requested-with': 'XMLHttpRequest',
        }
        response = requests.get('https://www.boxfilm.pl/include/player.php', headers=headers).content
        section = client.parseDOM(result, 'section', attrs={'id': 'video_player'})[0]
        link = client.parseDOM(response, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(link, hostDict)
        if not valid:
            return sources
        spans = client.parseDOM(section, 'span')
        info = None
        for span in spans:
            if span == 'Z lektorem':  # "with voice-over"
                info = 'Lektor'
        q = source_utils.check_sd_url(link)
        sources.append({'source': host, 'quality': q, 'language': 'pl',
                        'url': link, 'info': info, 'direct': False,
                        'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        r = client.request(url)
        match = re.compile('<td align="center"><strong><a href="(.+?)"').findall(r)
        for url in match:
            # derive a display name for the host from the link's domain
            host = url.split('//')[1].replace('www.', '')
            host = host.split('/')[0].split('.')[0].title()
            quality = source_utils.check_sd_url(url)
            sources.append({'source': host, 'quality': quality, 'language': 'en',
                            'url': url, 'direct': False, 'debridonly': False})
    except Exception:
        pass
    return sources
def sources(self, url, hostDict, hostprDict):
    try:
        # import pydevd
        # pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
        sources = []
        result = self.session.get(url).content
        result = result.decode('utf-8')
        h = HTMLParser()
        result = h.unescape(result)
        result = client.parseDOM(result, 'div', attrs={'class': 'tabela_wiersz mb-1'})
        for counter, item in enumerate(result, 0):
            try:
                # each row contributes three 'tabela_text' spans:
                # host, quality and language info, in that order
                test = client.parseDOM(result, 'span', attrs={'class': 'tabela_text'})
                info = test[2 + (3 * counter)]
                info = self.get_lang_by_type(info)
                quality = test[1 + (3 * counter)]
                quality = source_utils.check_sd_url(quality)
                try:
                    id = re.findall(r"""ShowMovie\('(.*?)'\)""", item)[0]
                except:
                    id = re.findall(r"""ShowSer\('(.*?)'\)""", item)[0]
                try:
                    host = re.findall(r"""<\/i> (.*?)<\/span>""", item)[0]
                    if 'serial' in url:
                        id = id + '/s'
                    sources.append({'source': host, 'quality': quality,
                                    'language': info[0], 'url': id,
                                    'info': info[1], 'direct': False,
                                    'debridonly': False})
                except:
                    continue
            except Exception as e:
                print(e)
                continue
        return sources
    except Exception as e:
        print(e)
        log_exception()
        return sources
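# --- Hedged illustration (assumed data, not from the source) ---
# Each result row contributes three 'tabela_text' spans (host, quality,
# language info), so row N is addressed with a stride-3 offset:
# quality = spans[1 + 3*N], info = spans[2 + 3*N].
spans = ['host0', 'quality0', 'info0', 'host1', 'quality1', 'info1']
assert spans[1 + 3 * 1] == 'quality1' and spans[2 + 3 * 1] == 'info1'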
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        r = client.request(url)
        r = re.findall('<iframe src="(.+?)"', r)
        for url in r:
            valid, host = source_utils.is_host_valid(url, hostDict)
            quality = source_utils.check_sd_url(url)
            sources.append({'source': host, 'quality': quality, 'language': 'en',
                            'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        with requests.Session() as s:
            # fetch a fresh API token first; the API expects a short pause
            # between the token request and the search request
            gettoken = s.get(self.tokenta).text
            xbmc.sleep(2000)
            tokenapi = re.compile(r'n\W+(.*?)[\'"]', re.I).findall(gettoken)[0]
            if 'episode' in url:
                iep = url['episode'].zfill(2)
                ise = url['season'].zfill(2)
                se = 's' + ise + 'e' + iep
                sel = url['tvshowtitle'].replace(' ', '.') + '.' + se
                search_link = self.tvsearch
            else:
                sel = url['title'].replace(' ', '.') + '.' + url['year']
                search_link = self.msearch
            gs = s.get(search_link % (sel, tokenapi)).text
            gl = re.compile(r'ame\W+(.*?)[\'"].*?ih:(.*?)\W', re.I).findall(gs)
            for nam, hass in gl:
                checkca = s.get(self.checkc % (self.api_key, hass, self.api_key)).text
                quality = source_utils.check_sd_url(nam)
                if 'finished' in checkca:  # only torrents already cached
                    url = self.pr_link % (self.api_key, hass)
                    sources.append({'source': 'cached', 'quality': quality,
                                    'language': 'en', 'url': url, 'direct': False,
                                    'debridonly': False, 'info': nam})
        return sources
    except:
        print("Unexpected error in Torrentapi Script: Sources", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        import re
        if url is None:
            return sources
        r = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
        info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])
        r = client.parseDOM(r, 'div', attrs={'class': 'tab-pane active'})[0]
        r = client.parseDOM(r, 'script')[0]
        script = r.split('"')[1]
        decoded = self.shwp(script)  # deobfuscate the packed player script
        link = re.findall('src="(.*?)"', decoded.replace("\n", ""))[0]
        valid, host = source_utils.is_host_valid(link, hostDict)
        if not valid:
            return sources
        q = source_utils.check_sd_url(link)
        sources.append({'source': host, 'quality': q, 'language': 'pl',
                        'url': link, 'info': info, 'direct': False,
                        'debridonly': False})
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources

        urls = []
        vidtype = 'Movie'
        if not str(url).startswith('http'):
            try:
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'year' in data:
                    year = data['year']
                if 'season' in data:
                    query = {'keyword': '%s %s %s' % (title, 'season', data['season'])}
                else:
                    query = {'keyword': title}
                search_url = urlparse.urljoin(self.base_link, '/search.html')
                search_url = search_url + '?' + urllib.urlencode(query)
                result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)

                r = client.parseDOM(result, 'div', attrs={'class': 'wrapper'})
                try:
                    r = r[1]
                except:
                    raise Exception()
                r1 = client.parseDOM(r, 'figure')
                r2 = []
                for res in r1:
                    l = client.parseDOM(res, 'a', ret='href')[0]
                    t = client.parseDOM(res, 'div', attrs={'class': 'title'})[0]
                    r2.append((l, t))
                r = r2

                if 'season' in data:
                    vidtype = 'Show'
                    episode = int(data['episode'])
                    r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
                    url = [(i[0], re.findall('(.+?) (\d+)$', i[1])) for i in r]
                    url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                    url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])]
                    url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                    ep_url = []
                    for i in url:
                        result = proxies.request(urlparse.urljoin(self.base_link, i[0]), headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                        t = client.parseDOM(result, 'div', attrs={'class': 'eps'})
                        for tt in t:
                            if 'watch' in tt:
                                tt = client.parseDOM(tt, 'div', attrs={'class': 'server'})[0]
                                section_links = client.parseDOM(tt, 'a', ret='href')
                                for a_link in section_links:
                                    # episode slugs are zero-padded to 2 or 3 digits
                                    if episode < 100:
                                        f_key = '-episode-%02d-' % episode
                                    else:
                                        f_key = '-episode-%03d-' % episode
                                    if f_key in a_link:
                                        log('INFO', 'get_sources', 'episode url = %s' % a_link)
                                        ep_url.append(a_link)
                                        break
                    for i in ep_url:
                        urls.append(urlparse.urljoin(self.base_link, i))
                else:
                    for i in r:
                        if cleantitle.get(title) in cleantitle.get(i[1]):
                            urls.append(urlparse.urljoin(self.base_link, i[0]))
            except:
                urls = [self.base_link]

        links_m = []
        page = None
        for url in urls:
            try:
                log('INFO', 'get_sources', 'url == %s' % url, dolog=False, doPrint=True)
                page_url = url
                page = result = proxies.request(url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)

                quality = '480p'
                type = 'BRRIP'
                atr = ''
                qtr = ''
                try:
                    qtr = client.parseDOM(result, 'span', attrs={'class': 'quanlity'})[0]
                except:
                    try:
                        qtr = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0]
                    except:
                        pass
                try:
                    quality = source_utils.check_sd_url(qtr)
                    type = source_utils.check_sd_url_rip(qtr)
                except Exception as e:
                    quality = '480p'
                    type = 'BRRIP'
                try:
                    atr = client.parseDOM(result, 'span', attrs={'class': 'year'})[0]
                except:
                    atr = ''
                try:
                    atr_release = client.parseDOM(result, 'div', attrs={'class': 'meta'})[1]
                except:
                    atr_release = ''

                if 'season' in data:
                    vidtype = 'Show'
                else:
                    vidtype = 'Movie'
                    # verify the page's year against the requested one
                    resultx = result if str(int(year)) in atr else None
                    if resultx == None:
                        resultx = result if str(int(year)) in atr_release else None
                    if resultx == None:
                        raise Exception()

                try:
                    poster = client.parseDOM(page, 'div', attrs={'class': 'detail-l'})[0]
                    poster = client.parseDOM(poster, 'img', ret='src')[0]
                    if 'http' not in poster:
                        poster = 'http:' + poster
                except:
                    poster = None

                try:
                    servers = re.findall(r'link_server_.*\"(.*)\";', page)
                    servers = list(set(servers))
                    for server in servers:
                        try:
                            if 'http' not in server:
                                server = 'http:' + server
                            result = proxies.request(server, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                            server = client.parseDOM(result, 'iframe', ret='src')[0]
                            if len(server) > 0:
                                if 'http' not in server:
                                    server = 'http:' + server
                                l = resolvers.createMeta(server, self.name, self.logo, quality, [], key, poster=poster, riptype=type, vidtype=vidtype, testing=testing, page_url=page_url)
                                for ll in l:
                                    if ll != None and 'key' in ll.keys():
                                        links_m.append(ll)
                        except Exception as e:
                            pass
                        if testing and len(links_m) > 0:
                            break
                except Exception as e:
                    pass

                try:
                    servers = re.findall(r'link_server_.*\"(.*)\";', page)
                    servers = list(set(servers))
                    for server in servers:
                        if server != None:
                            if 'http' not in server:
                                server = 'http:' + server
                            try:
                                l = resolvers.createMeta(server, self.name, self.logo, quality, [], key, poster=poster, riptype=type, vidtype=vidtype, testing=testing, page_url=page_url)
                                for ll in l:
                                    if ll != None and 'key' in ll.keys():
                                        links_m.append(ll)
                            except:
                                pass
                except:
                    pass
                break
            except:
                pass

        for link in links_m:
            if link != None and 'key' in link.keys():
                sources.append(link)

        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources

        base_link = self.base_link
        try:
            if url[0].startswith('http'):
                base_link = url[0]
            mid = re.findall('-(\d+)', url[0])[-1]
        except:
            if url.startswith('http'):
                base_link = url
            mid = re.findall('-(\d+)', url)[-1]

        try:
            if len(url[1]) > 0:
                episode = url[1]
            else:
                episode = None
        except:
            episode = None

        links_m = []
        trailers = []
        headers = {'Referer': self.base_link}

        u = urlparse.urljoin(self.base_link, url[0])
        r = proxies.request(u, headers=headers, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)

        try:
            elem = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
            qual = source_utils.check_sd_url(elem)
            riptype = source_utils.check_sd_url_rip(elem)
        except Exception as e:
            qual = '480p'
            riptype = 'BRRIP'

        try:
            poster = client.parseDOM(r, 'div', attrs={'class': 'dm-thumb'})[0]
            poster = client.parseDOM(poster, 'img', ret='src')[0]
        except:
            poster = None

        if testing == False:
            try:
                # harvest any absolute URLs on the page; YouTube ones become trailers
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(r)
                for match in matches:
                    try:
                        if 'youtube.com' in match:
                            match = match.replace('embed/', 'watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass

        for trailer in trailers:
            links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)

        try:
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = proxies.request(u, headers=headers, XHR=True, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)
            for eid in r:
                try:
                    sub_url = None
                    try:
                        ep = re.findall('episode.*?(\d+):.*?', eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode is None) or (int(ep) == int(episode)):
                        url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                        script = proxies.request(url, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
                        # three known obfuscation schemes for the token script
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script and '_y=' in script:
                            params = self.uncensored3(script)
                        else:
                            raise Exception()

                        u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
                        r = proxies.request(u, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
                        if r == None or len(r) == 0:
                            # fall back to the embed endpoint
                            u = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                            r = proxies.request(u, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
                        try:
                            url = json.loads(r)['playlist'][0]['sources']
                        except:
                            url = [{'file': json.loads(r)['src']}]
                        try:
                            url = [i['file'] for i in url]
                        except:
                            url = [url['file']]
                        try:
                            sub_url = json.loads(r)['playlist'][0]['tracks'][0]['file']
                        except:
                            pass
                        vidtype = 'Movie'
                        if int(ep) > 0:
                            vidtype = 'Show'
                        for s in url:
                            links_m = resolvers.createMeta(s, self.name, self.logo, qual, links_m, key, poster=poster, riptype=riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
                except:
                    pass
        except:
            pass

        sources += [l for l in links_m]

        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources

        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources

        links_m = []
        trailers = []
        headers = {'Referer': self.base_link}
        sub_url = None
        u = url[0]
        ep = url[1]

        r = proxies.request(u, headers=self.headers, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)

        if testing == False:
            try:
                # harvest any absolute URLs on the page; YouTube ones become trailers
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(r)
                for match in matches:
                    try:
                        if 'youtube.com' in match:
                            match = match.replace('embed/', 'watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass

        for trailer in trailers:
            links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)

        try:
            if ep == None:
                srcs = client.parseDOM(r, 'a', ret='player-data')
            else:
                srcs = client.parseDOM(r, 'a', ret='player-data', attrs={'episode-data': str(ep)})

            try:
                elem = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
                qual = source_utils.check_sd_url(elem)
                riptype = source_utils.check_sd_url_rip(elem)
            except Exception as e:
                qual = '480p'
                riptype = 'BRRIP'

            try:
                poster = client.parseDOM(r, 'div', attrs={'class': 'dm-thumb'})[0]
                poster = client.parseDOM(poster, 'img', ret='src')[0]
            except:
                poster = None

            for s in srcs:
                try:
                    if s.startswith('//'):
                        s = 'https:%s' % s
                    links_m = resolvers.createMeta(s, self.name, self.logo, qual, links_m, key, poster=poster, riptype=riptype, vidtype='Movie', sub_url=sub_url, testing=testing)
                    if testing == True and len(links_m) > 0:
                        break
                except:
                    pass
        except:
            pass

        sources += [l for l in links_m]

        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources

        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        imdb = data['imdb']
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)

        r = client.request(url, headers=headers, output='extended', timeout='10')
        if imdb not in r[0]:
            raise Exception()

        cookie = r[4]
        headers = r[3]
        result = r[0]

        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({'source': 'gvideo',
                                    'quality': directstream.googletag(i)[0]['quality'],
                                    'language': 'en', 'url': i, 'direct': True,
                                    'debridonly': False})
                except:
                    pass
        except:
            pass

        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Referer'] = url

        u = '/ajax/vsozrflxcw.php'
        self.base_link = client.request(self.base_link, headers=headers, output='geturl')
        u = urlparse.urljoin(self.base_link, u)

        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

        post = {'action': action, 'idEl': idEl, 'token': token, 'nopop': '', 'elid': elid}
        post = urllib.urlencode(post)
        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie

        r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

        for i in r:
            try:
                if 'google' in i:
                    quality = 'SD'
                    if 'googleapis' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                        except Exception:
                            pass
                    if 'googleusercontent' in i:
                        i = directstream.googleproxy(i)
                        try:
                            quality = directstream.googletag(i)[0]['quality']
                        except Exception:
                            pass
                    sources.append({'source': 'gvideo', 'quality': quality,
                                    'language': 'en', 'url': i, 'direct': True,
                                    'debridonly': False})
                elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                    try:
                        quality = source_utils.check_sd_url(i)
                        sources.append({'source': 'CDN', 'quality': quality,
                                        'language': 'en', 'url': i, 'direct': True,
                                        'debridonly': False})
                    except Exception:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid:
                        continue
                    sources.append({'source': hoster, 'quality': '720p',
                                    'language': 'en', 'url': i, 'direct': False,
                                    'debridonly': False})
            except Exception:
                pass
        return sources
    except:
        failure = traceback.format_exc()
        print('CartoonHD - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
        headers = mozhdr
        headers['X-Requested-With'] = 'XMLHttpRequest'

        self.s = cfscrape.create_scraper()
        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases, headers)

        headers['Referer'] = url
        ref_url = url
        mid = re.findall('-(\d*)\.', url)[0]
        data = {'id': mid}
        r = self.s.post(url, headers=headers)

        try:
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = self.s.get(u, headers=mozhdr).content
            r = json.loads(r)['html']
            rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
            rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
            ids = client.parseDOM(rl, 'li', ret='data-id')
            servers = client.parseDOM(rl, 'li', ret='data-server')
            labels = client.parseDOM(rl, 'a', ret='title')
            r = zip(ids, servers, labels)
            # map server id -> server type (embed vs. hosted)
            rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
            types = {}
            for rr in rrr:
                types[rr[0]] = rr[1]

            for eid in r:
                try:
                    try:
                        ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode == 0) or (int(ep) == episode):
                        t = str(int(time.time() * 1000))
                        quali = source_utils.get_release_quality(eid[2])[0]
                        if 'embed' in types[eid[1]]:
                            url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                            xml = self.s.get(url, headers=headers).content
                            url = json.loads(xml)['src']
                            valid, hoster = source_utils.is_host_valid(url, hostDict)
                            if not valid:
                                continue
                            q = source_utils.check_sd_url(url)
                            q = q if q != 'SD' else quali
                            sources.append({'source': hoster, 'quality': q, 'language': 'en',
                                            'url': url, 'direct': False, 'debridonly': False})
                            continue
                        else:
                            url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                        script = self.s.get(url, headers=headers).content
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script:
                            x = re.search('''_x=['"]([^"']+)''', script).group(1)
                            y = re.search('''_y=['"]([^"']+)''', script).group(1)
                            params = {'x': x, 'y': y}
                        else:
                            raise Exception()

                        u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                        # the source endpoint occasionally returns an empty body; retry
                        length = 0
                        count = 0
                        while length == 0 and count < 11:
                            r = self.s.get(u, headers=headers).text
                            length = len(r)
                            if length == 0:
                                count += 1

                        uri = json.loads(r)['playlist'][0]['sources']
                        try:
                            uri = [i['file'] for i in uri if 'file' in i]
                        except:
                            try:
                                uri = [uri['file']]
                            except:
                                continue

                        for url in uri:
                            if 'googleapis' in url:
                                q = source_utils.check_sd_url(url)
                                sources.append({'source': 'gvideo', 'quality': q, 'language': 'en',
                                                'url': url, 'direct': True, 'debridonly': False})
                                continue
                            valid, hoster = source_utils.is_host_valid(url, hostDict)
                            q = quali
                            if valid:
                                if hoster == 'gvideo':
                                    direct = True
                                    try:
                                        q = directstream.googletag(url)[0]['quality']
                                    except:
                                        pass
                                    url = directstream.google(url, ref=ref_url)
                                else:
                                    direct = False
                                sources.append({'source': hoster, 'quality': q, 'language': 'en',
                                                'url': url, 'direct': direct, 'debridonly': False})
                            else:
                                sources.append({'source': 'CDN', 'quality': q, 'language': 'en',
                                                'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.75 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'TE': 'Trailers',
            'Cookie': self.cookies
        }
        content = requests.get(url, headers=headers).content
        results = client.parseDOM(content, 'section', attrs={'class': 'box episode-player-list'})
        results = client.parseDOM(results, 'tr')
        for item in results:
            try:
                item = client.parseDOM(item, 'td')
                host = item[0]
                if 'vidoza' in item[0].lower():
                    host = 'VIDOZA'
                quality = source_utils.check_sd_url(item[1])
                audio = client.parseDOM(item[2], 'span', attrs={'class': 'mobile-hidden'})[0]
                jezyk = 'pl' if 'Polski' in audio else 'en'  # audio language
                try:
                    # subtitle column
                    napisy = client.parseDOM(item[3], 'span', attrs={'class': 'mobile-hidden'})[0]
                    if 'Polski' in napisy:
                        jezyk = 'pl'
                except:
                    napisy = ''
                if napisy:
                    napisy = "Napisy " + napisy  # "Napisy" = subtitles
                id = re.findall(r'''data_(.*?)\"''', str(item[5]))[0]
                code = re.findall(r"""_Storage\.basic.*=.*'(.*?)'""", content)[0]
                video_link = "https://api4.shinden.pl/xhr/%s/player_load?auth=%s" % (id, code)
                info = 'Polskie Audio' if 'Polski' in audio else napisy
                sources.append({'source': host, 'quality': quality, 'language': jezyk,
                                'url': video_link, 'info': info, 'direct': False,
                                'debridonly': False})
            except Exception as e:
                print(str(e))
                continue
        return sources
    except Exception as e:
        print(str(e))
        return sources
def process(self, url, q, r, headers, page_url):
    items = []
    try:
        if 'vcstream.to' in url:
            id = re.compile('//.+?/(?:embed|f)/([0-9a-zA-Z-_]+)').findall(url)[0]
            headersx = {'Referer': url, 'User-Agent': client.agent()}
            page_data = client.request('https://vcstream.to/player?fid=%s&page=embed' % id, headers=headersx)
            srcs = re.findall(r'sources:.\[(.*?)\]', page_data)[0]
            srcs = srcs.replace('\\n', '').replace('\\', '')
            srcs = '''[%s]''' % srcs
            j_data = json.loads(srcs)
            for j in j_data:
                t = j['name']
                label = j['label']
                u = j['src']
                if label.lower() == 'raw':
                    q = source_utils.check_sd_url(t)
                else:
                    q = label
                r = source_utils.check_sd_url_rip(t)
                fs = client.getFileSize(u, retry429=True, headers=headers)
                if fs == None or int(fs) == 0:
                    fs = client.getFileSize(u, retry429=True)
                q = qual_based_on_fs(q, fs)
                online = check(u)
                urldata = client.b64encode(json.dumps('', encoding='utf-8'))
                params = client.b64encode(json.dumps('', encoding='utf-8'))
                if headers != None:
                    paramsx = {'headers': headers}
                    params = client.b64encode(json.dumps(paramsx, encoding='utf-8'))
                items.append({'quality': q, 'riptype': r, 'src': u, 'fs': fs, 'online': online,
                              'params': params, 'urldata': urldata,
                              'allowsStreaming': True, 'allowsDownload': True})
        elif '3donlinefilms.com' in url or '3dmoviesfullhd.com' in url or 'freedocufilms.com' in url:
            data = urlparse.parse_qs(url)
            headers = {}
            if '3donlinefilms.com' in url:
                headers['Referer'] = 'http://3donlinefilms.com'
                l0 = 'https://3donlinefilms.com/update.php'
            elif 'freedocufilms.com' in url:
                headers['Referer'] = 'http://freedocufilms.com'
                l0 = 'https://freedocufilms.com/update.php'
            else:
                headers['Referer'] = 'http://3dmoviesfullhd.com'
                l0 = 'https://3dmoviesfullhd.com/update.php'
            page = data['page'][0]
            cook = client.request(page, output='cookie')
            post_data = {'file': data['src_file'][0]}
            cookie = '%s; zeroday=; visit=yes; jwplayer.qualityLabel=HD' % cook
            headers['Referer'] = page
            headers['User-Agent'] = client.agent()
            headers['Cookie'] = cookie
            u = data['file'][0]
            u = u.replace('//freedocufilms', '//www.freedocufilms')
            try:
                ret = client.request(l0, post=client.encodePostData(post_data), headers=headers,
                                     output='extended', XHR=True, cookie=cookie)
            except Exception as e:
                log(type='FAIL', method='process', err='%s' % e, dolog=False, logToControl=False, doPrint=True)
            ret = client.request(u, output='headers', headers=headers, XHR=True)
            try:
                fs = int(re.findall(r'Content-Length:(.*)', str(ret), re.MULTILINE)[0].strip())
            except:
                fs = 0
            q = qual_based_on_fs(q, fs)
            online = False
            if int(fs) > 0:
                online = True
            urldata = client.b64encode(json.dumps('', encoding='utf-8'))
            paramsx = {'headers': headers}
            params = client.b64encode(json.dumps(paramsx, encoding='utf-8'))
            items.append({'quality': q, 'riptype': r, 'src': url, 'fs': fs, 'online': online,
                          'params': params, 'urldata': urldata,
                          'allowsStreaming': False, 'allowsDownload': True})
        elif 'cooltvseries.com' in url:
            urlx = client.request(url, output='geturl', headers=headers)
            urlx = '%s?e=file.mp4' % urlx
            fs = client.getFileSize(url, retry429=True, headers=headers)
            if fs == None or int(fs) == 0:
                fs = client.getFileSize(url, retry429=True)
            q = qual_based_on_fs(q, fs)
            online = check(url)
            urldata = client.b64encode(json.dumps('', encoding='utf-8'))
            params = client.b64encode(json.dumps('', encoding='utf-8'))
            if headers != None:
                paramsx = {'headers': headers}
                params = client.b64encode(json.dumps(paramsx, encoding='utf-8'))
            allowsDownload = True
            items.append({'quality': q, 'riptype': r, 'src': urlx, 'fs': fs, 'online': online,
                          'params': params, 'urldata': urldata,
                          'allowsStreaming': True, 'allowsDownload': allowsDownload})
        else:
            fs = client.getFileSize(url, retry429=True, headers=headers)
            if fs == None or int(fs) == 0:
                fs = client.getFileSize(url, retry429=True)
            q = qual_based_on_fs(q, fs)
            online = check(url)
            urldata = client.b64encode(json.dumps('', encoding='utf-8'))
            params = client.b64encode(json.dumps('', encoding='utf-8'))
            if headers != None:
                paramsx = {'headers': headers}
                params = client.b64encode(json.dumps(paramsx, encoding='utf-8'))
            allowsDownload = True
            if '.m3u8' in url:
                allowsDownload = False
            items.append({'quality': q, 'riptype': r, 'src': url, 'fs': fs, 'online': online,
                          'params': params, 'urldata': urldata,
                          'allowsStreaming': True, 'allowsDownload': allowsDownload})
    except Exception as e:
        log(type='ERROR', method='process', err=u'%s' % e)

    if len(items) == 0:
        # fall back to a generic probe when nothing above produced an item
        fs = client.getFileSize(url, retry429=True, headers=headers)
        if fs == None or int(fs) == 0:
            fs = client.getFileSize(url, retry429=True)
        q = qual_based_on_fs(q, fs)
        online = check(url)
        urldata = client.b64encode(json.dumps('', encoding='utf-8'))
        params = client.b64encode(json.dumps('', encoding='utf-8'))
        if headers != None:
            paramsx = {'headers': headers}
            params = client.b64encode(json.dumps(paramsx, encoding='utf-8'))
        items.append({'quality': q, 'riptype': r, 'src': url, 'fs': fs, 'online': online,
                      'params': params, 'urldata': urldata,
                      'allowsStreaming': True, 'allowsDownload': True})
    return items
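# --- Hedged sketch of the item schema `process` emits ---
# Field names come from the code above; the values here are assumed examples.
example_item = {
    'quality': '720p',          # label, possibly refined by qual_based_on_fs
    'riptype': 'BRRIP',
    'src': 'https://example.invalid/video.mp4',  # hypothetical URL
    'fs': 0,                    # file size in bytes (0 when unknown)
    'online': False,            # result of the availability check
    'params': '',               # b64-encoded JSON request params (e.g. headers)
    'urldata': '',              # b64-encoded JSON URL payload
    'allowsStreaming': True,
    'allowsDownload': True,     # False for .m3u8 playlists
}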
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        typ = url[4]
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0",
            "http.content_type": "application/x-www-form-urlencoded; charset=UTF-8"
        }
        data = ''
        if typ == "SERIAL":
            title, id, year, orgtitle = url[0], url[1], url[2], url[3]
            sezon, epizod = url[5], url[6]
            # include originalTitle only when one is known (matches the FILM branch)
            if orgtitle != "0":
                data = {"id": int(id), "type": typ, "title": title,
                        "originalTitle": str(orgtitle), "year": int(year),
                        "sezon": str(sezon), "odcinek": str(epizod),
                        "site": "filmdb", "browser": "chrome"}
            else:
                data = {"id": int(id), "type": typ, "title": title, "year": int(year),
                        "sezon": str(sezon), "odcinek": str(epizod),
                        "site": "filmdb", "browser": "chrome"}
        if typ == "FILM":
            title, id, year, orgtitle = url[0], url[1], url[2], url[3]
            if orgtitle != "0":
                data = {"id": int(id), "type": typ, "title": str(title),
                        "originalTitle": str(orgtitle), "year": int(year),
                        "site": "filmdb", "browser": "chrome"}
            else:
                data = {"id": int(id), "type": typ, "title": str(title),
                        "year": int(year), "site": "filmdb", "browser": "chrome"}
        data = {"json": json.dumps(data, ensure_ascii=False)}
        response = requests.post("http://fboost.pl/api/api.php", data=data, headers=headers)
        content = json.loads(response.content)
        for code in zip(content[u'link'], content[u'wersja']):
            wersja = str(code[1])
            lang, info = self.get_lang_by_type(wersja)
            test = requests.post("http://fboost.pl/api/player.php?src=%s" % code[0]).content
            link = re.search("""iframe src="(.*)" style""", test)
            link = link.group(1)
            if len(link) < 2:
                continue
            if "cda.pl" in link:
                try:
                    response = requests.get(link).content
                    test = client.parseDOM(response, 'div', attrs={'class': 'wrapqualitybtn'})
                    urls = client.parseDOM(test, 'a', ret='href')
                    for url in urls:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        q = source_utils.check_sd_url(url)
                        sources.append({'source': host, 'quality': q, 'language': lang,
                                        'url': url, 'info': info, 'direct': False,
                                        'debridonly': False})
                    continue
                except:
                    pass
            if "rapidvideo.com" in link:
                try:
                    response = requests.get(link).content
                    test = re.findall(r"""(https:\/\/www.rapidvideo.com\/e\/.*)">""", response)
                    for i in range(1, len(test)):
                        url = test[i]
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        q = source_utils.check_sd_url(url)
                        sources.append({'source': host, 'quality': q, 'language': lang,
                                        'url': url, 'info': info, 'direct': False,
                                        'debridonly': False})
                    continue
                except Exception as e:
                    print(e)
            valid, host = source_utils.is_host_valid(link, hostDict)
            q = source_utils.check_sd_url(link)
            sources.append({'source': host, 'quality': q, 'language': lang,
                            'url': link, 'info': info, 'direct': False,
                            'debridonly': False})
        return sources
    except:
        return sources