def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from an anime episode page.

    Each 'a[data-video]' anchor is either resolved in place (vidnode.net,
    ocloud.stream yield (stream_url, quality_label) pairs) or passed through
    as a plain hoster url with a fixed 'SD' quality.

    url -- episode page url (None yields [])
    hostDict / hostprDict -- known hoster domains (hostDict is extended in place)
    Returns a list of source info dicts; [] on any error.
    """
    try:
        sources = []
        if url == None: return sources
        # whitelist a few CDN domains so is_host_valid accepts direct links
        hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
        result = client.request(url, timeout=10)
        dom = dom_parser.parse_dom(result, 'a', req='data-video')
        # normalise protocol-relative data-video attributes to https
        urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video'] for i in dom]
        for url in urls:
            dom = []
            if 'vidnode.net' in url:
                # vidnode embeds expose <source src label> tags directly
                result = client.request(url, timeout=10)
                dom = dom_parser.parse_dom(result, 'source', req=['src', 'label'])
                dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
            elif 'ocloud.stream' in url:
                # ocloud links need a second fetch of each ./embed page to
                # pull the real file id out of the "var ifleID = '...'" script
                result = client.request(url, timeout=10)
                base = re.findall('<base href="([^"]+)">', result)[0]
                hostDict += [base]
                dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
            if dom:
                # resolved (stream_url, label) pairs from the branches above
                try:
                    for r in dom:
                        valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                        if not valid: continue
                        quality = source_utils.label_to_quality(r[1])
                        urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                        for x in urls:
                            if direct:
                                size = source_utils.get_size(x['url'])
                                if size:
                                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False, 'info': size})
                                else:
                                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
            else:
                # plain third-party hoster link
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                try:
                    # py2 str.decode: acts as a utf-8 sanity check; a bad url
                    # raises and is silently dropped
                    url.decode('utf-8')
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
def more_rapidvideo(link, hostDict, lang, info):
    """Collect every rapidvideo.com mirror embedded on *link*'s page.

    link -- page url; anything not on rapidvideo.com yields [].
    hostDict -- known hoster domains (forwarded to is_host_valid)
    lang / info -- language code and info label copied into each source dict
    Returns a list of source dicts, or [] on any scrape error.
    """
    sources = []
    if "rapidvideo.com" not in link:
        return sources
    try:
        response = requests.get(link).content
        mirrors = re.findall("""(https:\/\/www.rapidvideo.com\/e\/.*)">""", response)
        # BUG FIX: the old loop ran `for i in range(1, len(mirrors))` and so
        # always dropped the first (often the only) mirror found.
        for url in mirrors:
            valid, host = source_utils.is_host_valid(url, hostDict)
            q = source_utils.check_sd_url(url)
            sources.append({
                'source': host,
                'quality': q,
                'language': lang,
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': False,
            })
        return sources
    except Exception as e:  # was py2-only `except Exception, e`
        print(e)
        return []
def ListujLinki():
    """Kodi directory builder: list player links for every episode row.

    Reads the page url from the addon's global `params` dict, walks each
    'lista_hover' table row, fetches its player page
    ("...odtwarzacz-<id>.html") and adds one directory entry per iframe
    whose host is recognised by a resolveurl resolver.
    """
    url = urllib.unquote_plus(params['url'])
    result = requests.get(url).content
    result = client.parseDOM(result, 'table', attrs={'class': 'lista'})
    result = client.parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
    # player-page url template: <site base>odtwarzacz-<episode id>.html
    odtwarzacz = "%sodtwarzacz-%s.html"
    # build a flat, lower-cased, order-preserving de-duplicated list of all
    # resolver domains (wildcard resolvers excluded)
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if not '*' in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]
    # keep only the site base up to and including "pl/"
    url = url.split("pl/")[0] + "pl/"
    for item in result:
        id = client.parseDOM(item, 'span', ret='rel')[0]
        content = odtwarzacz % (url, id)
        xbmc.log('Wbijam.pl | Listuje z url: %s' % content, xbmc.LOGNOTICE)
        temp = requests.get(content).content
        try:
            link = client.parseDOM(temp, 'iframe', ret='src')
        except:
            continue
        for item2 in link:
            try:
                # normalise protocol-relative iframe urls
                if str(item2).startswith("//"):
                    item2 = str(item2).replace("//", "http://")
                valid, host = source_utils.is_host_valid(str(item2), hostDict)
                if valid == False:
                    continue
                xbmc.log('Wbijam.pl | Video Link: %s' % str(item2), xbmc.LOGNOTICE)
                addDir("[B]" + host + "[/B]", str(item2), 6, '', '', '', False)
            except:
                continue
def more_cdapl(link, hostDict, lang, info):
    """Expand a cda.pl link into one direct source per advertised quality.

    link -- page url; anything not on cda.pl yields [].
    hostDict -- known hoster domains (forwarded to is_host_valid)
    lang / info -- language code and info label copied into each source dict
    Returns a list of source dicts ('direct': True), or [] on scrape error.
    """
    sources = []
    if "cda.pl" not in link:
        return sources
    try:
        response = requests.get(link).content
        buttons = client.parseDOM(response, 'div', attrs={'class': 'wrapqualitybtn'})
        urls = client.parseDOM(buttons, 'a', ret='href')
        for url in urls:
            valid, host = source_utils.is_host_valid(url, hostDict)
            q = source_utils.check_sd_url(url)
            # the per-quality player page embeds the real file url in its
            # JSON config: "file":"...","file_cast"
            direct = re.findall("""file":"(.*)","file_cast""", requests.get(url).content)[0].replace("\\/", "/")
            sources.append({
                'source': host,
                'quality': q,
                'language': lang,
                'url': direct,
                'info': info,
                'direct': True,
                'debridonly': False,
            })
        return sources
    except Exception as e:  # was py2-only `except Exception, e` / `print e`
        print(e)
        return []
def sources(self, url, hostDict, hostprDict):
    """Return at most one 'pl' source: the single iframe inside the
    '#video_player' section of the title page at self.base_link + url."""
    sources = []
    if url == None:
        return sources
    try:
        html = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
        player = client.parseDOM(html, 'section', attrs={'id': 'video_player'})[0]
        stream = client.parseDOM(player, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(stream, hostDict)
        if not valid:
            return sources
        # a 'Z lektorem' span marks the voice-over version
        info = 'Lektor' if 'Z lektorem' in client.parseDOM(player, 'span') else None
        sources.append({
            'source': host,
            'quality': source_utils.check_sd_url(stream),
            'language': 'pl',
            'url': stream,
            'info': info,
            'direct': False,
            'debridonly': False,
        })
    except:
        pass
    return sources
def sources(self, url, hostDict, hostprDict):
    """Decode the obfuscated player script on the episode page and emit
    one 'pl' source for the hoster iframe it hides."""
    sources = []
    if url == None:
        return sources
    try:
        page = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
        info = self.get_lang_by_type(client.parseDOM(page, 'title')[0])
        pane = client.parseDOM(page, 'div', attrs={'class': 'tab-pane active'})[0]
        # the active tab holds one <script> whose first quoted chunk is the payload
        packed = client.parseDOM(pane, 'script')[0].split('"')[1]
        # self.shwp reverses the site's obfuscation into plain HTML
        frame = client.parseDOM(self.shwp(packed), 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(frame, hostDict)
        if not valid:
            return sources
        sources.append({
            'source': host,
            'quality': source_utils.check_sd_url(frame),
            'language': 'pl',
            'url': frame,
            'info': info,
            'direct': False,
            'debridonly': False,
        })
    except:
        pass
    return sources
def sources(self, url, hostDict, hostprDict):
    """Collect one 'en' source per data-video anchor inside the first
    'anime_muti_link' block of the page."""
    sources = []
    if url == None:
        return sources
    try:
        page = client.request(url)
        blocks = client.parseDOM(page, 'div', attrs={'class': 'anime_muti_link'})
        anchors = [client.parseDOM(b, 'a', ret='data-video') for b in blocks if b]
        try:
            for video in anchors[0]:
                # data-video values are scheme-less; prefix http:
                candidate = 'http:' + video
                valid, host = source_utils.is_host_valid(candidate, hostDict)
                quality, info = source_utils.get_release_quality(candidate, None)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': candidate,
                    'direct': False,
                    'debridonly': False,
                })
        except Exception:
            pass
    except:
        pass
    return sources
def get_epsiode_link(sess, data):
    """Resolve one animezone.pl episode mirror into (host, link).

    sess -- a requests.Session carrying the site cookies
    data -- opaque form token POSTed back to the episode page
    Raises InvalidLink when no link is found or the host is unknown.

    NOTE(review): this function reads a module-global `url` (the episode
    page url) rather than taking it as a parameter — confirm the caller
    sets it before each call.
    """
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'www.animezone.pl',
        'Referer': str(url).replace("http://", "http://www."),
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
    }
    # the site requires this tracking-pixel hit before it will answer the POST
    verify = sess.get('http://animezone.pl/images/statistics.gif', headers=headers)
    # flat, lower-cased, order-preserving de-duplicated resolver domain list
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if not '*' in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]
    headers = {
        'Host': 'www.animezone.pl',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Accept': '*/*',
        'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
        'Referer': str(url).replace("http://", "http://www."),
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    data = {'data': data}
    response = sess.post(str(url).replace("http://", "https://www."), headers=headers, data=data).content
    # the answer is either a bare <a href> or an <iframe src>
    try:
        link = client.parseDOM(response, 'a', ret='href')[0]
    except:
        link = client.parseDOM(response, 'iframe', ret='src')[0]
    if not link:
        raise InvalidLink('No link')
    # normalise protocol-relative urls
    if str(link).startswith('//'):
        link = str(link).replace("//", "http://")
    try:
        valid, host = source_utils.is_host_valid(str(link), hostDict)
    except Exception as e:
        log_exception()
        raise InvalidLink('Exception {!r}'.format(e))
    if not valid:
        raise InvalidLink('Invalid host')
    return host, link
def sources(self, url, hostDict, hostprDict):
    """Scrape '#iframelink' button values from the search result page.

    url -- query string with a 'title' key
    Returns a list of source dicts.

    BUG FIX: the None-url guard used a bare `return` (i.e. None) while
    every other exit returns a list; callers iterate the result, so it
    now returns the empty list instead.
    """
    sources = []
    try:
        if url is None:
            return sources
        urldata = urlparse.parse_qs(url)
        urldata = dict((i, urldata[i][0]) for i in urldata)
        clean_title = cleantitle.geturl(urldata['title'])
        start_url = self.search_link % (self.base_link, clean_title)
        data = self.scraper.get(start_url).content
        r = dom_parser2.parse_dom(data, 'button', {'id': 'iframelink'})
        links = [i.attrs['value'] for i in r]
        for i in links:
            try:
                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': '1080p',  # hard-coded label, as in the original scraper
                    'language': 'en',
                    'url': i,
                    'info': [],
                    'direct': False,
                    'debridonly': False,
                })
            except:
                pass
        return sources
    except:
        return sources
def work(self, link):
    """Classify a single link: returns (host, quality, link) when the
    host is recognised, or the sentinel 0 when it is not.

    NOTE(review): `testDict` is not defined in this block — presumably a
    module-level hoster list; verify it exists at runtime.
    NOTE(review): the mixed return types (tuple vs 0) force callers to
    check for 0 before unpacking.
    """
    # plain http links are first translated by the class's own resolver
    if str(link).startswith("http://"):
        link = self.getlink(link)
    q = source_utils.check_sd_url(link)
    valid, host = source_utils.is_host_valid(link, testDict)
    if not valid:
        return 0
    return host, q, link
def sources(self, url, hostDict, hostprDict):
    """Scrape the '#noSubs' links table: each row links to a watch page
    whose 'action-btn' anchor is the actual hoster url.

    Both page fetches are retried (3x resp. 2x) because the site is flaky.
    Returns a list of source dicts; [] on any error.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # retry the listing page up to 3 times
        for i in range(3):
            result = client.request(url, timeout=10)
            if not result == None: break
        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content
        # each row: (label text, watch-page href)
        links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch', re.DOTALL).findall(result)
        for link in links:  # [:5]:
            try:
                url2 = urlparse.urljoin(self.base_link, link[1])
                # retry the watch page up to 2 times with a short timeout
                for i in range(2):
                    result2 = client.request(url2, timeout=3)
                    if not result2 == None: break
                r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                valid, hoster = source_utils.is_host_valid(r, hostDict)
                if not valid: continue
                # log_utils.log('JairoxDebug1: %s - %s' % (url2,r), log_utils.LOGDEBUG)
                urls, host, direct = source_utils.check_directstreams(r, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                # traceback.print_exc()
                pass
        # log_utils.log('JairoxDebug2: %s' % (str(sources)), log_utils.LOGDEBUG)
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a movie/episode into either one openload-style embed or a
    set of direct CDN streams derived from an encrypted manifest.

    url -- query string of title params ('tvshowtitle' selects episode mode)
    Returns a list of source dicts.

    BUG FIX: the outer except previously returned None; callers expect a
    list, so it now returns the (possibly empty) accumulated `sources`.
    Also replaced py2-only dict.iteritems() with items() (works on 2 and 3).
    """
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        if 'tvshowtitle' in data:
            url = self.__get_episode_url(data)
        else:
            url = self.__get_movie_url(data)
        token = urlparse.parse_qs(urlparse.urlparse(url).query)['token'][0]
        if any(s in token for s in ("oload", "openload")):
            # the token itself is an openload-style embed url
            valid, host = source_utils.is_host_valid(token, hostDict)
            sources.append({
                'source': host,
                'quality': '720p',
                'language': 'en',
                'url': token,
                'info': [],
                'direct': False,
                'debridonly': False,
            })
        else:
            # fetch the encrypted manifest hash, decrypt it, then request
            # the per-quality stream map from the CDN endpoint
            response = client.request(url, headers=self.headers)
            manifest_info_encrpyted = json.loads(response)['hash']
            manifest_info = self.__decrypt(manifest_info_encrpyted)
            manifest_info = manifest_info.split(':')
            url = self.server % (manifest_info[0], token, manifest_info[2], manifest_info[1])
            response = client.request(url, headers=self.headers)
            manifest = json.loads(response)
            for k, v in manifest.items():
                try:
                    # manifest keys are pixel heights; >= 720 keeps its label
                    quality = k + 'p' if int(k) >= 720 else 'SD'
                    sources.append({
                        'source': 'CDN',
                        'quality': quality,
                        'language': 'en',
                        'url': v,
                        'direct': True,
                        'debridonly': False,
                    })
                except Exception:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only episode scraper: search the site's RSS feed for
    'S##E##' releases and parse name/size/link triples out of the post.

    url -- query string with tvshowtitle/season/episode keys
    Returns a list of source dicts ('debridonly': True); [] otherwise.
    """
    try:
        sources = []
        if url is None: return sources
        # this indexer is useless without a debrid account
        if debrid.status() is False: raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        # strip characters the site's search chokes on
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r)
        r = client.parseDOM(r, 'item')
        title = client.parseDOM(r, 'title')[0]
        if hdlr in title:
            # (release name, size text, download href) triples
            r = re.findall('<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3', r[0], re.DOTALL)
            for name, size, url in r:
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    # normalise "GiB"/"MiB" and convert MB to GB
                    size = re.sub('i', '', size)
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        return sources
    except:
        return sources
def ListujLinki():
    """Kodi directory builder for kreskoweczki.pl.

    POSTs each player's source_id back to the fullscreen page and adds a
    directory entry for every recognised hoster link returned.
    Reads the page url from the addon's global `params` dict.
    """
    try:
        url = urllib.unquote_plus(params['url'])
    except:
        pass
    # the video id is the 6th path segment of the page url
    id = url.split("/")[5]
    s = requests.session()
    referer = "https://www.kreskoweczki.pl/fullscreen/" + id
    # NOTE(review): `headers` is built but never passed to a request below —
    # presumably a leftover; confirm before removing.
    headers = {
        'Referer': referer,
        'User-Agent': "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3424.0 Safari/537.36",
    }
    result = client.request(url)
    h = HTMLParser()
    result = h.unescape(result)
    # each player option is a hidden <input value=...> in the fullscreen form
    source_id = client.parseDOM(result, 'form', attrs={'action': '/fullscreen/' + id})
    source_id = client.parseDOM(source_id, 'input', ret='value')
    # flat, lower-cased, order-preserving de-duplicated resolver domain list
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if not '*' in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]
    for item in source_id:
        data = {'source_id': str(item)}
        content = s.post("https://www.kreskoweczki.pl/fullscreen/" + id, data=data).content
        # the response carries either a playerholder <a href> or an <iframe src>
        try:
            temp = client.parseDOM(content, 'div', attrs={'class': 'playerholder'})
            video_link = client.parseDOM(temp, 'a', ret='href')[0]
        except:
            try:
                video_link = client.parseDOM(content, 'iframe', ret='src')[0]
            except:
                continue
        # normalise protocol-relative urls
        if str(video_link).startswith("//"):
            video_link = str(video_link).replace("//", "http://")
        valid, host = source_utils.is_host_valid(video_link, hostDict)
        if valid == False:
            continue
        else:
            nazwa = "[COLOR green]" + host + " [/COLOR]"
            addLink(nazwa, str(video_link), 6, "", "", default_background, "", "")
def sources(self, url, hostDict, hostprDict):
    """Follow the 'playex' player box to its iframe list and emit one
    source per embedded iframe; one known domain is a redirector whose
    Location header must be followed first.

    BUG FIX: the outer except previously returned None; it now returns
    `sources` so callers always get a list.
    """
    sources = []
    try:
        r = client.request(url)
        try:
            data = client.parseDOM(r, 'div', attrs={'class': 'playex'})
            data = [client.parseDOM(i, 'a', ret='href') for i in data if i]
            # the first anchor leads to the page carrying the actual iframes
            r = client.request(data[0][0])
            data = client.parseDOM(r, 'div', attrs={'class': 'playex'})
            data = [client.parseDOM(i, 'iframe', ret='src') for i in data if i]
            try:
                for url in data[0]:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if host == self.domains[1]:
                        # redirector domain: resolve via the Location header
                        r = requests.head(url)
                        url = r.headers['location']
                        valid, host = source_utils.is_host_valid(url, hostDict)
                    quality, info = source_utils.get_release_quality(url, None)
                    if not valid:
                        continue
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url.replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Offer up to three quality variants of one hosted video; the page
    advertises each available rendition as a '?wersja=NNNp' link."""
    sources = []
    try:
        if url == None:
            return sources
        url = self.base_link + url
        page = client.request(url)
        title = client.parseDOM(page, 'span', attrs={'style': 'margin-right: 3px;'})[0]
        lang, info = self.get_lang_by_type(title)
        valid, host = source_utils.is_host_valid(url, hostDict)
        if not valid:
            return sources
        # highest quality first, matching the original append order
        for marker, quality in (("?wersja=1080p", '1080p'),
                                ("?wersja=720p", 'HD'),
                                ("?wersja=480p", 'SD')):
            if marker in page:
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': lang,
                    'url': url + marker,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                })
        return sources
    except:
        log_exception()
        return sources
def sources(self, url, hostDict, hostprDict):
    """Expand every link found on the page into sources; mehliz and ok.ru
    links are resolved server-side into direct streams, everything else is
    passed through as an 'SD' hoster link."""
    sources = []
    try:
        if not url:
            return sources
        combined = hostDict + hostprDict
        for link in self.links_found(url):
            try:
                valid, host = source_utils.is_host_valid(link, combined)
                if 'mehliz' in link:
                    # in-house mirror: resolve to direct stream urls
                    host, direct, candidates = 'MZ', True, self.mz_server(link)
                elif 'ok.ru' in link:
                    host, direct, candidates = 'vk', True, directstream.odnoklassniki(link)
                else:
                    direct, candidates = False, [{'quality': 'SD', 'url': link}]
                for entry in candidates:
                    sources.append({
                        'source': host,
                        'quality': entry['quality'],
                        'language': 'en',
                        'url': entry['url'],
                        'direct': direct,
                        'debridonly': False,
                    })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    '''
    Turn the 'sources' url parameter (the repr of a Python list of urls)
    into one file-locker source entry per url.

    Keyword arguments:
    url -- string - url params carrying 'sources' and 'quality' keys

    Returns:
    sources -- list of source information dictionaries
    '''
    sources = []
    try:
        params = urlparse.parse_qs(url)
        params = dict((key, params[key][0]) for key in params)
        for locker_url in ast.literal_eval(params['sources']):
            try:
                valid, host = source_utils.is_host_valid(locker_url, hostDict)
                # anything labelled 720p is reported as plain 'HD'
                quality = 'HD' if '720p' in params['quality'] else params['quality']
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': locker_url,
                    'direct': False,
                    'debridonly': False,
                })
            except Exception:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Parse the naszekino link table: every row carries a link cell, a
    version cell (lektor/napisy/...) and a quality cell."""
    sources = []
    try:
        if url == None:
            return sources
        page = client.request(url, cookie=cache.cache_get('naszekino_cookie'))
        table = client.parseDOM(page, 'table', attrs={'class': 'table table-bordered'})
        for row in client.parseDOM(table, 'tr'):
            try:
                cell = client.parseDOM(row, 'td', attrs={'class': 'link-to-video'})
                link = str(client.parseDOM(cell, 'a', ret='href')[0])
                cells = client.parseDOM(row, 'td')
                lang, info = self.get_lang_by_type(str(cells[1]))
                valid, host = source_utils.is_host_valid(link, hostDict)
                # "wysoka" (Polish: "high") in the quality cell means HD;
                # otherwise fall back to guessing from the link itself
                if "wysoka" in str(cells[2]).lower():
                    quality = "HD"
                else:
                    quality = source_utils.check_sd_url(link)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': lang,
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                })
            except:
                continue
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Walk the '#downloads' table; the hoster name is carried in the
    row's icon url (after the last '='), the language in the third cell."""
    sources = []
    try:
        if url == None:
            return sources
        page = client.request(url)
        table = client.parseDOM(page, 'div', attrs={'id': 'downloads'})[0]
        for row in client.parseDOM(table, 'tr'):
            try:
                cells = client.parseDOM(row, 'td')
                # icon src ends with "...=<hostname>"
                icon = client.parseDOM(cells[0], 'img', ret='src')[0]
                hostname = icon.rpartition('=')[-1]
                link = client.parseDOM(cells[0], 'a', ret='href')[0]
                valid, host = source_utils.is_host_valid(hostname, hostDict)
                if not valid:
                    continue
                # 'Wysoka' (high) in the second cell marks an HD rip
                quality = 'HD' if 'Wysoka' in cells[1] else 'SD'
                lang, info = self.get_lang_by_type(cells[2])
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': lang,
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                })
            except:
                pass
        return sources
    except:
        return sources
def ListujLinki():
    """Kodi directory builder: for each anchor in the episode link table,
    fetch its page, take the first iframe and add a directory entry for
    every recognised hoster.

    PERF FIX: the resolver domain list was rebuilt from
    resolveurl.relevant_resolvers() on every loop iteration even though it
    never changes; it is now built once before the loop.
    """
    import resolveurl
    url = urllib.unquote_plus(params['url'])
    result = client.request(url)
    h = HTMLParser()
    result = h.unescape(result)
    result = client.parseDOM(result, 'table', attrs={'class': 'table table-bordered'})
    linki = client.parseDOM(result, 'a', ret='href')
    # flat, lower-cased, order-preserving de-duplicated resolver domain list
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if not '*' in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]
    for item in linki:
        temp = client.request(str(item))
        link = client.parseDOM(temp, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(str(link), hostDict)
        if valid == False:
            continue
        addon.addLink("[B]" + host + "[/B]", link, mode=6)
def sources(self, url, hostDict, hostprDict):
    """Pull hoster urls out of the page's callvalue(...) javascript calls;
    a single page-level '#quality' cell labels them all (default 'SD')."""
    sources = []
    try:
        if url == None:
            return sources
        page = client.request(url)
        try:
            quality = client.parseDOM(page, 'td', attrs={'id': 'quality'})[0]
        except:
            quality = 'SD'
        for call in re.findall('callvalue\((.+?)\)', page):
            try:
                # first quoted http url inside the call's argument list
                found = re.findall('(http.+?)(?:\'|\")', call)[0]
                valid, host = source_utils.is_host_valid(found, hostDict)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': found,
                    'direct': False,
                    'debridonly': False,
                })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site for a movie title and scrape its embedded player.

    NOTE: despite the name, `url` is a dict with 'title' and 'year' keys.
    Returns a list with at most one source dict.
    """
    sources = []
    try:
        if url == None:
            return sources
        year = url['year']  # KeyError on malformed input falls through to except
        headers = {'User-Agent': client.randomagent()}
        slug = cleantitle.geturl(url['title']).replace('-', '+')
        search_page = requests.get(urlparse.urljoin(self.base_link, self.search_link % slug), headers=headers)
        hit = BeautifulSoup(search_page.text, 'html.parser').find('div', {'class': 'item'})
        detail = requests.get(hit.find('a')['href'], headers=headers)
        soup = BeautifulSoup(detail.content, 'html.parser')
        quality = soup.find('span', {'class': 'calidad2'}).text
        stream = soup.find('div', {'class': 'movieplay'}).find('iframe')['src']
        if quality not in ['1080p', '720p']:
            quality = 'SD'
        valid, host = source_utils.is_host_valid(stream, hostDict)
        sources.append({
            'source': host,
            'quality': quality,
            'language': 'en',
            'url': stream,
            'direct': False,
            'debridonly': False,
        })
        return sources
    except:
        print("Unexpected error in Furk Script: check_api", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Log into segos.es, fetch the link table for this title and build a
    source per row; cda.pl and rapidvideo links are expanded further via
    the more_sources helpers.

    url -- title page url on segos.es (None yields [])
    Returns a list of source dicts; [] on any error.
    """
    sources = []
    try:
        if url == None: return sources
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
            'Referer': 'https://segos.es/?page=login'
        }
        # authenticate once per call with the configured account
        data = {
            "login": self.user_name,
            'password': self.user_pass,
            'loguj': ''
        }
        s = requests.Session()
        s.post('https://segos.es/?page=login', data=data, headers=headers)
        k = s.get(url)
        result = k.text
        result = client.parseDOM(result, 'table', attrs={'class': 'table table-hover table-bordered'})
        results = client.parseDOM(result, 'tr')
        for result in results:
            try:
                # cell layout: [0] version icon, [1] quality text, [5] link
                quality = client.parseDOM(result, 'td')[1]
                quality = quality.replace(' [EXTENDED]', '').replace(' [Extended]', '')
                lang = 'en'
                info = client.parseDOM(result, 'td')[0]
                info = client.parseDOM(info, 'img', ret='src')
                # the icon filename encodes the audio version
                if 'napisy' in info[0]:
                    info[0] = 'Napisy'
                    lang = 'pl'
                if 'lektor' in info[0]:
                    info[0] = 'Lektor'
                    lang = 'pl'
                if 'dubbing' in info[0]:
                    info[0] = 'Dubbing'
                    lang = 'pl'
                link = client.parseDOM(result, 'td')[5]
                link = client.parseDOM(link, 'a', ret='href')
                link = urlparse.urljoin(self.base_link, str(link[0]))
                # follow the site's own redirect page to the real iframe
                k = s.get(link)
                video_link_content = k.text
                video_link_content = client.parseDOM(
                    video_link_content, 'div',
                    attrs={
                        'class': 'embed-responsive embed-responsive-16by9'
                    })
                video_link = client.parseDOM(video_link_content, 'iframe', ret='src')
                valid, host = source_utils.is_host_valid(video_link[0], hostDict)
                # prefer expanded cda.pl/rapidvideo mirrors; only fall back
                # to the raw iframe url when neither helper produced any
                more = False
                for source in more_sources.more_cdapl(video_link[0], hostDict, lang, info[0]):
                    sources.append(source)
                    more = True
                for source in more_sources.more_rapidvideo(video_link[0], hostDict, lang, info[0]):
                    sources.append(source)
                    more = True
                if not more:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': lang,
                        'url': video_link[0],
                        'info': info[0],
                        'direct': False,
                        'debridonly': False
                    })
            except:
                continue
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over site sources and returns a dictionary with corresponding
    file locker sources and information.

    For each server id in the 'sources' url param a signed token is built
    (self.__token) and the site's info endpoint is queried; 'direct'
    answers yield a grabber call returning labelled stream files, other
    answers carry a single 'target' embed url.

    Keyword arguments:
    url -- string - url params ('sources', 'id', 'ts' keys)

    Returns:
    sources -- list of source information dictionaries
    '''
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        # 'sources' arrives as the textual repr of a list; split it back
        # into individual ids
        data['sources'] = re.findall("[^', u\]\[]+", data['sources'])
        try:
            q = re.findall("\.(.*)", data['id'])[0]
        except:
            q = data['id']
        # page-level quality comes from the tooltip endpoint
        query = (self.tooltip_path % q)
        url = urlparse.urljoin(self.base_link, query)
        q = client.request(url)
        quality = re.findall('ty">(.*?)<', q)[0]
        if '1080p' in quality:
            quality = '1080p'
        elif '720p' in quality:
            quality = 'HD'
        for i in data['sources']:
            # sign the request for this server id with the page timestamp
            token = str(
                self.__token({
                    'id': i,
                    'server': 28,
                    'update': 0,
                    'ts': data['ts']
                }))
            query = (self.info_path % (data['ts'], token, i))
            url = urlparse.urljoin(self.base_link, query)
            info_response = client.request(url, XHR=True)
            grabber_dict = json.loads(info_response)
            try:
                if grabber_dict['type'] == 'direct':
                    # direct type: ask the grabber endpoint for the actual
                    # stream file list
                    token64 = grabber_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i, token64))
                    url = urlparse.urljoin(self.base_link, query)
                    response = client.request(url, XHR=True)
                    sources_list = json.loads(response)['data']
                    for j in sources_list:
                        quality = j['label'] if not j['label'] == '' else 'SD'
                        #quality = 'HD' if quality in ['720p','1080p'] else 'SD'
                        quality = source_utils.label_to_quality(quality)
                        if 'googleapis' in j['file']:
                            sources.append({
                                'source': 'GVIDEO',
                                'quality': quality,
                                'language': 'en',
                                'url': j['file'],
                                'direct': True,
                                'debridonly': False
                            })
                            continue
                        #source = directstream.googlepass(j['file'])
                        valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
                        urls, host, direct = source_utils.check_directstreams(j['file'], hoster)
                        for x in urls:
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'en',
                                'url': x['url'],
                                'direct': True,
                                'debridonly': False
                            })
                elif not grabber_dict['target'] == '':
                    # embed type: 'target' is a (possibly protocol-relative)
                    # hoster embed url
                    url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']
                    #host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'en',
                        'url': urls[0]['url'],  #url.replace('\/','/'),
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a title (movie or episode) via the site's server list API.

    Flow: search for the title page, extract its numeric id ('mid'), fetch
    the server list, then per server either read a plain embed ('src') or
    run the tokenised 'uncensored' scripts to reach a gvideo playlist.

    url -- query string of title params (None yields [])
    Returns a list of source dicts; [] on any error.
    """
    try:
        sources = []
        if url is None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval() on the 'aliases' param — trusted here only
        # because the url is built by this addon itself
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        # the trailing "-<digits>" of the title url is the internal media id
        mid = re.findall('-(\d+)', url)[-1]
        try:
            headers = {'Referer': url}
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = client.request(u, headers=headers, XHR=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)
            u = urlparse.urljoin(self.base_link, self.info_link % mid)
            quality = client.request(u, headers=headers)
            quality = dom_parser.parse_dom(quality, 'div', attrs={'class': 'jtip-quality'})[0].content
            if quality == "HD":
                quality = "720p"
            for eid in r:
                try:
                    # label text may carry the episode number
                    try:
                        ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode == 0) or (int(ep) == episode):
                        if eid[1] != '6':
                            # ordinary server: the embed endpoint returns
                            # the hoster url directly as json 'src'
                            url = urlparse.urljoin(self.base_link, self.embed_link % eid[0])
                            link = client.request(url)
                            link = json.loads(link)['src']
                            valid, host = source_utils.is_host_valid(link, hostDict)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'info': [],
                                'direct': False,
                                'debridonly': False
                            })
                        else:
                            # server '6': obfuscated token script; three
                            # known obfuscation variants yield the x/y params
                            url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                            script = client.request(url)
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            r = client.request(u, XHR=True)
                            url = json.loads(r)['playlist'][0]['sources']
                            url = [i['file'] for i in url if 'file' in i]
                            url = [directstream.googletag(i) for i in url]
                            url = [i[0] for i in url if i]
                            for s in url:
                                # googleusercontent links need an extra redirect hop
                                if 'lh3.googleusercontent.com' in s['url']:
                                    s['url'] = directstream.googleredirect(s['url'])
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': s['quality'],
                                    'language': 'en',
                                    'url': s['url'],
                                    'direct': True,
                                    'debridonly': False
                                })
                except:
                    pass
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only RSS scraper: search the site feed for this title, pull
    (name, link, size) triples out of each post's 'Download Links' section
    and keep only releases matching the exact title and S##E##/year tag.

    url -- query string of title params (None yields [])
    Returns a list of source dicts ('debridonly': True); [] otherwise.
    """
    try:
        sources = []
        if url == None: return sources
        # this indexer is useless without a debrid account
        if debrid.status() == False: raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # hdlr is the tag a candidate release must carry: S##E## or the year
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        # strip characters the site's search chokes on
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        posts = client.parseDOM(r, 'item')
        hostDict = hostprDict + hostDict
        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                c = client.parseDOM(post, 'content.+?')
                # only the '<h1 ' chunk containing "Download Links" holds hrefs
                u = c[0].split('<h1 ')
                u = [i for i in u if 'Download Links' in i]
                u = client.parseDOM(u, 'a', ret='href')
                try:
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c[0])[0]
                except:
                    s = '0'
                items += [(t, i, s) for i in u]
            except:
                pass
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                # strip year/episode/format suffixes, then compare base titles
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()
                quality, info = source_utils.get_release_quality(name, item[1])
                try:
                    # normalise size text and convert MB to GB
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                url = item[1]
                # archives cannot be streamed
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': 'HEVC', 'direct': False, 'debridonly': True})
            except:
                pass
        # drop CAM releases if anything better was found
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check: sources = check
        return sources
    except:
        return sources
def Wyciaganie_Linkow():
    """List episode links from kreskowkazone.pl.

    Restores the session cookie jar persisted in special://temp/cookie.txt,
    fetches the episode page given in ``params['url']``, resolves each table
    row's obfuscated host token via an AJAX POST and adds one directory entry
    per valid (resolvable) hoster.
    """
    import json

    # Restore the previously persisted cookie jar into the shared session.
    basePath = "special://temp/cookie.txt"
    path = xbmc.translatePath(basePath)
    with open(path, 'r') as f:
        cookie = requests.utils.cookiejar_from_dict(json.load(f))
    s.cookies = cookie

    url = urllib.unquote_plus(params['url'])
    r = s.get(url, cookies=s.cookies).content
    results = client.parseDOM(r, 'tr', attrs={'class': 'wiersz'})

    # PERF: build the flat, deduplicated list of resolvable host domains once;
    # previously this was recomputed for every table row.
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if not '*' in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]

    # Both header dicts are constant across rows — hoisted out of the loop.
    stat_headers = {
        'Host': 'www.kreskowkazone.pl',
        'DNT': '1',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
        'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
        'Referer': url,
        'Connection': 'keep-alive',
    }
    ajax_headers = {
        'Accept': 'text/html, */*; q=0.01',
        'Referer': url,
        'Origin': 'https://www.kreskowkazone.pl',
        'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.39 Safari/537.36',
        'DNT': '1',
        'Content-Type': 'application/x-www-form-urlencoded',
    }

    counter = -1
    for result in results:
        counter += 1
        nazwa = client.parseDOM(result, 'a', ret='title')[0]

        # Extract the obfuscated link token from the rel="..." attribute by hand.
        index = str(result).find('\" rel')
        r = str(result)[index + 10:]
        index = str(r).find("\"")
        r = r[:index]
        data = {"o": str(r)}

        # Hit the stats beacon first, mimicking a real browser session.
        s.get('http://www.kreskowkazone.pl/images/statystyki.gif', headers=stat_headers, cookies=s.cookies)

        response = s.post("https://www.kreskowkazone.pl/odcinki_ajax", data=data, headers=ajax_headers)
        link = client.parseDOM(response.text, 'a', ret='href')
        try:
            # BUG FIX: the old guard compared the list to '' (never true) and
            # relied on a swallowed IndexError; skip empty results explicitly.
            if not link:
                continue
            if str(link[0]).startswith('//'):
                link[0] = str(link[0]).replace("//", "http://")
            valid, host = source_utils.is_host_valid(str(link[0]), hostDict)
            if valid == False:
                continue
            nazwa = "[COLOR green]" + host + ": [/COLOR]" + nazwa
            addLink("[B]" + str(nazwa) + "[/B]", str(link[0]), 6, "", "", "", "", "")
        except:
            continue
def sources(self, url, hostDict, hostprDict):
    # Resolve playable links by driving the site's AJAX embed endpoint.
    # `url` is an urlencoded metadata query (title/year or
    # tvshowtitle/season/episode, plus imdb and aliases).  Best-effort:
    # any failure returns whatever sources were gathered so far.
    try:
        sources = []

        if url == None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        imdb = data['imdb']
        # NOTE(review): eval() on plugin-supplied metadata; input comes from
        # this addon itself, but ast.literal_eval would be safer.
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)

        # 'extended' output yields a tuple: r[0]=body, r[3]=response
        # headers, r[4]=cookie string (per the accesses below).
        r = client.request(url, headers=headers, output='extended', timeout='10')

        # Sanity check that the page we landed on matches the imdb id.
        if not imdb in r[0]: raise Exception()

        cookie = r[4]
        headers = r[3]
        result = r[0]

        # First pass: direct Google-video "redirector" links embedded in
        # the page HTML are usable as-is.
        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append(
                        {
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass
        except:
            pass

        # The site hides a bearer token in the __utmx cookie; fall back to
        # the literal string 'false' when absent (server accepts that).
        try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except: auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)

        headers['Authorization'] = auth
        headers['Referer'] = url

        u = '/ajax/vsozrflxcw.php'
        # Follow redirects to discover the site's current base domain.
        self.base_link = client.request(self.base_link, headers=headers, output='geturl')
        u = urlparse.urljoin(self.base_link, u)

        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

        # elid is a base64-encoded unix timestamp; token and idEl are
        # scraped out of inline javascript on the page.
        elid = urllib.quote(
            base64.encodestring(str(int(time.time()))).strip())

        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

        post = {
            'action': action,
            'idEl': idEl,
            'token': token,
            'nopop': '',
            'elid': elid
        }
        post = urllib.urlencode(post)
        # The server expects the idEl/elid pair echoed back as a cookie.
        cookie += ';%s=%s' % (idEl, elid)

        headers['Cookie'] = cookie
        r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
        r = str(json.loads(r))
        # Pull every quoted http(s) URL out of the stringified JSON reply.
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

        for i in r:
            try:
                if 'google' in i:
                    quality = 'SD'

                    if 'googleapis' in i:
                        try: quality = source_utils.check_sd_url(i)
                        except Exception:
                            pass
                    if 'googleusercontent' in i:
                        # Route through the proxy and re-probe quality.
                        i = directstream.googleproxy(i)
                        try:
                            quality = directstream.googletag(
                                i)[0]['quality']
                        except Exception:
                            pass

                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })

                elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                    try:
                        quality = source_utils.check_sd_url(i)

                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except Exception:
                        pass
                else:
                    # Generic external hoster; quality is assumed 720p
                    # (presumably the site default — TODO confirm).
                    # NOTE(review): the link is appended even when `valid`
                    # is False — possibly an intentional fallback; verify.
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    sources.append({
                        'source': hoster,
                        'quality': '720p',
                        'language': 'en',
                        'url': i,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect debrid hoster links for a release from this site's RSS search.

    ``url`` is the metadata dict built by the plugin (title/year or
    tvshowtitle/season/episode).  Returns a list of source dicts; any
    unexpected error is logged and whatever was gathered is returned.
    """
    sources = []
    try:
        if url == None:
            return sources
        # Premium-only indexer: bail out when no debrid account is active.
        if debrid.status() == False:
            raise Exception()

        data = url
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']

        # Search string: "<show> SxxEyy" for episodes, "<movie> <year>" otherwise.
        if 'tvshowtitle' in data:
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        else:
            query = '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        search_url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(query))
        url = search_url

        feed = requests.get(search_url).text
        entries = re.findall(r'(?s)<item>(.*?)</item>', feed)

        hostDict = hostprDict + hostDict
        items = []

        for entry in entries:
            try:
                entry_title = re.findall(r'<title>(.*?)</title>', entry)[0]
                title = entry_title
                # Only keep feed entries whose title contains our query.
                if query.lower() not in entry_title.lower():
                    continue
                link_blocks = re.findall(
                    r'(?s)<singlelink></singlelink><br />(.*?)<br />.<strong>',
                    entry)
                for block in link_blocks:
                    for href in re.findall(r'<a href="(.*?)"', block):
                        quality = source_utils.get_quality_simple(href)
                        valid, host = source_utils.is_host_valid(href, hostDict)
                        if not valid:
                            continue
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': href,
                            'info': '',
                            'direct': False,
                            'debridonly': True
                        })
            except:
                traceback.print_exc()

        return sources
    except:
        traceback.print_exc()
        return sources