def sources(self, url, hostDict, hostprDict):
    """Resolve hoster links for a movie/episode page.

    Fix: the original used the Python-2-only ``<>`` operator, which is a
    SyntaxError on Python 3; replaced with ``!=`` / ``is not None``.

    :param url: page URL (contains 'episode' for TV content)
    :param hostDict: list of known free hosters
    :param hostprDict: list of premium hosters (unused here)
    :return: list of source dicts (empty on any failure)
    """
    sources = []
    try:
        if url is None:
            return sources
        content = 'episode' if 'episode' in url else 'movie'
        result = client.request(url)
        # Follow the play-container redirect if present (best effort).
        try:
            url = re.findall(r"class\s*=\s*'play_container'\s+href\s*=\s*'([^']+)", result)[0]
            result = client.request(url, timeout='10')
        except:
            pass
        # Either an AJAX player endpoint (needs a POST) ...
        try:
            url = re.compile(r'ajax\(\{\s*url\s*:\s*[\'"]([^\'"]+)').findall(result)[0]
            post = 'post'
        except:
            # ... or a direct show_player onclick link (no POST).
            url = re.compile(r'onclick=.*?show_player.*?,.*?"([^\\]+)').findall(result)[0]
            post = None
        if content != 'movie':  # was `<>` (Python 2 only)
            try:
                if post == 'post':
                    id, episode = re.compile(r'id=(\d+).*?&e=(\d*)').findall(url)[0]
                    post = {'id': id, 'e': episode, 'cat': 'episode'}
            except:
                pass
        else:
            if post == 'post':
                id = re.compile(r'id=(\d+)').findall(url)[0]
                post = {'id': id, 'cat': 'movie'}
        if post is not None:  # was `<>` (Python 2 only)
            result = client.request(url, post=post)
            url = re.findall(r"(https?:.*?)'\s+id='avail_links", result)[0]
        try:
            if 'google' in url:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                urls, host, direct = source_utils.check_directstreams(url, hoster)
                for x in urls:
                    sources.append(
                        {'source': host, 'quality': x['quality'], 'language': 'en',
                         'url': x['url'], 'direct': direct, 'debridonly': False})
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links (vidcloud indirection included) from an anime page.

    Fix: the original ``except: return`` returned ``None`` on failure while
    every caller expects a list; now returns the accumulated ``sources``.
    """
    sources = []
    try:
        r = requests.get(url).content
        # Last quality badge on the page wins (original behavior).
        qual = re.compile('class="quality">(.+?)<').findall(r)
        for i in qual:
            if '1080' in i:
                quality = '1080p'
            elif '720' in i:
                quality = '720p'
            else:
                quality = 'SD'
        u = client.parseDOM(r, "div", attrs={"class": "pa-main anime_muti_link"})
        for t in u:
            u = re.findall('<li class=".+?" data-video="(.+?)"', t)
            for url in u:
                if 'vidcloud' in url:
                    # vidcloud is a hub page: fetch it and harvest the
                    # mirrors it lists (skipping vidcloud itself).
                    url = 'https:' + url
                    r = requests.get(url).content
                    t = re.findall('li data-status=".+?" data-video="(.+?)"', r)
                    for url in t:
                        if 'vidcloud' in url:
                            continue
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        sources.append({
                            'source': host, 'quality': quality, 'language': 'en',
                            'url': url, 'direct': False, 'debridonly': False})
                if 'vidcloud' in url:
                    continue
                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({
                    'source': host, 'quality': quality, 'language': 'en',
                    'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources  # was `return` (None) — callers expect a list
def sources(self, url, hostDict, hostprDict):
    """Resolve a zalukaj.com page into one playable source.

    Prefers a direct <source> stream; falls back to the embedded <iframe>
    hoster. Session cookies are hard-coded (pre-authenticated account).
    """
    try:
        headers = {
            'Cookie': '__cfduid=d61b42b729455a590ff291892cb688ea11546349293; PHPSESSID=7u6cbc5pagnhqfm84jgjhg9hc2; __PHPSESSIDS=de81fa674b436a948cb337b7f4d2fa3898bd308c'
        }
        if url.startswith('//'):
            url = "https:" + url
        sources = []
        if url == None:
            return sources
        page = self.session.get(url, headers=headers).content
        player_url = "https://zalukaj.com" + str(
            client.parseDOM(page, 'iframe', ret='src')[0]) + "&x=1"
        details_html = str(
            client.parseDOM(page, 'div', attrs={'class': 'details'})[0])
        lang, info = self.get_lang_by_type(str(details_html))
        player_page = self.session.get(player_url, headers=headers).content
        try:
            # Direct stream available — hand it straight to the player.
            stream = str(client.parseDOM(player_page, 'source', ret='src')[0])
            valid, host = source_utils.is_host_valid(stream, hostDict)
            sources.append({
                'source': host, 'quality': 'HD', 'language': lang,
                'url': stream, 'info': info, 'direct': True,
                'debridonly': False})
            return sources
        except:
            # No direct stream — fall back to the embedded hoster iframe.
            embed = str(client.parseDOM(player_page, 'iframe', ret='src')[0])
            valid, host = source_utils.is_host_valid(embed, hostDict)
            sources.append({
                'source': host, 'quality': 'HD', 'language': lang,
                'url': embed, 'info': info, 'direct': False,
                'debridonly': False})
            return sources
    except:
        log_exception()
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from the page's #serverul list.

    Fix: the outer ``except: return`` returned ``None``; ``sources`` is now
    initialized before the ``try`` and always returned as a list.
    NOTE(review): the ``return sources`` inside the loop exits after the
    first <ul> — preserved, as pages appear to carry a single #serverul.
    """
    sources = []
    try:
        hostDict = hostprDict + hostDict
        r = client.request(url)
        u = client.parseDOM(r, "ul", attrs={"id": "serverul"})
        for t in u:
            try:
                u = client.parseDOM(t, 'a', ret='href')
                for url in u:
                    if 'getlink' in url:
                        continue
                    quality = source_utils.check_sd_url(url)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({
                            'source': host, 'quality': quality,
                            'language': 'en', 'url': url,
                            'direct': False, 'debridonly': False})
                return sources
            except:
                return sources
    except:
        return sources  # was `return` (None) — callers expect a list
def work(self, link, testDict):
    """Validate one candidate link against the hoster whitelist.

    Returns ``(host, quality, link)`` for a recognized hoster, or ``0``
    when the host is not in ``testDict``.
    """
    if str(link).startswith("http"):
        link = self.getlink(link)
    quality = source_utils.check_sd_url(link)
    valid, host = source_utils.is_host_valid(link, testDict)
    if not valid:
        return 0
    return host, quality, link
def sources(self, url, hostDict, hostprDict):
    """Harvest every embedded iframe on the page as an SD source.

    YouTube embeds and unrecognized hosters are skipped.
    """
    try:
        sources = []
        page = client.request(url)
        try:
            for frame_url in re.compile('<iframe .+?src="(.+?)"').findall(page):
                if 'youtube' in frame_url:
                    continue
                valid, host = source_utils.is_host_valid(frame_url, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': frame_url,
                    'direct': False,
                    'debridonly': False,
                })
        except Exception:
            return sources
    except Exception:
        return sources
    return sources
def sources(self, url, hostDict, hostprDict):
    """Parse the links table: one row = one hoster link.

    Column layout: [0] link, [1] language/version, [2] quality
    ('wysoka' = Polish for 'high' → HD, anything else → SD).
    """
    try:
        sources = []
        page = self.session.get(url).content
        page = page.decode('utf-8')
        page = HTMLParser().unescape(page)
        table = client.parseDOM(page, 'table',
                                attrs={'class': 'table table-bordered'})
        rows = client.parseDOM(table, 'tr')
        for row in rows:
            try:
                cells = client.parseDOM(row, 'td')
                lang_info = self.get_lang_by_type(cells[1])
                quality = 'HD' if 'wysoka' in cells[2].lower() else 'SD'
                try:
                    video_link = str(client.parseDOM(cells[0], 'a', ret='href')[0])
                    valid, host = source_utils.is_host_valid(video_link, hostDict)
                    sources.append({
                        'source': host, 'quality': quality,
                        'language': lang_info[0], 'url': video_link,
                        'info': lang_info[1], 'direct': False,
                        'debridonly': False})
                except:
                    continue
            except:
                continue
        return sources
    except:
        log_exception()
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the naszekino links table using a cached session cookie.

    'wysoka' ('high' in Polish) in the quality column maps to HD;
    otherwise quality is inferred from the link itself.
    """
    try:
        sources = []
        if url == None:
            return sources
        cookies = cache.cache_get('naszekino_cookie')
        page = client.request(url, cookie=cookies)
        table = client.parseDOM(page, 'table',
                                attrs={'class': 'table table-bordered'})
        for row in client.parseDOM(table, 'tr'):
            try:
                link_cell = client.parseDOM(row, 'td',
                                            attrs={'class': 'link-to-video'})
                link = str(client.parseDOM(link_cell, 'a', ret='href')[0])
                cells = client.parseDOM(row, 'td')
                version = str(cells[1])
                lang, info = self.get_lang_by_type(version)
                valid, host = source_utils.is_host_valid(link, hostDict)
                if "wysoka" in str(cells[2]).lower():
                    quality = "HD"
                else:
                    quality = source_utils.check_sd_url(link)
                sources.append({
                    'source': host, 'quality': quality, 'language': lang,
                    'url': link, 'info': info, 'direct': False,
                    'debridonly': False})
            except:
                continue
        return sources
    except:
        return sources
def sources(self, linki, hostDict, hostprDict):
    """Expand each episode link into per-quality sources.

    The page advertises available qualities via ``?wersja=...`` markers in
    its HTML; one source is appended per marker found.
    """
    sources = []
    try:
        for link in linki:
            try:
                if link == None:
                    return sources
                full_url = urlparse.urljoin(self.base_link, link)
                html = client.request(full_url)
                label = client.parseDOM(
                    html, 'span', attrs={'style': 'margin-right: 3px;'})[0]
                lang, info = self.get_lang_by_type(label)
                valid, host = source_utils.is_host_valid(full_url, hostDict)
                if not valid:
                    continue
                if "?wersja=1080p" in html:
                    sources.append({
                        'source': host, 'quality': '1080p', 'language': lang,
                        'url': full_url + "?wersja=1080p", 'info': info,
                        'direct': False, 'debridonly': False})
                if "?wersja=720p" in html:
                    sources.append({
                        'source': host, 'quality': 'HD', 'language': lang,
                        'url': full_url + "?wersja=720p", 'info': info,
                        'direct': False, 'debridonly': False})
                if "?wersja=480p" in html:
                    sources.append({
                        'source': host, 'quality': 'SD', 'language': lang,
                        'url': full_url + "?wersja=480p", 'info': info,
                        'direct': False, 'debridonly': False})
            except:
                continue
        return sources
    except Exception as e:
        print(e)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Decode base64 ``data-iframe`` payloads into hoster links.

    Fixes:
    - the link was parsed from the whole-page ``result`` instead of the
      current ``item``, so every row yielded the FIRST page link;
    - ``.decode('base64')`` is Python-2-only — replaced with
      ``base64.b64decode`` (the block already imported ``base64`` unused,
      which confirms the intent).
    """
    sources = []
    try:
        import base64
        import json
        result = client.request(url)
        result = result.decode('utf-8')
        h = HTMLParser()
        result = h.unescape(result)
        tabela = client.parseDOM(result, 'div', attrs={'class': "link-to-video"})
        for item in tabela:
            try:
                jezyk = client.parseDOM(item, 'span')[0].replace('<b>', '').replace("</b>", '')
                jezyk, wersja = self.get_lang_by_type(jezyk)
                # Parse THIS item (was `result` — always returned the first link)
                payload = client.parseDOM(item, 'a', ret='data-iframe')[0]
                link = json.loads(base64.b64decode(payload))['src']
                valid, host = source_utils.is_host_valid(link, hostDict)
                sources.append({
                    'source': host, 'quality': 'SD', 'language': jezyk,
                    'url': link, 'info': wersja, 'direct': False,
                    'debridonly': False})
            except:
                pass
        return sources
    except:
        log_exception()
        return sources
def more_rapidvideo(link, hostDict, lang, info):
    """Expand a rapidvideo.com link into its listed mirror embeds.

    Fix: ``except Exception, e`` / ``print e`` are Python-2-only syntax
    (SyntaxError on Python 3) — replaced with ``as e`` / ``print(e)``.
    NOTE(review): ``range(1, numGroups)`` skips match 0 — preserved, as it
    may deliberately skip the page's own embed; confirm before changing.
    """
    sources = []
    if "rapidvideo.com" in link:
        try:
            response = requests.get(link).content
            test = re.findall("""(https:\/\/www.rapidvideo.com\/e\/.*)">""", response)
            numGroups = len(test)
            for i in range(1, numGroups):
                url = test[i]
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_sd_url(url)
                sources.append({
                    'source': host, 'quality': q, 'language': lang,
                    'url': url, 'info': info, 'direct': False,
                    'debridonly': False})
            return sources
        except Exception as e:
            print(e)
    return []
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe embeds from a cloudflare-protected page.

    Fixes: ``print qual`` is Python-2-only syntax → ``print(qual)``;
    both exception paths returned ``None`` — they now return the
    accumulated ``sources`` list, matching what callers expect.
    """
    sources = []
    try:
        scraper = cfscrape.create_scraper()
        r = scraper.get(url).content
        try:
            qual = re.compile('class="quality">(.+?)<').findall(r)
            print(qual)  # was `print qual` (Python 2 only)
            for i in qual:
                if 'HD' in i:
                    quality = '1080p'
                else:
                    quality = 'SD'
            match = re.compile('<iframe src="(.+?)"').findall(r)
            for url in match:
                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({
                    'source': host, 'quality': quality, 'language': 'en',
                    'url': url, 'direct': False, 'debridonly': False})
        except:
            return sources  # was `return` (None)
    except Exception:
        return sources  # was `return` (None)
    return sources
def more_cdapl(link, hostDict, lang, info):
    """Resolve a cda.pl link into direct streams, one per quality button.

    Fix: ``except Exception, e`` / ``print e`` are Python-2-only syntax
    (SyntaxError on Python 3) — replaced with ``as e`` / ``print(e)``.
    """
    sources = []
    if "cda.pl" in link:
        try:
            headers = {
                'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36"
            }
            response = requests.get(link, headers=headers).content
            test = client.parseDOM(response, 'div', attrs={'class': 'wrapqualitybtn'})
            urls = client.parseDOM(test, 'a', ret='href')
            for url in urls:
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_sd_url(url)
                # Each quality page embeds the raw file URL in its JS config.
                direct = re.findall(
                    """file":"(.*)","file_cast""",
                    requests.get(url, headers=headers).content)[0].replace("\\/", "/")
                sources.append({
                    'source': 'CDA', 'quality': q, 'language': lang,
                    'url': direct, 'info': info, 'direct': True,
                    'debridonly': False})
            return sources
        except Exception as e:
            print(e)
    return []
def sources(self, url, hostDict, hostprDict):
    """Extract alternate-player links from dropdown buttons.

    Fix: the ``except`` path returned ``None`` although ``sources`` was
    already initialized — it now returns the list, matching callers.
    NOTE(review): quality is hard-coded '1080p' for every link — presumably
    the site only hosts 1080p rips; confirm against the provider.
    """
    sources = []
    try:
        if url is None:
            return sources
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
        }
        holder = client.request(url, headers=headers)
        Alternates = re.compile(
            '<button class="text-capitalize dropdown-item" value="(.+?)"',
            re.DOTALL).findall(holder)
        for alt_link in Alternates:
            # The button value wraps the real URL behind an `e=` parameter.
            alt_url = alt_link.split("e=")[1]
            valid, host = source_utils.is_host_valid(alt_url, hostDict)
            sources.append({
                'source': host, 'quality': '1080p', 'language': 'en',
                'url': alt_url, 'info': [], 'direct': False,
                'debridonly': False})
        return sources
    except Exception:
        return sources  # was `return` (None) — callers expect a list
def sources(self, url, hostDict, hostprDict):
    """Follow 'Watch This Link' pages and collect their player buttons.

    Fix: both exception paths returned ``None`` — they now return the
    accumulated ``sources`` list (initialized before the ``try`` so the
    fallback is always safe).
    """
    sources = []
    try:
        r = client.request(url)
        try:
            match = re.compile(
                'href="(.+?)" rel="noindex\,nofollow">Watch This Link</a>'
            ).findall(r)
            for url in match:
                r = client.request(url)
                match = re.compile(
                    '<a href="(.+?)://(.+?)/(.+?)"><button class="wpb\_button wpb\_btn\-primary wpb\_regularsize"> Click Here To Play</button> </a>'
                ).findall(r)
                for http, host, url in match:
                    url = '%s://%s/%s' % (http, host, url)
                    info = source_utils.check_url(url)
                    quality = source_utils.check_url(url)
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append({
                            'source': host, 'quality': quality,
                            'language': 'en', 'info': info, 'url': url,
                            'direct': False, 'debridonly': False})
        except:
            return sources  # was `return` (None)
    except Exception:
        return sources  # was `return` (None)
    return sources
def sources(self, url, hostDict, hostprDict):
    """Collect iframe embeds from the first 'playex' player container.

    Fix: the outer ``except`` returned ``None`` — ``sources`` is now
    initialized before the ``try`` and always returned as a list.
    """
    sources = []
    try:
        r = client.request(url)
        try:
            data = client.parseDOM(r, 'div', attrs={'class': 'playex'})
            data = [client.parseDOM(i, 'iframe', ret='src') for i in data if i]
            try:
                for url in data[0]:
                    quality, info = source_utils.get_release_quality(url, None)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host, 'quality': quality, 'language': 'en',
                        'url': url.replace('\/', '/'),  # unescape JS slashes
                        'direct': False, 'debridonly': False})
            except:
                pass
        except:
            pass
        return sources
    except Exception:
        return sources  # was `return` (None) — callers expect a list
def sources(self, url, hostDict, hostprDict):
    """Read the movie-index table: hoster name from the row's <img alt>,
    link from the row's <a href>. Everything is reported as SD.
    """
    sources = []
    try:
        if not url:
            return sources
        page_url = urlparse.urljoin(self.base_link, url)
        html = client.request(page_url)
        html = html.replace('\\"', '"')
        rows = dom_parser.parse_dom(html, 'tr',
                                    attrs={'id': 'tablemoviesindex2'})
        for row in rows:
            try:
                # Hoster is encoded in the icon's alt text, e.g. "openload.co x".
                host = dom_parser.parse_dom(row, 'img', req='alt')[0].attrs['alt']
                host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                host = host.encode('utf-8')
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid:
                    continue
                link = dom_parser.parse_dom(row, 'a', req='href')[0].attrs['href']
                link = client.replaceHTMLCodes(link)
                link = urlparse.urljoin(self.base_link, link)
                link = link.encode('utf-8')
                sources.append({
                    'source': host, 'quality': 'SD', 'language': 'en',
                    'url': link, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Turn pre-collected link dicts into sources, probing some hosters
    for their real quality.

    rapidvideo pages expose ``data-res``; streamango/openload encode the
    release name in their og:title meta tag.
    """
    sources = []
    try:
        if url == None:
            return sources
        for entry in url:
            try:
                lang = entry[u'quality']
                video_link = entry[u'url']
                lang, info = self.get_lang_by_type(lang)
                q = source_utils.check_sd_url(video_link)
                valid, host = source_utils.is_host_valid(video_link, hostDict)
                if 'rapidvideo' in video_link:
                    content = requests.get(video_link, timeout=3,
                                           allow_redirects=True).content
                    q = re.findall("""data-res=\"(.*?)\"""", content)[0]
                    if int(q) == 720:
                        q = 'HD'
                    elif int(q) > 720:
                        q = '1080'
                    elif int(q) < 720:
                        q = 'SD'
                if 'streamango' in video_link or 'openload' in video_link:
                    content = requests.get(video_link, timeout=3,
                                           allow_redirects=True).content
                    q = re.findall("""og:title\" content=\"(.*?)\"""", content)[0]
                    q = source_utils.get_release_quality('', q)[0]
                if valid:
                    if 'ebd' in host.lower():
                        host = 'CDA'
                    sources.append({
                        'source': host, 'quality': q, 'language': lang,
                        'url': video_link, 'info': info, 'direct': False,
                        'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site by title, open the first hit, and return its
    player iframe as a single source (quality from the 'calidad2' badge).
    """
    try:
        sources = []
        if url == None:
            return sources
        year = url['year']
        headers = {'User-Agent': client.randomagent()}
        title = cleantitle.geturl(url['title']).replace('-', '+')
        search_url = urlparse.urljoin(self.base_link, self.search_link % title)
        html = client.request(search_url, headers=headers)
        hit = BeautifulSoup(html, 'html.parser').find('div', {'class': 'item'})
        detail_url = hit.find('a')['href']
        detail_html = client.request(detail_url, headers=headers)
        soup = BeautifulSoup(detail_html, 'html.parser')
        quality = soup.find('span', {'class': 'calidad2'}).text
        stream_url = soup.find('div', {'class': 'movieplay'}).find('iframe')['src']
        if not quality in ['1080p', '720p']:
            quality = 'SD'
        valid, host = source_utils.is_host_valid(stream_url, hostDict)
        sources.append({
            'source': host, 'quality': quality, 'language': 'en',
            'url': stream_url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape full-width player iframes and window.open popup links.

    Fix: both exception paths returned ``None`` — they now return the
    (possibly empty) ``sources`` list, matching every caller's contract.
    """
    sources = []
    # Quality is keyed off the URL path: movies are 720p, shows SD.
    if 'movie' in url:
        quality = '720p'
    if 'show' in url:
        quality = 'SD'
    try:
        r = client.request(url)
        try:
            match = re.compile(
                '<IFRAME style="z-index:9999;WIDTH:100%; " SRC="(.+?)://(.+?)/(.+?)"'
            ).findall(r)
            for http, host, url in match:
                url = '%s://%s/%s' % (http, host, url)
                host = host.replace('www.', '')
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({
                        'source': host, 'quality': quality, 'language': 'en',
                        'url': url, 'direct': False, 'debridonly': False})
            match2 = re.compile(
                'onclick="window.open\("(.+?)://(.+?)/(.+?)"\)').findall(r)
            for http, host, url in match2:
                url = '%s://%s/%s' % (http, host, url)
                host = host.replace('www.', '')
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({
                        'source': host, 'quality': quality, 'language': 'en',
                        'url': url, 'direct': False, 'debridonly': False})
        except:
            return sources  # was `return` (None)
    except Exception:
        return sources  # was `return` (None)
    return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only episode search against the site's RSS feed.

    Builds an 'SxxExx' query, fetches the feed, and extracts
    name/size/link triples from the first matching <item>.
    """
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (data['tvshowtitle'],
                                   int(data['season']), int(data['episode']))
        # Strip characters the search endpoint chokes on.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        search_url = self.search_link % urllib.quote_plus(query)
        feed_url = urlparse.urljoin(self.base_link, search_url)
        feed = client.request(feed_url)
        items = client.parseDOM(feed, 'item')
        title = client.parseDOM(items, 'title')[0]
        if hdlr in title:
            entries = re.findall(
                '<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3',
                items[0], re.DOTALL)
            for name, size, link in entries:
                quality, info = source_utils.get_release_quality(name, link)
                try:
                    # Normalize "GiB"/"MiB" markers and report size in GB.
                    size = re.sub('i', '', size)
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                valid, host = source_utils.is_host_valid(link, hostDict)
                sources.append({
                    'source': host, 'quality': quality, 'language': 'en',
                    'url': link, 'info': info, 'direct': False,
                    'debridonly': True})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Fetch the boxfilm player via its AJAX endpoint, reusing the page's
    cookies, and report a single source ('Z lektorem' span → Lektor info).
    """
    sources = []
    try:
        if url == None:
            return sources
        page_url = urlparse.urljoin(self.base_link, url)
        result = client.request(page_url, redirect=False)
        cookies = client.request(page_url, output='cookie')
        headers = {
            'cookie': cookies,
            'dnt': '1',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.91 Safari/537.36',
            'accept': 'text/html, */*; q=0.01',
            'referer': self.base_link + url,
            'authority': 'www.boxfilm.pl',
            'x-requested-with': 'XMLHttpRequest',
        }
        response = requests.get(
            'https://www.boxfilm.pl/include/player.php',
            headers=headers).content
        section = client.parseDOM(result, 'section',
                                  attrs={'id': 'video_player'})[0]
        link = client.parseDOM(response, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(link, hostDict)
        if not valid:
            return sources
        info = None
        for span in client.parseDOM(section, 'span'):
            if span == 'Z lektorem':  # "with voice-over narrator"
                info = 'Lektor'
        quality = source_utils.check_sd_url(link)
        sources.append({
            'source': host, 'quality': quality, 'language': 'pl',
            'url': link, 'info': info, 'direct': False,
            'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Walk the links table, mapping the Polish quality labels
    Wysoka/Średnia/Niska (high/medium/low) to HD/SD/SD.
    """
    try:
        sources = []
        if url == None:
            return sources
        html = client.request(url)
        table = client.parseDOM(html, 'table',
                                attrs={'class': 'table table-bordered'})[0]
        for row in client.parseDOM(table, 'tr'):
            # Header row carries sort icons — skip it.
            if 'fa fa-sort' in row:
                continue
            lang, info = self.get_lang_by_type(str(row))
            link = str(client.parseDOM(row, 'a', ret='href')[0])
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            if "Wysoka" in row:
                sources.append({
                    'source': host, 'quality': 'HD', 'language': lang,
                    'url': link, 'info': info, 'direct': False,
                    'debridonly': False})
            elif "Średnia" in row:
                sources.append({
                    'source': host, 'quality': 'SD', 'language': lang,
                    'url': link, 'info': info, 'direct': False,
                    'debridonly': False})
            elif "Niska" in row:
                sources.append({
                    'source': host, 'quality': 'SD', 'language': lang,
                    'url': link, 'info': info, 'direct': False,
                    'debridonly': False})
        return sources
    except:
        log_exception()
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Resolve Sezonlukdizi player pages into hoster and gvideo sources.

    Each 'item' div carries a data-id that is POSTed to the video
    endpoint; the returned iframe is either a plain hoster embed or an
    .asp player whose JS config yields labeled gvideo streams.
    """
    sources = []
    try:
        if not url:
            return sources
        page_url = urlparse.urljoin(self.base_link, url)
        html = client.request(page_url)
        html = re.sub(r'[^\x00-\x7F]+', ' ', html)  # strip non-ASCII noise
        items = dom_parser.parse_dom(html, 'div', attrs={'class': 'item'},
                                     req='data-id')
        page_ids = [i.attrs['data-id'] for i in items]
        for page_id in page_ids:
            try:
                endpoint = urlparse.urljoin(self.base_link, self.video_link)
                resp = client.request(endpoint, post={'id': page_id})
                if not resp:
                    continue
                embed = dom_parser.parse_dom(resp, 'iframe', req='src')[0].attrs['src']
                if embed.startswith('//'):
                    embed = 'http:' + embed
                if embed.startswith('/'):
                    embed = urlparse.urljoin(self.base_link, embed)
                valid, host = source_utils.is_host_valid(embed, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': 'HD',
                                    'url': embed, 'provider': 'Sezonlukdizi'})
                if '.asp' not in embed:
                    continue
                player = client.request(embed)
                # Only pages with a captions track carry the stream config.
                if not re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', player):
                    continue
                # label-before-file and file-before-label JS orderings.
                pairs = [(m[0], m[1]) for m in re.findall(
                    '''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''',
                    player, re.DOTALL | re.I)]
                pairs += [(m[1], m[0]) for m in re.findall(
                    '''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
                    player, re.DOTALL | re.I)]
                streams = [(source_utils.label_to_quality(lbl),
                            link.replace('\/', '/')) for lbl, link in pairs]
                streams = [(q, link) for q, link in streams
                           if not link.endswith('.vtt')]  # drop subtitle files
                for quality, link in streams:
                    sources.append({'source': 'gvideo', 'quality': quality,
                                    'url': link, 'provider': 'Sezonlukdizi'})
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR sezonlukidz %s' % e)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Query the seriale.co search endpoint and resolve each hit's frame
    into a hoster link (language/version pairs are zipped positionally).
    """
    try:
        sources = []
        data = {
            'fid_name': url[0],
            'sezon': url[1],
            'odcinek': url[2],
            'title': url[0]
        }
        result = requests.post(
            'http://178.19.110.218/forumserialeco/skrypt/szukaj3.php',
            data=data).content
        result = result.decode('utf-8')
        result = HTMLParser().unescape(result)
        if result:
            versions = re.findall("""wersja: <b>(.*?)<\/b>""", result)
            ids = re.findall("""url='(.*?)'""", result)
            for version, frame_id in zip(versions, ids):
                try:
                    if not frame_id:
                        continue
                    info = self.get_lang_by_type(version)
                    content = client.request(
                        "http://seriale.co/frame.php?src=" + frame_id)
                    video_link = str(
                        client.parseDOM(content, 'iframe', ret='src')[0])
                    valid, host = source_utils.is_host_valid(
                        video_link, hostDict)
                    if valid:
                        sources.append({
                            'source': host, 'quality': 'SD',
                            'language': info[0], 'url': video_link,
                            'info': info[1], 'direct': False,
                            'debridonly': False})
                    else:
                        continue
                except:
                    continue
        return sources
    except:
        log_exception()
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search by title/year via the WP JSON-ish listing, then follow two
    levels of iframes to the final hoster URL (reported as 1080p).
    """
    sources = []
    try:
        if url == None:
            return
        params = urlparse.parse_qs(url)
        params = dict((k, params[k][0]) for k in params)
        title = params['title'].replace(':', ' ').lower()
        year = params['year']
        search_id = title.lower()
        start_url = self.search_link % (self.base_link,
                                        search_id.replace(' ', '%20'))
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
        }
        html = client.request(start_url, headers=headers)
        hits = re.compile(
            '"post","link":"(.+?)","title".+?"rendered":"(.+?)"',
            re.DOTALL).findall(html)
        for link, name in hits:
            link = link.replace('\\', '')  # un-escape JSON slashes
            if title.lower() in name.lower():
                if year in name:
                    holder = client.request(link, headers=headers)
                    first_frame = re.compile('<iframe src="(.+?)"',
                                             re.DOTALL).findall(holder)[0]
                    inner = client.request(first_frame, headers=headers)
                    final_url = re.compile('<iframe src="(.+?)"',
                                           re.DOTALL).findall(inner)[0]
                    valid, host = source_utils.is_host_valid(
                        final_url, hostDict)
                    sources.append({
                        'source': host, 'quality': '1080p',
                        'language': 'en', 'url': final_url, 'info': [],
                        'direct': False, 'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        # print('1080PMovies - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Report every iframe embed on the page as a source.

    Fix: the ``except`` path returned ``None``; ``sources`` is now
    initialized before the ``try`` and returned on failure, matching the
    list contract every caller relies on.
    """
    sources = []
    try:
        r = client.request(url)
        r = re.findall('<iframe src="(.+?)"', r)
        for url in r:
            valid, host = source_utils.is_host_valid(url, hostDict)
            quality = source_utils.check_sd_url(url)
            sources.append({
                'source': host, 'quality': quality, 'language': 'en',
                'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources  # was `return` (None) — callers expect a list
def sources(self, url, hostDict, hostprDict):
    """Expand found links, resolving mehliz and ok.ru specially into
    direct multi-quality streams; everything else is an SD hoster link.
    """
    sources = []
    try:
        if not url:
            return sources
        found = self.links_found(url)
        hostdict = hostDict + hostprDict
        for link in found:
            try:
                valid, host = source_utils.is_host_valid(link, hostdict)
                if 'mehliz' in link:
                    host = 'MZ'
                    direct = True
                    streams = (self.mz_server(link))
                elif 'ok.ru' in link:
                    host = 'vk'
                    direct = True
                    streams = (directstream.odnoklassniki(link))
                else:
                    direct = False
                    streams = [{'quality': 'SD', 'url': link}]
                for stream in streams:
                    sources.append({
                        'source': host,
                        'quality': stream['quality'],
                        'language': 'en',
                        'url': stream['url'],
                        'direct': direct,
                        'debridonly': False,
                    })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Parse the #downloads table: hoster from the icon URL's trailing
    '=' segment, link from the anchor, HD if column 2 says 'Wysoka'
    (Polish for 'high'), language/info from column 3.
    """
    sources = []
    try:
        if url == None:
            return sources
        html = client.request(url)
        table = client.parseDOM(html, 'div', attrs={'id': 'downloads'})[0]
        for row in client.parseDOM(table, 'tr'):
            try:
                cells = client.parseDOM(row, 'td')
                icon = client.parseDOM(cells, 'img', ret='src')[0]
                host_name = icon.rpartition('=')[-1]
                link = client.parseDOM(cells, 'a', ret='href')[0]
                valid, host_name = source_utils.is_host_valid(host_name, hostDict)
                if not valid:
                    continue
                quality = 'HD' if 'Wysoka' in cells[2] else 'SD'
                lang, info = self.get_lang_by_type(cells[3])
                sources.append({
                    'source': host_name, 'quality': quality,
                    'language': lang, 'url': link, 'info': info,
                    'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scan 'll-item' rows for (href, hoster-name, notes) triples; the
    hoster is validated by its anchor text and quality read from the
    'notes' div.
    """
    try:
        sources = []
        if url == None:
            return sources
        html = client.request(url)
        rows = dom_parser2.parse_dom(html, 'div', {'class': 'll-item'})
        rows = [(dom_parser2.parse_dom(i, 'a', req='href'),
                 dom_parser2.parse_dom(i, 'div', {'class': 'notes'}))
                for i in rows if i]
        rows = [(i[0][0].attrs['href'], i[0][0].content,
                 i[1][0].content if i[1] else 'None') for i in rows]
        for href, label, notes in rows:
            try:
                link = client.replaceHTMLCodes(href)
                link = link.encode('utf-8')
                valid, host = source_utils.is_host_valid(label, hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                info = []
                quality, info = source_utils.get_release_quality(notes, notes)
                info = ' | '.join(info)
                sources.append({
                    'source': host, 'quality': quality, 'language': 'en',
                    'url': link, 'info': info, 'direct': False,
                    'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources