def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        # First pass: resolve the direct search result, if the site returned one.
        try:
            search_url = url['url']
            post = url['post']
            referer = urlparse.urljoin(self.film_web, post['urlstrony'])
            result = client.request(search_url, post=post, referer=referer)
            if not result.startswith('http'):
                return sources

            valid, host = source_utils.is_host_valid(result, hostDict)
            q = source_utils.check_sd_url(result)

            # Polish audio variants: 'Lektor' (voice-over) or 'Napisy' (subtitles).
            info = ''
            if 'lektor' in result:
                info = 'Lektor'
            if 'napisy' in result:
                info = 'Napisy'

            first_found = {'source': host, 'quality': q, 'language': 'pl',
                           'url': result, 'info': info, 'direct': False,
                           'debridonly': False}
            first_found['info'] = self.get_info_from_others(sources)
            sources.append(first_found)
        except:
            pass

        # Second pass: ask the site for additional mirrors.
        search_more_post = url['more']
        result = client.request(self.base_link2, post=search_more_post)
        links = client.parseDOM(result, 'div', ret='data')
        wersja = client.parseDOM(result, 'div', attrs={'class': 'wersja'})

        for link, version in zip(links, wersja):
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            q = source_utils.check_sd_url(link)
            sources.append({'source': host, 'quality': q, 'language': 'pl',
                            'url': link, 'info': version, 'direct': False,
                            'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        r = requests.get(url).content

        # The last quality badge on the page wins; default to SD if none found.
        quality = 'SD'
        for i in re.compile('class="quality">(.+?)<').findall(r):
            if '1080' in i:
                quality = '1080p'
            elif '720' in i:
                quality = '720p'
            else:
                quality = 'SD'

        for t in client.parseDOM(r, 'div', attrs={'class': 'pa-main anime_muti_link'}):
            for url in re.findall('<li class=".+?" data-video="(.+?)"', t):
                if 'vidcloud' in url:
                    # vidcloud pages embed the real hoster links one level deeper.
                    page = requests.get('https:' + url).content
                    for link in re.findall('li data-status=".+?" data-video="(.+?)"', page):
                        if 'vidcloud' in link:
                            continue
                        valid, host = source_utils.is_host_valid(link, hostDict)
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'url': link,
                                        'direct': False, 'debridonly': False})
                    continue

                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        r = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
        info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])

        r = client.parseDOM(r, 'div', attrs={'class': 'tab-pane active'})[0]
        r = client.parseDOM(r, 'script')[0]

        # The player URL is hidden in an obfuscated script; shwp() decodes it.
        script = r.split('"')[1]
        decoded = self.shwp(script)

        link = client.parseDOM(decoded, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(link, hostDict)
        if not valid:
            return sources
        q = source_utils.check_sd_url(link)
        sources.append({'source': host, 'quality': q, 'language': 'pl',
                        'url': link, 'info': info, 'direct': False,
                        'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        hostDict = hostprDict + hostDict
        r = requests.get(url).content
        try:
            # Page-level quality badge: treat anything marked HD as 1080p.
            quality = 'SD'
            for i in re.compile('class="quality">(.+?)<').findall(r):
                quality = '1080p' if 'HD' in i else 'SD'

            for url in re.compile('<iframe.+?src="(.+?)"').findall(r):
                if 'youtube' in url:
                    continue
                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        except:
            return sources
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        r = client.request(urlparse.urljoin(self.base_link, url))

        # The embed markup is stored as an escaped string in the 'vicode' variable.
        r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
        r = dom_parser.parse_dom(r, 'iframe', req='src')

        for i in [i.attrs['src'] for i in r]:
            valid, host = source_utils.is_host_valid(i, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': 'SD', 'language': 'de',
                            'url': i, 'direct': False, 'debridonly': False,
                            'checkquality': True})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        j = self.__get_json(url)
        j = [(i['hoster'].lower(), i['id']) for i in j.get('links', [])]

        # A trailing 'hd' on the hoster name marks the HD mirror.
        j = [(re.sub('hd$', '', i[0]), i[1],
              'HD' if i[0].endswith('hd') else 'SD') for i in j]

        for hoster, url, quality in j:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': quality, 'language': 'de',
                            'url': 'watch/%s' % url, 'direct': False,
                            'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        url = url.replace('/en/', '/de/')

        video_id = re.search('(?<=\/)(\d*?)(?=-)', url).group()
        if not video_id:
            return sources

        # Load the player page listing the hosters for this video.
        query = self.get_player % video_id
        query = urlparse.urljoin(self.base_link, query)
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'le-server'})

        # One block per hoster.
        for i in r:
            hoster = dom_parser.parse_dom(i, 'div', attrs={'class': 'les-title'})
            hoster = dom_parser.parse_dom(hoster, 'strong')
            hoster = hoster[0][1]
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue

            # One link per episode/quality variant.
            for link in dom_parser.parse_dom(i, 'a', attrs={'class': 'ep-item'}):
                if '1080p' in link[0]['title']:
                    quality = '1080p'
                elif 'HD' in link[0]['title']:
                    quality = 'HD'
                else:
                    quality = 'SD'

                url = link[0]['id']
                if not url:
                    continue
                sources.append({'source': hoster, 'quality': quality,
                                'language': 'de', 'url': url, 'direct': False,
                                'debridonly': False, 'checkquality': True})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = data.get('url')
        episode = int(data.get('episode', 1))

        r = client.request(urlparse.urljoin(self.base_link, url))
        # 'gerdub' holds the German-dubbed links, 'gersub' the subbed ones.
        r = {'': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}),
             'subbed': dom_parser.parse_dom(r, 'div', attrs={'id': 'gersub'})}

        for info, data in r.iteritems():
            data = dom_parser.parse_dom(data, 'tr')
            data = [dom_parser.parse_dom(i, 'a', req='href') for i in data
                    if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
            data = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src'))
                    for i in data for link in i]
            data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
            # Derive the hoster name from the icon filename.
            data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
            data = [(i[0], i[1][0]) for i in data if i[1]]

            for link, hoster in data:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de',
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        urldata = urlparse.parse_qs(url)
        urldata = dict((i, urldata[i][0]) for i in urldata)

        title = urldata['title'].replace(':', ' ').lower()
        year = urldata['year']
        search_id = title.lower()

        start_url = urlparse.urljoin(
            self.base_link,
            self.search_link % (search_id.replace(' ', '+') + '+' + year))
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        html = client.request(start_url, headers=headers)

        links = re.compile('a href="(.+?)" title="(.+?)"', re.DOTALL).findall(html)
        for link, name in links:
            # Only follow results matching both title and year.
            if title.lower() not in name.lower() or year not in name:
                continue
            holder = client.request(link, headers=headers)
            alternates = re.compile('<button class="text-capitalize dropdown-item" value="(.+?)"', re.DOTALL).findall(holder)
            for alt_link in alternates:
                alt_url = alt_link.split('e=')[1]
                valid, host = source_utils.is_host_valid(alt_url, hostDict)
                sources.append({'source': host, 'quality': '1080p', 'language': 'en',
                                'url': alt_url, 'info': [], 'direct': False,
                                'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('1080PMovies - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        j = self.__get_json(data['url'])
        if not j:
            return sources

        sid = data['sid'] if 'sid' in data else j.keys()[0]
        # Key '1' holds the expected number of parts for a complete stream.
        pcnt = int(j[sid]['1']) if '1' in j[sid] else 1

        for jHoster in j[sid]['links']:
            jLinks = [i[3] for i in j[sid]['links'][jHoster] if i[5] == 'stream']
            if len(jLinks) < pcnt:
                continue  # incomplete multi-part stream

            h_url = jLinks[0]
            valid, hoster = source_utils.is_host_valid(h_url, hostDict)
            if not valid:
                continue

            # Join multi-part streams with Kodi's stack:// protocol.
            h_url = h_url if pcnt == 1 else 'stack://' + ' , '.join(jLinks)
            sources.append({'source': hoster, 'quality': 'SD', 'language': 'de',
                            'info': '' if pcnt == 1 else 'multi-part',
                            'url': h_url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
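# A minimal standalone sketch of the multi-part join used in the scraper
# above (the helper name is illustrative, not part of the scraper): Kodi's
# stack:// protocol chains file parts with ' , ' so they play as one
# continuous stream.
def build_stream_url(part_links):
    # Single-part streams pass through unchanged.
    if len(part_links) == 1:
        return part_links[0]
    return 'stack://' + ' , '.join(part_links)

# build_stream_url(['http://h/a.mp4', 'http://h/b.mp4'])
# -> 'stack://http://h/a.mp4 , http://h/b.mp4'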
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        result = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
        section = client.parseDOM(result, 'section', attrs={'id': 'video_player'})[0]

        link = client.parseDOM(section, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(link, hostDict)
        if not valid:
            return sources

        # 'Z lektorem' marks a Polish voice-over track.
        info = None
        for span in client.parseDOM(section, 'span'):
            if span == 'Z lektorem':
                info = 'Lektor'

        q = source_utils.check_sd_url(link)
        sources.append({'source': host, 'quality': q, 'language': 'pl',
                        'url': link, 'info': info, 'direct': False,
                        'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data.get('url'))
        season = data.get('season')
        episode = data.get('episode')

        if season and episode:
            # Episode links are fetched via an XHR endpoint keyed by IMDb id.
            r = urllib.urlencode({'imdbid': data['imdb'], 'language': 'de',
                                  'season': season, 'episode': episode})
            r = client.request(urlparse.urljoin(self.base_link, self.hoster_link),
                               XHR=True, post=r)
        else:
            r = client.request(url)

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'linkbox'})[0].content
        r = re.compile('(<a.+?/a>)', re.DOTALL).findall(r)
        r = [(dom_parser.parse_dom(i, 'a', req='href'),
              dom_parser.parse_dom(i, 'img', attrs={'class': re.compile('.*linkbutton')}, req='class'))
             for i in r]
        r = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower()) for i in r if i[0] and i[1]]
        # Buttons whose class starts with 'hd' point at HD streams.
        r = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD') for i in r]

        for url, quality in r:
            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': quality, 'language': 'de',
                            'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)

        quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split('&nbsp;')[0]
        quality, info = source_utils.get_release_quality(quality)

        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
        r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}),
              dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id'))
             for i in r]
        # Strip the ' hd' suffix from the hoster name; collect the part ids.
        r = [(re.sub(' hd$', '', i[0][0].content.lower()),
              [x.attrs['data-id'] for x in i[1]]) for i in r if i[0] and i[1]]

        for hoster, ids in r:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': quality, 'language': 'de',
                            'info': ' | '.join(info + ['' if len(ids) == 1 else 'multi-part']),
                            'url': ids, 'direct': False, 'debridonly': False,
                            'checkquality': True})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)

        # Retry up to three times; the site can be slow to respond.
        for i in range(3):
            result = client.request(url, timeout=10)
            if result is not None:
                break

        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content

        links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch', re.DOTALL).findall(result)
        for link in links[:5]:
            try:
                url2 = urlparse.urljoin(self.base_link, link[1])
                for i in range(2):
                    result2 = client.request(url2, timeout=3)
                    if result2 is not None:
                        break

                r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                valid, hoster = source_utils.is_host_valid(r, hostDict)
                if not valid:
                    continue

                urls, host, direct = source_utils.check_directstreams(r, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'],
                                    'language': 'en', 'url': x['url'],
                                    'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r)
        r = client.parseDOM(r, 'item')

        title = client.parseDOM(r, 'title')[0]
        if hdlr in title:
            r = re.findall('<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3', r[0], re.DOTALL)
            for name, size, url in r:
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    # Normalise the reported file size to GB.
                    size = re.sub('i', '', size)
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)

                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'info': info, 'direct': False,
                                'debridonly': True})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('SeriesCR - Exception: \n' + str(failure))
        return sources
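# A standalone sketch (function name illustrative) of the size normalisation
# used above: strip the 'i' from binary units, then treat anything that is
# not GB as MB and divide by 1024.
import re

def normalise_size(size):
    size = re.sub('i', '', size)              # 'GiB' -> 'GB'
    div = 1 if size.endswith('GB') else 1024  # assume MB otherwise
    value = float(re.sub('[^0-9.]', '', size)) / div
    return '%.2f GB' % value

# normalise_size('1.4 GiB') -> '1.40 GB'
# normalise_size('700 MB')  -> '0.68 GB'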
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data.get('url', ''))
        imdb = data.get('imdb')
        season = data.get('season')
        episode = data.get('episode')

        if season and episode and imdb:
            r = urllib.urlencode({'val': 's%se%s' % (season, episode), 'IMDB': imdb})
            r = client.request(urlparse.urljoin(self.base_link, self.episode_link),
                               XHR=True, post=r)
        else:
            r = client.request(url)

        # Only keep the German language block.
        l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
        l = dom_parser.parse_dom(l, 'option', req='id')
        r = [dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']}) for i in l
             if i.attrs['id'] == 'deutsch']
        r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id')) for i in r]
        r = [(id.attrs['id'], dom_parser.parse_dom(content, 'div', attrs={'id': id.attrs['id']}))
             for content, ids in r for id in ids]
        # Map 'hd720'-style option ids to a quality label.
        r = [(re.findall('hd(\d{3,4})', i[0]), dom_parser.parse_dom(i[1], 'a', req='href'))
             for i in r if i[1]]
        r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]]) for i in r if i[1]]
        r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

        for quality, urls in r:
            for link in urls:
                try:
                    # Links may arrive base64-encoded in the 'm' query parameter.
                    data = urlparse.parse_qs(urlparse.urlparse(link).query,
                                             keep_blank_values=True)
                    if 'm' in data:
                        data = data.get('m')[0]
                        link = base64.b64decode(data)
                    link = link.strip()
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    if not valid:
                        continue
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'de', 'url': link, 'direct': False,
                                    'debridonly': False, 'checkquality': True})
                except:
                    pass
        return sources
    except:
        return sources
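# The 'm' parameter decode in isolation, as a self-contained sketch; the
# URL and its payload are made up for illustration.
import base64
import urlparse

query = urlparse.urlparse('http://example.org/out?m=aHR0cDovL2hvc3Rlci5leGFtcGxlLw==').query
params = urlparse.parse_qs(query, keep_blank_values=True)
link = base64.b64decode(params['m'][0]).strip()
# link == 'http://hoster.example/'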
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'ko-bind'})
        r = dom_parser.parse_dom(r, 'table', attrs={'class': 'links-table'})
        r = dom_parser.parse_dom(r, 'tbody')
        r = dom_parser.parse_dom(r, 'tr')

        for i in r:
            quality = 'HD' if re.search('(?<=<td>)(HD)(?=</td>)', i[1]) else 'SD'

            x = dom_parser.parse_dom(i, 'td', attrs={'class': 'name'}, req='data-bind')
            hoster = re.search('(?<=>).*$', x[0][1]).group().lower()
            url = re.search("http(.*?)(?=')", x[0][0]['data-bind']).group()

            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': quality, 'language': 'de',
                            'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        id = data.get('id')
        season = data.get('season')
        episode = data.get('episode')

        if season and episode:
            r = client.request(urlparse.urljoin(self.base_link, self.get_episodes),
                               post={'series_id': id, 'mlang': 'de',
                                     'season': season, 'episode': episode})
            r = json.loads(r).get('episode_links', [])
            r = [([i.get('id')], i.get('hostername')) for i in r]
        else:
            data.update({'lang': 'de'})
            r = client.request(urlparse.urljoin(self.base_link, self.get_links), post=data)
            r = json.loads(r).get('links', [])
            r = [(i.get('ids'), i.get('hoster')) for i in r]

        for link_ids, hoster in r:
            valid, host = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            for link_id in link_ids:
                sources.append({'source': host, 'quality': 'SD', 'language': 'de',
                                'url': self.out_link % (link_id, hoster),
                                'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'downloads'})
        r = dom_parser.parse_dom(r, 'table')
        r = dom_parser.parse_dom(r, 'tbody')
        r = dom_parser.parse_dom(r, 'tr')

        for i in r:
            # Only German-language rows are of interest.
            if not re.search('German', i[1]):
                continue
            hoster = re.search('(?<=domain=)(.*?)(?=\")', i[1]).group().lower()
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue

            link = re.search('(?<=links/)(.*?)(?=/)', i[1]).group()
            quality = 'HD' if re.search('<td>HD</td>', i[1]) else 'SD'

            url = self.__get_link(link)
            sources.append({'source': hoster, 'quality': quality, 'language': 'de',
                            'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)

        # Follow the embedded player through to the external link list.
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'player'})
        r = dom_parser.parse_dom(r, 'iframe', req='src')
        r = client.request(r[0][0]['src'])
        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'play_container'}, req='href')
        r = client.request(r[0][0]['href'])

        url = self.get_link % (
            re.search('(?<=var id = \")(.*?)(?=\")', r).group(),
            re.search('(?<=var links = \")(.*?)(?=\")', r).group())

        r = client.request(url)
        r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'articleList'})
        r = dom_parser.parse_dom(r, 'a')

        for i in r:
            if 'http' in i[0]['href']:
                link = i[0]['href']
            elif 'http' in i[0]['onclick']:
                link = re.search('http(.*?)(?=\")', i[0]['onclick']).group()
            else:
                continue

            valid, hoster = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': 'SD', 'language': 'de',
                            'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data.get('url', ''))
        episode = data.get('episode')

        r = client.request(url)
        r = r.replace('\n', ' ')
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'fullstory'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'row'})

        if episode:
            # Pick the option matching the requested episode number.
            r = dom_parser.parse_dom(r, 'select', attrs={'id': 'series'})
            r = dom_parser.parse_dom(r, 'option', req='value')
            r = [(i.attrs['value'], i.content) for i in r]
            r = [(i[0], re.findall('\s+(\d+)\s+episode', i[1], re.IGNORECASE)) for i in r]
            r = [i[0].strip() for i in r if i[1] and episode in i[1]]
        else:
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'inner'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [i.attrs['href'].strip() for i in r]

        for link in r:
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': 'SD', 'language': 'de',
                            'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        # Query the link API by numeric IMDb id.
        data = urllib.urlencode({'ID': re.sub('[^0-9]', '', str(data['imdb'])),
                                 'lang': 'de'})
        data = client.request(urlparse.urljoin(self.base_link, self.request_link),
                              post=data, XHR=True)
        data = json.loads(data)
        data = [(i, data['links'][i]) for i in data.get('links', {})]
        data = [(i[0], i[1][0], i[1][1:]) for i in data]

        for hoster, quality, links in data:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            for link in links:
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de',
                                'url': self.out_link % link, 'direct': False,
                                'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = r.replace('\\"', '"')

        links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'})
        for i in links:
            try:
                # The hoster name is taken from the icon's alt text.
                host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
                host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                host = host.encode('utf-8')
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid:
                    continue

                url = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
                url = client.replaceHTMLCodes(url)
                url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': 'SD', 'language': 'de',
                                'url': url, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'hosterSiteVideo'})
        # data-lang-key 1 = German dub, 3 = German-subbed original.
        r = dom_parser.parse_dom(r, 'li', attrs={'data-lang-key': re.compile('[1|3]')})
        r = [(dom_parser.parse_dom(i, 'a', req='href'),
              dom_parser.parse_dom(i, 'h4'),
              'subbed' if i.attrs['data-lang-key'] == '3' else '') for i in r]
        r = [(i[0][0].attrs['href'], i[1][0].content.lower(), i[2]) for i in r
             if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], i[1], re.findall('(.+?)\s*<br\s*/?>(.+?)$', i[1], re.DOTALL), i[2]) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
              i[2][0][1] if len(i[2]) > 0 else '', i[3]) for i in r]
        r = [(i[0], i[1], 'HD' if 'hosterhdvideo' in i[2] else 'SD', i[3]) for i in r]

        for link, host, quality, info in r:
            valid, host = source_utils.is_host_valid(host, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': quality, 'language': 'de',
                            'url': link, 'info': info, 'direct': False,
                            'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'mainmenu'})
        r = dom_parser.parse_dom(r, 'li')

        for i in r:
            # Follow each menu entry to its hoster selection page.
            i = dom_parser.parse_dom(i, 'a')
            i = i[0][0]['href']
            i = client.request(i)
            i = dom_parser.parse_dom(i, 'select', attrs={'id': 'selecthost'})
            i = dom_parser.parse_dom(i, 'option')

            for x in i:
                hoster = re.search('^\S*', x[1]).group().lower()
                url = x[0]['value']
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de',
                                'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        result = client.request(url)
        rows = client.parseDOM(result, 'tr', attrs={'data-id': '.*?'})

        for row in rows:
            try:
                link = client.parseDOM(row, 'td', attrs={'class': 'name hover'},
                                       ret='data-bind')[0]
                link = re.findall(r"'(.*?)'", link, re.DOTALL)[0]
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue

                # 'Wysoka' (Polish for 'high') marks HD quality.
                found_quality = client.parseDOM(row, 'td')[1]
                q = 'HD' if 'Wysoka' in found_quality else 'SD'

                type_desc = client.parseDOM(row, 'font')[0]
                lang, info = self.get_lang_by_type(type_desc)

                sources.append({'source': host, 'quality': q, 'language': lang,
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'TpRwCont'})
        r = dom_parser.parse_dom(r, 'main')

        # Pair each player tab label with its player markup.
        options1 = dom_parser.parse_dom(r, 'li', attrs={'class': 'STPb'})
        options2 = dom_parser.parse_dom(r, 'div', attrs={'class': 'TPlayerTb'})

        for o1, o2 in itertools.izip(options1, options2):
            if 'trailer' in o1[1].lower():
                continue
            elif '1080p' in o1[1].lower():
                quality = '1080p'
            elif '720p' in o1[1].lower():
                quality = 'HD'
            else:
                quality = 'SD'

            s = '(?<=src=\")(.*?)(?=\")'
            match = re.search(s, o2[1])
            if match is None:
                # The player markup may arrive HTML-escaped; unescape and retry.
                match = re.search(s, HTMLParser.HTMLParser().unescape(o2[1]))
            url = match.group()

            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': quality, 'language': 'de',
                            'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
        r = dom_parser.parse_dom(r, 'tbody')
        r = dom_parser.parse_dom(r, 'tr')

        for i in r:
            # Skip rows without a hoster label.
            hoster = re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1])
            if not hoster:
                continue
            hoster = hoster.group().strip()

            link = re.search('(?<=href=\")(.*?)(?=\")', i[1]).group()
            rel = re.search('(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)', i[1]).group().strip()
            quality, info = source_utils.get_release_quality(rel)
            if not quality:
                quality = 'SD'

            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': quality, 'language': 'de',
                            'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        # Debrid-only scraper: bail out early when no debrid service is enabled.
        if debrid.status() is False:
            raise Exception()

        r = self.scraper.get(url).content
        for url in re.findall('<iframe src="(.+?)"', r):
            valid, host = source_utils.is_host_valid(url, hostDict)
            quality = source_utils.check_sd_url(url)
            sources.append({'source': host, 'quality': quality, 'language': 'en',
                            'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        result = client.request(url)
        result = client.parseDOM(result, 'div', attrs={'id': 'downloads'})[0]
        rows = client.parseDOM(result, 'tr')

        for row in rows:
            try:
                cols = client.parseDOM(row, 'td')
                # The hoster name is encoded in the icon URL.
                host = client.parseDOM(cols[0], 'img', ret='src')[0]
                host = host.rpartition('=')[-1]
                link = client.parseDOM(cols[0], 'a', ret='href')[0]
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid:
                    continue

                # 'Wysoka' (Polish for 'high') marks HD quality.
                q = 'HD' if 'Wysoka' in cols[1] else 'SD'
                lang, info = self.get_lang_by_type(cols[2])

                sources.append({'source': host, 'quality': q, 'language': lang,
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
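# Every scraper above implements the same contract: sources(url, hostDict,
# hostprDict) returns a list of dicts with the keys 'source', 'quality',
# 'language', 'url', 'direct' and 'debridonly' (plus optional 'info' and
# 'checkquality'). A minimal sketch of that shared shape, assuming the
# client/source_utils helpers used throughout:
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        html = client.request(urlparse.urljoin(self.base_link, url))
        for link in client.parseDOM(html, 'iframe', ret='src'):
            # is_host_valid checks the link against the enabled hoster list
            # and returns the normalised hoster name.
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': host,
                            'quality': source_utils.check_sd_url(link),
                            'language': 'en',
                            'url': link,
                            'direct': False,       # link still needs resolving
                            'debridonly': False})  # usable without debrid
        return sources
    except:
        return sources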