def sources(self, url, hostDict, hostprDict):
    """Resolve direct CDN streams for a media id.

    Fetches the streamer base URL from the site's config endpoint, downloads
    the HLS master playlist for *url* (a media id fragment), and emits one
    'CDN' source per RESOLUTION entry found in the playlist.

    :param url: media id fragment appended to the streamer URL (falsy -> no-op)
    :param hostDict: unused here (direct CDN links only)
    :param hostprDict: unused
    :return: list of source dicts (empty on any failure)
    """
    sources = []
    try:
        if not url: return sources
        # Config endpoint returns JSON containing the streamer base URL.
        r = client.request(urlparse.urljoin(self.base_link, self.conf_link), XHR=True)
        r = json.loads(r).get('streamer')
        # Master playlist: one (height, stream-url) pair per variant line.
        r = client.request(r + '%s.mp4/master.m3u8' % url, XHR=True)
        r = re.findall('RESOLUTION\s*=\s*\d+x(\d+).*?\n(http.*?)(?:\n|$)', r, re.IGNORECASE)
        # Map the pixel height to the add-on's quality label.
        r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]
        for quality, link in r:
            sources.append({
                'source': 'CDN',
                'quality': quality,
                'language': 'de',
                'url': link,
                'direct': True,
                'debridonly': False
            })
        return sources
    except:
        # Best-effort scraper: swallow all errors and return what we have.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect German-language hoster links from the title/episode page.

    *url* is a query string produced by this scraper's earlier stages
    (keys: url, imdb, season, episode). Episodes are fetched via an XHR
    POST; movies via a plain GET. Only the 'deutsch' language tab is
    scraped; hoster links may be base64-encoded in an 'm' query parameter.

    :return: list of hoster source dicts (empty on any failure)
    """
    sources = []
    try:
        if not url: return sources
        # parse_qs yields lists; flatten to single values ('' when empty).
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = urlparse.urljoin(self.base_link, data.get('url', ''))
        imdb = data.get('imdb')
        season = data.get('season')
        episode = data.get('episode')
        if season and episode and imdb:
            # Episode pages are served through an XHR endpoint keyed by sXeY + IMDB id.
            r = urllib.urlencode({'val': 's%se%s' % (season, episode), 'IMDB': imdb})
            r = client.request(urlparse.urljoin(self.base_link, self.episode_link), XHR=True, post=r)
        else:
            r = client.request(url)
        # Language selector: keep only the 'deutsch' option's content div.
        l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
        l = dom_parser.parse_dom(l, 'option', req='id')
        r = [(dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']})) for i in l if i.attrs['id'] == 'deutsch']
        # Inside that div, each <option id="hdNNN..."> names a quality-specific div.
        r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id')) for i in r]
        r = [(id.attrs['id'], dom_parser.parse_dom(content, 'div', attrs={'id': id.attrs['id']})) for content, ids in r for id in ids]
        # Extract the numeric quality from the id ('hd720' -> '720') and all links.
        r = [(re.findall('hd(\d{3,4})', i[0]), dom_parser.parse_dom(i[1], 'a', req='href')) for i in r if i[1]]
        r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]]) for i in r if i[1]]
        r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]
        for quality, urls in r:
            for link in urls:
                try:
                    # Real hoster URL may be base64-encoded in the 'm' query parameter.
                    data = urlparse.parse_qs(urlparse.urlparse(link).query, keep_blank_values=True)
                    if 'm' in data:
                        data = data.get('m')[0]
                        link = base64.b64decode(data)
                    link = link.strip()
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    if not valid: continue
                    sources.append({'source': host, 'quality': quality, 'language': 'de',
                                    'url': link, 'direct': False, 'debridonly': False,
                                    'checkquality': True})
                except:
                    # Skip individual broken links without aborting the batch.
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape direct streams from a base64-encoded player payload.

    Extracts (id, episode) from *url*, fetches the player endpoint with
    extended output (to capture cookies/headers), base64-decodes the body,
    then pulls (file, label) pairs with two regexes covering both attribute
    orders. Google-video links are tagged via directstream; everything else
    becomes a 'CDN' source with the request headers appended to the URL.

    :return: list of direct source dicts (empty on any failure)
    """
    sources = []
    try:
        if not url: return sources
        # url looks like '<id>-stream' or '<id>-stream?episode=<n>'; default episode 1.
        r = re.findall('(\d+)-stream(?:\?episode=(\d+))?', url)
        r = [(i[0], i[1] if i[1] else '1') for i in r][0]
        # output='extended' -> (body, ..., response-headers, request-headers).
        r = client.request(urlparse.urljoin(self.base_link, self.get_link % r), output='extended')
        headers = r[3]
        headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
        r = r[0]
        # Body is base64 without padding; restore it before decoding.
        r += '=' * (-len(r) % 4)
        r = base64.b64decode(r)
        # Two regexes: label-before-file and file-before-label, normalized to (file, label).
        i = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', r, re.DOTALL)]
        i += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', r, re.DOTALL)]
        # Unescape JSON slashes and map labels to quality tags.
        r = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]
        for u, q in r:
            try:
                tag = directstream.googletag(u)
                if tag:
                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'),
                                    'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                else:
                    # Non-google links need the scraped headers piped through Kodi's '|' syntax.
                    sources.append({'source': 'CDN', 'quality': q, 'language': 'de',
                                    'url': u + '|%s' % urllib.urlencode(headers),
                                    'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the player tab bar for German streams and hoster links.

    Finds the player <nav> tabs, keeps only tabs whose flag icon is 'de',
    then collects link/file URLs, metaframe iframes and <source> tags from
    those tabs. On-site links are fetched and jsunpack'd to recover direct
    (gvideo/CDN) files; off-site links are validated as hoster sources.

    :return: list of source dicts (empty on any failure)
    """
    sources = []
    try:
        if not url: return sources
        url = urlparse.urljoin(self.base_link, url)
        # output='extended' captures the response cookie for follow-up requests.
        r = client.request(url, output='extended')
        headers = r[3]
        headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
        r = r[0]
        # Player tab bar: each <li> pairs an anchor (#tab-id) with a flag image.
        rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
        rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
        rels = dom_parser.parse_dom(rels, 'li')
        rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'),
                 dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
        # href starts with '#'; strip it to get the tab's div id. Flag code from the icon path.
        rels = [(i[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
        rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']
        r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
        # Candidate links: inline link/file JS values, metaframe iframes, <source> tags.
        links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
        links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
        links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]
        for i in links:
            try:
                # Strip BBCode-style markers and HTML entities from the URL.
                i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                i = client.replaceHTMLCodes(i)
                if '/play/' in i: i = urlparse.urljoin(self.base_link, i)
                if self.domains[0] in i:
                    # On-site player page: fetch it and unpack any obfuscated JS
                    # (base64-wrapped and eval(function...) packed variants).
                    i = client.request(i, headers=headers, referer=url)
                    for x in re.findall('''\(["']?(.*)["']?\)''', i):
                        try:
                            i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x))).replace('\\', '')
                        except:
                            pass
                    for x in re.findall('(eval\s*\(function.*?)</script>', i, re.DOTALL):
                        try:
                            i += jsunpack.unpack(x).replace('\\', '')
                        except:
                            pass
                    # file/label pairs from the unpacked player config; drop placeholder clips.
                    links = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                    links = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in links if '/no-video.mp4' not in x[0]]
                    # youtu.be ids here actually reference Google Drive files; resolve them.
                    doc_links = [directstream.google('https://drive.google.com/file/d/%s/view' % match) for match in re.findall('''file:\s*["'](?:[^"']+youtu.be/([^"']+))''', i, re.DOTALL)]
                    doc_links = [(u['url'], u['quality']) for x in doc_links if x for u in x]
                    links += doc_links
                    for url, quality in links:
                        if self.base_link in url:
                            # Same-origin streams need the referer appended for playback.
                            url = url + '|Referer=' + self.base_link
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'de',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })
                else:
                    try:
                        # Temporary workaround until resolveurl supports this URL form:
                        # rewrite googleapis docid links to plain Drive view URLs.
                        did = re.findall('youtube.googleapis.com.*?docid=(\w+)', i)
                        if did:
                            i = 'https://drive.google.com/file/d/%s/view' % did[0]
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(i, host)
                        for x in urls:
                            sources.append({
                                'source': host,
                                'quality': x['quality'],
                                'language': 'de',
                                'url': x['url'],
                                'direct': direct,
                                'debridonly': False
                            })
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve hoster links via the site's WordPress-style AJAX endpoint.

    Each episode button carries (data-episode, data-server); the AJAX call
    returns a JSON payload whose 'u' field is a base64-wrapped, EVP-encrypted
    blob keyed by the page's 'data-img' value. The decrypted string is the
    hoster URL.

    :return: list of hoster source dicts (empty on any failure)
    """
    sources = []
    try:
        if not url: return sources
        # Flatten parse_qs lists to single values.
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = urlparse.urljoin(self.base_link, data.get('url'))
        episode = data.get('episode')
        r = client.request(url)
        # AJAX metadata (endpoint URL, nonce, post id, action name) scraped from the page.
        aj = self.__get_ajax_object(r)
        # 'data-img' doubles as the base64 key material for EVP decryption below.
        b = dom_parser.parse_dom(r, 'img', attrs={'class': 'dgvaup'}, req='data-img')[0].attrs['data-img']
        if episode:
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream-ep', 'data-episode': episode}, req=['data-episode', 'data-server'])
        else:
            # Movies: take the German block's stream buttons.
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'lang-de'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie'})
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream'}, req=['data-episode', 'data-server'])
        r = [(i.attrs['data-episode'], i.attrs['data-server']) for i in r]
        for epi, server in r:
            try:
                x = {'action': aj.get('load_episodes'), 'episode': epi, 'pid': aj.get('postid'),
                     'server': server, 'nonce': aj.get('nonce'), 'b': b}
                x = client.request(aj.get('ajax_url'), post=x, XHR=True, referer=url)
                x = json.loads(x)
                q = source_utils.label_to_quality(x.get('q'))
                # 'u' is base64(JSON{ct, s, ...}); decrypt ct with EVP using key=b64(b), salt=hex(s).
                x = json.loads(base64.decodestring(x.get('u')))
                u = source_utils.evp_decode(x.get('ct'), base64.decodestring(b), x.get('s').decode("hex"))
                u = u.replace('\/', '/').strip('"')
                valid, host = source_utils.is_host_valid(u, hostDict)
                if not valid: continue
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': 'de',
                    'url': u,
                    'direct': False,
                    'debridonly': False,
                    'checkquality': True
                })
            except:
                # One bad server entry must not abort the rest.
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect streams for every (item, episode, type) variant of a title.

    For each variant from __get_episode, loads the watch page, parses the
    inline `var streams = [...]` JSON, and rebuilds stream URLs via each
    entry's replace/code template. On-site links are fetched (forcing the
    flash player cookie) and mined for file/width pairs as direct 'cdn'
    sources; off-site links become SD hoster sources. 'sub' content types
    are tagged with info='subbed'.

    :return: list of source dicts (empty on any failure)
    """
    sources = []
    try:
        if not url: return sources
        # Flatten parse_qs lists to single values.
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        for item_id, episode, content_type in self.__get_episode(data.get('url'), data.get('episode')):
            stream_link = urlparse.urljoin(self.base_link, '/watch/%s/%s/%s' % (item_id, episode, content_type))
            info = 'subbed' if content_type.endswith('sub') else ''
            r = client.request(stream_link)
            # Concatenate all script bodies; the stream list lives in one of them.
            r = dom_parser.parse_dom(r, 'script')
            r = ' '.join([i.content for i in r if i.content])
            r = json.loads(re.findall('var\s*streams\s*=\s*(\[.*?\])\s*;', r)[0])
            # Each entry is a URL template ('replace') with '#' substituted by 'code'.
            r = [(i.get('replace'), i.get('code')) for i in r]
            r = [(i[0].replace('#', i[1])) for i in r if i[0] and i[1]]
            for stream_link in r:
                if stream_link.startswith('/'):
                    # Protocol-relative URL.
                    stream_link = 'http:%s' % stream_link
                if self.domains[0] in stream_link:
                    # On-site player: request the flash variant, whose markup
                    # exposes file/width pairs for direct playback.
                    stream_link = client.request(stream_link, cookie=urllib.urlencode({'proxerstream_player': 'flash'}))
                    i = [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*width\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', stream_link, re.DOTALL)]
                    i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]
                    for url, quality in i:
                        sources.append({
                            'source': 'cdn',
                            'quality': quality,
                            'language': 'de',
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False
                        })
                else:
                    valid, host = source_utils.is_host_valid(stream_link, hostDict)
                    if not valid: continue
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'de',
                        'url': stream_link,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site for a title and collect its stream links.

    When *url* is a non-http query string (title/year or show/season/episode),
    searches the site's XHR endpoint, matches the cleaned title, builds the
    episode page URL (falling back from episode-0 to episode-1 for movies),
    and harvests the 'anime_muti_link' entries: vidnode streaming links are
    expanded into direct CDN sources; everything else is validated as a
    hoster link.

    Fixes vs. original: identity comparisons use ``is None`` / ``is not
    None`` instead of ``== None``; removed the unused local ``result``.

    :return: list of source dicts (empty on any failure)
    """
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            # Flatten parse_qs lists to single values.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            if 'season' in data: season = data['season']
            if 'episode' in data: episode = data['episode']
            year = data['year']
            # Extended output captures cookie + headers for the session.
            r = client.request(self.base_link, output='extended', timeout='10')
            cookie = r[4]
            headers = r[3]
            headers['Cookie'] = cookie
            query = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(query, headers=headers, XHR=True)
            r = json.loads(r)['content']
            # (href, anchor-text) pairs from the search results.
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            if 'tvshowtitle' in data:
                # Match both 'titleseason2' and 'titleseason02' spellings.
                cltitle = cleantitle.get(title + 'season' + season)
                cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                r = [i for i in r if cltitle == cleantitle.get(i[1]) or cltitle2 == cleantitle.get(i[1])]
                vurl = '%s%s-episode-%s' % (self.base_link, str(r[0][0]).replace('/info', ''), episode)
                vurl2 = None
            else:
                # Movies: match plain title or 'title (year)'.
                cltitle = cleantitle.getsearch(title)
                cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                r = [i for i in r if cltitle2 == cleantitle.getsearch(i[1]) or cltitle == cleantitle.getsearch(i[1])]
                # Movies are published as episode-0 or episode-1; try both.
                vurl = '%s%s-episode-0' % (self.base_link, str(r[0][0]).replace('/info', ''))
                vurl2 = '%s%s-episode-1' % (self.base_link, str(r[0][0]).replace('/info', ''))
            r = client.request(vurl, headers=headers)
            headers['Referer'] = vurl
            slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
            if len(slinks) == 0 and vurl2 is not None:
                # Fall back to the episode-1 URL for movies.
                r = client.request(vurl2, headers=headers)
                headers['Referer'] = vurl2
                slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
            for slink in slinks:
                try:
                    if 'vidnode.net/streaming.php' in slink:
                        # vidnode embeds expose a sources:[...] array of direct files.
                        r = client.request('https:%s' % slink, headers=headers)
                        clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                        clinks = re.findall(r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)', clinks)
                        for clink in clinks:
                            q = source_utils.label_to_quality(clink[1])
                            sources.append({
                                'source': 'cdn',
                                'quality': q,
                                'language': 'en',
                                'url': clink[0],
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        valid, hoster = source_utils.is_host_valid(slink, hostDict)
                        if valid:
                            sources.append({
                                'source': hoster,
                                'quality': 'SD',
                                'language': 'en',
                                'url': slink,
                                'direct': False,
                                'debridonly': False
                            })
                except:
                    # Skip broken links, keep scraping the rest.
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the episode table of each German stream tab for hoster links.

    Walks the '#streams' nav tabs, keeping only German (flag 'de') tabs
    (non-'de' flagged entries would be tagged 'subbed'). For each tab it
    reads the quality from the resolution cell, locates the row matching
    the requested episode number, and pairs the row's 'data-enc' payload
    with every hoster icon found, emitting [enc, hoster] source URLs to be
    decoded later by this scraper's resolver.

    Fixes vs. original:
    - ``re.IGNORECASE`` was placed outside the ``re.findall('Episode (\\d+):', ...)``
      call (it became a dead 4th tuple element, so the flag never applied);
      it is now passed as the flags argument.
    - ``info = ' | '.join(info)`` rebound the loop variable inside the inner
      loop, so the second iteration joined the characters of the already
      joined string; the join is now hoisted to once per tab.

    :return: list of hoster source dicts (empty on any failure)
    """
    sources = []
    try:
        if not url: return sources
        # Flatten parse_qs lists to single values.
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = data.get('url')
        episode = int(data.get('episode', 1))
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'streams'})
        # Tab bar: each anchor '#stream_N' carries a language flag.
        rels = dom_parser.parse_dom(r, 'ul', attrs={'class': 'nav'})
        rels = dom_parser.parse_dom(rels, 'li')
        rels = dom_parser.parse_dom(rels, 'a', attrs={'href': re.compile('#stream_\d*')}, req='href')
        rels = [(re.findall('stream_(\d+)', i.attrs['href']), re.findall('flag-(\w{2})', i.content)) for i in rels if i]
        # Keep German tabs only; non-'de' first flag would mark the tab 'subbed'.
        rels = [(i[0][0], ['subbed'] if i[1][0] != 'de' else []) for i in rels if i[0] and 'de' in i[1]]
        for id, info in rels:
            # Join once per tab (info is [] or ['subbed']); re-joining the
            # resulting string per-quality would scramble it.
            info = ' | '.join(info)
            rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream_%s' % id})
            # Pair the episodes container with rows that have a desktop icon.
            rel = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'streams_episodes_%s' % id}),
                    dom_parser.parse_dom(i, 'tr')) for i in rel]
            rel = [(i[0][0].content, [x for x in i[1] if 'fa-desktop' in x.content]) for i in rel if i[0] and i[1]]
            rel = [(i[0], dom_parser.parse_dom(i[1][0].content, 'td')) for i in rel if i[1]]
            # Quality comes from the trailing 'WxH' resolution in the first cell.
            rel = [(i[0], re.findall('\d{3,4}x(\d{3,4})$', i[1][0].content)) for i in rel if i[1]]
            rel = [(i[0], source_utils.label_to_quality(i[1][0])) for i in rel if len(i[1]) > 0]
            for html, quality in rel:
                try:
                    # Episode anchors '#streams_episodes_<id>_<loop>' with their labels.
                    s = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_\d+' % id)})
                    s = [(dom_parser.parse_dom(i, 'div', attrs={'data-loop': re.compile('\d+')}, req='data-loop'),
                          dom_parser.parse_dom(i, 'span')) for i in s]
                    s = [(i[0][0].attrs['data-loop'], [x.content for x in i[1] if '<strong' in x.content]) for i in s if i[0]]
                    # Split the label into its leading number and trailing text.
                    s = [(i[0], re.findall('<.+?>(\d+)</.+?> (.+?)$', i[1][0])) for i in s if len(i[1]) > 0]
                    s = [(i[0], i[1][0]) for i in s if len(i[1]) > 0]
                    # Prefer an explicit 'Episode N:' number from the text over the leading index.
                    s = [(i[0], int(i[1][0]), re.findall('Episode (\d+):', i[1][1], re.IGNORECASE)) for i in s if len(i[1]) > 1]
                    s = [(i[0], i[1], int(i[2][0]) if len(i[2]) > 0 else -1) for i in s]
                    s = [(i[0], i[2] if i[2] >= 0 else i[1]) for i in s]
                    # data-loop index of the requested episode (IndexError -> skip quality).
                    s = [i[0] for i in s if i[1] == episode][0]
                    # Encrypted link payload for that episode slot.
                    enc = dom_parser.parse_dom(html, 'div', attrs={'id': re.compile('streams_episodes_%s_%s' % (id, s))}, req='data-enc')[0].attrs['data-enc']
                    # Hoster names from the icon classes ('hoster-<name>').
                    hosters = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_%s' % (id, s))})
                    hosters = [dom_parser.parse_dom(i, 'i', req='class') for i in hosters]
                    hosters = [re.findall('hoster-(\w+)', ' '.join([x.attrs['class'] for x in i])) for i in hosters if i][0]
                    # Strip TLD-ish suffixes before host validation, keep the raw name for the resolver.
                    hosters = [(source_utils.is_host_valid(re.sub('(co|to|net|pw|sx|tv|moe|ws|icon)$', '', i), hostDict), i) for i in hosters]
                    hosters = [(i[0][1], i[1]) for i in hosters if i[0] and i[0][0]]
                    for source, hoster in hosters:
                        sources.append({
                            'source': source,
                            'quality': quality,
                            'language': 'de',
                            'url': [enc, hoster],
                            'info': info,
                            'direct': False,
                            'debridonly': False,
                            'checkquality': True
                        })
                except:
                    # Missing episode/quality rows are expected; skip quietly.
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve streams from the site's base64 'playinfo' AJAX payload.

    POSTs to the ajax endpoint derived from the page slug, base64-decodes
    the response and reads 'playinfo'. If it is a string, it is an embed URL
    rewritten to the HLS index playlist, which is fetched and expanded into
    one 'CDN' source per resolution variant (with request headers appended).
    If it is a list, each entry's 'link_mp4' becomes a 'gvideo' source.

    Fix vs. original: the final ``except`` returned ``None`` (bare
    ``return``) instead of the ``sources`` list, unlike every sibling
    scraper; callers iterate the result, so it now returns ``sources``.

    :return: list of source dicts (empty on any failure)
    """
    sources = []
    try:
        if not url: return sources
        ref = urlparse.urljoin(self.base_link, url)
        # The ajax endpoint is keyed by the trailing slug token of the page URL.
        url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])
        headers = {'Referer': ref, 'User-Agent': client.randomagent()}
        result = client.request(url, headers=headers, post='')
        result = base64.decodestring(result)
        result = json.loads(result).get('playinfo', [])
        if isinstance(result, basestring):
            # String payload: an embed URL; swap in the HLS index playlist.
            result = result.replace('embed.html', 'index.m3u8')
            # Variant URIs in the playlist are relative to this base.
            base_url = re.sub('index\.m3u8\?token=[\w\-]+[^/$]*', '', result)
            r = client.request(result, headers=headers)
            # (height, relative-uri) per #EXT-X-STREAM-INF variant.
            r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
            r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
            r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]
            for i in r:
                sources.append({
                    'source': 'CDN',
                    'quality': i['quality'],
                    'language': 'de',
                    'url': i['url'],
                    'direct': True,
                    'debridonly': False
                })
        elif result:
            # List payload: direct google-video mp4 links.
            result = [i.get('link_mp4') for i in result]
            result = [i for i in result if i]
            for i in result:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'de',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        return sources
    except:
        # Return the (possibly empty) list, never None, so callers can iterate.
        return sources