Пример #1
0
 def sources(self, url, hostDict, hostprDict):
     """Collect direct CDN stream sources for the given video id.

     Asks the site's config endpoint for the streamer base URL, fetches
     the HLS master playlist for *url* and emits one source dict per
     resolution variant.  Returns a (possibly empty) list.
     """
     sources = []
     try:
         if not url:
             return sources
         conf_url = urlparse.urljoin(self.base_link, self.conf_link)
         streamer = json.loads(client.request(conf_url, XHR=True)).get('streamer')
         playlist = client.request(streamer + '%s.mp4/master.m3u8' % url, XHR=True)
         # (vertical resolution, variant URL) pairs from the master playlist.
         variants = re.findall('RESOLUTION\s*=\s*\d+x(\d+).*?\n(http.*?)(?:\n|$)',
                               playlist, re.IGNORECASE)
         for label, link in variants:
             sources.append({
                 'source': 'CDN',
                 'quality': source_utils.label_to_quality(label),
                 'language': 'de',
                 'url': link,
                 'direct': True,
                 'debridonly': False
             })
     except:
         # Scrapers never raise: swallow and return whatever was collected.
         pass
     return sources
Пример #2
0
 def sources(self, url, hostDict, hostprDict):
     """Resolve stream sources from a base64-encoded playlist page.

     Parses the stream id/episode out of *url*, fetches the stream page,
     base64-decodes the body and scrapes file/label pairs out of the
     embedded player config.  Returns a (possibly empty) list of dicts.
     """
     sources = []
     try:
         if not url:
             return sources
         # url looks like '<id>-stream' or '<id>-stream?episode=<n>';
         # default the episode to '1' when absent.
         r = re.findall('(\d+)-stream(?:\?episode=(\d+))?', url)
         r = [(i[0], i[1] if i[1] else '1') for i in r][0]
         r = self.scraper.get(urlparse.urljoin(self.base_link,
                                               self.get_link % r),
                              output='extended').content
         # NOTE(review): r is then indexed like client.request's extended
         # tuple (body, ..., response-headers, request-headers) — assumes
         # the scraper honours output='extended'; verify against its impl.
         headers = r[3]
         headers.update({
             'Cookie': r[2].get('Set-Cookie'),
             'Referer': self.base_link
         })
         r = r[0]
         # Re-pad the base64 payload before decoding.
         r += '=' * (-len(r) % 4)
         r = base64.b64decode(r)
         # Scrape (url, label) pairs in either label-first or file-first order.
         i = [(match[1], match[0]) for match in re.findall(
             '''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''',
             r, re.DOTALL)]
         i += [(match[0], match[1]) for match in re.findall(
             '''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''',
             r, re.DOTALL)]
         r = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1]))
              for x in i]
         for u, q in r:
             try:
                 # Google-video links carry their own quality metadata.
                 tag = directstream.googletag(u)
                 if tag:
                     sources.append({
                         'source': 'gvideo',
                         'quality': tag[0].get('quality', 'SD'),
                         'language': 'de',
                         'url': u,
                         'direct': True,
                         'debridonly': False
                     })
                 else:
                     sources.append({
                         'source':
                         'CDN',
                         'quality':
                         q,
                         'language':
                         'de',
                         'url':
                         u + '|%s' % urllib.urlencode(headers),
                         'direct':
                         True,
                         'debridonly':
                         False
                     })
             except:
                 pass
         return sources
     except:
         # Scrapers never raise: swallow everything and return what we have.
         return sources
Пример #3
0
    def sources(self, url, hostDict, hostprDict):
        """Build hoster sources via the site's WordPress AJAX endpoint.

        Decodes each stream link from the AES(EVP)-encrypted payload
        returned by the admin-ajax call and keeps only links whose host
        is in *hostDict*.  Returns a (possibly empty) list of dicts.
        """
        sources = []

        try:
            if not url:
                return sources

            # url is a query string: extract target page URL and episode no.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            episode = data.get('episode')

            r = client.request(url)

            # AJAX parameters (action, nonce, post id, endpoint) scraped
            # from the page by the private helper.
            aj = self.__get_ajax_object(r)

            # 'b' doubles as request token and (base64) AES key below.
            b = dom_parser.parse_dom(r, 'img', attrs={'class': 'dgvaup'}, req='data-img')[0].attrs['data-img']

            if episode:
                r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream-ep', 'data-episode': episode},
                                         req=['data-episode', 'data-server'])
            else:
                r = dom_parser.parse_dom(r, 'div', attrs={'id': 'lang-de'})
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie'})
                r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream'}, req=['data-episode', 'data-server'])

            r = [(i.attrs['data-episode'], i.attrs['data-server']) for i in r]

            for epi, server in r:
                try:
                    x = {'action': aj.get('load_episodes'), 'episode': epi, 'pid': aj.get('postid'), 'server': server,
                         'nonce': aj.get('nonce'), 'b': b}
                    x = client.request(aj.get('ajax_url'), post=x, XHR=True, referer=url)
                    x = json.loads(x)

                    q = source_utils.label_to_quality(x.get('q'))
                    x = json.loads(base64.decodestring(x.get('u')))

                    # OpenSSL EVP decrypt: ciphertext, key, hex-decoded salt.
                    u = source_utils.evp_decode(x.get('ct'), base64.decodestring(b), x.get('s').decode("hex"))
                    u = u.replace('\/', '/').strip('"')

                    valid, host = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue

                    sources.append(
                        {'source': host, 'quality': q, 'language': 'de', 'url': u, 'direct': False, 'debridonly': False,
                         'checkquality': True})
                except:
                    pass

            return sources
        except:
            return sources
Пример #4
0
def __get_moonwalk(url, ref, info=''):
    """Resolve HLS quality variants from a Moonwalk-style player page.

    Replays the page's JavaScript POST by hand: scrapes the CSRF token,
    a second dynamically-named token header, the post body the JS
    builds, the mw_key and any attribute assigned at runtime, then
    posts to the '/all' endpoint and expands the returned m3u8 manifest
    into per-quality URLs (request headers appended for playback).
    Returns a list of {'quality', 'url', 'info'} dicts, or [] on error.
    """
    try:
        host = urlparse.urlparse(url)
        host = '%s://%s' % (host.scheme, host.netloc)
        r = client.request(url, referer=ref, output='extended')
        headers = r[3]
        headers.update({'Cookie': r[2].get('Set-Cookie')})
        r = r[0]
        csrf = re.findall('name="csrf-token" content="(.*?)"', r)[0]
        # Second, randomly-named anti-CSRF header sent next to X-CSRF-Token.
        story = re.findall(
            '''["']X-CSRF-Token["']\s*:\s*[^,]+,\s*["']([\w\-]+)["']\s*:\s*["'](\w+)["']''',
            r)[0]
        headers.update({'X-CSRF-Token': csrf, story[0]: story[1]})
        # De-obfuscate window["a" + "b"]-style string splitting in the page JS.
        for i in re.findall('window\[(.*?)\]', r):
            r = r.replace(i, re.sub('''["']\s*\+\s*["']''', '', i))
        varname, post_url = re.findall(
            '''var\s*(\w+)\s*=\s*["'](.*?/all/?)["']\s*;''', r)[0]
        jsid = re.findall('''\.post\(\s*%s\s*,\s*([^(\);)]+)''' % varname,
                          r)[0]
        jsdata = re.findall('(?:var\s*)?%s\s*=\s*({.*?})' % re.escape(jsid), r,
                            re.DOTALL)[0]
        # Coerce the JS object literal into parseable JSON: quote keys,
        # normalise quotes, zero out expressions/identifiers we cannot eval.
        jsdata = re.sub(r'([\{\s,])(\w+)(:)', r'\1"\2"\3', jsdata)
        jsdata = re.sub(r'''(?<=:)\s*\'''', ' "', jsdata)
        jsdata = re.sub(r'''(?<=\w)\'''', '"', jsdata)
        jsdata = re.sub(''':\s*\w+\s*\?[^,}]+''', ': 0', jsdata)
        jsdata = re.sub(''':\s*[a-zA-Z]+[^,}]+''', ': 0', jsdata)
        jsdata = json.loads(jsdata)
        mw_key = re.findall('''var\s*mw_key\s*=\s*["'](\w+)["']''', r)[0]
        # Pick up any extra attribute the JS assigns to the post object.
        newatt = re.findall(
            '''%s\[["']([^=]+)["']\]\s*=\s*["']([^;]+)["']''' %
            re.escape(jsid), r)[0]
        newatt = [re.sub('''["']\s*\+\s*["']''', '', i) for i in newatt]
        jsdata.update({'mw_key': mw_key, newatt[0]: newatt[1]})
        r = client.request(urlparse.urljoin(host, post_url),
                           post=jsdata,
                           headers=headers,
                           XHR=True)
        r = json.loads(r).get('mans', {}).get('manifest_m3u8')
        r = client.request(r, headers=headers)
        # (vertical resolution, variant URL) pairs from the master playlist.
        r = [(i[0], i[1]) for i in re.findall(
            '#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+).*?(http.*?(?:\.abst|\.f4m|\.m3u8)).*?',
            r, re.DOTALL) if i]
        r = [(source_utils.label_to_quality(i[0]),
              i[1] + '|%s' % urllib.urlencode(headers)) for i in r]
        r = [{'quality': i[0], 'url': i[1], 'info': info} for i in r]
        return r
    except:
        return []
Пример #5
0
	def sources(self, url, hostDict, hostprDict):
		"""Collect direct CDN sources from a Russian streaming site.

		Resolves each player block on the page, asks the player endpoint
		for its translations, decodes the obfuscated media URLs and
		emits one source per available (SD/HD) quality.
		"""
		sources = []
		try:
			if not url:
				return sources
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			url = data.get('url')
			season = data.get('season')
			episode = data.get('episode')
			abs_episode = 0
			if season and episode:
				# Playlists may index by absolute episode number (season '0').
				abs_episode = str(tvmaze.tvMaze().episodeAbsoluteNumber(data.get('tvdb'), int(season), int(episode)))
			url = urlparse.urljoin(self.base_link, url)
			r = client.request(url)
			# Site serves Windows-1251; re-encode so the parser sees UTF-8.
			r = r.decode('cp1251').encode('utf-8')
			r = dom_parser.parse_dom(r, 'div', attrs={'class': 'players'}, req='data-player')
			r = [(i.attrs['data-player'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
			r = [(i[0], i[1][0].attrs['href']) for i in r if i[1]]
			for post_id, play_url in r:
				i = client.request(play_url, referer=url, output='extended')
				headers = i[3]
				headers.update({'Cookie': i[2].get('Set-Cookie')})
				# NOTE(review): referer=i passes the extended-response tuple,
				# not a URL — looks unintended; verify against client.request.
				i = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'post_id': post_id},
				                   headers=headers, referer=i, XHR=True)
				i = json.loads(i).get('message', {}).get('translations', {}).get('flash', {})
				for title, link in i.iteritems():
					try:
						link = self.decode_direct_media_url(link)
						if link.endswith('.txt'):
							# .txt links hide a nested encoded playlist; pick the
							# entry matching season/episode or the absolute number.
							link = self.decode_direct_media_url(client.request(link))
							link = json.loads(link).get('playlist', [])
							link = [i.get('playlist', []) for i in link]
							link = [x.get('file') for i in link for x in i if
							        (x.get('season') == season and x.get('serieId') == episode) or (
										        x.get('season') == '0' and x.get('serieId') == abs_episode)][0]
						urls = [(source_utils.label_to_quality(q), self.format_direct_link(link, q)) for q in
						        self.get_qualitys(link)]
						urls = [{'quality': x[0], 'url': x[1]} for x in urls if x[0] in ['SD', 'HD']]  # filter premium
						for i in urls:
							sources.append({'source': 'CDN', 'quality': i['quality'], 'info': title, 'language': 'ru',
							                'url': i['url'], 'direct': True, 'debridonly': False})
					except:
						pass
			return sources
		except:
			return sources
Пример #6
0
	def sources(self, url, hostDict, hostprDict):
		"""Collect Korean stream sources from the page's embedded iframes.

		'k-vid' frames are expanded into their direct <source> links
		(gvideo or plain CDN) plus the nested embed iframe; every other
		frame URL is kept as a hoster link when the host is recognised.
		"""
		sources = []
		try:
			if not url:
				return sources
			url = urlparse.urljoin(self.base_link, url)
			r = client.request(url)
			r = dom_parser.parse_dom(r, 'div', attrs={'class': 'watch_video'})
			r = [i.attrs['data-src'] for i in dom_parser.parse_dom(r, 'iframe', req='data-src')]
			for i in r:
				try:
					if 'k-vid' in i:
						i = client.request(i, referer=url)
						i = dom_parser.parse_dom(i, 'div', attrs={'class': 'videocontent'})
						gvid = dom_parser.parse_dom(i, 'source', req='src')
						gvid = [(g.attrs['src'], g.attrs['label'] if 'label' in g.attrs else 'SD') for g in gvid]
						gvid = [(x[0], source_utils.label_to_quality(x[1])) for x in gvid if x[0] != 'auto']
						for u, q in gvid:
							try:
								tag = directstream.googletag(u)
								if tag:
									sources.append(
										{'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'ko',
										 'url': u, 'direct': True, 'debridonly': False})
								else:
									sources.append(
										{'source': 'CDN', 'quality': q, 'language': 'ko', 'url': u, 'direct': True,
										 'debridonly': False})
							except:
								pass
						# Deliberate fall-through: the nested embed iframe is
						# then treated as a regular hoster link below.
						i = dom_parser.parse_dom(i, 'iframe', attrs={'id': 'embedvideo'}, req='src')[0].attrs['src']
					valid, host = source_utils.is_host_valid(i, hostDict)
					if not valid: continue
					sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False,
					                'debridonly': False})
				except:
					pass
			return sources
		except:
			return sources
Пример #7
0
	def sources(self, url, hostDict, hostprDict):
		"""Collect sources for every sub/dub stream of an episode.

		For links on the site's own domain the flash player config is
		requested and file/width pairs scraped into direct CDN sources;
		external links become hoster sources.  'subbed' is flagged via
		the content-type suffix.
		"""
		sources = []
		try:
			if not url:
				return sources
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			for item_id, episode, content_type in self.__get_episode(data.get('url'), data.get('episode')):
				stream_link = urlparse.urljoin(self.base_link, '/watch/%s/%s/%s' % (item_id, episode, content_type))
				info = 'subbed' if content_type.endswith('sub') else ''
				r = client.request(stream_link)
				r = dom_parser.parse_dom(r, 'script')
				r = ' '.join([i.content for i in r if i.content])
				# The page defines 'var streams = [...]' with template/code pairs.
				r = json.loads(re.findall('var\s*streams\s*=\s*(\[.*?\])\s*;', r)[0])
				r = [(i.get('replace'), i.get('code')) for i in r]
				r = [(i[0].replace('#', i[1])) for i in r if i[0] and i[1]]
				for stream_link in r:
					if stream_link.startswith('/'): stream_link = 'http:%s' % stream_link
					if self.domains[0] in stream_link:
						# Own-domain link: force the flash player to expose raw files.
						stream_link = client.request(stream_link,
						                             cookie=urllib.urlencode({'proxerstream_player': 'flash'}))
						i = [(match[0], match[1]) for match in re.findall(
							'''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*width\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
							stream_link, re.DOTALL)]
						i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]
						for url, quality in i:
							sources.append(
								{'source': 'cdn', 'quality': quality, 'language': 'de', 'url': url, 'info': info,
								 'direct': True, 'debridonly': False})
					else:
						valid, host = source_utils.is_host_valid(stream_link, hostDict)
						if not valid: continue
						sources.append(
							{'source': host, 'quality': 'SD', 'language': 'de', 'url': stream_link, 'info': info,
							 'direct': False, 'debridonly': False})
			return sources
		except:
			return sources
Пример #8
0
    def sources(self, url, hostDict, hostprDict):
        """Collect sources from the site's per-item player list.

        Each 'item' id is posted to the video endpoint; the resulting
        iframe is classified as ok.ru / gvideo / generic hoster, and any
        '.asp' player page is additionally scraped for captioned
        file/label pairs (treated as direct gvideo links).
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            c = client.request(url, output='cookie')
            result = client.request(url)

            # Strip non-ASCII bytes so the regexes below behave.
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result,
                                         'div',
                                         attrs={'class': 'item'},
                                         req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe',
                                               req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'):
                        url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        if 'ok.ru' in host:
                            # Odnoklassniki: expand into per-quality direct links.
                            okinfo = directstream.odnoklassniki(url)
                            for x in okinfo:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'en',
                                    'url': x['url'],
                                    'direct': True,
                                    'debridonly': False
                                })

                        else:
                            sources.append({
                                'source': host,
                                'quality': 'HD',
                                'language': 'en',
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })

                    # Only '.asp' player pages carry the embedded JW config.
                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        url = dom_parser.parse_dom(result, 'iframe',
                                                   req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                ginfo = directstream.google(url)
                                for g in ginfo:
                                    sources.append({
                                        'source': host,
                                        'quality': g['quality'],
                                        'language': 'en',
                                        'url': g['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                            else:
                                sources.append({
                                    'source': host,
                                    'quality': 'HD',
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except BaseException:
                        pass

                    # Skip players that expose no captions track (no JW config).
                    captions = re.search(
                        '''["']?kind["']?\s*:\s*(?:\'|\")captions(?:\'|\")''',
                        result)
                    if not captions: continue

                    # file/label pairs, in either label-first or file-first order.
                    matches = [(match[0], match[1]) for match in re.findall(
                        '''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''',
                        result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall(
                        '''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
                        result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]),
                               x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result
                              if not i[1].endswith('.vtt')]

                    for quality, url in result:
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })
                except BaseException:
                    pass

            return sources
        except BaseException:
            return sources
Пример #9
0
	def sources(self, url, hostDict, hostprDict):
		"""Collect German sources from a tabbed multi-player page.

		Keeps only player tabs flagged with the German flag icon, then
		harvests links from inline player configs, meta-iframes and
		<source> tags.  Own-domain pages are unpacked (base64 + jsunpack)
		to expose gvideo file/label pairs; other links go through the
		generic hoster/directstream checks.
		"""
		sources = []
		try:
			if not url:
				return sources
			url = urlparse.urljoin(self.base_link, url)
			r = client.request(url, output='extended')
			headers = r[3]
			headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
			r = r[0]
			# Tab ids whose flag image is 'de'.
			rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
			rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
			rels = dom_parser.parse_dom(rels, 'li')
			rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'),
			         dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
			rels = [(i[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if
			        i[0] and i[1]]
			rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']
			r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
			# Candidate links: player-config entries, meta iframes, <source> tags.
			links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
			links += [l.attrs['src'] for i in r for l in
			          dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
			links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]
			for i in links:
				try:
					i = re.sub('\[.+?\]|\[/.+?\]', '', i)
					i = client.replaceHTMLCodes(i)
					if '/play/' in i: i = urlparse.urljoin(self.base_link, i)
					if self.domains[0] in i:
						i = client.request(i, headers=headers, referer=url)
						# Inline base64 blobs and packed JS both hide player configs;
						# unpack them and append to the page text before scraping.
						for x in re.findall('''\(["']?(.*)["']?\)''', i):
							try:
								i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x))).replace('\\', '')
							except:
								pass
						for x in re.findall('(eval\s*\(function.*?)</script>', i, re.DOTALL):
							try:
								i += jsunpack.unpack(x).replace('\\', '')
							except:
								pass
						links = [(match[0], match[1]) for match in re.findall(
							'''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i,
							re.DOTALL)]
						links = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in links if
						         '/no-video.mp4' not in x[0]]
						doc_links = [directstream.google('https://drive.google.com/file/d/%s/view' % match) for match in
						             re.findall('''file:\s*["'](?:[^"']+youtu.be/([^"']+))''', i, re.DOTALL)]
						doc_links = [(u['url'], u['quality']) for x in doc_links if x for u in x]
						links += doc_links
						for url, quality in links:
							if self.base_link in url:
								url = url + '|Referer=' + self.base_link
							sources.append(
								{'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True,
								 'debridonly': False})
					else:
						try:
							# Temporary workaround until URLResolver supports this URL form.
							did = re.findall('youtube.googleapis.com.*?docid=(\w+)', i)
							if did: i = 'https://drive.google.com/file/d/%s/view' % did[0]
							valid, host = source_utils.is_host_valid(i, hostDict)
							if not valid: continue
							urls, host, direct = source_utils.check_directstreams(i, host)
							for x in urls: sources.append(
								{'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'],
								 'direct': direct, 'debridonly': False})
						except:
							pass
				except:
					pass
			return sources
		except:
			return sources
Пример #10
0
    def sources(self, url, hostDict, hostprDict):
        """Search the site for the title and collect stream sources.

        For TV shows the matching season page is located and the episode
        URL built; for movies both '-episode-0' and '-episode-1' pages
        are tried.  Direct CDN links are pulled out of the embedded
        vidnode player; everything else is kept as a hoster link.
        Returns a (possibly empty) list of source dicts.
        """
        try:
            sources = []

            if url is None:
                return sources

            if not str(url).startswith('http'):
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                # Search the site for the (cleaned) title.
                query = urlparse.urljoin(
                    self.base_link, self.search_link %
                    urllib.quote_plus(cleantitle.getsearch(title)))
                r = self.scraper.get(query).content
                r = json.loads(r)['content']
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a'))

                if 'tvshowtitle' in data:
                    # Match either 'titleseasonN' or 'titleseason0N'.
                    cltitle = cleantitle.get(title + 'season' + season)
                    cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                    r = [i for i in r if cltitle == cleantitle.get(i[1])
                         or cltitle2 == cleantitle.get(i[1])]
                    vurl = '%s%s-episode-%s' % (self.base_link,
                                                str(r[0][0]).replace('/info', ''), episode)
                    vurl2 = None
                else:
                    cltitle = cleantitle.getsearch(title)
                    cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                    r = [i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                         or cltitle == cleantitle.getsearch(i[1])]
                    vurl = '%s%s-episode-0' % (self.base_link,
                                               str(r[0][0]).replace('/info', ''))
                    vurl2 = '%s%s-episode-1' % (self.base_link,
                                                str(r[0][0]).replace('/info', ''))

                r = self.scraper.get(vurl).content

                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
                if len(slinks) == 0 and vurl2 is not None:
                    # Movies sometimes live at '-episode-1' instead of '-episode-0'.
                    r = self.scraper.get(vurl2).content
                    slinks = client.parseDOM(
                        r, 'div', attrs={'class': 'anime_muti_link'})
                    slinks = client.parseDOM(slinks, 'li', ret='data-video')

                for slink in slinks:
                    try:
                        if 'vidnode.net/streaming.php' in slink:
                            # BUGFIX: read the response body (.content) — the
                            # raw response object cannot be regex-searched, so
                            # this branch previously always failed silently.
                            r = self.scraper.get('https:%s' % slink).content
                            clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                            clinks = re.findall(
                                r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)',
                                clinks)
                            for clink in clinks:
                                q = source_utils.label_to_quality(clink[1])
                                sources.append({
                                    'source': 'cdn',
                                    'quality': q,
                                    'language': 'en',
                                    'url': clink[0],
                                    'direct': True,
                                    'debridonly': False
                                })
                        else:
                            valid, hoster = source_utils.is_host_valid(
                                slink, hostDict)
                            if valid:
                                sources.append({
                                    'source': hoster,
                                    'quality': 'SD',
                                    'language': 'en',
                                    'url': slink,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except:
                        pass

            return sources
        except:
            return sources
Пример #11
0
    def sources(self, url, hostDict, hostprDict):
        """Collect German hoster sources from a tabbed stream list.

        Walks the language tabs under #streams, keeps German streams
        (tagging non-'de' audio as 'subbed'), resolves the row matching
        *episode* and emits one source per recognised hoster.  The
        'url' field is the pair [enc, hoster] consumed later by resolve.
        """
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data.get('url')
            episode = int(data.get('episode', 1))

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'streams'})

            # Language/stream tabs: keep ids whose flag is German.
            rels = dom_parser.parse_dom(r, 'ul', attrs={'class': 'nav'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = dom_parser.parse_dom(rels, 'a', attrs={'href': re.compile('#stream_\d*')}, req='href')
            rels = [(re.findall('stream_(\d+)', i.attrs['href']), re.findall('flag-(\w{2})', i.content)) for i in rels
                    if i]
            rels = [(i[0][0], ['subbed'] if i[1][0] != 'de' else []) for i in rels if i[0] and 'de' in i[1]]

            for id, info in rels:
                rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream_%s' % id})
                rel = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'streams_episodes_%s' % id}),
                        dom_parser.parse_dom(i, 'tr')) for i in rel]
                rel = [(i[0][0].content, [x for x in i[1] if 'fa-desktop' in x.content]) for i in rel if i[0] and i[1]]
                rel = [(i[0], dom_parser.parse_dom(i[1][0].content, 'td')) for i in rel if i[1]]
                # Quality comes from the WxH resolution column.
                rel = [(i[0], re.findall('\d{3,4}x(\d{3,4})$', i[1][0].content)) for i in rel if i[1]]
                rel = [(i[0], source_utils.label_to_quality(i[1][0])) for i in rel if len(i[1]) > 0]

                for html, quality in rel:
                    try:
                        s = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_\d+' % id)})
                        s = [(dom_parser.parse_dom(i, 'div', attrs={'data-loop': re.compile('\d+')}, req='data-loop'),
                              dom_parser.parse_dom(i, 'span')) for i in s]
                        s = [(i[0][0].attrs['data-loop'], [x.content for x in i[1] if '<strong' in x.content]) for i in
                             s if i[0]]
                        s = [(i[0], re.findall('<.+?>(\d+)</.+?> (.+?)$', i[1][0])) for i in s if len(i[1]) > 0]
                        s = [(i[0], i[1][0]) for i in s if len(i[1]) > 0]
                        # BUGFIX: re.IGNORECASE was mistakenly a 4th tuple element
                        # instead of the findall flags argument.
                        s = [(i[0], int(i[1][0]), re.findall('Episode (\d+):', i[1][1], re.IGNORECASE)) for i in s if
                             len(i[1]) > 1]
                        s = [(i[0], i[1], int(i[2][0]) if len(i[2]) > 0 else -1) for i in s]
                        # Prefer the explicit 'Episode N:' number over the row index.
                        s = [(i[0], i[2] if i[2] >= 0 else i[1]) for i in s]
                        s = [i[0] for i in s if i[1] == episode][0]

                        enc = dom_parser.parse_dom(html, 'div',
                                                   attrs={'id': re.compile('streams_episodes_%s_%s' % (id, s))},
                                                   req='data-enc')[0].attrs['data-enc']

                        hosters = dom_parser.parse_dom(html, 'a',
                                                       attrs={'href': re.compile('#streams_episodes_%s_%s' % (id, s))})
                        hosters = [dom_parser.parse_dom(i, 'i', req='class') for i in hosters]
                        hosters = \
                            [re.findall('hoster-(\w+)', ' '.join([x.attrs['class'] for x in i])) for i in hosters if i][
                                0]
                        # Strip TLD-ish suffixes before host validation.
                        hosters = [(source_utils.is_host_valid(re.sub('(co|to|net|pw|sx|tv|moe|ws|icon)$', '', i),
                                                               hostDict), i) for i in hosters]
                        hosters = [(i[0][1], i[1]) for i in hosters if i[0] and i[0][0]]

                        # BUGFIX: do not rebind the loop variable 'info' — the old
                        # code re-joined the already-joined string on later rows.
                        info_str = ' | '.join(info)

                        for source, hoster in hosters:
                            sources.append(
                                {'source': source, 'quality': quality, 'language': 'de', 'url': [enc, hoster],
                                 'info': info_str, 'direct': False, 'debridonly': False, 'checkquality': True})
                    except:
                        pass

            return sources
        except:
            return sources
Пример #12
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve playable links from the site's AJAX play-info endpoint.

        The endpoint returns base64-encoded JSON; 'playinfo' is either an
        HLS embed URL (string) or a list of gvideo mp4 links.  Returns a
        (possibly empty) list of source dicts.
        """
        sources = []
        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)
            url = urlparse.urljoin(
                self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

            headers = {'Referer': ref, 'User-Agent': client.randomagent()}

            result = client.request(url, headers=headers, post='')
            result = base64.decodestring(result)
            result = json.loads(result).get('playinfo', [])

            if isinstance(result, basestring):
                # Single HLS stream: swap the embed page for the master
                # playlist and enumerate its resolution variants.
                result = result.replace('embed.html', 'index.m3u8')

                base_url = re.sub('index\.m3u8\?token=[\w\-]+[^/$]*', '',
                                  result)

                r = client.request(result, headers=headers)
                r = [(i[0], i[1]) for i in re.findall(
                    '#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)',
                    r, re.DOTALL) if i]
                r = [(source_utils.label_to_quality(i[0]),
                      i[1] + source_utils.append_headers(headers)) for i in r]
                r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]
                for i in r:
                    sources.append({
                        'source': 'CDN',
                        'quality': i['quality'],
                        'language': 'de',
                        'url': i['url'],
                        'direct': True,
                        'debridonly': False
                    })
            elif result:
                # List of gvideo mp4 links; quality comes from googletag.
                result = [i.get('link_mp4') for i in result]
                result = [i for i in result if i]
                for i in result:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'de',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass

            return sources
        except:
            # BUGFIX: previously returned None here; callers expect a list,
            # matching every other scraper in this file.
            return sources
 def sources(self, url, hostDict, hostprDict):
     """
     Scrape data-video embed links from a page and classify them per hoster.

     Two embed providers get special parsing (vidnode.net source tags,
     ocloud.stream iframe ids); everything else goes through plain hoster
     validation.  Returns a list of source dicts; on error returns whatever
     was collected so far.
     """
     try:
         sources = []
         if url == None: return sources
         # Direct-stream CDNs that should pass hoster validation.
         hostDict += [
             'akamaized.net', 'google.com', 'picasa.com', 'blogspot.com'
         ]
         result = self.scraper.get(url, timeout=10).content
         dom = dom_parser.parse_dom(result, 'a', req='data-video')
         urls = [
             i.attrs['data-video']
             if i.attrs['data-video'].startswith('https') else 'https:' +
             i.attrs['data-video'] for i in dom
         ]
         for url in urls:
             dom = []
             if 'vidnode.net' in url:
                 result = self.scraper.get(url, timeout=10).content
                 dom = dom_parser.parse_dom(result,
                                            'source',
                                            req=['src', 'label'])
                 dom = [
                     (i.attrs['src'] if i.attrs['src'].startswith('https')
                      else 'https:' + i.attrs['src'], i.attrs['label'])
                     for i in dom if i
                 ]
             elif 'ocloud.stream' in url:
                 result = self.scraper.get(url, timeout=10).content
                 base = re.findall('<base href="([^"]+)">', result)[0]
                 hostDict += [base]
                 dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                 dom = [(i.attrs['href'].replace('./embed', base + 'embed'),
                         i.attrs['id']) for i in dom if i]
                 dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)",
                                    client.request(i[0]))[0], i[1])
                        for i in dom if i]
             if dom:
                 try:
                     for r in dom:
                         valid, hoster = source_utils.is_host_valid(
                             r[0], hostDict)
                         if not valid: continue
                         quality = source_utils.label_to_quality(r[1])
                         # Renamed from 'urls' to avoid shadowing the list
                         # being iterated by the outer loop.
                         stream_urls, host, direct = source_utils.check_directstreams(
                             r[0], hoster)
                         for x in stream_urls:
                             # Only direct streams can report a size; start
                             # from None so indirect ones don't hit the
                             # previously-undefined name (NameError bug that
                             # silently dropped all indirect sources).
                             size = None
                             if direct:
                                 size = source_utils.get_size(x['url'])
                             if size:
                                 sources.append({
                                     'source': host,
                                     'quality': quality,
                                     'language': 'en',
                                     'url': x['url'],
                                     'direct': direct,
                                     'debridonly': False,
                                     'info': size
                                 })
                             else:
                                 sources.append({
                                     'source': host,
                                     'quality': quality,
                                     'language': 'en',
                                     'url': x['url'],
                                     'direct': direct,
                                     'debridonly': False
                                 })
                 except:
                     pass
             else:
                 valid, hoster = source_utils.is_host_valid(url, hostDict)
                 if not valid: continue
                 try:
                     # Py2 guard: skip urls that are not valid utf-8.
                     url.decode('utf-8')
                     sources.append({
                         'source': hoster,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     pass
         return sources
     except:
         return sources
Пример #14
0
    def sources(self, url, hostDict, hostprDict):
        """
        Resolve a title against the local Kodi video library via JSON-RPC.

        Matches by imdb/tvdb id or cleaned title within +/- one year of the
        requested year, skips .strm library entries, and reports the local
        file with a width-derived quality label and its size.
        Returns a list with at most one source dict.
        """
        sources = []

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            content_type = 'episode' if 'tvshowtitle' in data else 'movie'

            # Library year metadata is inexact; accept the year +/- 1.
            years = (data['year'], str(int(data['year']) + 1), str(int(data['year']) - 1))

            if content_type == 'movie':
                title = cleantitle.get(data['title'])
                # NOTE(review): localtitle is unused below; kept so a missing
                # 'localtitle' param still aborts via KeyError as before.
                localtitle = cleantitle.get(data['localtitle'])
                ids = [data['imdb']]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle", "file"]}, "id": 1}' % years)
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['movies']

                r = [i for i in r if
                     str(i['imdbnumber']) in ids or title in [cleantitle.get(i['title'].encode('utf-8')),
                                                              cleantitle.get(i['originaltitle'].encode('utf-8'))]]
                # .strm entries point at other addons, not local media.
                r = [i for i in r if not i['file'].encode('utf-8').endswith('.strm')][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails", "file"], "movieid": %s }, "id": 1}' % str(
                        r['movieid']))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['moviedetails']
            elif content_type == 'episode':
                title = cleantitle.get(data['tvshowtitle'])
                localtitle = cleantitle.get(data['localtvshowtitle'])
                season, episode = data['season'], data['episode']
                ids = [data['imdb'], data['tvdb']]

                # BUGFIX: "originaltitle" must be requested here -- the match
                # below reads i['originaltitle'], which previously raised
                # KeyError because GetTVShows only returned imdbnumber/title.
                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle"]}, "id": 1}' % years)
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['tvshows']

                r = [i for i in r if
                     str(i['imdbnumber']) in ids or title in [cleantitle.get(i['title'].encode('utf-8')),
                                                              cleantitle.get(i['originaltitle'].encode('utf-8'))]][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["file"], "tvshowid": %s }, "id": 1}' % (
                        str(season), str(episode), str(r['tvshowid'])))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodes']

                r = [i for i in r if not i['file'].encode('utf-8').endswith('.strm')][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails", "file"], "episodeid": %s }, "id": 1}' % str(
                        r['episodeid']))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodedetails']

            url = r['file'].encode('utf-8')

            try:
                # Video width drives the quality label; -1 maps to unknown.
                quality = int(r['streamdetails']['video'][0]['width'])
            except:
                quality = -1

            quality = source_utils.label_to_quality(quality)

            info = []
            try:
                # Best-effort file size in GB.
                f = control.openFile(url)
                s = f.size()
                f.close()
                s = '%.2f GB' % (float(s) / 1024 / 1024 / 1024)
                info.append(s)
            except:
                pass
            try:
                # Container extension, e.g. MKV.
                e = urlparse.urlparse(url).path.split('.')[-1].upper()
                info.append(e)
            except:
                pass
            info = ' | '.join(info)
            info = info.encode('utf-8')

            sources.append(
                {'source': '0', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'local': True,
                 'direct': True, 'debridonly': False})

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Library - Exception: \n' + str(failure))
            return sources
Пример #15
0
 def sources(self, url, hostDict, hostprDict):
     """
     Collect hoster links for the German ('deutsch') language option of a
     page, grouped by the hd<res> quality markers in the option ids.

     url -- querystring-encoded params (url, imdb, season, episode).
     Returns a list of source dicts; returns it unchanged on any error.
     """
     sources = []
     try:
         if not url:
             return sources

         params = urlparse.parse_qs(url)
         params = dict([(k, params[k][0]) if params[k] else (k, '')
                        for k in params])

         page = urlparse.urljoin(self.base_link, params.get('url', ''))
         imdb = params.get('imdb')
         season = params.get('season')
         episode = params.get('episode')

         # Episodes are fetched through an XHR endpoint keyed by imdb id.
         if season and episode and imdb:
             post_data = urllib.urlencode({
                 'val': 's%se%s' % (season, episode),
                 'IMDB': imdb
             })
             html = client.request(urlparse.urljoin(self.base_link,
                                                    self.episode_link),
                                   XHR=True,
                                   post=post_data)
         else:
             html = client.request(page)

         # Pick the 'deutsch' entry from the language selector.
         selector = dom_parser.parse_dom(html, 'select',
                                         attrs={'id': 'sel_sprache'})
         options = dom_parser.parse_dom(selector, 'option', req='id')

         german_blocks = []
         for opt in options:
             if opt.attrs['id'] == 'deutsch':
                 german_blocks.append(
                     dom_parser.parse_dom(html, 'div',
                                          attrs={'id': opt.attrs['id']}))

         contents = []
         for block in german_blocks:
             # An empty block raises IndexError here, which the outer
             # handler turns into an early (empty) return.
             contents.append((block[0],
                              dom_parser.parse_dom(block[0], 'option',
                                                   req='id')))

         streams = []
         for content, opts in contents:
             for opt in opts:
                 streams.append((opt.attrs['id'],
                                 dom_parser.parse_dom(
                                     content, 'div',
                                     attrs={'id': opt.attrs['id']})))

         labelled = []
         for ident, divs in streams:
             if not divs:
                 continue
             labelled.append((re.findall('hd(\d{3,4})', ident),
                              dom_parser.parse_dom(divs, 'a', req='href')))

         grouped = []
         for labels, anchors in labelled:
             if not anchors:
                 continue
             label = labels[0] if labels else '0'
             grouped.append((source_utils.label_to_quality(label),
                             [a.attrs['href'] for a in anchors]))

         for quality, links in grouped:
             for link in links:
                 try:
                     query = urlparse.parse_qs(urlparse.urlparse(link).query,
                                               keep_blank_values=True)
                     # Some links hide the target in a base64 'm' parameter.
                     if 'm' in query:
                         link = base64.b64decode(query.get('m')[0])
                     link = link.strip()
                     valid, host = source_utils.is_host_valid(
                         link, hostDict)
                     if not valid:
                         continue
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'de',
                         'url': link,
                         'direct': False,
                         'debridonly': False,
                         'checkquality': True
                     })
                 except:
                     pass
         return sources
     except:
         return sources
Пример #16
0
 def sources(self, url, hostDict, hostprDict):
     """
     Collect stream links from the tab-pane iframes of a show page.

     drama4u / k-vid iframes expose a jwplayer 'source' array that is
     scanned for file/label pairs (Google video links get tagged quality);
     any other iframe is treated as a plain hoster embed.
     Returns a list of source dicts; never None.
     """
     sources = []
     try:
         if not url:
             # Was a bare 'return' (None); callers iterate the result, so
             # return the empty list like every sibling scraper does.
             return sources
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         panes = dom_parser.parse_dom(html, 'div', attrs={'class': 'tab-pane'})
         frames = dom_parser.parse_dom(panes, 'iframe', req='src')
         frames = [i.attrs['src'] for i in frames]
         # Renamed loop/intermediate variables: the original rebound 'r'
         # and 'i' inside the loop it was iterating.
         for link in frames:
             try:
                 if 'drama4u' in link or 'k-vid' in link:
                     r = client.request(link, referer=url)
                     r = re.findall(
                         '''var\s*source\s*=\s*\[({.*?})\]\s*;''', r)[0]
                     matches = [(match[1], match[0]) for match in re.findall(
                         '''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''',
                         r, re.DOTALL)]
                     matches += [(match[0], match[1]) for match in re.findall(
                         '''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''',
                         r, re.DOTALL)]
                     pairs = [(x[0].replace('\/', '/'),
                               source_utils.label_to_quality(x[1]))
                              for x in matches]
                     for u, q in list(set(pairs)):
                         try:
                             tag = directstream.googletag(u)
                             if tag:
                                 sources.append({
                                     'source': 'gvideo',
                                     'quality': tag[0].get('quality', 'SD'),
                                     'language': 'ko',
                                     'url': u,
                                     'direct': True,
                                     'debridonly': False
                                 })
                             else:
                                 sources.append({
                                     'source': 'CDN',
                                     'quality': q,
                                     'language': 'ko',
                                     'url': u,
                                     'direct': True,
                                     'debridonly': False
                                 })
                         except:
                             pass
                 else:
                     valid, host = source_utils.is_host_valid(link, hostDict)
                     if not valid: continue
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'ko',
                         'url': link,
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources