Example #1
def check_directstreams(url, hoster='', quality='SD'):
	urls = []
	host = hoster

	if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
		urls = directstream.google(url)
		if not urls:
			tag = directstream.googletag(url)
			if tag:
				urls = [{'quality': tag[0]['quality'], 'url': url}]
		if urls:
			host = 'gvideo'

	elif 'ok.ru' in url:
		urls = directstream.odnoklassniki(url)
		if urls:
			host = 'vk'

	elif 'vk.com' in url:
		urls = directstream.vk(url)
		if urls:
			host = 'vk'

	elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
		urls = [{'url': url}]
		if urls: host = 'CDN'

	direct = True if urls else False

	if not urls:
		urls = [{'quality': quality, 'url': url}]

	return urls, host, direct
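
check_directstreams returns a list of {'quality': ..., 'url': ...} dicts, the resolved host name and a direct flag; note that the 'CDN' branch yields entries without a 'quality' key, so callers should supply a default. A minimal usage sketch, assuming the helper above and the addon's directstream module are importable (add_stream and its defaults are illustrative, not part of the original code):

def add_stream(sources, url, hoster, quality='SD', language='en'):
    # Fold the helper's output into the source-dict shape the sources()
    # methods below append; u.get('quality', quality) covers the 'CDN'
    # branch, whose entries carry no 'quality' key.
    urls, host, direct = check_directstreams(url, hoster, quality)
    for u in urls:
        sources.append({'source': host, 'quality': u.get('quality', quality),
                        'language': language, 'url': u['url'],
                        'direct': direct, 'debridonly': False})
    return sources
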
Example #2
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, '/sources?%s' % urllib.urlencode(data))
            r = client.request(url)
            if not r: raise Exception()
            result = json.loads(r)
            try:
                gvideos = [i['url'] for i in result if i['source'] == 'GVIDEO']
                for url in gvideos:
                    gtag = directstream.googletag(url)[0]
                    sources.append(
                        {'source': 'gvideo', 'quality': gtag['quality'], 'language': 'en', 'url': gtag['url'],
                         'direct': True, 'debridonly': False})
            except:
                pass

            try:
                oloads = [i['url'] for i in result if i['source'] == 'CDN']
                for url in oloads:
                    sources.append({'source': 'CDN', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,
                                    'debridonly': False})
            except:
                pass

            return sources
        except:
            return sources
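
Most of the sources() methods in these examples open with the same query-string round trip: urlparse.parse_qs turns the plugin's url payload back into lists, the dict comprehension flattens it to single values, and urllib.urlencode re-encodes it for the next request. A self-contained Python 2 sketch of that step with made-up values:

import urllib
import urlparse

payload = 'title=Some+Movie&year=2010&imdb='        # made-up plugin payload
data = urlparse.parse_qs(payload)                    # values come back as lists
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print data                                           # flat dict of single values
print urllib.urlencode(data)                         # re-encoded query string
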
Example #3
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         r = re.findall('(\d+)-stream(?:\?episode=(\d+))?', url)
         r = [(i[0], i[1] if i[1] else '1') for i in r][0]
         r = self.scraper.get(urlparse.urljoin(self.base_link, self.get_link % r), output='extended').content
         headers = r[3]
         headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
         r = r[0]
         r += '=' * (-len(r) % 4)
         r = base64.b64decode(r)
         i = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', r, re.DOTALL)]
         i += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', r, re.DOTALL)]
         r = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]
         for u, q in r:
             try:
                 tag = directstream.googletag(u)
                 if tag:
                     sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                 else:
                     sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u + '|%s' % urllib.urlencode(headers), 'direct': True, 'debridonly': False})
             except:
                 pass
         return sources
     except:
         return sources
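
The r += '=' * (-len(r) % 4) line above restores the '=' padding the site strips from its base64 payload; b64decode rejects unpadded input, and -len(r) % 4 is exactly the number of characters missing from a multiple of four. A quick stdlib check of that arithmetic:

import base64

for s in ('YQ', 'YWI', 'YWJj', 'YWJjZA'):            # 'a', 'ab', 'abc', 'abcd' without padding
    padded = s + '=' * (-len(s) % 4)                 # pad the length up to a multiple of 4
    print s, '->', padded, '->', base64.b64decode(padded)
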
Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = self.searchShow(data['tvshowtitle'], data['season'])
            else:
                url = self.searchMovie(data['title'], data['year'])

            if url is None:
                return sources

            r = self.scraper.get(url, params={'link_web': self.base_link}).content
            quality = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
            quality = source_utils.check_sd_url(quality)
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})

            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = self.scraper.get(link, data={'link_web': self.base_link}).content  # fetch the embed page for this link
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try:
                            sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
                                            'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except:
                            traceback.print_exc()
                            pass
                else:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if host not in hostDict:
                            raise Exception()  # skip hosters that are not enabled
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': [],
                                        'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            traceback.print_exc()
            return sources
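
The hoster name is recovered from each player link by applying ([\w]+[.][\w]+)$ to the parsed netloc, which keeps only the last two dotted labels (the registered domain). A stdlib demonstration on a made-up link:

import re
import urlparse

link = 'https://www.openload.co/embed/abc123'        # made-up player link
netloc = urlparse.urlparse(link.strip().lower()).netloc
print re.findall('([\w]+[.][\w]+)$', netloc)[0]      # -> openload.co
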
Example #5
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			if url is None:
				return sources
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			aliases = eval(data['aliases'])
			headers = {}
			if 'tvshowtitle' in data:
				ep = data['episode']
				url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
					self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
				url = client.request(url, headers=headers, timeout='10', output='geturl')
				if url is None:
					url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
			else:
				url = self.searchMovie(data['title'], data['year'], aliases, headers)
				if url is None:
					url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
			if url is None:
				raise Exception()
			r = client.request(url, headers=headers, timeout='10')
			r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
			if 'tvshowtitle' in data:
				ep = data['episode']
				links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
			else:
				links = client.parseDOM(r, 'a', ret='player-data')
			for link in links:
				if '123movieshd' in link or 'seriesonline' in link:
					r = client.request(link, headers=headers, timeout='10')
					r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
					for i in r:
						try:
							sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
							                'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
						except:
							pass
				else:
					try:
						host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
						if not host in hostDict:
							raise Exception()
						host = client.replaceHTMLCodes(host)
						host = host.encode('utf-8')
						sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
						                'debridonly': False})
					except:
						pass
			return sources
		except:
			return sources
Example #6
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = client.parseDOM(r, 'iframe', ret='src')

            for u in r:
                try:
                    if not u.startswith('http') and 'vidstreaming' not in u:
                        raise Exception()

                    url = client.request(u)
                    url = client.parseDOM(url, 'source', ret='src')

                    for i in url:
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('GoGoAnime - Exception: \n' + str(failure))
            return sources
Example #7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         imdb = data['imdb']
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             url = self.searchShow(title, int(data['season']),
                                   int(data['episode']), aliases, headers)
         else:
             url = self.searchMovie(title, data['year'], aliases, headers)
         r = client.request(url,
                            headers=headers,
                            output='extended',
                            timeout='10')
         if imdb not in r[0]: raise Exception()
         cookie = r[4]
         headers = r[3]
         result = r[0]
         try:
             r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
             for i in r:
                 try:
                     sources.append({
                         'source': 'gvideo',
                         'quality': directstream.googletag(i)[0]['quality'],
                         'language': 'en',
                         'url': i,
                         'direct': True,
                         'debridonly': False
                     })
                 except:
                     pass
         except:
             pass
         try:
             auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
         except:
             auth = 'false'
         auth = 'Bearer %s' % urllib.unquote_plus(auth)
         headers['Authorization'] = auth
         headers['Referer'] = url
         u = '/ajax/vsozrflxcw.php'
         self.base_link = client.request(self.base_link,
                                         headers=headers,
                                         output='geturl')
         u = urlparse.urljoin(self.base_link, u)
         action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
         elid = urllib.quote(
             base64.encodestring(str(int(time.time()))).strip())
         token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
         idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
         post = {
             'action': action,
             'idEl': idEl,
             'token': token,
             'nopop': '',
             'elid': elid
         }
         post = urllib.urlencode(post)
         cookie += ';%s=%s' % (idEl, elid)
         headers['Cookie'] = cookie
         r = client.request(u,
                            post=post,
                            headers=headers,
                            cookie=cookie,
                            XHR=True)
         r = str(json.loads(r))
         r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
         for i in r:
             try:
                 if 'google' in i:
                     quality = 'SD'
                     if 'googleapis' in i:
                         try:
                             quality = source_utils.check_sd_url(i)
                         except:
                             pass
                     if 'googleusercontent' in i:
                         i = directstream.googleproxy(i)
                         try:
                             quality = directstream.googletag(
                                 i)[0]['quality']
                         except:
                             pass
                     sources.append({
                         'source': 'gvideo',
                         'quality': quality,
                         'language': 'en',
                         'url': i,
                         'direct': True,
                         'debridonly': False
                     })
                 elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                     try:
                         quality = source_utils.check_sd_url(i)
                         sources.append({
                             'source': 'CDN',
                             'quality': quality,
                             'language': 'en',
                             'url': i,
                             'direct': True,
                             'debridonly': False
                         })
                     except:
                         pass
                 else:
                     valid, hoster = source_utils.is_host_valid(i, hostDict)
                     if not valid: continue
                     sources.append({
                         'source': hoster,
                         'quality': '720p',
                         'language': 'en',
                         'url': i,
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources
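
The elid value posted back to the ajax endpoint above is just the current epoch second, base64-encoded (encodestring appends a newline, hence the strip) and percent-quoted so the trailing '=' padding survives the POST body. A Python 2 sketch with a fixed timestamp in place of time.time():

import base64
import urllib

ts = 1500000000                                      # stand-in for int(time.time())
elid = urllib.quote(base64.encodestring(str(ts)).strip())
print elid                                           # MTUwMDAwMDAwMA%3D%3D
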
Example #8
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         r = client.request(urlparse.urljoin(self.base_link, url))
         r = dom_parser.parse_dom(r, 'article')
         r = dom_parser.parse_dom(r,
                                  'div',
                                  attrs={'class': 'entry-content'})
         links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                            ''.join([i.content for i in r]))
         links += [
             l.attrs['src'] for i in r
             for l in dom_parser.parse_dom(i, 'iframe', req='src')
         ]
         links += [
             l.attrs['src'] for i in r
             for l in dom_parser.parse_dom(i, 'source', req='src')
         ]
         for i in links:
             try:
                 valid, hoster = source_utils.is_host_valid(i, hostDict)
                 if not valid: continue
                 urls = []
                 if 'google' in i:
                     host = 'gvideo'
                     direct = True
                     urls = directstream.google(i)
                     # keep any urls google() resolved; don't let the generic
                     # else-branch below overwrite them
                     if not urls and directstream.googletag(i):
                         urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                 elif 'ok.ru' in i:
                     host = 'vk'
                     direct = True
                     urls = directstream.odnoklassniki(i)
                 elif 'vk.com' in i:
                     host = 'vk'
                     direct = True
                     urls = directstream.vk(i)
                 else:
                     host = hoster
                     direct = False
                     urls = [{'quality': 'SD', 'url': i}]
                 for x in urls:
                     sources.append({
                         'source': host,
                         'quality': x['quality'],
                         'language': 'ko',
                         'url': x['url'],
                         'direct': direct,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources
Example #9
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.scraper.get(u).content
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                u = urlparse.urljoin(self.base_link, self.info_link % mid)
                quality = self.scraper.get(u).content
                quality = dom_parser.parse_dom(quality, 'div', attrs={'class': 'jtip-quality'})[0].content
                if quality == "HD":
                    quality = "720p"
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                        except Exception:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            if eid[1] != '6':
                                url = urlparse.urljoin(self.base_link, self.embed_link % eid[0])
                                link = self.scraper.get(url).content
                                link = json.loads(link)['src']
                                valid, host = source_utils.is_host_valid(link, hostDict)
                                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                                'url': link, 'info': [], 'direct': False, 'debridonly': False})
                            else:
                                url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                                script = self.scraper.get(url).content
                                if '$_$' in script:
                                    params = self.uncensored1(script)
                                elif script.startswith('[]') and script.endswith('()'):
                                    params = self.uncensored2(script)
                                elif '_x=' in script:
                                    x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                    y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                    params = {'x': x, 'y': y}
                                else:
                                    raise Exception()

                                u = urlparse.urljoin(self.base_link, self.source_link %
                                                     (eid[0], params['x'], params['y']))
                                r = self.scraper.get(u).content
                                url = json.loads(r)['playlist'][0]['sources']
                                url = [i['file'] for i in url if 'file' in i]
                                url = [directstream.googletag(i) for i in url]
                                url = [i[0] for i in url if i]

                                for s in url:
                                    if 'lh3.googleusercontent.com' in s['url']:
                                        s['url'] = directstream.googleredirect(s['url'])

                                    sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en',
                                                    'url': s['url'], 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
            except Exception:
                pass

            return sources
        except Exception:
            return sources
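
When the token script carries its parameters as _x=/_y= assignments, the two values are lifted with a pair of re.search calls before being substituted into source_link. The same extraction on a made-up script body:

import re

script = 'var _x="c2FtcGxlX3g=", _y="c2FtcGxlX3k=";'     # made-up token script
x = re.search('''_x=['"]([^"']+)''', script).group(1)
y = re.search('''_y=['"]([^"']+)''', script).group(1)
print x, y                                               # c2FtcGxlX3g= c2FtcGxlX3k=
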
Example #10
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)
            url = urlparse.urljoin(
                self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

            headers = {'Referer': ref, 'User-Agent': client.randomagent()}

            result = client.request(url, headers=headers, post='')
            result = base64.decodestring(result)
            result = json.loads(result).get('playinfo', [])

            if isinstance(result, basestring):
                result = result.replace('embed.html', 'index.m3u8')

                base_url = re.sub('index\.m3u8\?token=[\w\-]+[^/$]*', '',
                                  result)

                r = client.request(result, headers=headers)
                r = [(i[0], i[1]) for i in re.findall(
                    '#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)',
                    r, re.DOTALL) if i]
                r = [(source_utils.label_to_quality(i[0]),
                      i[1] + source_utils.append_headers(headers)) for i in r]
                r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]
                for i in r:
                    sources.append({
                        'source': 'CDN',
                        'quality': i['quality'],
                        'language': 'de',
                        'url': i['url'],
                        'direct': True,
                        'debridonly': False
                    })
            elif result:
                result = [i.get('link_mp4') for i in result]
                result = [i for i in result if i]
                for i in result:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'de',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass

            return sources
        except:
            return sources
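
The basestring branch above downloads an HLS master playlist and pairs each variant's height with its URL using one regex over the #EXT-X-STREAM-INF entries. The same parsing in miniature on a made-up playlist; label_to_quality here is a simplified stand-in for source_utils.label_to_quality, whose real implementation is not shown:

import re

m3u8 = ('#EXTM3U\n'
        '#EXT-X-STREAM-INF:BANDWIDTH=2500000,RESOLUTION=1280x720,CODECS="avc1.4d401f"\n'
        'hls/720/index.m3u8?token=abc\n'
        '#EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=640x360,CODECS="avc1.4d401f"\n'
        'hls/360/index.m3u8?token=abc\n')

def label_to_quality(height):                        # simplified stand-in
    return 'HD' if int(height) >= 720 else 'SD'

streams = re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', m3u8, re.DOTALL)
print [(label_to_quality(h), path) for h, path in streams]   # [('HD', 'hls/720/...'), ('SD', 'hls/360/...')]
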
Example #11
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         url = urlparse.urljoin(self.base_link, url)
         r = client.request(url)
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'tab-pane'})
         r = dom_parser.parse_dom(r, 'iframe', req='src')
         r = [i.attrs['src'] for i in r]
         for i in r:
             try:
                 if 'drama4u' in i or 'k-vid' in i:
                     r = client.request(i, referer=url)
                     r = re.findall(
                         '''var\s*source\s*=\s*\[({.*?})\]\s*;''', r)[0]
                     i = [(match[1], match[0]) for match in re.findall(
                         '''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''',
                         r, re.DOTALL)]
                     i += [(match[0], match[1]) for match in re.findall(
                         '''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''',
                         r, re.DOTALL)]
                     r = [(x[0].replace('\/', '/'),
                           source_utils.label_to_quality(x[1])) for x in i]
                     for u, q in list(set(r)):
                         try:
                             tag = directstream.googletag(u)
                             if tag:
                                  sources.append({
                                      'source': 'gvideo',
                                      'quality': tag[0].get('quality', 'SD'),
                                      'language': 'ko',
                                      'url': u,
                                      'direct': True,
                                      'debridonly': False
                                  })
                             else:
                                 sources.append({
                                     'source': 'CDN',
                                     'quality': q,
                                     'language': 'ko',
                                     'url': u,
                                     'direct': True,
                                     'debridonly': False
                                 })
                         except:
                             pass
                 else:
                     valid, host = source_utils.is_host_valid(i, hostDict)
                     if not valid: continue
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'ko',
                         'url': i,
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent':
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
            }
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            headers['Referer'] = url
            ref_url = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.scraper.post(url, headers=headers).content
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.scraper.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'),
                          client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % (eid[0]))
                                xml = self.scraper.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append({
                                    'source': hoster,
                                    'quality': q,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                                continue
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid, t))
                            script = self.scraper.get(url,
                                                      headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith(
                                    '()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''',
                                              script).group(1)
                                y = re.search('''_y=['"]([^"']+)''',
                                              script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(
                                self.base_link, self.source_link %
                                (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.scraper.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                    continue

                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(
                                                url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url,
                                                                  ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append({
                                        'source': hoster,
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': direct,
                                        'debridonly': False
                                    })
                                else:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
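
The while length == 0 and count < 11 loop above papers over an endpoint that intermittently returns an empty body by simply re-requesting it. The same idea as a small standalone helper; fetch stands in for self.scraper.get and is the only assumed interface:

def fetch_nonempty(fetch, url, headers, attempts=11):
    # Re-request until the endpoint returns a non-empty body, mirroring the
    # retry loop above; returns '' if every attempt comes back empty.
    body = ''
    for _ in range(attempts):
        body = fetch(url, headers=headers).text
        if body:
            break
    return body
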
Example #13
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         url = urljoin(self.base_link, url)
         r = client.request(url)
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'watch_video'})
         r = [
             i.attrs['data-src']
             for i in dom_parser.parse_dom(r, 'iframe', req='data-src')
         ]
         for i in r:
             try:
                 if 'k-vid' in i:
                     i = client.request(i, referer=url)
                     i = dom_parser.parse_dom(
                         i, 'div', attrs={'class': 'videocontent'})
                     gvid = dom_parser.parse_dom(i, 'source', req='src')
                     gvid = [
                         (g.attrs['src'],
                          g.attrs['label'] if 'label' in g.attrs else 'SD')
                         for g in gvid
                     ]
                     gvid = [(x[0], source_utils.label_to_quality(x[1]))
                             for x in gvid if x[0] != 'auto']
                     for u, q in gvid:
                         try:
                             tag = directstream.googletag(u)
                             if tag:
                                  sources.append({
                                      'source': 'gvideo',
                                      'quality': tag[0].get('quality', 'SD'),
                                      'language': 'ko',
                                      'url': u,
                                      'direct': True,
                                      'debridonly': False
                                  })
                             else:
                                 sources.append({
                                     'source': 'CDN',
                                     'quality': q,
                                     'language': 'ko',
                                     'url': u,
                                     'direct': True,
                                     'debridonly': False
                                 })
                         except:
                             pass
                     i = dom_parser.parse_dom(i,
                                              'iframe',
                                              attrs={'id': 'embedvideo'},
                                              req='src')[0].attrs['src']
                 valid, host = source_utils.is_host_valid(i, hostDict)
                 if not valid: continue
                 sources.append({
                     'source': host,
                     'quality': 'SD',
                     'language': 'ko',
                     'url': i,
                     'direct': False,
                     'debridonly': False
                 })
             except:
                 pass
         return sources
     except:
         return sources
Example #14
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)
            url += '/'
            ref_url = url
            mozhdr = {
                'User-Agent':
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
            }
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = url
            self.s = cfscrape.create_scraper()
            mid = re.findall('-(\d*)/', url)[0]
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']

                r = dom_parser2.parse_dom(r,
                                          'li',
                                          req=['data-id', 'data-server'])
                r = [(i.attrs['data-id'], i.attrs['data-server'],
                      dom_parser2.parse_dom(i.content, 'a', req='title')[0])
                     for i in r]
                r = [(i[0], i[1], i[2].content)
                     for i in r]  #r = zip(ids, servers, labels)

                urls = []
                for eid in r:
                    try:
                        ep = re.findall('episode.*?(\d+).*?',
                                        eid[2].lower())[0]
                        ep = '%01d' % int(ep)
                    except BaseException:
                        ep = 0
                    if (episode == 0) or (int(ep) == int(episode)):
                        t = int(time.time() * 1000)
                        url = urlparse.urljoin(
                            self.base_link, self.token_link % (eid[0], mid, t))
                        script = self.s.get(url, headers=headers).content
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script:
                            x = re.search('''_x=['"]([^"']+)''',
                                          script).group(1)
                            y = re.search('''_y=['"]([^"']+)''',
                                          script).group(1)
                            params = {'x': x, 'y': y}
                        else:
                            raise Exception()
                        u = urlparse.urljoin(
                            self.base_link, self.source_link %
                            (eid[0], params['x'], params['y']))
                        length = 0
                        count = 0
                        while length == 0 and count < 11:
                            r = self.s.get(u, headers=headers).content
                            length = len(r)
                            if length == 0:
                                if count == 9:
                                    u = u.replace('_sources', '_embed')
                                count += 1

                        try:
                            frames = re.findall('''file['"]:['"]([^'"]+)''', r)
                            for i in frames:
                                if '.srt' in i: continue
                                urls.append((i, eid[2]))
                        except BaseException:
                            pass

                        r1 = json.loads(r)

                        try:
                            frame = r1['src']
                            urls.append((frame, eid[2]))
                        except BaseException:
                            pass
                        try:
                            frame = r1['playlist'][0]
                            frame = frame['sources'][0]
                            frame = frame['file']
                            urls.append((frame, eid[2]))
                        except BaseException:
                            pass

                for i in urls:

                    s, eid = i[0], i[1]
                    try:
                        if 'googleapis' in s:
                            urls = directstream.googletag(s)
                            if not urls:
                                quality, info = source_utils.get_release_quality(
                                    url, eid)
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': s,
                                    'direct': True,
                                    'debridonly': False
                                })
                            else:
                                for i in urls:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': i['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                        elif 'lh3.' in s:
                            urls = directstream.googletag(s)
                            for i in urls:
                                try:
                                    url2 = directstream.google(
                                        i['url'], ref=ref_url
                                    ) if 'lh3.' in i['url'] else i['url']
                                    if not url2: url2 = i['url']
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': url2,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                except BaseException:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': i['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                        elif 'lemonstream' in s:
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': s,
                                'direct': True,
                                'debridonly': False
                            })
                        elif 'notcool' in s:
                            s = s.replace('\\', '')
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': s,
                                'direct': True,
                                'debridonly': False
                            })
                        else:
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            valid, host = source_utils.is_host_valid(
                                s, hostDict)
                            if valid:
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': s,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except BaseException:
                        pass

            except BaseException:
                pass

            return sources
        except BaseException:
            return sources
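
Despite the different sites, every example above ends by appending dicts with the same handful of keys: source, quality, language, url, direct, debridonly and occasionally info. A small helper that builds that shape in one place (the key set comes from the examples themselves; the defaults are assumptions):

def make_source(source, url, quality='SD', language='en',
                direct=False, debridonly=False, info=None):
    # One place to build the dict shape every sources() above appends.
    item = {'source': source, 'quality': quality, 'language': language,
            'url': url, 'direct': direct, 'debridonly': debridonly}
    if info is not None:
        item['info'] = info
    return item

# e.g. sources.append(make_source('gvideo', link, quality='HD', direct=True))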