Example no. 1
0
def check_directstreams(url, hoster='', quality='SD'):
	"""Resolve *url* into a list of playable stream dicts.

	Known direct-stream hosts (Google video, ok.ru, vk.com and a few
	CDNs) are resolved through the ``directstream`` helpers; any other
	URL falls through to a single entry using the caller-supplied
	*quality*.

	Returns a ``(urls, host, direct)`` tuple: *urls* is a list of
	``{'quality': ..., 'url': ...}`` dicts, *host* is the display name
	of the resolved host, and *direct* tells whether the URLs are
	directly playable (no further resolving required).
	"""
	urls = []
	host = hoster

	if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
		urls = directstream.google(url)
		if not urls:
			# Fall back to the googletag lookup when google() resolves nothing.
			tag = directstream.googletag(url)
			if tag:
				urls = [{'quality': tag[0]['quality'], 'url': url}]
		if urls:
			host = 'gvideo'

	elif 'ok.ru' in url:
		# NOTE(review): odnoklassniki links are reported under the 'vk'
		# host label — presumably resolved by the same backend; confirm.
		urls = directstream.odnoklassniki(url)
		if urls:
			host = 'vk'

	elif 'vk.com' in url:
		urls = directstream.vk(url)
		if urls:
			host = 'vk'

	elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
		# Fix: include the 'quality' key so callers that read x['quality']
		# (the standard downstream pattern) do not raise KeyError.
		urls = [{'quality': quality, 'url': url}]
		host = 'CDN'

	direct = bool(urls)

	if not urls:
		urls = [{'quality': quality, 'url': url}]

	return urls, host, direct
    def sources(self, url, hostDict, hostprDict):
        """Scrape playable sources for the given show/movie page.

        *url* is a path relative to ``self.base_link``; *hostDict* is the
        list of known file hosters used for validation.  Returns a list
        of source dicts ({'source', 'quality', 'language', 'url',
        'direct', 'debridonly'}); all errors are swallowed, yielding a
        partial or empty list.
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            # First request only captures the session cookie; the second
            # fetches the actual page body.
            c = client.request(url, output='cookie')
            result = client.request(url)

            # Strip non-ASCII bytes so the regexes below cannot choke.
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            # Each <div class="item" data-id="..."> is one mirror/page id.
            pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    # Ask the player endpoint for this mirror's embed HTML.
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    # Embed iframe; normalise protocol-relative and
                    # site-relative URLs to absolute ones.
                    url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

                    # Only '.asp' embeds carry a second-level player page that
                    # may expose direct (gvideo) streams; others are done.
                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        # Nested iframe, sometimes wrapped in an href.li
                        # redirector that must be stripped first.
                        url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                ginfo = directstream.google(url)
                                for g in ginfo: sources.append({'source': host, 'quality': g['quality'], 'language': 'en', 'url': g['url'], 'direct': True, 'debridonly': False})
                            else: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    except: pass

                    # A player "captions" track marks configs that also carry
                    # direct file/label pairs; skip pages without it.
                    captions = re.search('''["']?kind["']?\s*:\s*(?:\'|\")captions(?:\'|\")''', result)
                    if not captions: continue

                    # Collect (label, file) pairs in either attribute order.
                    matches = [(match[0], match[1]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', result, re.DOTALL | re.I)]

                    # Map labels to quality tags, unescape the URLs and drop
                    # subtitle (.vtt) tracks.
                    result = [(source_utils.label_to_quality(x[0]), x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result if not i[1].endswith('.vtt')]

                    for quality, url in result: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example no. 3
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape German ('de') sources for the given page.

     Selects the player tabs flagged with the German flag icon, gathers
     candidate links from script configs, metaframe iframes and
     <source> tags, unpacks obfuscated self-hosted players into direct
     gvideo streams, and validates/resolves foreign hosters through
     ``source_utils.check_directstreams``.
     """
     sources = []
     try:
         if not url:
             return sources
         url = urlparse.urljoin(self.base_link, url)
         # 'extended' output: r[0]=page body, r[2]=response headers
         # (Set-Cookie), r[3]=request headers — presumably; confirm
         # against client.request.
         r = client.request(url, output='extended')
         headers = r[3]
         headers.update({
             'Cookie': r[2].get('Set-Cookie'),
             'Referer': self.base_link
         })
         r = r[0]
         # Player tabs: pair each tab's anchor (href = "#<div id>") with
         # its flag image, then keep only the German ones.
         rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
         rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
         rels = dom_parser.parse_dom(rels, 'li')
         rels = [(dom_parser.parse_dom(i,
                                       'a',
                                       attrs={'class': 'options'},
                                       req='href'),
                  dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
         rels = [(i[0][0].attrs['href'][1:],
                  re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                 for i in rels if i[0] and i[1]]
         rels = [
             i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
         ]
         r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
         # Candidate links: JS player configs, metaframe iframes and
         # <source> tags from the selected tab panes.
         links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                            ''.join([i[0].content for i in r]))
         links += [
             l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                 i, 'iframe', attrs={'class': 'metaframe'}, req='src')
         ]
         links += [
             l.attrs['src'] for i in r
             for l in dom_parser.parse_dom(i, 'source', req='src')
         ]
         for i in links:
             try:
                 # Strip BB-code style tags and HTML entities.
                 i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                 i = client.replaceHTMLCodes(i)
                 if '/play/' in i: i = urlparse.urljoin(self.base_link, i)
                 if self.domains[0] in i:
                     # Self-hosted player: fetch it, then append every
                     # unpacked payload (base64-obfuscated and
                     # eval/p.a.c.k.e.d scripts) to the page text so the
                     # file/label regex below can see them.
                     i = client.request(i, headers=headers, referer=url)
                     for x in re.findall('''\(["']?(.*)["']?\)''', i):
                         try:
                             i += jsunpack.unpack(
                                 base64.decodestring(
                                     re.sub('"\s*\+\s*"', '',
                                            x))).replace('\\', '')
                         except:
                             pass
                     for x in re.findall('(eval\s*\(function.*?)</script>',
                                         i, re.DOTALL):
                         try:
                             i += jsunpack.unpack(x).replace('\\', '')
                         except:
                             pass
                     # NOTE(review): 'links' is rebound here while the outer
                     # loop iterates it — iteration still walks the original
                     # list object, but the shadowing is fragile.
                     links = [(match[0], match[1]) for match in re.findall(
                         '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                         i, re.DOTALL)]
                     links = [(x[0].replace('\/', '/'),
                               source_utils.label_to_quality(x[1]))
                              for x in links if '/no-video.mp4' not in x[0]]
                     # youtu.be ids here are really Google Drive doc ids.
                     doc_links = [
                         directstream.google(
                             'https://drive.google.com/file/d/%s/view' %
                             match)
                         for match in re.findall(
                             '''file:\s*["'](?:[^"']+youtu.be/([^"']+))''',
                             i, re.DOTALL)
                     ]
                     doc_links = [(u['url'], u['quality'])
                                  for x in doc_links if x for u in x]
                     links += doc_links
                     for url, quality in links:
                         # Same-site streams need the Referer pinned in the URL.
                         if self.base_link in url:
                             url = url + '|Referer=' + self.base_link
                         sources.append({
                             'source': 'gvideo',
                             'quality': quality,
                             'language': 'de',
                             'url': url,
                             'direct': True,
                             'debridonly': False
                         })
                 else:
                     try:
                         # as long as URLResolver get no Update for this URL (So just a Temp-Solution)
                         did = re.findall(
                             'youtube.googleapis.com.*?docid=(\w+)', i)
                         if did:
                             i = 'https://drive.google.com/file/d/%s/view' % did[
                                 0]
                         valid, host = source_utils.is_host_valid(
                             i, hostDict)
                         if not valid: continue
                         urls, host, direct = source_utils.check_directstreams(
                             i, host)
                         for x in urls:
                             sources.append({
                                 'source': host,
                                 'quality': x['quality'],
                                 'language': 'de',
                                 'url': x['url'],
                                 'direct': direct,
                                 'debridonly': False
                             })
                     except:
                         pass
             except:
                 pass
         return sources
     except:
         return sources
Example no. 4
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape Korean ('ko') sources from the article body of *url*.

     Collects candidate links from JS player configs, iframes and
     <source> tags, resolves known direct-stream hosts (Google video,
     ok.ru, vk.com) and falls back to a plain SD hoster entry when no
     direct stream could be resolved.

     Fix: the original "if / if..elif..else" chain always fell through
     to the generic 'else' branch when directstream.google() succeeded
     (none of the second chain's conditions held), clobbering the
     resolved gvideo URLs with a plain SD entry.  The chain is now one
     properly nested if/elif, and the SD fallback applies only when
     nothing was resolved.
     """
     sources = []
     try:
         if not url:
             return sources
         r = client.request(urlparse.urljoin(self.base_link, url))
         r = dom_parser.parse_dom(r, 'article')
         r = dom_parser.parse_dom(r,
                                  'div',
                                  attrs={'class': 'entry-content'})
         # Candidate links: JS player configs, iframes and <source> tags.
         links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                            ''.join([i.content for i in r]))
         links += [
             l.attrs['src'] for i in r
             for l in dom_parser.parse_dom(i, 'iframe', req='src')
         ]
         links += [
             l.attrs['src'] for i in r
             for l in dom_parser.parse_dom(i, 'source', req='src')
         ]
         for i in links:
             try:
                 valid, hoster = source_utils.is_host_valid(i, hostDict)
                 if not valid: continue
                 urls = []
                 direct = True
                 if 'google' in i:
                     host = 'gvideo'
                     urls = directstream.google(i)
                     if not urls:
                         # Secondary googletag lookup when google() fails.
                         tag = directstream.googletag(i)
                         if tag:
                             urls = [{'quality': tag[0]['quality'],
                                      'url': i}]
                 elif 'ok.ru' in i:
                     host = 'vk'
                     urls = directstream.odnoklassniki(i)
                 elif 'vk.com' in i:
                     host = 'vk'
                     urls = directstream.vk(i)
                 if not urls:
                     # Nothing resolved directly: hand the raw link to the
                     # hoster resolver at SD quality.
                     host = hoster
                     direct = False
                     urls = [{'quality': 'SD', 'url': i}]
                 for x in urls:
                     sources.append({
                         'source': host,
                         'quality': x['quality'],
                         'language': 'ko',
                         'url': x['url'],
                         'direct': direct,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources
Example no. 5
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve sources for a movie or episode.

        *url* is a urlencoded query string (title/year or
        tvshowtitle/season/episode plus aliases).  The site serves
        per-server obfuscated token scripts that are decoded
        (uncensored1 / uncensored2 / _x,_y) before the playlist JSON can
        be fetched.  Returns a list of source dicts; all errors are
        swallowed by the bare excepts.
        """
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            # SECURITY NOTE(review): eval() on parameter data — should be
            # ast.literal_eval or json.loads.
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent':
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
            }
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            headers['Referer'] = url
            ref_url = url
            # Media id is the trailing number in the page URL ("...-123.").
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            # NOTE(review): 'data' is never passed to this POST — looks like
            # a leftover; the request presumably just primes the session.
            r = self.scraper.post(url, headers=headers).content
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.scraper.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                # Map server data-id -> css class ('embed' vs token-based).
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'),
                          client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            # Episode number parsed from the link label.
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % (eid[0]))
                                # NOTE(review): 'scraperget' looks like a typo
                                # for 'scraper.get'; the bare except around
                                # this loop body silently hides the
                                # AttributeError, so embed servers may never
                                # yield sources — confirm and fix upstream.
                                xml = self.scraperget(url,
                                                      headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                # Fall back to the label quality when the URL
                                # itself gives no hint.
                                q = q if q != 'SD' else quali
                                sources.append({
                                    'source': hoster,
                                    'quality': q,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                                continue
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid, t))
                            # De-obfuscate the token script into x/y params.
                            script = self.scraper.get(url,
                                                      headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith(
                                    '()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''',
                                              script).group(1)
                                y = re.search('''_y=['"]([^"']+)''',
                                              script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(
                                self.base_link, self.source_link %
                                (eid[0], params['x'], params['y']))
                            # The sources endpoint may reply empty; retry a
                            # bounded number of times.
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.scraper.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                # 'sources' is either a list of dicts or a
                                # single dict with a 'file' key.
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                    continue

                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(
                                                url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url,
                                                                  ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append({
                                        'source': hoster,
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': direct,
                                        'debridonly': False
                                    })
                                else:
                                    # Unrecognised hoster: assume a direct
                                    # CDN link.
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example no. 6
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve sources for a movie or episode (token-based site API).

        Searches for the content page, extracts per-server ids,
        de-obfuscates token scripts into x/y parameters, collects every
        stream URL from the playlist/embed responses, then classifies
        each one (gvideo, lh3 thumbnails, known CDNs, or generic
        hosters).  All errors are swallowed.
        """
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            # SECURITY NOTE(review): eval() on parameter data — should be
            # ast.literal_eval or json.loads.
            aliases = eval(data['aliases'])
            headers = {}
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)
            url += '/'
            ref_url = url
            mozhdr = {
                'User-Agent':
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
            }
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = url
            # Cloudflare-aware session shared by the requests below.
            self.s = cfscrape.create_scraper()
            # Media id: trailing number of the page URL ("...-123/").
            mid = re.findall('-(\d*)/', url)[0]
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']

                r = dom_parser2.parse_dom(r,
                                          'li',
                                          req=['data-id', 'data-server'])
                r = [(i.attrs['data-id'], i.attrs['data-server'],
                      dom_parser2.parse_dom(i.content, 'a', req='title')[0])
                     for i in r]
                r = [(i[0], i[1], i[2].content)
                     for i in r]  #r = zip(ids, servers, labels)

                urls = []
                for eid in r:
                    try:
                        # Episode number parsed from the link label.
                        ep = re.findall('episode.*?(\d+).*?',
                                        eid[2].lower())[0]
                        ep = '%01d' % int(ep)
                    except BaseException:
                        ep = 0
                    if (episode == 0) or (int(ep) == int(episode)):
                        t = int(time.time() * 1000)
                        url = urlparse.urljoin(
                            self.base_link, self.token_link % (eid[0], mid, t))
                        script = self.s.get(url, headers=headers).content
                        # De-obfuscate the token script into x/y params.
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script:
                            x = re.search('''_x=['"]([^"']+)''',
                                          script).group(1)
                            y = re.search('''_y=['"]([^"']+)''',
                                          script).group(1)
                            params = {'x': x, 'y': y}
                        else:
                            raise Exception()
                        u = urlparse.urljoin(
                            self.base_link, self.source_link %
                            (eid[0], params['x'], params['y']))
                        # Retry empty responses; after the 10th attempt switch
                        # from the '_sources' to the '_embed' endpoint.
                        length = 0
                        count = 0
                        while length == 0 and count < 11:
                            r = self.s.get(u, headers=headers).content
                            length = len(r)
                            if length == 0:
                                if count == 9:
                                    u = u.replace('_sources', '_embed')
                                count += 1

                        try:
                            # Raw 'file' entries (subtitle tracks skipped).
                            frames = re.findall('''file['"]:['"]([^'"]+)''', r)
                            for i in frames:
                                if '.srt' in i: continue
                                urls.append((i, eid[2]))
                        except BaseException:
                            pass

                        r1 = json.loads(r)

                        # Embed-style response: a single 'src'.
                        try:
                            frame = r1['src']
                            urls.append((frame, eid[2]))
                        except BaseException:
                            pass
                        # Playlist-style response: first source file.
                        try:
                            frame = r1['playlist'][0]
                            frame = frame['sources'][0]
                            frame = frame['file']
                            urls.append((frame, eid[2]))
                        except BaseException:
                            pass

                for i in urls:

                    # NOTE(review): the branches below rebind both 'urls' and
                    # 'i'; iteration still walks the original list object, but
                    # the shadowing is fragile.
                    s, eid = i[0], i[1]
                    try:
                        if 'googleapis' in s:
                            urls = directstream.googletag(s)
                            if not urls:
                                # NOTE(review): passes the stale 'url' (last
                                # token URL) instead of 's' — looks like a
                                # bug; confirm intent.
                                quality, info = source_utils.get_release_quality(
                                    url, eid)
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': s,
                                    'direct': True,
                                    'debridonly': False
                                })
                            else:
                                for i in urls:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': i['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                        elif 'lh3.' in s:
                            # Google usercontent links — resolve to the real
                            # stream when possible, else keep the tag URL.
                            urls = directstream.googletag(s)
                            for i in urls:
                                try:
                                    url2 = directstream.google(
                                        i['url'], ref=ref_url
                                    ) if 'lh3.' in i['url'] else i['url']
                                    if not url2: url2 = i['url']
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': url2,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                except BaseException:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': i['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                        elif 'lemonstream' in s:
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': s,
                                'direct': True,
                                'debridonly': False
                            })
                        elif 'notcool' in s:
                            # Unescape backslash-escaped URLs first.
                            s = s.replace('\\', '')
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': s,
                                'direct': True,
                                'debridonly': False
                            })
                        else:
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            valid, host = source_utils.is_host_valid(
                                s, hostDict)
                            if valid:
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': s,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except BaseException:
                        pass

            except BaseException:
                pass

            return sources
        except BaseException:
            return sources