Example #1
# Resolve a raw link into a list of playable stream dicts via the add-on's
# directstream helpers; returns (urls, host, direct).
def check_directstreams(url, hoster='', quality='SD'):
	urls = []
	host = hoster

	if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
		urls = directstream.google(url)
		if not urls:
			tag = directstream.googletag(url)
			if tag:
				urls = [{'quality': tag[0]['quality'], 'url': url}]
		if urls:
			host = 'gvideo'

	elif 'ok.ru' in url:
		urls = directstream.odnoklassniki(url)
		if urls:
			host = 'vk'

	elif 'vk.com' in url:
		urls = directstream.vk(url)
		if urls:
			host = 'vk'

	elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
		urls = [{'url': url}]
		host = 'CDN'

	direct = bool(urls)

	if not urls:
		urls = [{'quality': quality, 'url': url}]

	return urls, host, direct
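
For context, here is a minimal usage sketch showing how a scraper might consume the tuple this helper returns. The add_link helper is hypothetical and only for illustration; the hoster argument would normally come from source_utils.is_host_valid as in the examples below, and the appended dictionary keys mirror those examples. Note that the CDN branch above returns entries without a 'quality' key, so the sketch falls back to 'SD'.

# Hypothetical caller, sketched for illustration only.
def add_link(sources, link, hoster):
    urls, host, direct = check_directstreams(link, hoster, quality='SD')
    for x in urls:
        # the CDN branch yields dicts without a 'quality' key, hence the default
        sources.append({'source': host, 'quality': x.get('quality', 'SD'), 'language': 'en',
                        'url': x['url'], 'direct': direct, 'debridonly': False})
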
Example #2
 # Scraper method: scan the page's entry-content block for embedded links
 # (file/link keys, <iframe> and <source> tags) and map them to playable sources.
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         r = client.request(urlparse.urljoin(self.base_link, url))
         r = dom_parser.parse_dom(r, 'article')
         r = dom_parser.parse_dom(r,
                                  'div',
                                  attrs={'class': 'entry-content'})
         # collect candidate links from inline file/link keys plus <iframe> and <source> tags
         links = re.findall(r'''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                            ''.join([i.content for i in r]))
         links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', req='src')]
         links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]
         for i in links:
             try:
                 valid, hoster = source_utils.is_host_valid(i, hostDict)
                 if not valid: continue
                 urls = []
                 if 'google' in i:
                     # try the direct Google resolver first, then googletag,
                     # then fall back to a plain SD entry
                     host = 'gvideo'
                     direct = True
                     urls = directstream.google(i)
                     if not urls:
                         tag = directstream.googletag(i)
                         if tag:
                             urls = [{'quality': tag[0]['quality'], 'url': i}]
                     if not urls:
                         host = hoster
                         direct = False
                         urls = [{'quality': 'SD', 'url': i}]
                 elif 'ok.ru' in i:
                     host = 'vk'
                     direct = True
                     urls = directstream.odnoklassniki(i)
                 elif 'vk.com' in i:
                     host = 'vk'
                     direct = True
                     urls = directstream.vk(i)
                 else:
                     host = hoster
                     direct = False
                     urls = [{'quality': 'SD', 'url': i}]
                 for x in urls:
                     sources.append({'source': host, 'quality': x['quality'], 'language': 'ko',
                                     'url': x['url'], 'direct': direct, 'debridonly': False})
             except:
                 pass
         return sources
     except:
         return sources
Example #3
    # Scraper method: post each item's data-id to the site's video endpoint, follow the
    # returned iframe to the hoster, and also pull any direct gvideo label/file pairs.
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            c = client.request(url, output='cookie')
            result = client.request(url)

            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        if 'ok.ru' in host:
                            okinfo = directstream.odnoklassniki(url)
                            for x in okinfo:
                                sources.append(
                                    {'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'],
                                     'direct': True, 'debridonly': False})

                        else:
                            sources.append(
                                {'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,
                                 'debridonly': False})

                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                ginfo = directstream.google(url)
                                for g in ginfo:
                                    sources.append(
                                        {'source': host, 'quality': g['quality'], 'language': 'en', 'url': g['url'],
                                         'direct': True, 'debridonly': False})
                            else:
                                sources.append(
                                    {'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,
                                     'debridonly': False})
                    except BaseException:
                        pass

                    captions = re.search(r'''["']?kind["']?\s*:\s*(?:\'|\")captions(?:\'|\")''', result)
                    if not captions: continue

                    # capture (label, url) pairs whether the label or the file comes first
                    matches = [(match[0], match[1]) for match in re.findall(
                        r'''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''',
                        result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall(
                        r'''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
                        result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]), x[1].replace('\\/', '/')) for x in matches]
                    result = [i for i in result if not i[1].endswith('.vtt')]

                    for quality, url in result:
                        sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url,
                                        'direct': True, 'debridonly': False})
                except BaseException:
                    pass

            return sources
        except BaseException:
            return sources
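
All three examples append dictionaries with the same six keys. As a closing note, a small helper along these lines (hypothetical, not part of the add-on) makes that shared shape explicit:

def make_source(host, url, quality='SD', language='en', direct=False, debridonly=False):
    # common entry shape used by the sources() methods above
    return {'source': host, 'quality': quality, 'language': language,
            'url': url, 'direct': direct, 'debridonly': debridonly}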