Example #1
def check_directstreams(url, hoster='', quality='SD'):
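    # Probe known direct-stream hosts (Google Video / YouTube doc links, Odnoklassniki,
    # VK, and a few plain CDNs); if none match or resolve, fall back to the original URL
    # with the caller-supplied quality.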
    urls = []
    host = hoster

    if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
        urls = directstream.google(url)
        if not urls:
            tag = directstream.googletag(url)
            if tag: urls = [{'quality': tag[0]['quality'], 'url': url}]
        if urls: host = 'gvideo'
    elif 'ok.ru' in url:
        urls = directstream.odnoklassniki(url)
        if urls: host = 'vk'
    elif 'vk.com' in url:
        urls = directstream.vk(url)
        if urls: host = 'vk'
    elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
        urls = [{'url': url}]
        if urls: host = 'CDN'

    direct = bool(urls)

    if not urls: urls = [{'quality': quality, 'url': url}]

    return urls, host, direct
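
Example #3 below calls this helper (in a commented-out line) as source_utils.check_directstreams(url, hoster). A minimal, hypothetical sketch of how a caller could consume the returned (urls, host, direct) triple, assuming the helper is exposed via source_utils and that url, hoster and a sources list already exist on the caller's side:

# Hypothetical caller-side usage (url, hoster and sources are assumed to exist):
urls, host, direct = source_utils.check_directstreams(url, hoster)
for stream in urls:
    sources.append({'source': host, 'quality': stream.get('quality', 'SD'),
                    'url': stream['url'], 'direct': direct, 'debridonly': False})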
Example #2
File: dayt_mv_tv.py  Project: mpie/repo
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []
            if url is None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

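            # Build the site's URL slug: strip filesystem-unsafe characters
            # (Python 2 str.translate form) and hyphenate the lowercased title.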
            title = (data['title'].translate(None, '\/:*?"\'<>|!,')).replace(
                ' ', '-').replace('--', '-').lower()
            try:
                is_movie = not (int(data['episode']) > 0)
            except:
                is_movie = True

            if is_movie:
                url = urlparse.urljoin(self.base_link, self.watch_link % title)
            else:
                url = urlparse.urljoin(
                    self.base_link, self.watch_series_link %
                    (title, data['season'], data['episode']))

            r = client.request(url, output='geturl')

            if r is None: raise Exception()

            r = client.request(url)
            r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            result = r

            y = re.findall('Date\s*:\s*.+?>.+?(\d{4})', r)
            y = y[0] if len(y) > 0 else None

            if is_movie:
                if not (data['imdb'] in r or data['year'] == y):
                    raise Exception()

            q = client.parseDOM(r, 'title')
            q = q[0] if len(q) > 0 else None

            quality = '1080p' if q and ' 1080' in q else 'HD'
            r = client.parseDOM(r, 'div', attrs={'id': '5throw'})[0]
            r = client.parseDOM(r, 'a', ret='href', attrs={'rel': 'nofollow'})

            links = []

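            # Collect Yandex Disk and Mail.ru mirrors linked from the '5throw' block.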
            for url in r:
                try:
                    if 'yadi.sk' in url:
                        url = directstream.yandex(url)
                    elif 'mail.ru' in url:
                        url = directstream.cldmailru(url)
                    else:
                        raise Exception()

                    if url is None: raise Exception()
                    links += [{
                        'source': 'cdn',
                        'url': url,
                        'quality': quality,
                        'direct': False
                    }]
                except:
                    pass

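            # Follow nested iframes (up to four hops) looking for a Google Video embed.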
            try:
                r = client.parseDOM(result, 'iframe', ret='src')
                if is_movie:
                    r = [i for i in r if 'pasmov' in i][0]
                else:
                    r = [i for i in r if 'pasep' in i][0]

                for i in range(0, 4):
                    try:
                        if not r.startswith('http'):
                            r = urlparse.urljoin(self.base_link, r)
                        r = client.request(r)
                        r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                        r = client.parseDOM(r, 'iframe', ret='src')[0]
                        if 'google' in r: break
                    except:
                        break

                if 'google' not in r: raise Exception()
                r = directstream.google(r)

                for i in r:
                    try:
                        links += [{
                            'source': 'gvideo',
                            'url': i['url'],
                            'quality': i['quality'],
                            'direct': True
                        }]
                    except:
                        pass
            except:
                pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Dayt',
                    'url': i['url'],
                    'direct': i['direct'],
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
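            # 'aliases' arrives as a stringified Python list; eval is kept as-is here
            # (ast.literal_eval would be a safer drop-in).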
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
            headers = dict(mozhdr)  # copy, so the XHR header below does not leak into mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            headers['Referer'] = url
            ref_url = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.s.post(url, headers=headers)
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

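                # Walk the listed mirrors; skip entries whose label does not match the requested episode.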
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append(
                                    {'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False,
                                     'debridonly': False})
                                continue
                            else:
                                url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                            script = self.s.get(url, headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
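                            # Poll the source endpoint until it returns a non-empty body (at most 11 attempts).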
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append(
                                        {'source': 'gvideo', 'quality': q, 'language': 'en', 'url': url, 'direct': True,
                                         'debridonly': False})
                                    continue

                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url, ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append(
                                        {'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': direct,
                                         'debridonly': False})
                                else:
                                    sources.append(
                                        {'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True,
                                         'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example #4
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            c = client.request(url, output='cookie')
            result = client.request(url)

            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result,
                                         'div',
                                         attrs={'class': 'item'},
                                         req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

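            # Each 'item' div carries a data-id; POST it back to fetch that item's embedded player.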
            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe',
                                               req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'):
                        url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({
                            'source': host,
                            'quality': 'HD',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })

                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        url = dom_parser.parse_dom(result, 'iframe',
                                                   req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                ginfo = directstream.google(url)
                                for g in ginfo:
                                    sources.append({
                                        'source': host,
                                        'quality': g['quality'],
                                        'language': 'en',
                                        'url': g['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                            else:
                                sources.append({
                                    'source': host,
                                    'quality': 'HD',
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except:
                        pass

                    captions = re.search(
                        '''["']?kind["']?\s*:\s*(?:\'|\")captions(?:\'|\")''',
                        result)
                    if not captions: continue

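                    # Pull label/file pairs out of the player config; the two keys can appear in either order.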
                    matches = [(match[0], match[1]) for match in re.findall(
                        '''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''',
                        result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall(
                        '''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
                        result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]),
                               x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result
                              if not i[1].endswith('.vtt')]

                    for quality, url in result:
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #5
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if (self.user == '' or self.password == ''): raise Exception()
            login = urlparse.urljoin(self.base_link, '/login')
            post = {
                'username': self.user,
                'password': self.password,
                'returnpath': '/'
            }
            post = urllib.urlencode(post)

            headers = {'User-Agent': client.randomagent()}
            rlogin = client.request(login,
                                    headers=headers,
                                    post=post,
                                    output='extended')
            guid = re.findall('(.*?);\s', rlogin[2]['Set-Cookie'])[0]
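            # Assumes client.request added a Cookie header to `headers` during the login call;
            # the session GUID is appended to it.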
            headers['Cookie'] += '; ' + guid
            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, headers=headers)

            url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

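            # First try the encrypted 'mplanet' parameter; when present it decrypts to a Google Video link.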
            try:
                dec = re.findall('mplanet\*(.+)', url)[0]
                dec = dec.rsplit('&')[0]
                dec = self._gkdecrypt(
                    base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            result = client.request(url, headers=headers)

            try:
                url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")',
                                 result)
                for i in url:
                    try:
                        links.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'url': i
                        })
                    except:
                        pass
            except:
                pass

            try:
                url = client.parseDOM(result, 'source', ret='src')
                url += re.findall('src\s*:\s*\'(.*?)\'', result)
                url = [i for i in url if '://' in i]
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
            except:
                pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'language': 'en',
                    'url': i['url'],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #6
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)

            r = client.request(ref)

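            # Look up the embedded player id, then ask the player endpoint where the actual stream lives.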
            p = re.findall('load_player\((\d+)\)', r)
            r = client.request(urlparse.urljoin(self.base_link,
                                                self.player_link),
                               post={'id': p[0]},
                               referer=ref,
                               XHR=True)
            url = json.loads(r).get('value')
            link = client.request(url, XHR=True, output='geturl', referer=ref)

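            # Streams hosted on 1movies itself expose a JWPlayer-style file/label list;
            # anything else is treated as an external host.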
            if '1movies.' in link:
                r = client.request(link, XHR=True, referer=ref)
                r = [(match[1], match[0]) for match in re.findall(
                    '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                    r, re.DOTALL)]
                r = [(re.sub('[^\d]+', '', x[0]), x[1].replace('\/', '/'))
                     for x in r]
                r = [x for x in r if x[0]]

                # Bucket each stream by pixel height so every URL gets a single quality label.
                links = [(x[1], '4K') for x in r if int(x[0]) >= 2160]
                links += [(x[1], '1440p') for x in r if 1440 <= int(x[0]) < 2160]
                links += [(x[1], '1080p') for x in r if 1080 <= int(x[0]) < 1440]
                links += [(x[1], 'HD') for x in r if 720 <= int(x[0]) < 1080]
                links += [(x[1], 'SD') for x in r if int(x[0]) < 720]

                for url, quality in links:
                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })
            else:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: return sources

                urls = []
                if 'google' in link:
                    host = 'gvideo'
                    direct = True
                    urls = directstream.google(link)
                if 'google' in link and not urls and directstream.googletag(link):
                    host = 'gvideo'
                    direct = True
                    urls = [{
                        'quality': directstream.googletag(link)[0]['quality'],
                        'url': link
                    }]
                elif 'ok.ru' in link:
                    host = 'vk'
                    direct = True
                    urls = directstream.odnoklassniki(link)
                elif 'vk.com' in link:
                    host = 'vk'
                    direct = True
                    urls = directstream.vk(link)
                else:
                    direct = False
                    urls = [{'quality': 'HD', 'url': link}]

                for x in urls:
                    sources.append({
                        'source': host,
                        'quality': x['quality'],
                        'language': 'en',
                        'url': x['url'],
                        'direct': direct,
                        'debridonly': False
                    })

            return sources
        except:
            return sources