Code example #1
    def resolve(self, url):
        try:
            if 'google' in url:
                ch = []
                from resources.lib.modules import directstream
                res = directstream.google(url)
                for x in res:
                    ch += [
                        x['quality'].replace('HD',
                                             '720p').replace('SD', '480p')
                    ]

                index = control.dialog.select('Select quality:', ch)
                if index > -1:
                    return res[index]['url']

            elif 'my.pcloud' in url:
                from resources.lib.resolvers import pcloud
                return pcloud.resolve(url)

            else:
                import urlresolver
                return urlresolver.resolve(url)
        except:
            return ''
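Read together with example #20 below (the same resolver), this snippet implies that directstream.google(url) returns a list of dicts with 'quality' and 'url' keys, where quality labels are values such as 'HD'/'SD' or explicit heights like '720p'. As a minimal, non-authoritative sketch built only on that inferred shape, a resolver could skip the selection dialog and pick the best entry automatically; the helper name and the quality ordering below are assumptions, not part of the add-on.

# Sketch only (not from the original add-on): pick the best directstream.google()
# result without prompting. The return shape is inferred from the example above.
QUALITY_ORDER = ['4K', '1440p', '1080p', 'HD', '720p', 'SD', '480p', '360p']

def pick_best_google_stream(url):
    from resources.lib.modules import directstream  # same import as in the example
    streams = directstream.google(url) or []
    if not streams:
        return None

    def rank(s):
        # Lower index means higher quality; unknown labels sort last.
        q = s.get('quality')
        return QUALITY_ORDER.index(q) if q in QUALITY_ORDER else len(QUALITY_ORDER)

    return sorted(streams, key=rank)[0]['url']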
Code example #2
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'article')
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'entry-content'})

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i.content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                    if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                    else: host = hoster; direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #3
File: source_utils.py Project: CYBERxNUKE/xbmc-addon
def check_directstreams(url, hoster='', quality='SD'):
    urls = []
    host = hoster

    if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
        urls = directstream.google(url)
        if not urls:
            tag = directstream.googletag(url)
            if tag: urls = [{'quality': tag[0]['quality'], 'url': url}]
        if urls: host = 'gvideo'
    elif 'ok.ru' in url:
        urls = directstream.odnoklassniki(url)
        if urls: host = 'vk'
    elif 'vk.com' in url:
        urls = directstream.vk(url)
        if urls: host = 'vk'
    elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
        urls = [{'url': url}]
        if urls: host = 'CDN'
        
    direct = True if urls else False

    if not urls: urls = [{'quality': quality, 'url': url}]

    return urls, host, direct
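Scrapers that call this helper (see example #23 below) consume the returned (urls, host, direct) tuple in the same way each time. The following is a hedged sketch of that consumption pattern: the wrapper name is hypothetical, source_utils is assumed importable from resources.lib.modules as in the examples above, and x.get('quality', 'SD') guards the CDN branch, which returns entries without a 'quality' key.

# Sketch only: mirrors how the scrapers above turn check_directstreams() output
# into source dicts. The import path is assumed from the add-on examples.
from resources.lib.modules import source_utils

def append_direct_sources(sources, link, hostDict, language='en'):
    valid, hoster = source_utils.is_host_valid(link, hostDict)
    if not valid:
        return sources
    urls, host, direct = source_utils.check_directstreams(link, hoster)
    for x in urls:
        sources.append({'source': host,
                        'quality': x.get('quality', 'SD'),  # CDN branch has no 'quality' key
                        'language': language,
                        'url': x['url'],
                        'direct': direct,
                        'debridonly': False})
    return sources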
Code example #4
File: iheartdrama.py Project: mpie/repo
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'article')
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'entry-content'})

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i.content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                    if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                    else: host = hoster; direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #5
    def resolve(self, url):
        try:
            url = urlparse.urljoin(self.base_link, url)

            recap = recaptcha_app.recaptchaApp()

            key = recap.getSolutionWithDialog(
                url, "6LeERkUUAAAAAJH4Yqk-gQH1N6psg0KCuEq_Lkxf",
                self.recapInfo)
            print "Recaptcha2 Key: " + key
            response = None
            if key != "" and "skipped" not in key.lower():
                response = self.scraper.post(
                    url,
                    data={'g-recaptcha-response': key},
                    allow_redirects=True)
            elif not response or "skipped" in key.lower():
                return

            if response is not None:
                url = response.url

            if self.base_link not in url:
                if 'google' in url:
                    return directstream.google(url)[0]['url']
            return url
        except Exception as e:
            source_faultlog.logFault(__name__, source_faultlog.tagResolve)
            return
Code example #6
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            url = client.parseDOM(result, 'embed', ret='src')[0]
            url = client.replaceHTMLCodes(url)

            url = 'https://docs.google.com/file/d/%s/preview' % urlparse.parse_qs(
                urlparse.urlparse(url).query)['docid'][0]

            url = directstream.google(url)

            for i in url:
                sources.append({
                    'source': 'gvideo',
                    'quality': i['quality'],
                    'provider': 'Xmovies',
                    'url': i['url'],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Code example #7
def check_directstreams(url, hoster="", quality="SD"):
    urls = []
    host = hoster

    if "google" in url or any(x in url for x in ["youtube.", "docid="]):
        urls = directstream.google(url)
        if not urls:
            tag = directstream.googletag(url)
            if tag:
                urls = [{"quality": tag[0]["quality"], "url": url}]
        if urls:
            host = "gvideo"
    elif "ok.ru" in url:
        urls = directstream.odnoklassniki(url)
        if urls:
            host = "vk"
    elif "vk.com" in url:
        urls = directstream.vk(url)
        if urls:
            host = "vk"
    elif any(x in url for x in ["akamaized", "blogspot", "ocloud.stream"]):
        urls = [{"url": url}]
        if urls:
            host = "CDN"

    direct = True if urls else False

    if not urls:
        urls = [{"quality": quality, "url": url}]

    return urls, host, direct
Code example #8
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()

            login = urlparse.urljoin(self.base_link, '/login')
            post = {'username': self.user, 'password': self.password, 'action': 'login'}
            post = urllib.urlencode(post)

            cookie = client.request(login, post=post, XHR=True, output='cookie')


            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, cookie=cookie)

            url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')


            links = []

            try:
                dec = re.findall('mplanet\*(.+)', url)[0]
                dec = dec.rsplit('&')[0]
                dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            result = client.request(url)

            try:
                url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
                for i in url:
                    try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                    except: pass
            except:
                pass

            try:
                url = client.parseDOM(result, 'source', ret='src')
                url += re.findall('src\s*:\s*\'(.*?)\'', result)
                url = [i for i in url if '://' in i]
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
            except:
                pass


            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code example #9
def check_directstreams(url, hoster='', quality='SD'):
    urls = []
    host = hoster

    if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
        urls = directstream.google(url)
        if not urls:
            tag = directstream.googletag(url)
            if tag: urls = [{'quality': tag[0]['quality'], 'url': url}]
        if urls: host = 'gvideo'
    elif 'ok.ru' in url:
        urls = directstream.odnoklassniki(url)
        if urls: host = 'vk'
    elif 'vk.com' in url:
        urls = directstream.vk(url)
        if urls: host = 'vk'
    elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
        urls = [{'url': url}]
        if urls: host = 'CDN'

    direct = True if urls else False

    if not urls: urls = [{'quality': quality, 'url': url}]

    return urls, host, direct
Code example #10
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()
            login = urlparse.urljoin(self.base_link, '/login')
            post = {'username': self.user, 'password': self.password, 'returnpath': '/'}
            post = urllib.urlencode(post)

            headers = {'User-Agent':client.randomagent()}
            rlogin = client.request(login, headers=headers, post=post, output='extended')
            guid = re.findall('(.*?);\s', rlogin[2]['Set-Cookie'])[0]
            headers['Cookie'] += '; '+guid
            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, headers=headers)

            url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            try:
                dec = re.findall('mplanet\*(.+)', url)[0]
                dec = dec.rsplit('&')[0]
                dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            result = client.request(url, headers=headers)

            try:
                url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
                for i in url:
                    try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                    except: pass
            except:
                pass

            try:
                url = client.parseDOM(result, 'source', ret='src')
                url += re.findall('src\s*:\s*\'(.*?)\'', result)
                url = [i for i in url if '://' in i]
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
            except:
                pass

            for i in links:
                sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code example #11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            links = []

            try:
                try: url = re.compile('proxy\.link=([^"&]+)').findall(result)[0]
                except: url = client.source(re.compile('proxy\.list=([^"&]+)').findall(result)[0])

                url = url.split('*', 1)[-1].rsplit('<')[0]

                dec = self._gkdecrypt(base64.b64decode('aUJocnZjOGdGZENaQWh3V2huUm0='), url)
                if not 'http' in dec: dec = self._gkdecrypt(base64.b64decode('QjZVTUMxUms3VFJBVU56V3hraHI='), url)

                url = directstream.google(dec)

                links += [(i['url'], i['quality']) for i in url]
            except:
                pass

            try:
                url = 'http://miradetodo.com.ar/gkphp/plugins/gkpluginsphp.php'

                post = client.parseDOM(result, 'div', attrs = {'class': 'player.+?'})[0]
                post = post.replace('iframe', 'IFRAME')
                post = client.parseDOM(post, 'IFRAME', ret='.+?')[0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)

                result = ''
                try: result += client.source(url, post=urllib.urlencode({'link': post['id'][0]}))
                except: pass
                try: result += client.source(url, post=urllib.urlencode({'link': post['id1'][0]}))
                except: pass
                try: result += client.source(url, post=urllib.urlencode({'link': post['id2'][0]}))
                except: pass

                result = re.compile('"?link"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
                result = [(i[0].replace('\\/', '/'), i[1])  for i in result]

                links += [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
                links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]
                if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in result if 360 <= int(i[1]) < 480]
            except:
                pass

            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'MiraDeTodo', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
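Several examples (this one, #12, and #16 below) map a numeric pixel-height label onto the quality strings used in these source dicts with inline threshold checks. Below is a standalone sketch of that mapping; the thresholds are copied from those examples, and the helper name is not part of any of the add-ons.

import re

# Sketch: numeric label -> quality tag, using the thresholds seen in examples #11 and #16.
def height_to_quality(label):
    digits = re.sub('[^0-9]', '', str(label))
    if not digits:
        return 'SD'
    height = int(digits)
    if height >= 2160: return '4K'
    if height >= 1440: return '1440p'
    if height >= 1080: return '1080p'
    if height >= 720: return 'HD'
    return 'SD'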
Code example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            links = []

            try:
                try: url = re.compile('proxy\.link=([^"&]+)').findall(result)[0]
                except: url = cloudflare.source(re.compile('proxy\.list=([^"&]+)').findall(result)[0])

                url = url.split('*', 1)[-1].rsplit('<')[0]

                dec = self._gkdecrypt(base64.b64decode('aUJocnZjOGdGZENaQWh3V2huUm0='), url)
                if not 'http' in dec: dec = self._gkdecrypt(base64.b64decode('QjZVTUMxUms3VFJBVU56V3hraHI='), url)

                url = directstream.google(dec)

                links += [(i['url'], i['quality']) for i in url]
            except:
                pass

            try:
                url = 'http://miradetodo.com.ar/gkphp/plugins/gkpluginsphp.php'

                post = client.parseDOM(result, 'div', attrs = {'class': 'player.+?'})[0]
                post = post.replace('iframe', 'IFRAME')
                post = client.parseDOM(post, 'IFRAME', ret='.+?')[0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)

                result = ''
                try: result += cloudflare.source(url, post=urllib.urlencode({'link': post['id'][0]}))
                except: pass
                try: result += cloudflare.source(url, post=urllib.urlencode({'link': post['id1'][0]}))
                except: pass
                try: result += cloudflare.source(url, post=urllib.urlencode({'link': post['id2'][0]}))
                except: pass

                result = re.compile('"?link"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
                result = [(i[0].replace('\\/', '/'), i[1])  for i in result]

                links += [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
                links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]
                if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in result if 360 <= int(i[1]) < 480]
            except:
                pass

            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'MiraDeTodo', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code example #13
File: pubfilm_mv_tv.py Project: mitnits/exodus
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            result = client.source(url)

            url = zip(client.parseDOM(result, 'a', ret='href', attrs = {'target': 'player_iframe'}), client.parseDOM(result, 'a', attrs = {'target': 'player_iframe'}))
            url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
            url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]

            if content == 'episode':
                url = [i for i in url if i[1] == '%01d' % int(episode)]

            links = [client.replaceHTMLCodes(i[0]) for i in url]

            for u in links:

                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0', 'Referer': u}

                    post = urlparse.parse_qs(urlparse.urlparse(u).query)['link'][0]
                    post = urllib.urlencode({'link': post})

                    url = 'http://player.pubfilm.com/smplayer/plugins/gkphp/plugins/gkpluginsphp.php'

                    url = client.source(url, post=post, headers=headers)
                    url = json.loads(url)

                    if 'gklist' in url:
                        url = client.source(u)
                        url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
                        url = re.findall('"file"\s*:\s*"(.+?)"', url)
                        url = [i.split()[0].replace('\\/', '/') for i in url]
                    else:
                        url = url['link']
                        url = directstream.google(url)
                        url = [i['url'] for i in url]

                    for i in url:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Pubfilm', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            return sources
Code example #14
File: dramabus.py Project: enursha101/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'mediaplayer'})
            r = [i.attrs['src'] for i in dom_parser.parse_dom(r, 'iframe', req='src')]

            for i in r:
                try:
                    if 'vidnow.' in i:
                        i = client.request(i, referer=url)

                        gdata = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', i, re.DOTALL)]
                        gdata += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', i, re.DOTALL)]
                        gdata = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in gdata]

                        for u, q in gdata:
                            try:
                                tag = directstream.googletag(u)

                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u, 'direct': True,'debridonly': False})
                            except:
                                pass

                        i = dom_parser.parse_dom(i, 'div', attrs={'id': 'myElement'})
                        i = dom_parser.parse_dom(i, 'iframe', req='src')[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                    if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i:  host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                    else:  direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #15
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'mediaplayer'})
            r = [i.attrs['src'] for i in dom_parser.parse_dom(r, 'iframe', req='src')]

            for i in r:
                try:
                    if 'vidnow.' in i:
                        i = client.request(i, referer=url)

                        gdata = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', i, re.DOTALL)]
                        gdata += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', i, re.DOTALL)]
                        gdata = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in gdata]

                        for u, q in gdata:
                            try:
                                tag = directstream.googletag(u)

                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u, 'direct': True,'debridonly': False})
                            except:
                                pass

                        i = dom_parser.parse_dom(i, 'div', attrs={'id': 'myElement'})
                        i = dom_parser.parse_dom(i, 'iframe', req='src')[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                    if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i:  host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                    else:  direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #16
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)

            r = client.request(ref)

            p = re.findall('load_player\((\d+)\)', r)
            r = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'id': p[0]}, referer=ref, XHR=True)
            url = json.loads(r).get('value')
            link = client.request(url, XHR=True, output='geturl', referer=ref)

            if '1movies.' in link:
                r = client.request(link, XHR=True, referer=ref)
                r = [(match[1], match[0]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', r, re.DOTALL)]
                r = [(re.sub('[^\d]+', '', x[0]), x[1].replace('\/', '/')) for x in r]
                r = [x for x in r if x[0]]

                links = [(x[1], '4K') for x in r if int(x[0]) >= 2160]
                links += [(x[1], '1440p') for x in r if int(x[0]) >= 1440]
                links += [(x[1], '1080p') for x in r if int(x[0]) >= 1080]
                links += [(x[1], 'HD') for x in r if 720 <= int(x[0]) < 1080]
                links += [(x[1], 'SD') for x in r if int(x[0]) < 720]

                for url, quality in links:
                    sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            else:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: return

                urls = []
                if 'google' in link: host = 'gvideo'; direct = True; urls = directstream.google(link);
                if 'google' in link and not urls and directstream.googletag(link): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(link)[0]['quality'], 'url': link}]
                elif 'ok.ru' in link: host = 'vk'; direct = True; urls = directstream.odnoklassniki(link)
                elif 'vk.com' in link:  host = 'vk'; direct = True; urls = directstream.vk(link)
                else:  direct = False; urls = [{'quality': 'HD', 'url': link}]

                for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})

            return sources
        except:
            return sources
Code example #17
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
            r = [(re.findall('link"?\s*:\s*"(.+?)"', ''.join([x.content for x in i])), dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')) for i in r]
            r = [i[0][0] if i[0] else i[1][0].attrs['src'] for i in r if i[0] or i[1]]

            for i in r:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)
                    if not i.startswith('http'): i = self.__decode_hash(i)

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i)
                    elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                    else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #18
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
            r = [(re.findall('link"?\s*:\s*"(.+?)"', ''.join([x.content for x in i])), dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')) for i in r]
            r = [i[0][0] if i[0] else i[1][0].attrs['src'] for i in r if i[0] or i[1]]

            for i in r:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)
                    if not i.startswith('http'): i = self.__decode_hash(i)

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i)
                    elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                    else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            links = client.parseDOM(result, 'div', attrs = {'class': 'server_line.+?'})

            for link in links:
                try:
                    host = client.parseDOM(link, 'p', attrs = {'class': 'server_servername'})[0]
                    host = host.strip().lower().split(' ')[-1]

                    url = client.parseDOM(link, 'a', ret='href')[0]
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    if 'google' in host:
                        url = cloudflare.source(url)
                        url = base64.b64decode(re.compile('decode\("(.+?)"').findall(url)[0])
                        url = re.compile('proxy\.link=([^"&]+)').findall(url)[0]
                        url = url.split('*', 1)[-1]
                        url = self._gkdecrypt(base64.b64decode('Q05WTmhPSjlXM1BmeFd0UEtiOGg='), url)
                        url = directstream.google(url)
                        for i in url: sources.append({'source': 'gvideo', 'quality': i['quality'], 'provider': 'Tunemovie', 'url': i['url'], 'direct': True, 'debridonly': False})

                    elif 'openload' in host:
                        sources.append({'source': 'openload.co', 'quality': 'HD', 'provider': 'Tunemovie', 'url': url, 'direct': False, 'debridonly': False})

                    #elif 'videomega' in host:
                        #sources.append({'source': 'videomega.tv', 'quality': 'HD', 'provider': 'Tunemovie', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #20
	def resolve(self,url):
		try:
			if 'google' in url:
				ch = []
				from resources.lib.modules import directstream
				res = directstream.google(url)
				for x in res:
					ch += [x['quality'].replace('HD','720p').replace('SD','480p')]

				index = control.dialog.select('Select quality:',ch)
				if index >-1:
					return res[index]['url']

			elif 'my.pcloud' in url:
				from resources.lib.resolvers import pcloud
				return pcloud.resolve(url)

			else:
				import urlresolver
				return urlresolver.resolve(url)
		except:
			return ''
Code example #21
File: xmovies_mv.py Project: mitnits/exodus
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            url = client.parseDOM(result, 'embed', ret='src')[0]
            url = client.replaceHTMLCodes(url)

            url = 'https://docs.google.com/file/d/%s/preview' % urlparse.parse_qs(urlparse.urlparse(url).query)['docid'][0]

            url = directstream.google(url)

            for i in url: sources.append({'source': 'gvideo', 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code example #22
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            c = client.request(url, output='cookie')
            result = client.request(url)

            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                ginfo = directstream.google(url)
                                for g in ginfo: sources.append({'source': host, 'quality': g['quality'], 'language': 'en', 'url': g['url'], 'direct': True, 'debridonly': False})
                            else: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    except: pass

                    captions = re.search('''["']?kind["']?\s*:\s*(?:\'|\")captions(?:\'|\")''', result)
                    if not captions: continue

                    matches = [(match[0], match[1]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]), x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result if not i[1].endswith('.vtt')]

                    for quality, url in result: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #23
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, output='extended')

            headers = r[3]
            headers.update({
                'Cookie': r[2].get('Set-Cookie'),
                'Referer': self.base_link
            })
            r = r[0]

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            links += [
                l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                    i, 'iframe', attrs={'class': 'metaframe'}, req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                    if self.domains[0] in i:
                        i = client.request(i, headers=headers, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try:
                                i += jsunpack.unpack(
                                    base64.decodestring(
                                        re.sub('"\s*\+\s*"', '',
                                               x))).replace('\\', '')
                            except:
                                pass

                        for x in re.findall('(eval\s*\(function.*?)</script>',
                                            i, re.DOTALL):
                            try:
                                i += jsunpack.unpack(x).replace('\\', '')
                            except:
                                pass

                        links = [(match[0], match[1]) for match in re.findall(
                            '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                            i, re.DOTALL)]
                        links = [(x[0].replace('\/', '/'),
                                  source_utils.label_to_quality(x[1]))
                                 for x in links if '/no-video.mp4' not in x[0]]

                        doc_links = [
                            directstream.google(
                                'https://drive.google.com/file/d/%s/view' %
                                match)
                            for match in re.findall(
                                '''file:\s*["'](?:[^"']+youtu.be/([^"']+))''',
                                i, re.DOTALL)
                        ]
                        doc_links = [(u['url'], u['quality'])
                                     for x in doc_links if x for u in x]
                        links += doc_links

                        for url, quality in links:
                            if self.base_link in url:
                                url = url + '|Referer=' + self.base_link

                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'de',
                                'url': url,
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        try:
                            # as long as urlresolver get no Update for this URL (So just a Temp-Solution)
                            did = re.findall(
                                'youtube.googleapis.com.*?docid=(\w+)', i)
                            if did:
                                i = 'https://drive.google.com/file/d/%s/view' % did[
                                    0]

                            valid, host = source_utils.is_host_valid(
                                i, hostDict)
                            if not valid: continue

                            urls, host, direct = source_utils.check_directstreams(
                                i, host)

                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'de',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Code example #24
File: foxx.py Project: enursha101/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if self.domains[0] in i:
                        i = client.request(i, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try: i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x)))
                            except: pass

                        s = re.compile('(eval\(function.*?)</script>', re.DOTALL).findall(i)

                        for x in s:
                            try: i += jsunpack.unpack(x)
                            except: pass

                        i = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i if '/no-video.mp4' not in x[0]]

                        for url, quality in i:
                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'debridonly': False})
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                            if 'google' in i and not urls and directstream.googletag(i):  host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                            elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                            else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Code example #25
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            r1 = client.parseDOM(r, 'div', attrs={'id': 'playeroptions'})[0]
            links = dom.parse_dom(r1, 'li', req=['data-post', 'data-nume'])
            links = [(i.attrs['data-post'], i.attrs['data-nume'],
                      client.parseDOM(i.content,
                                      'span',
                                      attrs={'class': 'title'})[0])
                     for i in links]
            links = [(i[0], i[1], i[2]) for i in links
                     if not 'trailer' in i[1]]
            try:
                extra = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'links_table'})[0]
                extra = dom.parse_dom(extra, 'td')
                extra = [
                    dom.parse_dom(i.content, 'img', req='src') for i in extra
                    if i
                ]
                extra = [(i[0].attrs['src'],
                          dom.parse_dom(i[0].content, 'a', req='href'))
                         for i in extra if i]
                extra = [(re.findall('domain=(.+?)$',
                                     i[0])[0], i[1][0].attrs['href'])
                         for i in extra if i]
            except BaseException:
                pass
            info = []
            ptype = 'tv' if '/tvshows/' in query else 'movie'
            for item in links:

                plink = 'https://onlinemovie.gr/wp-admin/admin-ajax.php'
                pdata = {
                    'action': 'doo_player_ajax',
                    'post': item[0],
                    'nume': item[1],
                    'type': ptype
                }
                pdata = urllib.urlencode(pdata)
                link = client.request(plink, post=pdata)
                link = client.parseDOM(link, 'iframe', ret='src')[0]
                lang = 'gr'
                quality, info = source_utils.get_release_quality(
                    item[2], item[2])
                info.append('SUB')
                info = ' | '.join(info)
                if 'jwplayer' in link:
                    sub = re.findall('&sub=(.+?)&id', link)[0]
                    sub = urllib.unquote(sub)
                    sub = urlparse.urljoin(
                        self.base_link,
                        sub) if sub.startswith('/sub/') else sub
                    url = re.findall('source=(.+?)&sub', link)[0]
                    url = urllib.unquote(url)
                    url = urlparse.urljoin(self.base_link,
                                           url) if url.startswith('/') else url

                    if 'cdn' in url or 'nd' in url or url.endswith(
                            '.mp4') or url.endswith('.m3u8'):
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'api.myhls' in link:
                    quality2, info = source_utils.get_release_quality(
                        item[2], None)
                    info.append('SUB')
                    info = ' | '.join(info)
                    data = client.request(link, referer=self.base_link)
                    if not unjuice.test(data): raise Exception()
                    r = unjuice.run(data)
                    urls = re.findall(
                        '''file['"]:['"]([^'"]+).+?label":['"]([^'"]+)''', r,
                        re.DOTALL)
                    sub = [i[0] for i in urls if 'srt' in i[0]][0]
                    sub = urlparse.urljoin(
                        self.base_link,
                        sub) if sub.startswith('/sub/') else sub

                    urls = [(i[0], i[1]) for i in urls if not '.srt' in i[0]]
                    for i in urls:
                        host = 'GVIDEO'
                        quality, url = i[1].lower(), i[0]

                        url = '%s|User-Agent=%s&Referer=%s' % (
                            url, urllib.quote(client.agent()), link)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'myhls.stream' in link:
                    vid = link.split('/')[-1]
                    plink = 'https://myhls.stream/api/source/%s' % vid
                    data = client.request(plink,
                                          post='r=',
                                          referer=link,
                                          XHR=True)
                    data = json.loads(data)

                    urls = data['data']

                    sub = data['captions'][0]['path']
                    sub = 'https://myhls.stream/asset' + sub if sub.startswith(
                        '/') else sub

                    for i in urls:
                        url = i['file'] if not i['file'].startswith(
                            '/') else 'https://myhls.stream/%s' % i['file']
                        quality = i['label']
                        host = 'CDN-HLS'

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'drive' in link:
                    quality, info = source_utils.get_release_quality(
                        item[1], None)
                    info.append('SUB')
                    info = ' | '.join(info)
                    try:
                        links = directstream.google(item[0])
                        for x in links:
                            sources.append({
                                'source': 'GVIDEO',
                                'quality': x['quality'],
                                'language': lang,
                                'url': x['url'],
                                'info': info,
                                'direct': True,
                                'debridonly': False,
                                'sub': sub
                            })
                    except BaseException:
                        pass

                    try:
                        r = client.request(item[0])
                        links = re.findall('''\{file:\s*['"]([^'"]+)''', r,
                                           re.DOTALL)
                        for x in links:
                            sources.append({
                                'source': 'GVIDEO',
                                'quality': quality,
                                'language': lang,
                                'url': x,
                                'info': info,
                                'direct': True,
                                'debridonly': False,
                                'sub': sub
                            })

                    except BaseException:
                        pass

                else:
                    continue

            for item in extra:
                url = item[1]
                if 'movsnely' in url:
                    url = client.request(url, output='geturl', redirect=True)
                quality = 'SD'
                lang, info = 'gr', 'SUB'
                valid, host = source_utils.is_host_valid(item[0], hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                    'sub': sub
                })

            return sources
        except BaseException:
            return sources
Code Example #26
0
File: dayt.py Project: azumimuo/family-xbmc-addon
    def sources(self, url, hostDict, hostprDict):
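        # Builds the watch-page URL from the title (movie or episode), collects yadi.sk /
        # mail.ru links from the '5throw' block, then follows the pasmov/pasep iframe chain
        # until a Google Video URL can be resolved with directstream.google().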
        try:
            sources = []
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = (data['title'].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
            try:
                is_movie = not (int(data['episode']) > 0)
            except:
                is_movie = True

            if is_movie:
                url = urlparse.urljoin(self.base_link, self.watch_link % title)
            else:
                url = urlparse.urljoin(self.base_link, self.watch_series_link % (title, data['season'], data['episode']))

            r = client.request(url, output='geturl')

            if r is None: raise Exception()

            r = client.request(url)
            r = re.sub(r'[^\x00-\x7F]+',' ', r)
            result = r

            y = re.findall('Date\s*:\s*.+?>.+?(\d{4})', r)
            y = y[0] if len(y) > 0 else None

            if is_movie:
                if not (data['imdb'] in r or data['year'] == y): raise Exception()

            q = client.parseDOM(r, 'title')
            q = q[0] if len(q) > 0 else None

            quality = '1080p' if ' 1080' in q else 'HD'
            r = client.parseDOM(r, 'div', attrs = {'id': '5throw'})[0]
            r = client.parseDOM(r, 'a', ret='href', attrs = {'rel': 'nofollow'})

            links = []

            for url in r:
                try:
                    if 'yadi.sk' in url:
                        url = directstream.yandex(url)
                    elif 'mail.ru' in url:
                        url = directstream.cldmailru(url)
                    else:
                        raise Exception()

                    if url == None: raise Exception()
                    links += [{'source': 'cdn', 'url': url, 'quality': quality, 'direct': False}]
                except:
                    pass

            try:
                r = client.parseDOM(result, 'iframe', ret='src')
                if is_movie:
                    r = [i for i in r if 'pasmov' in i][0]
                else:
                    r = [i for i in r if 'pasep' in i][0]

                for i in range(0, 4):
                    try:
                        if not r.startswith('http'):
                            r = urlparse.urljoin(self.base_link, r)
                        r = client.request(r)
                        r = re.sub(r'[^\x00-\x7F]+',' ', r)
                        r = client.parseDOM(r, 'iframe', ret='src')[0]
                        if 'google' in r: break
                    except:
                        break

                if not 'google' in r: raise Exception()
                r = directstream.google(r)

                for i in r:
                    try:
                        links += [{'source': 'gvideo', 'url': i['url'], 'quality': i['quality'], 'direct': True}]
                    except:
                        pass
            except:
                pass

            for i in links:
                sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

            return sources
        except:
            return sources
Code Example #27
0
File: miradetodo_mv.py Project: mitnits/exodus
    def sources(self, url, hostDict, hostprDict):
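        # Gathers the miradetodo player iframes, decodes each base64 'id' parameter into a
        # Google Video URL where possible, otherwise queries gkpluginsphp.php for stream
        # links or follows the AmazonPlayer redirect to a direct CDN URL.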
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            cookie, agent, result = cloudflare.request(r, output='extended')

            f = client.parseDOM(result, 'div', attrs = {'class': 'movieplay'})
            f = client.parseDOM(f, 'iframe', ret='src')

            f = [i for i in f if 'miradetodo' in i]

            links = []
            dupes = []

            for u in f:

                try:
                    id = urlparse.parse_qs(urlparse.urlparse(u).query)['id'][0]

                    if id in dupes: raise Exception()
                    dupes.append(id)

                    try:
                        url = base64.b64decode(id)

                        if 'google' in url: url = directstream.google(url)
                        else: raise Exception()

                        for i in url: links.append({'source': 'gvideo', 'quality': i['quality'], 'url': i['url']})
                        continue
                    except:
                        pass


                    result = cloudflare.source(u, headers={'Referer': r})


                    try:
                        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}

                        post = re.findall('{link\s*:\s*"([^"]+)', result)[0]
                        post = urllib.urlencode({'link': post})

                        url = urlparse.urljoin(self.base_link, '/stream/plugins/gkpluginsphp.php')
                        url = cloudflare.source(url, post=post, headers=headers)
                        url = json.loads(url)['link']
                        url = [i['link'] for i in url if 'link' in i]

                        for i in url:
                            try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                            except: pass

                        continue
                    except:
                        pass

                    try:
                        url = re.findall('AmazonPlayer.*?file\s*:\s*"([^"]+)', result, re.DOTALL)[0]

                        class NoRedirection(urllib2.HTTPErrorProcessor):
                            def http_response(self, request, response): return response

                        o = urllib2.build_opener(NoRedirection)
                        o.addheaders = [('User-Agent', agent)]
                        r = o.open(url)
                        url = r.headers['Location']
                        r.close()

                        links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
                    except:
                        pass
                except:
                    pass


            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MiraDeTodo', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code Example #28
0
    def sources(self, url, hostDict, hostprDict):
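        # Logs in with the configured account, extracts the embed iframe, decrypts the
        # 'mplanet*' token and resolves it with directstream.google(), then maps the
        # jwplayer 'sources' labels to 1080p/HD/SD entries.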
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

            headers = {'X-Requested-With': 'XMLHttpRequest'}
            login = urlparse.urljoin(self.base_link, '/login')
            post = {
                'username': self.user,
                'password': self.password,
                'action': 'login'
            }
            post = urllib.urlencode(post)

            cookie = client.source(login,
                                   post=post,
                                   headers=headers,
                                   output='cookie')

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, cookie=cookie)

            url = re.compile("embeds\[\d+\]\s*=\s*'([^']+)").findall(result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            try:
                url = re.compile('mplanet\*(.+)').findall(url)[0]
                url = url.rsplit('&')[0]
                dec = self._gkdecrypt(
                    base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), url)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            try:
                result = client.source(url)

                result = re.compile('sources\s*:\s*\[(.*?)\]',
                                    re.DOTALL).findall(result)[0]
                result = re.compile(
                    '''['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*([^'"]+)''',
                    re.DOTALL).findall(result)
            except:
                pass

            try:
                u = result[0][0]
                if not 'download.php' in u and not '.live.' in u:
                    raise Exception()
                o = urllib2.build_opener(NoRedirection)
                o.addheaders = [('User-Agent', client.randomagent()),
                                ('Cookie', cookie)]
                r = o.open(u)
                try:
                    u = r.headers['Location']
                except:
                    pass
                r.close()
                links += [(u, '1080p', 'cdn')]
            except:
                pass
            try:
                u = [(i[0], re.sub('[^0-9]', '', i[1])) for i in result]
                u = [(i[0], i[1]) for i in u if i[1].isdigit()]
                links += [(i[0], '1080p', 'gvideo') for i in u
                          if int(i[1]) >= 1080]
                links += [(i[0], 'HD', 'gvideo') for i in u
                          if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD', 'gvideo') for i in u
                          if 480 <= int(i[1]) < 720]
            except:
                pass

            for i in links:
                sources.append({
                    'source': i[2],
                    'quality': i[1],
                    'provider': 'Moviesplanet',
                    'url': i[0],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Code Example #29
0
File: dtmovies_mv.py Project: hieuhienvn/hieuhien.vn
    def sources(self, url, hostDict, hostprDict):
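        # Finds the movie page through a Google custom-search query, pulls yadi.sk /
        # mail.ru links from the '5throw' block, and follows the pasep iframe chain to a
        # Google Video URL resolved with directstream.google().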
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            query = base64.b64decode('aHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vY3VzdG9tc2VhcmNoL3YxZWxlbWVudD9rZXk9QUl6YVN5Q1ZBWGlVelJZc01MMVB2NlJ3U0cxZ3VubU1pa1R6UXFZJnJzej1maWx0ZXJlZF9jc2UmbnVtPTEwJmhsPWVuJmN4PTAxNjE2OTU5MjY5NTEyNzQ5NTk0OTpsYnB1dGVqbmxrNCZnb29nbGVob3N0PXd3dy5nb29nbGUuY29tJnE9JXM=')
            query = query % urllib.quote_plus('%s %s' % (data['title'].replace(':', ' '), data['year']))

            t = cleantitle.get(data['title'])

            r = client.request(query)
            r = json.loads(r)['results']

            r = [(i['url'], i['titleNoFormatting']) for i in r]
            r = [(i[0], cleantitle.get(i[1]), re.findall('\d{4}', i[1])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if len(i[2]) > 0]
            r = [i[0] for i in r if t == i[1] and data['year'] == i[2]][0]

            u = urlparse.urljoin(self.base_link, r)

            result = client.request(u)
            result = re.sub(r'[^\x00-\x7F]+',' ', result)

            q = client.parseDOM(result, 'title')[0]

            quality = '1080p' if ' 1080' in q else 'HD'

            r = client.parseDOM(result, 'div', attrs = {'id': '5throw'})[0]
            r = client.parseDOM(r, 'a', ret='href', attrs = {'rel': 'nofollow'})

            links = []

            for url in r:
                try:
                    if 'yadi.sk' in url:
                        url = directstream.yandex(url)
                    elif 'mail.ru' in url:
                        url = directstream.cldmailru(url)
                    else:
                        raise Exception()

                    if url == None: raise Exception()
                    links += [{'source': 'cdn', 'url': url, 'quality': quality, 'direct': False}]
                except:
                    pass


            try:
                r = client.parseDOM(result, 'iframe', ret='src')
                r = [i for i in r if 'pasep' in i][0]

                for i in range(0, 4):
                    try:
                        r = client.request(r)
                        r = re.sub(r'[^\x00-\x7F]+',' ', r)
                        r = client.parseDOM(r, 'iframe', ret='src')[0]
                        if 'google' in r: break
                    except:
                        break

                if not 'google' in r: raise Exception()
                url = directstream.google(r)

                for i in url:
                    try: links += [{'source': 'gvideo', 'url': i['url'], 'quality': i['quality'], 'direct': True}]
                    except: pass
            except:
                pass

            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Dtmovies', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

            return sources
        except:
            return sources
Code Example #30
0
    def sources(self, url, hostDict, hostprDict):
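        # Walks the Tunemovie server list: Google entries are base64-decoded, gk-decrypted
        # and resolved with directstream.google(); openload entries are added as hosted links.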
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            links = client.parseDOM(result,
                                    'div',
                                    attrs={'class': 'server_line.+?'})

            for link in links:
                try:
                    host = client.parseDOM(
                        link, 'p', attrs={'class': 'server_servername'})[0]
                    host = host.strip().lower().split(' ')[-1]

                    url = client.parseDOM(link, 'a', ret='href')[0]
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    if 'google' in host:
                        url = cloudflare.source(url)
                        url = base64.b64decode(
                            re.compile('decode\("(.+?)"').findall(url)[0])
                        url = re.compile('proxy\.link=([^"&]+)').findall(
                            url)[0]
                        url = url.split('*', 1)[-1]
                        url = self._gkdecrypt(
                            base64.b64decode('Q05WTmhPSjlXM1BmeFd0UEtiOGg='),
                            url)
                        url = directstream.google(url)
                        for i in url:
                            sources.append({
                                'source': 'gvideo',
                                'quality': i['quality'],
                                'provider': 'Tunemovie',
                                'url': i['url'],
                                'direct': True,
                                'debridonly': False
                            })

                    elif 'openload' in host:
                        sources.append({
                            'source': 'openload.co',
                            'quality': 'HD',
                            'provider': 'Tunemovie',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })

                    #elif 'videomega' in host:
                    #sources.append({'source': 'videomega.tv', 'quality': 'HD', 'provider': 'Tunemovie', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code Example #31
0
File: foxx.py Project: azumimuo/family-xbmc-addon
    def sources(self, url, hostDict, hostprDict):
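        # Collects the German ('de') player tabs, unpacks any packed JavaScript to extract
        # gvideo file URLs, and classifies the remaining links as gvideo / vk / hosted sources.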
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
            r = [(re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', i[0].content), dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')) for i in r]
            r = [i[0][0] if len(i[0]) > 0 else i[1][0].attrs['src'] for i in r if i[0] or i[1]]

            for i in r:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if self.base_link in i:
                        i = client.request(i, referer=url)

                        s = re.compile('(eval\(function.*?)</script>', re.DOTALL).findall(i)

                        for x in s:
                            try: i += jsunpack.unpack(x)
                            except: pass

                        i = re.findall('file"?\s*:\s*"(.+?)"', i)

                        for u in i:
                            try:
                                u = u.replace('\\/', '/').replace('\/', '/')
                                u = client.replaceHTMLCodes(u).encode('utf-8')

                                sources.append({'source': 'gvideo', 'quality': directstream.googletag(u)[0]['quality'], 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                            except:
                                pass
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                            if 'google' in i and not urls and directstream.googletag(i):  host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                            elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                            else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Code Example #32
0
    def sources(self, url, hostDict, hostprDict):
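        # Variant of the tab scraper above: gathers the 'de' player tabs, extracts link/file
        # values plus iframe and <source> URLs, then resolves Google / ok.ru / vk links directly.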
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            links += [
                l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                    i, 'iframe', attrs={'class': 'metaframe'}, req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if self.domains[0] in i:
                        i = client.request(i, referer=url)

                        s = re.compile('(eval\(function.*?)</script>',
                                       re.DOTALL).findall(i)

                        for x in s:
                            try:
                                i += jsunpack.unpack(x)
                            except:
                                pass

                        i = re.findall('file"?\s*:\s*"(.+?)"', i)

                        for u in i:
                            try:
                                u = u.replace('\\/', '/').replace('\/', '/')
                                u = client.replaceHTMLCodes(u).encode('utf-8')

                                sources.append({
                                    'source':
                                    'gvideo',
                                    'quality':
                                    directstream.googletag(u)[0]['quality'],
                                    'language':
                                    'de',
                                    'url':
                                    u,
                                    'direct':
                                    True,
                                    'debridonly':
                                    False
                                })
                            except:
                                pass
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(
                                i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i:
                                host = 'gvideo'
                                direct = True
                                urls = directstream.google(i)
                            if 'google' in i and not urls and directstream.googletag(
                                    i):
                                host = 'gvideo'
                                direct = True
                                urls = [{
                                    'quality':
                                    directstream.googletag(i)[0]['quality'],
                                    'url':
                                    i
                                }]
                            elif 'ok.ru' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.vk(i)
                            else:
                                direct = False
                                urls = [{
                                    'quality': 'SD',
                                    'url': i
                                }]

                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'de',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Code Example #33
0
    def sources(self, url, hostDict, hostprDict):
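        # Same Moviesplanet flow as Code Example #28: authenticated request, 'mplanet*'
        # decryption via directstream.google(), and jwplayer source-label mapping.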
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response


            headers = {'X-Requested-With': 'XMLHttpRequest'}
            login = urlparse.urljoin(self.base_link, '/login')
            post = {'username': self.user, 'password': self.password, 'action': 'login'}
            post = urllib.urlencode(post)

            cookie = client.source(login, post=post, headers=headers, output='cookie')


            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, cookie=cookie)

            url = re.compile("embeds\[\d+\]\s*=\s*'([^']+)").findall(result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            try:
                url = re.compile('mplanet\*(.+)').findall(url)[0]
                url = url.rsplit('&')[0]
                dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), url)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            try:
                result = client.source(url)

                result = re.compile('sources\s*:\s*\[(.*?)\]', re.DOTALL).findall(result)[0]
                result = re.compile('''['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*([^'"]+)''', re.DOTALL).findall(result)
            except:
                pass

            try:
                u = result[0][0]
                if not 'download.php' in u and not '.live.' in u: raise Exception()
                o = urllib2.build_opener(NoRedirection)
                o.addheaders = [('User-Agent', client.randomagent()), ('Cookie', cookie)]
                r = o.open(u)
                try: u = r.headers['Location']
                except: pass
                r.close()
                links += [(u, '1080p', 'cdn')]
            except:
                pass
            try:
                u = [(i[0], re.sub('[^0-9]', '', i[1])) for i in result]
                u = [(i[0], i[1]) for i in u if i[1].isdigit()]
                links += [(i[0], '1080p', 'gvideo') for i in u if int(i[1]) >= 1080]
                links += [(i[0], 'HD', 'gvideo') for i in u if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD', 'gvideo') for i in u if 480 <= int(i[1]) < 720]
            except:
                pass


            for i in links: sources.append({'source': i[2], 'quality': i[1], 'provider': 'Moviesplanet', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code Example #34
0
File: streams.py Project: bitsbb01/coldkeys-addons
    def process(self, url, direct=True):
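        # Resolves a playback URL step by step: <regex> containers, rtmp and m3u8 passthrough,
        # preset-based provider search, URI sources, direct Google streams, urlresolver and
        # finally liveresolver.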
        try:
            dialog = None
            dialog = control.progressDialog
            dialog.create(control.addonInfo('name'),
                          control.lang(30726).encode('utf-8'))
            dialog.update(0)
        except:
            pass

        try:
            if not '</regex>' in url: raise Exception()
            from resources.lib.modules import regex
            u = regex.resolve(url)
            if not u == None: url = u
        except:
            pass

        try:
            if not url.startswith('rtmp'): raise Exception()
            if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0:
                url += ' timeout=10'
            try:
                dialog.close()
            except:
                pass
            return url
        except:
            pass

        try:
            if not '.m3u8' in url: raise Exception()
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit(
                '.')[-1].replace('/', '').lower()
            if not ext == 'm3u8': raise Exception()
            try:
                dialog.close()
            except:
                pass
            return url
        except:
            pass

        try:
            preset = re.findall('<preset>(.+?)</preset>', url)[0]

            title, year, imdb = re.findall(
                '<title>(.+?)</title>', url)[0], re.findall(
                    '<year>(.+?)</year>',
                    url)[0], re.findall('<imdb>(.+?)</imdb>', url)[0]

            try:
                tvdb, tvshowtitle, premiered, season, episode = re.findall(
                    '<tvdb>(.+?)</tvdb>', url)[0], re.findall(
                        '<tvshowtitle>(.+?)</tvshowtitle>',
                        url)[0], re.findall('<premiered>(.+?)</premiered>',
                                            url)[0], re.findall(
                                                '<season>(.+?)</season>',
                                                url)[0], re.findall(
                                                    '<episode>(.+?)</episode>',
                                                    url)[0]
            except:
                tvdb = tvshowtitle = premiered = season = episode = None

            direct = False

            presetDict = [
                'primewire_mv_tv', 'watchfree_mv_tv', 'movie25_mv',
                'watchseries_tv', 'dizibox_tv', 'dizigold_tv', 'dizilab_tv',
                'miradetodo_mv', 'onemovies_mv_tv', 'onlinedizi_tv',
                'pelispedia_mv_tv', 'pubfilm_mv_tv', 'putlocker_mv_tv',
                'sezonlukdizi_tv', 'usmovies_mv', 'usseries_tv', 'watch1080_mv'
            ]

            if preset == 'searchsd':
                presetDict = [
                    'primewire_mv_tv', 'watchfree_mv_tv', 'movie25_mv',
                    'watchseries_tv'
                ]

            from resources.lib.sources import sources

            try:
                dialog.update(0,
                              control.lang(30726).encode('utf-8'),
                              control.lang(30731).encode('utf-8'))
            except:
                pass

            u = sources().getSources(title,
                                     year,
                                     imdb,
                                     tvdb,
                                     season,
                                     episode,
                                     tvshowtitle,
                                     premiered,
                                     presetDict=presetDict,
                                     progress=False,
                                     timeout=20)

            try:
                dialog.update(50,
                              control.lang(30726).encode('utf-8'),
                              control.lang(30731).encode('utf-8'))
            except:
                pass

            u = sources().sourcesDirect(u, progress=False)

            if not u == None:
                try:
                    dialog.close()
                except:
                    pass
                return u
        except:
            pass

        try:
            from resources.lib.sources import sources

            u = sources().getURISource(url)

            if not u == False: direct = False
            if u == None or u == False or u == []: raise Exception()

            try:
                dialog.update(50,
                              control.lang(30726).encode('utf-8'),
                              control.lang(30731).encode('utf-8'))
            except:
                pass

            u = sources().sourcesDirect(u, progress=False)

            if not u == None:
                try:
                    dialog.close()
                except:
                    pass
                return u
        except:
            pass

        try:
            if not '.google.com' in url: raise Exception()
            from resources.lib.modules import directstream
            u = directstream.google(url)[0]['url']
            try:
                dialog.close()
            except:
                pass
            return u
        except:
            pass

        try:
            import urlresolver

            try:
                hmf = urlresolver.HostedMediaFile(url=url,
                                                  include_disabled=True,
                                                  include_universal=False)
            except:
                hmf = urlresolver.HostedMediaFile(url=url)

            if hmf.valid_url() == False: raise Exception()

            direct = False
            u = hmf.resolve()
            if 'plugin://plugin.video.youtube' in u: raise Exception()

            if not u == False:
                try:
                    dialog.close()
                except:
                    pass
                return u
        except:
            pass

        try:
            try:
                headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
            except:
                headers = {}
            if not url.startswith('http'): raise Exception()
            result = client.request(url.split('|')[0],
                                    headers=headers,
                                    output='headers',
                                    timeout='20')
            if 'Content-Type' in result and not 'html' in result[
                    'Content-Type']:
                raise Exception()

            import liveresolver
            if liveresolver.isValid(url) == True: direct = False
            u = liveresolver.resolve(url)

            if not u == None:
                try:
                    dialog.close()
                except:
                    pass
                return u
        except:
            pass

        if direct == True: return url

        try:
            dialog.close()
        except:
            pass
Code Example #35
0
    def sources(self, url, hostDict, hostprDict):
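        # Looks up the movie or episode page, queries the ajax endpoints for each listed
        # server, resolves drive.google embeds with directstream.google() and passes other
        # hosts through as hosted sources.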
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])

            if 'tvshowtitle' in data:
                season = int(data['season'])
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases)
            else:
                url = self.searchMovie(data['title'], data['year'], aliases)

            if url is None: return sources

            r = client.request(url)

            if 'tvshowtitle' in data:
                episodes = re.findall('(<h2.+?title=".+?<\/h2>)', r)
                for ep in episodes:
                    try:
                        if client.parseDOM(
                                ep, 'button')[0] == 'S%02d-E%02d' % (season,
                                                                     episode):
                            ep_id = client.parseDOM(ep,
                                                    'button',
                                                    ret='idepisode')[0]
                            ajaxurl = urlparse.urljoin(self.base_link,
                                                       '/ajax-tvshow.php')
                            post = urllib.urlencode({'idepisode': ep_id})
                            r = client.request(ajaxurl, post=post)
                    except:
                        raise Exception()

            items = client.parseDOM(r, 'div', attrs={'class': 'le-server'})

            for item in items:
                try:
                    data_id = client.parseDOM(item, 'span', ret='data')[0]
                    ajaxurl = urlparse.urljoin(self.base_link, '/ajax_new.php')
                    post = urllib.urlencode({'m4u': data_id})
                    r = client.request(ajaxurl, post=post)
                    url = client.parseDOM(r,
                                          'div',
                                          attrs={'class': 'containervideo'})[0]
                    url = client.parseDOM(url, 'iframe', ret='src')[0]
                    if 'drive.google' in url:
                        gd_urls = directstream.google(url)
                        for i in gd_urls:
                            try:
                                sources.append({
                                    'source': 'GDRIVE',
                                    'quality': i['quality'],
                                    'language': 'en',
                                    'url': i['url'],
                                    'direct': True,
                                    'debridonly': False
                                })
                            except:
                                pass
                    else:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            quality, info = source_utils.get_release_quality(
                                url)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': '',
                                'direct': False,
                                'debridonly': False
                            })
                except:
                    pass

            return sources
        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
Code Example #36
0
File: phstreams.py Project: kevintone/tdbaddon
    def process(self, url, direct=True):
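        # Resolves a playback URL: displays bare image links, passes rtmp and m3u8/f4m/ts
        # streams through, then tries preset provider search, direct Google streams,
        # urlresolver and liveresolver.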
        try:
            if not any(i in url for i in ['.jpg', '.png', '.gif']): raise Exception()
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
            if not ext in ['jpg', 'png', 'gif']: raise Exception()
            try:
                dialog = None
                dialog = control.progressDialog
                dialog.create(control.addonInfo('name'), control.lang(30732).encode('utf-8'))
                dialog.update(0)
                i = os.path.join(control.dataPath,'img')
                control.deleteFile(i)
                f = control.openFile(i, 'w')
                f.write(client.request(url))
                f.close()
                dialog.close()
                control.execute('ShowPicture("%s")' % i)
                return True
            except:
                return
        except:
            pass

        try:
            dialog = None
            dialog = control.progressDialog
            dialog.create(control.addonInfo('name'), control.lang(30726).encode('utf-8'))
            dialog.update(0)
        except:
            pass

        try:
            if not '</regex>' in url: raise Exception()
            from resources.lib.modules import regex
            u = regex.resolve(url)
            if not u == None: url = u
        except:
            pass

        try:
            if not url.startswith('rtmp'): raise Exception()
            if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
            try: dialog.close()
            except: pass
            return url
        except:
            pass

        try:
            if not any(i in url for i in ['.m3u8', '.f4m', '.ts']): raise Exception()
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
            if not ext in ['m3u8', 'f4m', 'ts']: raise Exception()
            try: dialog.close()
            except: pass
            return url
        except:
            pass

        try:
            preset = re.findall('<preset>(.+?)</preset>', url)[0]

            title, year, imdb = re.findall('<title>(.+?)</title>', url)[0], re.findall('<year>(.+?)</year>', url)[0], re.findall('<imdb>(.+?)</imdb>', url)[0]

            try: tvdb, tvshowtitle, premiered, season, episode = re.findall('<tvdb>(.+?)</tvdb>', url)[0], re.findall('<tvshowtitle>(.+?)</tvshowtitle>', url)[0], re.findall('<premiered>(.+?)</premiered>', url)[0], re.findall('<season>(.+?)</season>', url)[0], re.findall('<episode>(.+?)</episode>', url)[0]
            except: tvdb = tvshowtitle = premiered = season = episode = None

            direct = False

            presetDict = ['primewire_mv_tv', 'watchfree_mv_tv', 'movie25_mv', 'watchseries_tv', 'afdah_mv', 'dtmovies_mv', 'dizibox_tv', 'dizigold_tv', 'miradetodo_mv', 'onemovies_mv_tv', 'onlinedizi_tv', 'pelispedia_mv_tv', 'pubfilm_mv_tv', 'putlocker_mv_tv', 'rainierland_mv', 'sezonlukdizi_tv', 'tunemovie_mv', 'xmovies_mv']

            if preset == 'searchsd': presetDict = ['primewire_mv_tv', 'watchfree_mv_tv', 'movie25_mv', 'watchseries_tv']

            from resources.lib.sources import sources

            try: dialog.update(0, control.lang(30726).encode('utf-8'), control.lang(30731).encode('utf-8'))
            except: pass

            u = sources().getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, presetDict=presetDict, progress=False, timeout=20)

            try: dialog.update(50, control.lang(30726).encode('utf-8'), control.lang(30731).encode('utf-8'))
            except: pass

            u = sources().sourcesDirect(u, progress=False)

            if not u == None:
                try: dialog.close()
                except: pass
                return u
        except:
            pass

        try:
            from resources.lib.sources import sources

            u = sources().getURISource(url)

            if not u == False: direct = False
            if u == None or u == False or u == []: raise Exception()

            try: dialog.update(50, control.lang(30726).encode('utf-8'), control.lang(30731).encode('utf-8'))
            except: pass

            u = sources().sourcesDirect(u, progress=False)

            if not u == None:
                try: dialog.close()
                except: pass
                return u
        except:
            pass

        try:
            if not '.google.com' in url: raise Exception()
            from resources.lib.modules import directstream
            u = directstream.google(url)[0]['url']
            try: dialog.close()
            except: pass
            return u
        except:
            pass

        try:
            import urlresolver

            hmf = urlresolver.HostedMediaFile(url=url, include_disabled=True, include_universal=False)

            if hmf.valid_url() == False: raise Exception()

            direct = False ; u = hmf.resolve()
            if 'plugin://plugin.video.youtube' in u: raise Exception()

            if not u == False:
                try: dialog.close()
                except: pass
                return u
        except:
            pass


        try:
            try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
            except: headers = {}
            if not url.startswith('http'): raise Exception()
            result = client.request(url.split('|')[0], headers=headers, output='headers', timeout='20')
            if 'Content-Type' in result and not 'html' in result['Content-Type']: raise Exception()

            import liveresolver
            if liveresolver.isValid(url) == True: direct = False
            u = liveresolver.resolve(url)

            if not u == None:
                try: dialog.close()
                except: pass
                return u
        except:
            pass


        if direct == True: return url

        try: dialog.close()
        except: pass
Code Example #37
0
    def sources(self, url, hostDict, hostprDict):
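        # Another Moviesplanet variant: authenticated request, 'mplanet*' decryption, jwplayer
        # 'sources' parsing with directstream.googletag() for quality, plus a <source>-tag CDN
        # fallback.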
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()

            headers = {'X-Requested-With': 'XMLHttpRequest'}
            login = urlparse.urljoin(self.base_link, '/login')
            post = {
                'username': self.user,
                'password': self.password,
                'action': 'login'
            }
            post = urllib.urlencode(post)

            cookie = client.request(login,
                                    post=post,
                                    headers=headers,
                                    output='cookie')

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, cookie=cookie)

            url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            try:
                dec = re.findall('mplanet\*(.+)', url)[0]
                dec = dec.rsplit('&')[0]
                dec = self._gkdecrypt(
                    base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            result = client.request(url)

            try:
                url = re.compile('sources\s*:\s*\[(.*?)\]',
                                 re.DOTALL).findall(result)[0]
                url = re.compile(
                    '''['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*[^'"]+''',
                    re.DOTALL).findall(url)

                for i in url:
                    try:
                        links.append({
                            'source':
                            'gvideo',
                            'quality':
                            directstream.googletag(i)[0]['quality'],
                            'url':
                            i
                        })
                    except:
                        pass
            except:
                pass

            try:
                url = client.parseDOM(result, 'source', ret='src')[0]

                links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
            except:
                pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Moviesplanet',
                    'url': i['url'],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Code Example #38
0
File: 1movies.py Project: C6SUMMER/StreamHub
    def sources(self, url, hostDict, hostprDict):
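        # Posts the page's load_player id to the player endpoint; 1movies playlists become HLS
        # entries, while other links are classified as gvideo / vk / hosted sources.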
        sources = []

        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)

            r = client.request(ref)

            p = re.findall('load_player\((\d+)\)', r)
            r = client.request(urlparse.urljoin(self.base_link,
                                                self.player_link),
                               post={'id': p[0]},
                               referer=ref,
                               XHR=True)
            url = json.loads(r).get('value')
            link = client.request(url, XHR=True, output='geturl', referer=ref)

            if '1movies.' in link:
                r = client.request(link, XHR=True, referer=ref)
                js = json.loads(r)
                j = js['playlist']
                for a in j:
                    url = a['file']
                    sources.append({
                        'source': 'HLS',
                        'quality': 'HD',
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })
            else:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: return sources

                urls = []
                if 'google' in link:
                    host = 'gvideo'
                    direct = True
                    urls = directstream.google(link)
                if 'google' in link and not urls and directstream.googletag(
                        link):
                    host = 'gvideo'
                    direct = True
                    urls = [{
                        'quality':
                        directstream.googletag(link)[0]['quality'],
                        'url':
                        link
                    }]
                elif 'ok.ru' in link:
                    host = 'vk'
                    direct = True
                    urls = directstream.odnoklassniki(link)
                elif 'vk.com' in link:
                    host = 'vk'
                    direct = True
                    urls = directstream.vk(link)
                else:
                    direct = False
                    urls = [{
                        'quality': 'HD',
                        'url': link
                    }]

                for x in urls:
                    sources.append({
                        'source': host,
                        'quality': x['quality'],
                        'language': 'en',
                        'url': x['url'],
                        'direct': direct,
                        'debridonly': False
                    })

            return sources
        except:
            return sources
Code Example #39
0
    def sources(self, url, hostDict, hostprDict):
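        # Uses the premium cookie to page through every quality part via the ajax endpoint,
        # decodes embedded hashes or iframes, and resolves Google / ok.ru / vk links directly.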
        sources = []

        try:
            if url == None:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            cookie = self.__get_premium_cookie()

            r = client.request(url, mobile=True, cookie=cookie)

            query = urlparse.urljoin(self.base_link, self.part_link)
            id = re.compile('var\s*video_id\s*=\s*"(\d+)"').findall(r)[0]

            p = client.parseDOM(r, 'a', attrs={'class': 'changePart', 'data-part': '\d+p'}, ret='data-part')

            for i in p:
                p = urllib.urlencode({'video_id': id, 'part_name': i, 'page': '0'})
                p = client.request(query, cookie=cookie, mobile=True, XHR=True, post=p, referer=url)

                p = json.loads(p)
                p = p.get('part_count', 0)

                for part_count in range(0, p):
                    try:
                        r = urllib.urlencode({'video_id': id, 'part_name': i, 'page': part_count})
                        r = client.request(query, cookie=cookie, mobile=True, XHR=True, post=r, referer=url)

                        r = json.loads(r)
                        r = r.get('part', {})

                        s = r.get('source', '')
                        url = r.get('code', '')

                        if s == 'url' and 'http' not in url:
                            url = self.__decode_hash(url)
                        elif s == 'other':
                            url = client.parseDOM(url, 'iframe', ret='src')
                            if len(url) < 1: continue
                            url = url[0]
                            if '/old/seframer.php' in url: url = self.__get_old_url(url)

                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                        if not host in hostDict and not 'google' in host: continue

                        if i in ['720p', 'HD']: quali = 'HD'
                        elif i in ['1080p', '1440p']: quali = i
                        elif i in ['2160p']: quali = '4K'
                        else: quali = 'SD'

                        if 'google' in url: host = 'gvideo'; direct = True; urls = directstream.google(url)
                        elif 'ok.ru' in url: host = 'vk'; direct = True; urls = directstream.odnoklassniki(url)
                        elif 'vk.com' in url: host = 'vk'; direct = True; urls = directstream.vk(url)
                        else: direct = False; urls = [{'quality': quali, 'url': url}]

                        for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Code Example #40
0
File: streamhub.py Project: vphuc81/MyRepository
    def process(self, url, direct=True):
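        # Resolves playback URLs for this addon: image links, '|regex=' containers, rtmp and
        # m3u8/f4m/ts passthrough, preset provider search, direct Google streams,
        # filmon/uptostream rewrites, several live-TV site scrapes, and finally urlresolver.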
        try:
            if not any(i in url for i in ['.jpg', '.png', '.gif']): raise Exception()
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
            if not ext in ['jpg', 'png', 'gif']: raise Exception()
            try:
                i = os.path.join(control.dataPath,'img')
                control.deleteFile(i)
                f = control.openFile(i, 'w')
                f.write(client.request(url))
                f.close()
                control.execute('ShowPicture("%s")' % i)
                return False
            except:
                return
        except:
            pass

        try:
            r, x = re.findall('(.+?)\|regex=(.+?)$', url)[0]
            x = regex.fetch(x)
            r += urllib.unquote_plus(x)
            if not '</regex>' in r: raise Exception()
            u = regex.resolve(r)
            if not u == None: url = u
        except:
            pass

        try:
            if not url.startswith('rtmp'): raise Exception()
            if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
            return url
        except:
            pass

        try:
            if not any(i in url for i in ['.m3u8', '.f4m', '.ts']): raise Exception()
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
            if not ext in ['m3u8', 'f4m', 'ts']: raise Exception()
            return url
        except:
            pass

        try:
            preset = re.findall('<preset>(.+?)</preset>', url)[0]

            if not 'search' in preset: raise Exception()

            title, year, imdb = re.findall('<title>(.+?)</title>', url)[0], re.findall('<year>(.+?)</year>', url)[0], re.findall('<imdb>(.+?)</imdb>', url)[0]

            try: tvdb, tvshowtitle, premiered, season, episode = re.findall('<tvdb>(.+?)</tvdb>', url)[0], re.findall('<tvshowtitle>(.+?)</tvshowtitle>', url)[0], re.findall('<premiered>(.+?)</premiered>', url)[0], re.findall('<season>(.+?)</season>', url)[0], re.findall('<episode>(.+?)</episode>', url)[0]
            except: tvdb = tvshowtitle = premiered = season = episode = None

            direct = False

            quality = 'HD' if not preset == 'searchsd' else 'SD'

            from resources.sources import sources

            u = sources().getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, quality)

            if not u == None: return u
        except:
            pass

        try:
            from resources.sources import sources

            u = sources().getURISource(url)

            if not u == False: direct = False
            if u == None or u == False: raise Exception()

            return u
        except:
            pass

        try:
            if not '.google.com' in url: raise Exception()
            from resources.lib.modules import directstream
            u = directstream.google(url)[0]['url']
            return u
        except:
            pass

        try:
            if not 'filmon.com/' in url: raise Exception()
            from resources.lib.modules import filmon
            u = filmon.resolve(url)
            return u
        except:
            pass
			
        try:
            if not 'uptostream/' in url: raise Exception()
            u = url.replace('uptostream', 'uptobox')
            return u
        except:
            pass

        try:
            if not 'liveonlinetv' in url: raise Exception()
            u = liveonlinetv247(url)
            return (str(u)).replace('["' , '').replace(']"' , '').replace('[[' , '').replace(']]' , '').replace('[' , '').replace(']' , '').replace('"' , '').replace("'" , "") + '|User-Agent=Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
        except:
            pass
		
        try:
            if not 'tvplayer' in url: raise Exception()
            u = playtvplayer(url)
            return u
        except:
            pass
			
        try:
            if not 'tvcatchup' in url: raise Exception()
            open = getUrl(url)
            u    = re.compile("file: '(.+?)'").findall(open)[0]
            return u
        except:
            pass
			
        try:
            if not 'robssatellitetv.com' in url: raise Exception()
            open = getUrl(url)
            u    = re.compile('src: "(.+?)"').findall(open)[0]
            return u
        except:
            pass
			
        try:
            if not 'arconaitv.me' in url: raise Exception()
            url  = 'http://www.justproxy.co.uk/index.php?q='+base64.b64encode(url)
            open = getUrl(url)
            u    = re.compile('"src":"(.+?)"').findall(open)[0]
            u    = u.strip()
            return str(u).replace('\/','/').replace('"','')
        except:
            pass
			

        try:
            import urlresolver

            hmf = urlresolver.HostedMediaFile(url=url)

            if hmf.valid_url() == False: raise Exception()

            direct = False ; u = hmf.resolve()

            if not u == False: return u
        except:
            pass

        if direct == True: return url
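
process() above is a chain of candidate handlers: each try-block raises early when the URL does not match its pattern, and the bare except simply falls through to the next block, with the plain URL returned at the end only while direct is still True. A toy, self-contained sketch of the same fall-through idiom; the handler names here are illustrative and not part of the add-on:

def resolve_chain(url, handlers):
    # Try each handler in order; a handler signals "not mine" by raising.
    for handler in handlers:
        try:
            result = handler(url)
            if result:
                return result
        except Exception:
            continue
    return None

def rtmp_handler(url):
    if not url.startswith('rtmp'):
        raise Exception()
    # Same default as above: append a timeout if none is present.
    return url if 'timeout=' in url else url + ' timeout=10'

def hls_handler(url):
    if not url.split('?')[0].endswith('.m3u8'):
        raise Exception()
    return url

print(resolve_chain('rtmp://host/app/stream', [rtmp_handler, hls_handler]))
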
Code example #41
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url == None:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, headers={'Accept-Encoding': 'gzip'})

            rels = client.parseDOM(r, 'nav', attrs={'class': 'player'})
            rels = client.parseDOM(rels, 'ul', attrs={'class': 'idTabs'})
            rels = client.parseDOM(rels, 'li')
            rels = [(client.parseDOM(i,
                                     'a',
                                     attrs={'class': 'options'},
                                     ret='href'),
                     client.parseDOM(i, 'img', ret='src')) for i in rels]
            rels = [(i[0][0][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0]))
                    for i in rels if len(i[0]) > 0 and len(i[1]) > 0]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [client.parseDOM(r, 'div', attrs={'id': i}) for i in rels]
            r = [(re.findall('link"?\s*:\s*"(.+?)"', i[0]),
                  client.parseDOM(i,
                                  'iframe',
                                  attrs={'class': '[^\'"]*metaframe[^\'"]*'},
                                  ret='src')) for i in r]
            r = [
                i[0][0] if len(i[0]) > 0 else i[1][0] for i in r
                if len(i[0]) > 0 or len(i[1]) > 0
            ]

            for i in r:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)
                    if not i.startswith('http'): i = self.__decode_hash(i)

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(i.strip().lower()).netloc)[0]
                    if not host in hostDict and not 'google' in host: continue

                    if 'google' in i:
                        host = 'gvideo'
                        direct = True
                        urls = directstream.google(i)
                    elif 'ok.ru' in i:
                        host = 'vk'
                        direct = True
                        urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i:
                        host = 'vk'
                        direct = True
                        urls = directstream.vk(i)
                    else:
                        direct = False
                        urls = [{
                            'quality': 'SD',
                            'url': i
                        }]

                    for x in urls:
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'de',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Code example #42
File: foxx.py Project: YourFriendCaspian/dotfiles
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, output='extended')

            headers = r[3]
            headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
            r = r[0]

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                    if self.domains[0] in i:
                        i = client.request(i, headers=headers, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try: i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x))).replace('\\', '')
                            except: pass

                        for x in re.findall('(eval\s*\(function.*?)</script>', i, re.DOTALL):
                            try: i += jsunpack.unpack(x).replace('\\', '')
                            except: pass

                        links = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                        links = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in links if '/no-video.mp4' not in x[0]]

                        doc_links = [directstream.google('https://drive.google.com/file/d/%s/view' % match) for match in re.findall('''file:\s*["'](?:[^"']+youtu.be/([^"']+))''', i, re.DOTALL)]
                        doc_links = [(u['url'], u['quality']) for x in doc_links if x for u in x]
                        links += doc_links

                        for url, quality in links:
                            if self.base_link in url:
                                url = url + '|Referer=' + self.base_link

                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'debridonly': False})
                    else:
                        try:
                            # temporary workaround until resolveurl is updated to handle this URL
                            did = re.findall('youtube.googleapis.com.*?docid=(\w+)', i)
                            if did: i = 'https://drive.google.com/file/d/%s/view' % did[0]

                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls, host, direct = source_utils.check_directstreams(i, host)

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
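
The foxx.py example above unpacks the player JavaScript and then pulls file/label pairs out of it with a single regular expression before mapping the labels to quality tags. A self-contained sketch of that extraction step on a made-up sample string (the regex and the '\/' un-escaping are the ones used above):

import re

sample = '''jwplayer("player").setup({sources: [{"file":"http:\\/\\/cdn.example\\/v_720.mp4","label":"720p"},
{"file":"http:\\/\\/cdn.example\\/v_360.mp4","label":"360p"}]});'''

pairs = re.findall(r'''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                   sample, re.DOTALL)
# Undo the escaped slashes the same way the scraper does.
pairs = [(f.replace('\\/', '/'), label) for f, label in pairs]
print(pairs)  # [('http://cdn.example/v_720.mp4', '720p'), ('http://cdn.example/v_360.mp4', '360p')]
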
Code example #43
File: lists.py Project: joeg8385/plugin.video.venom
	def process(self, url, direct=True):
		try:
			if not any(i in url for i in ['.jpg', '.png', '.gif']): raise Exception()
			ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
			if not ext in ['jpg', 'png', 'gif']: raise Exception()
			try:
				i = os.path.join(control.dataPath,'img')
				control.deleteFile(i)
				f = control.openFile(i, 'w')
				f.write(client.request(url))
				f.close()
				control.execute('ShowPicture("%s")' % i)
				return False
			except:
				return
		except:
			pass

		try:
			r, x = re.findall('(.+?)\|regex=(.+?)$', url)[0]
			x = regex.fetch(x)
			r += urllib.unquote_plus(x)
			if not '</regex>' in r: raise Exception()
			u = regex.resolve(r)
			if not u is None: url = u
		except:
			pass

		try:
			if not url.startswith('rtmp'): raise Exception()
			if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
			return url
		except:
			pass

		try:
			if not any(i in url for i in ['.m3u8', '.f4m', '.ts']): raise Exception()
			ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
			if not ext in ['m3u8', 'f4m', 'ts']: raise Exception()
			return url
		except:
			pass

		try:
			preset = re.findall('<preset>(.+?)</preset>', url)[0]

			if not 'search' in preset: raise Exception()

			title, year, imdb = re.findall('<title>(.+?)</title>', url)[0], re.findall('<year>(.+?)</year>', url)[0], re.findall('<imdb>(.+?)</imdb>', url)[0]

			try: tvdb, tvshowtitle, premiered, season, episode = re.findall('<tvdb>(.+?)</tvdb>', url)[0], re.findall('<tvshowtitle>(.+?)</tvshowtitle>', url)[0], re.findall('<premiered>(.+?)</premiered>', url)[0], re.findall('<season>(.+?)</season>', url)[0], re.findall('<episode>(.+?)</episode>', url)[0]
			except: tvdb = tvshowtitle = premiered = season = episode = None

			direct = False

			quality = 'HD' if not preset == 'searchsd' else 'SD'

			from resources.lib.modules import sources

			u = sources.Sources().getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, quality)

			if not u is None: return u
		except:
			pass

		try:
			from resources.lib.modules import sources

			u = sources.Sources().getURISource(url)

			if not u is False: direct = False
			if u is None or u is False: raise Exception()

			return u
		except:
			pass

		try:
			if not '.google.com' in url: raise Exception()
			from resources.lib.modules import directstream
			u = directstream.google(url)[0]['url']
			return u
		except:
			pass

		try:
			if not 'filmon.com/' in url: raise Exception()
			from resources.lib.modules import filmon
			u = filmon.resolve(url)
			return u
		except:
			pass

		try:
			try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
			except: headers = dict('')
			if not url.startswith('http'): raise Exception()
			result = client.request(url.split('|')[0], headers=headers, output='headers', timeout='20')
			if 'Content-Type' in result and not 'html' in result['Content-Type']: raise Exception()

			import liveresolver
			if liveresolver.isValid(url) is True: direct = False
			u = liveresolver.resolve(url)

			if not u is None:
				try: dialog.close()
				except: pass
				return u
		except:
			pass

		try:
			import resolveurl

			hmf = resolveurl.HostedMediaFile(url=url)

			if hmf.valid_url() is False: raise Exception()

			direct = False ; u = hmf.resolve()

			if not u is False: return u
		except:
			pass

		if direct is True: return url
Code example #44
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []

			if url == None: return sources

			# [BUBBLESCODE]
			#if (self.user == '' or self.password == ''): raise Exception()
			if (not self.enabled or self.user == '' or self.password == ''): raise Exception()
			# [/BUBBLESCODE]
			
			login = urlparse.urljoin(self.base_link, '/login')
			post = {'username': self.user, 'password': self.password, 'returnpath': '/'}
			post = urllib.urlencode(post)

			headers = {'User-Agent':client.randomagent()}
			rlogin = client.request(login, headers=headers, post=post, output='extended')
			guid = re.findall('(.*?);\s', rlogin[2]['Set-Cookie'])[0]
			headers['Cookie'] = (headers['Cookie'] + '; ' + guid) if 'Cookie' in headers else guid
			url = urlparse.urljoin(self.base_link, url)

			result = client.request(url, headers=headers)

			url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
			url = client.parseDOM(url, 'iframe', ret='src')[0]
			url = url.replace('https://', 'http://')

			links = []

			try:
				dec = re.findall('mplanet\*(.+)', url)[0]
				dec = dec.rsplit('&')[0]
				dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
				dec = directstream.google(dec)

				links += [(i['url'], i['quality'], 'gvideo') for i in dec]
			except:
				pass

			result = client.request(url, headers=headers)

			try:
				url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
				for i in url:
					try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
					except: pass
			except:
				pass

			try:
				url = client.parseDOM(result, 'source', ret='src')
				url += re.findall('src\s*:\s*\'(.*?)\'', result)
				url = [i for i in url if '://' in i]
				links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
			except:
				pass

			# [BUBBLESCODE]
			#for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
			for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False, 'memberonly' : True})
			# [/BUBBLESCODE]

			return sources
		except:
			return sources
Code example #45
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()

            headers = {'X-Requested-With': 'XMLHttpRequest'}
            login = urlparse.urljoin(self.base_link, '/login')
            post = {'username': self.user, 'password': self.password, 'action': 'login'}
            post = urllib.urlencode(post)

            cookie = client.request(login, post=post, headers=headers, output='cookie')


            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, cookie=cookie)

            url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')


            links = []

            try:
                dec = re.findall('mplanet\*(.+)', url)[0]
                dec = dec.rsplit('&')[0]
                dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            result = client.request(url)

            try:
                url = re.compile('sources\s*:\s*\[(.*?)\]', re.DOTALL).findall(result)[0]
                url = re.compile('''['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*[^'"]+''', re.DOTALL).findall(url)

                for i in url:
                    try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                    except: pass
            except:
                pass

            try:
                url = client.parseDOM(result, 'source', ret='src')
                url += re.findall('src:\s*\'(.*?)\'', result)

                links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
            except:
                pass


            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Moviesplanet', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code example #46
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url == None:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            cookie = self.__get_premium_cookie()

            r = client.request(url, mobile=True, cookie=cookie)

            query = urlparse.urljoin(self.base_link, self.part_link)
            id = re.compile('var\s*video_id\s*=\s*"(\d+)"').findall(r)[0]

            p = dom_parser.parse_dom(r, 'a', attrs={'class': 'changePart', 'data-part': re.compile('\d+p')}, req='data-part')

            for i in p:
                i = i.attrs['data-part']

                p = urllib.urlencode({'video_id': id, 'part_name': i, 'page': '0'})
                p = client.request(query, cookie=cookie, mobile=True, XHR=True, post=p, referer=url)

                p = json.loads(p)
                p = p.get('part_count', 0)

                for part_count in range(0, p):
                    try:
                        r = urllib.urlencode({'video_id': id, 'part_name': i, 'page': part_count})
                        r = client.request(query, cookie=cookie, mobile=True, XHR=True, post=r, referer=url)

                        r = json.loads(r)
                        r = r.get('part', {})

                        s = r.get('source', '')
                        url = r.get('code', '')

                        if s == 'url' and 'http' not in url:
                            url = self.__decode_hash(url)
                        elif s == 'other':
                            url = dom_parser.parse_dom(url, 'iframe', req='src')
                            if len(url) < 1: continue
                            url = url[0].attrs['src']
                            if '/old/seframer.php' in url: url = self.__get_old_url(url)

                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue

                        if i in ['720p', 'HD']: quali = 'HD'
                        elif i in ['1080p', '1440p']: quali = i
                        elif i in ['2160p']: quali = '4K'
                        else: quali = 'SD'

                        if 'google' in url: host = 'gvideo'; direct = True; urls = directstream.google(url)
                        elif 'ok.ru' in url: host = 'vk'; direct = True; urls = directstream.odnoklassniki(url)
                        elif 'vk.com' in url: host = 'vk'; direct = True; urls = directstream.vk(url)
                        else: direct = False; urls = [{'quality': quali, 'url': url}]

                        for i in urls: sources.append({'source': host, 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': direct, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Code example #47
File: dayt.py Project: kisakuldlanor/thedudeabides
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            url = (data['title'].translate(None, '\/:*?"\'<>|!,')).replace(
                ' ', '-').replace('--', '-').lower()
            url = urlparse.urljoin(self.base_link, self.watch_link % url)

            r = client.request(url, output='geturl')

            if r == None: raise Exception()

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)
            r = result  # keep the full page in 'result' for the iframe fallback below

            y = re.findall('Date\s*:\s*.+?>.+?(\d{4})', r)
            y = y[0] if len(y) > 0 else None

            if not (data['imdb'] in r or data['year'] == y): raise Exception()

            q = client.parseDOM(r, 'title')
            q = q[0] if len(q) > 0 else None

            quality = '1080p' if ' 1080' in q else 'HD'

            r = client.parseDOM(r, 'div', attrs={'id': '5throw'})[0]
            r = client.parseDOM(r, 'a', ret='href', attrs={'rel': 'nofollow'})

            links = []

            for url in r:
                try:
                    if 'yadi.sk' in url:
                        url = directstream.yandex(url)
                    elif 'mail.ru' in url:
                        url = directstream.cldmailru(url)
                    else:
                        raise Exception()

                    if url == None: raise Exception()
                    links += [{
                        'source': 'cdn',
                        'url': url,
                        'quality': quality,
                        'direct': False
                    }]
                except:
                    pass

            try:
                r = client.parseDOM(result, 'iframe', ret='src')
                r = [i for i in r if 'pasep' in i][0]

                for i in range(0, 4):
                    try:
                        r = client.request(r)
                        r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                        r = client.parseDOM(r, 'iframe', ret='src')[0]
                        if 'google' in r: break
                    except:
                        break

                if not 'google' in r: raise Exception()
                url = directstream.google(r)

                for i in url:
                    try:
                        links += [{
                            'source': 'gvideo',
                            'url': i['url'],
                            'quality': i['quality'],
                            'direct': True
                        }]
                    except:
                        pass
            except:
                pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'language': 'en',
                    'url': i['url'],
                    'direct': i['direct'],
                    'debridonly': False
                })

            return sources
        except:
            return sources
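
dayt.py chases nested iframes up to four levels deep until a Google-hosted frame shows up, sanitising each page along the way. A small sketch of that loop as a standalone helper, assuming a client.request-style fetch callable is passed in; a plain regex stands in for client.parseDOM here:

import re

def follow_iframes(url, fetch, max_depth=4, needle='google'):
    # Step into the first iframe of each page until the needle appears
    # in the frame URL or the depth limit is reached.
    src = url
    for _ in range(max_depth):
        try:
            page = fetch(src)
            page = re.sub(r'[^\x00-\x7F]+', ' ', page)
            src = re.findall(r"""<iframe[^>]+src=["']([^"']+)""", page)[0]
        except Exception:
            break
        if needle in src:
            return src
    return None

# Example (placeholder start URL): follow_iframes(start_url, lambda u: client.request(u))
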
Code example #48
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = dom_parser.parse_dom(rels, 'a', attrs={'class': 'options'}, req='href')
            rels = [i.attrs['href'][1:] for i in rels]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in set(links):
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if 'videoapi.io' in i:
                        i = client.request(i, referer=url)

                        match = re.findall('videoApiPlayer\((.*?)\);', i)
                        if match:
                            i = client.request('https://videoapi.io/api/getlink/actionEmbed', post=json.loads(match[0]), XHR=True)
                            i = json.loads(i).get('sources', [])
                            i = [x.get('file', '').replace('\/', '/') for x in i]

                            for x in i:
                                gtag = directstream.googletag(x)
                                sources.append({'source': 'gvideo', 'quality': gtag[0]['quality'] if gtag else 'SD', 'language': 'ko', 'url': x, 'direct': True, 'debridonly': False})
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                            if 'google' in i and not urls and directstream.googletag(i):  host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                            elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                            else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Code example #49
File: dtmovies_mv.py Project: bopopescu/upload
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = base64.b64decode(
                'aHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vY3VzdG9tc2VhcmNoL3YxZWxlbWVudD9rZXk9QUl6YVN5Q1ZBWGlVelJZc01MMVB2NlJ3U0cxZ3VubU1pa1R6UXFZJnJzej1maWx0ZXJlZF9jc2UmbnVtPTEwJmhsPWVuJmN4PTAxNjE2OTU5MjY5NTEyNzQ5NTk0OTpsYnB1dGVqbmxrNCZnb29nbGVob3N0PXd3dy5nb29nbGUuY29tJnE9JXM='
            )
            query = query % urllib.quote_plus(
                '%s %s' % (data['title'].replace(':', ' '), data['year']))

            t = cleantitle.get(data['title'])

            r = client.request(query)
            r = json.loads(r)['results']

            r = [(i['url'], i['titleNoFormatting']) for i in r]
            r = [(i[0], cleantitle.get(i[1]), re.findall('\d{4}', i[1]))
                 for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if len(i[2]) > 0]
            r = [i[0] for i in r if t == i[1] and data['year'] == i[2]][0]

            u = urlparse.urljoin(self.base_link, r)

            result = client.request(u)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            q = client.parseDOM(result, 'title')[0]

            quality = '1080p' if ' 1080' in q else 'HD'

            r = client.parseDOM(result, 'div', attrs={'id': '5throw'})[0]
            r = client.parseDOM(r, 'a', ret='href', attrs={'rel': 'nofollow'})

            links = []

            for url in r:
                try:
                    if 'yadi.sk' in url:
                        url = directstream.yandex(url)
                    elif 'mail.ru' in url:
                        url = directstream.cldmailru(url)
                    else:
                        raise Exception()

                    if url == None: raise Exception()
                    links += [{
                        'source': 'cdn',
                        'url': url,
                        'quality': quality,
                        'direct': False
                    }]
                except:
                    pass

            try:
                r = client.parseDOM(result, 'iframe', ret='src')
                r = [i for i in r if 'pasep' in i][0]

                for i in range(0, 4):
                    try:
                        r = client.request(r)
                        r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                        r = client.parseDOM(r, 'iframe', ret='src')[0]
                        if 'google' in r: break
                    except:
                        break

                if not 'google' in r: raise Exception()
                url = directstream.google(r)

                for i in url:
                    try:
                        links += [{
                            'source': 'gvideo',
                            'url': i['url'],
                            'quality': i['quality'],
                            'direct': True
                        }]
                    except:
                        pass
            except:
                pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Dtmovies',
                    'url': i['url'],
                    'direct': i['direct'],
                    'debridonly': False
                })

            return sources
        except:
            return sources
Code example #50
File: solarmovie.py Project: mattg32/CerebroTVRepo
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent':
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
            }
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            headers['Referer'] = url
            ref_url = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.s.post(url, headers=headers)
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'),
                          client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append({
                                    'source': hoster,
                                    'quality': q,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                                continue
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid, t))
                            script = self.s.get(url, headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith(
                                    '()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''',
                                              script).group(1)
                                y = re.search('''_y=['"]([^"']+)''',
                                              script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(
                                self.base_link, self.source_link %
                                (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                    continue

                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                #urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    #for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(
                                                url)[0]['quality']
                                        except:
                                            pass
                                        try:
                                            # resolve the gvideo link to a direct URL; keep the original on failure
                                            url = directstream.google(url, ref=ref_url)[0]['url']
                                        except:
                                            pass
                                    else:
                                        direct = False
                                    sources.append({
                                        'source': hoster,
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': direct,
                                        'debridonly': False
                                    })
                                else:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
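
The solarmovie.py example above polls its source endpoint until a non-empty body comes back, capping the number of empty responses at 11. A generic sketch of that retry-until-non-empty pattern; get_text is a placeholder for the session call used above:

def poll_until_nonempty(get_text, max_attempts=11):
    # Re-issue the request until a non-empty body comes back or the
    # attempt cap is hit, mirroring the while-loop in the scraper above.
    for _ in range(max_attempts):
        body = get_text()
        if body:
            return body
    return ''

# Example (placeholder session/url/headers): poll_until_nonempty(lambda: session.get(u, headers=headers).text)
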
Code example #51
File: cypher.py Project: CYBERxNUKE/xbmc-addon
	def process(self, url, direct=True):
		try:
			if not any(i in url for i in ['.jpg', '.png', '.gif']): raise Exception()
			ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
			if not ext in ['jpg', 'png', 'gif']: raise Exception()
			try:
				i = os.path.join(control.dataPath,'img')
				control.deleteFile(i)
				f = control.openFile(i, 'w')
				f.write(client.request(url))
				f.close()
				control.execute('ShowPicture("%s")' % i)
				return False
			except:
				return
		except:
			pass

		try:
			r, x = re.findall('(.+?)\|regex=(.+?)$', url)[0]
			x = regex.fetch(x)
			r += urllib.unquote_plus(x)
			if not '</regex>' in r: raise Exception()
			u = regex.resolve(r)
			if not u == None: url = u
		except:
			pass

		try:
			if not url.startswith('rtmp'): raise Exception()
			if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
			return url
		except:
			pass

		try:
			if not any(i in url for i in ['.m3u8', '.f4m', '.ts']): raise Exception()
			ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
			if not ext in ['m3u8', 'f4m', 'ts']: raise Exception()
			return url
		except:
			pass

		try:
			preset = re.findall('<preset>(.+?)</preset>', url)[0]

			if not 'search' in preset: raise Exception()

			title, year, imdb = re.findall('<title>(.+?)</title>', url)[0], re.findall('<year>(.+?)</year>', url)[0], re.findall('<imdb>(.+?)</imdb>', url)[0]

			try: tvdb, tvshowtitle, premiered, season, episode = re.findall('<tvdb>(.+?)</tvdb>', url)[0], re.findall('<tvshowtitle>(.+?)</tvshowtitle>', url)[0], re.findall('<premiered>(.+?)</premiered>', url)[0], re.findall('<season>(.+?)</season>', url)[0], re.findall('<episode>(.+?)</episode>', url)[0]
			except: tvdb = tvshowtitle = premiered = season = episode = None

			direct = False

			quality = 'HD' if not preset == 'searchsd' else 'SD'

			from resources.lib.modules import sources

			u = sources.sources().getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, quality)

			if not u == None: return u
		except:
			pass

		try:
			from resources.lib.modules import sources

			u = sources.sources().getURISource(url)

			if not u == False: direct = False
			if u == None or u == False: raise Exception()

			return u
		except:
			pass

		try:
			if not '.google.com' in url: raise Exception()
			from resources.lib.modules import directstream
			u = directstream.google(url)[0]['url']
			return u
		except:
			pass

		try:
			if not 'filmon.com/' in url: raise Exception()
			from resources.lib.modules import filmon
			u = filmon.resolve(url)
			return u
		except:
			pass

		try:
			try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
			except: headers = dict('')
			if not url.startswith('http'): raise Exception()
			result = client.request(url.split('|')[0], headers=headers, output='headers', timeout='20')
			if 'Content-Type' in result and not 'html' in result['Content-Type']: raise Exception()

			import liveresolver
			if liveresolver.isValid(url) == True: direct = False
			u = liveresolver.resolve(url)

			if not u == None:
				try: dialog.close()
				except: pass
				return u
		except:
			pass
			
		try:
			import urlresolver

			hmf = urlresolver.HostedMediaFile(url=url)

			if hmf.valid_url() == False: raise Exception()

			direct = False ; u = hmf.resolve()

			if not u == False: return u
		except:
			pass

		if direct == True: return url
Code example #52
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None:
                return sources

            if self.user == "" or self.password == "":
                raise Exception()

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

            headers = {"X-Requested-With": "XMLHttpRequest"}
            login = urlparse.urljoin(self.base_link, "/login")
            post = {"username": self.user, "password": self.password, "action": "login"}
            post = urllib.urlencode(post)

            cookie = client.source(login, post=post, headers=headers, output="cookie")

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, cookie=cookie)

            url = re.compile("embeds\[\d+\]\s*=\s*'([^']+)").findall(result)[0]
            url = client.parseDOM(url, "iframe", ret="src")[0]
            url = url.replace("https://", "http://")

            links = []

            try:
                url = re.compile("mplanet\*(.+)").findall(url)[0]
                url = url.rsplit("&")[0]
                dec = self._gkdecrypt(base64.b64decode("MllVcmlZQmhTM2swYU9BY0lmTzQ="), url)
                dec = directstream.google(dec)

                links += [(i["url"], i["quality"], "gvideo") for i in dec]
            except:
                pass

            try:
                result = client.source(url)

                result = re.compile("sources\s*:\s*\[(.*?)\]", re.DOTALL).findall(result)[0]
                result = re.compile(
                    """['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*([^'"]+)""", re.DOTALL
                ).findall(result)
            except:
                pass

            try:
                u = result[0][0]
                if not "download.php" in u and not ".live." in u:
                    raise Exception()
                o = urllib2.build_opener(NoRedirection)
                o.addheaders = [("User-Agent", client.randomagent()), ("Cookie", cookie)]
                r = o.open(u)
                try:
                    u = r.headers["Location"]
                except:
                    pass
                r.close()
                links += [(u, "1080p", "cdn")]
            except:
                pass
            try:
                u = [(i[0], re.sub("[^0-9]", "", i[1])) for i in result]
                u = [(i[0], i[1]) for i in u if i[1].isdigit()]
                links += [(i[0], "1080p", "gvideo") for i in u if int(i[1]) >= 1080]
                links += [(i[0], "HD", "gvideo") for i in u if 720 <= int(i[1]) < 1080]
                links += [(i[0], "SD", "gvideo") for i in u if 480 <= int(i[1]) < 720]
            except:
                pass

            for i in links:
                sources.append(
                    {
                        "source": i[2],
                        "quality": i[1],
                        "provider": "Moviesplanet",
                        "url": i[0],
                        "direct": True,
                        "debridonly": False,
                    }
                )

            return sources
        except:
            return sources
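
The example above disables redirect handling so the Location header of the CDN link can be read directly instead of being followed. A minimal Python 2 sketch of the same opener (urllib2 as in the example; the test URL is illustrative):

import urllib2

class NoRedirection(urllib2.HTTPErrorProcessor):
    # Return 3xx responses untouched instead of routing them to the
    # redirect handler, so the caller can read the Location header itself.
    def http_response(self, request, response):
        return response
    https_response = http_response

opener = urllib2.build_opener(NoRedirection)
response = opener.open('http://example.com/download.php?id=1')
final_url = response.headers.get('Location', response.geturl())
response.close()
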
Code example #53
			if u: return u
		except:
			log_utils.error()

		try:
			from resources.lib.modules import sources
			u = sources.Sources().getURISource(url)
			if u: direct = False
			if not u: raise Exception()
			return u
		except: pass

		try:
			if '.google.com' not in url: raise Exception()
			from resources.lib.modules import directstream
			u = directstream.google(url)[0]['url']
			return u
		except: pass

		try:
			if not 'filmon.com/' in url: raise Exception()
			from resources.lib.modules import filmon
			u = filmon.resolve(url)
			return u
		except: pass

		try:
			try: headers = dict(parse_qsl(url.rsplit('|', 1)[1]))
			except: headers = dict('')
			if not url.startswith('http'): raise Exception()
			result = client.request(url.split('|')[0], headers=headers, output='headers', timeout='20')
Code example #54
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = dom_parser.parse_dom(rels,
                                        'a',
                                        attrs={'class': 'options'},
                                        req='href')
            rels = [i.attrs['href'][1:] for i in rels]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            links += [
                l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                    i, 'iframe', attrs={'class': 'metaframe'}, req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in set(links):
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if 'videoapi.io' in i:
                        i = client.request(i, referer=url)

                        match = re.findall('videoApiPlayer\((.*?)\);', i)
                        if match:
                            i = client.request(
                                'https://videoapi.io/api/getlink/actionEmbed',
                                post=json.loads(match[0]),
                                XHR=True)
                            i = json.loads(i).get('sources', [])
                            i = [
                                x.get('file', '').replace('\/', '/') for x in i
                            ]

                            for x in i:
                                gtag = directstream.googletag(x)
                                sources.append({'source': 'gvideo',
                                                'quality': gtag[0]['quality'] if gtag else 'SD',
                                                'language': 'ko', 'url': x,
                                                'direct': True, 'debridonly': False})
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(
                                i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i:
                                host = 'gvideo'
                                direct = True
                                urls = directstream.google(i)
                            if 'google' in i and not urls and directstream.googletag(i):
                                host = 'gvideo'
                                direct = True
                                urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                            elif 'ok.ru' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.vk(i)
                            else:
                                direct = False
                                urls = [{
                                    'quality': 'SD',
                                    'url': i
                                }]

                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'ko',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
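
Several examples above probe directstream.googletag to label Google-video links and fall back to 'SD' when the probe returns nothing. That fallback can live in a tiny helper; a sketch assuming the add-on's directstream module is importable (the helper name is made up):

from resources.lib.modules import directstream

def gvideo_quality(url, default='SD'):
    # googletag returns a list of dicts with a 'quality' key, or an empty list.
    tag = directstream.googletag(url)
    return tag[0]['quality'] if tag else default
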
Code example #55
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            headers['Referer'] = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.s.post(url, headers=headers)
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                        except Exception:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time()*1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                if not valid:
                                    continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append({'source': hoster, 'quality': q, 'language': 'en',
                                                'url': url, 'direct': False, 'debridonly': False})
                                continue
                            else:
                                url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                            script = self.s.get(url, headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0:
                                    count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except Exception:
                                try:
                                    uri = [uri['file']]
                                except Exception:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({'source': 'gvideo', 'quality': q, 'language': 'en',
                                                    'url': url, 'direct': True, 'debridonly': False})
                                    continue

                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(url)[0]['quality']
                                        except Exception:
                                            pass
                                        try:
                                            # resolve the gvideo link to a direct URL; keep the original on failure
                                            url = directstream.google(url)[0]['url']
                                        except Exception:
                                            pass
                                    else:
                                        direct = False
                                    sources.append({'source': hoster, 'quality': q, 'language': 'en',
                                                    'url': url, 'direct': direct, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'en',
                                                    'url': url, 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
            except Exception:
                pass

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return sources