def tvshow(self, imdb, tvdb, tvshowtitle, year):
        """Resolve *tvshowtitle* to this provider's show URL path.

        Queries the site's AJAX search endpoint, keeps results typed 'tv',
        and disambiguates multiple title matches by scraping each candidate
        page for the IMDb tt-id. Returns a utf-8 URL path, or None on any
        failure. Requires account credentials on self.user/self.password.
        """
        try:
            # Provider is account-gated; bail out early without credentials.
            if (self.user == '' or self.password == ''): raise Exception()

            # Mimic an AJAX request so the endpoint answers with JSON.
            headers = {'X-Requested-With': 'XMLHttpRequest'}

            url = urlparse.urljoin(self.base_link, self.search_link)

            # Drop any subtitle after the last ':' to improve match rate.
            post = {'q': tvshowtitle.rsplit(':', 1)[0], 'limit': '100', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
            post = urllib.urlencode(post)

            result = client.source(url, post=post, headers=headers)
            result = json.loads(result)

            tvshowtitle = cleantitle.get(tvshowtitle)

            # Keep TV entries whose normalized title matches; cap at 2 candidates.
            result = [i for i in result if i['meta'].strip().split(' ')[0].lower() == 'tv']
            result = [i for i in result if tvshowtitle == cleantitle.get(i['title'])][:2]

            if len(result) > 1:
                # Ambiguous: fetch each candidate page and match on the IMDb id.
                result = [(i, urlparse.urljoin(self.base_link, i['permalink'])) for i in result]
                result = [(i[0], str(client.source(i[1]))) for i in result]
                result = [(i[0], re.compile('/(tt\d+)').findall(i[1])) for i in result]
                result = [i[0] for i in result if len(i[1]) > 0 and imdb == i[1][0]]

            result = result[0]['permalink']

            # Normalize to a site-relative path.
            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
# Exemplo n.º 2
# 0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve a show URL plus season/episode to the episode page path.

        Looks up the episode's absolute number on TheTVDB (the site lists
        episodes by absolute number), then picks the matching row on the
        show page. Returns a utf-8 path, or None on failure.
        """
        try:
            if url == None: return

            # Base64 hides a TheTVDB API URL template:
            # http://thetvdb.com/api/<apikey>/series/%s/default/%01d/%01d
            num = base64.b64decode('aHR0cDovL3RoZXR2ZGIuY29tL2FwaS8xRDYyRjJGOTAwMzBDNDQ0L3Nlcmllcy8lcy9kZWZhdWx0LyUwMWQvJTAxZA==')
            num = num % (tvdb, int(season), int(episode))
            num = client.source(num)
            num = client.parseDOM(num, 'absolute_number')[0]

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')

            # Each class-less <tr> holds one episode: (link, absolute number).
            result = client.parseDOM(result, 'tr', attrs = {'class': ''})
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'td', attrs = {'class': 'epnum'})) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i[0] for i in result if num == i[1]][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
# Exemplo n.º 3
# 0
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        """Resolve *tvshowtitle* on an animation-only provider.

        Aborts unless IMDb lists the title under the 'animation' genre,
        then matches the site's search results by normalized title (last
        match wins). Returns a utf-8 URL path, or None on failure.
        """
        try:
            # Scrape IMDb's title page for genre links; this provider only
            # carries animated shows.
            genre = 'http://www.imdb.com/title/%s/' % imdb
            genre = client.source(genre)
            genre = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', genre)
            genre = [i for i in genre if '/genre/' in i]
            genre = [i.split('/genre/')[-1].split('?')[0].lower() for i in genre]
            if not 'animation' in genre: raise Exception()

            query = self.search_link % (urllib.quote_plus(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = result.decode('iso-8859-1').encode('utf-8')

            tvshowtitle = cleantitle.get(tvshowtitle)

            # Search results live in <ol id="searchresult">; one <h2> per hit.
            result = client.parseDOM(result, 'ol', attrs = {'id': 'searchresult'})[0]
            result = client.parseDOM(result, 'h2')
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.sub('<.+?>|</.+?>','', i[1])) for i in result]
            result = [i for i in result if tvshowtitle == cleantitle.get(i[1])]
            result = result[-1][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
# Exemplo n.º 4
# 0
    def resolve(self, url):
        """Resolve a provider query string into a direct stream URL.

        Expects *url* to be a query string carrying 'hash_id' and 'referer'.
        Calls the site's episode AJAX endpoint, then its 'grabber' JSON
        service, and returns the highest-quality file URL with the scheme
        the CDN requires, or None on failure.
        """
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            headers = {'X-Requested-With': 'XMLHttpRequest'}

            # 'p' is a simple time-derived token the endpoint expects.
            now = time.localtime()
            url = '/ajax/film/episode?hash_id=%s&f=&p=%s' % (data['hash_id'], now.tm_hour + now.tm_min)
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, headers=headers, referer=data['referer'])
            result = json.loads(result)

            # The response names a 'grabber' service plus a hashed video URL;
            # query the grabber for the actual file list.
            grabber = {'flash': 1, 'json': 1, 's': now.tm_min, 'link': result['videoUrlHash'], '_': int(time.time())}
            grabber = result['grabber'] + '?' + urllib.urlencode(grabber)

            result = client.source(grabber, headers=headers, referer=url)
            result = json.loads(result)

            # Pick the entry with the highest numeric quality label.
            url = [(re.findall('(\d+)', i['label']), i['file']) for i in result if 'label' in i and 'file' in i]
            url = [(int(i[0][0]), i[1]) for i in url if len(i[0]) > 0]
            url = sorted(url, key=lambda k: k[0])
            url = url[-1][1]

            # Follow redirects, then force the scheme the CDN expects.
            url = client.request(url, output='geturl')
            if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
            else: url = url.replace('https://', 'http://')
            return url
        except:
            return
# Exemplo n.º 5
# 0
    def movie(self, imdb, title, year):
        """Resolve *title*/*year* to a site URL path via a web-search query.

        search_link is a base64-encoded search URL template. Falls back to
        the localized title scraped from IMDb (Accept-Language: ar-AR) when
        the first pass finds no match. Returns a utf-8 path, or None.
        """
        try:
            t = cleantitle.get(title)

            query = '%s %s' % (title, year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

            result = client.source(query)
            result = json.loads(result)['results']

            # Strip site boilerplate ('Ver Online ...') and capture title + year.
            result = [(i['url'], i['titleNoFormatting']) for i in result]
            result = [(i[0], re.findall('(?:^Ver Online |^Ver |)(.+?)(?: HD |)\((\d{4})\)', i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]

            r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]

            if len(r) == 0:
                # No match: retry with the localized title from IMDb.
                t = 'http://www.imdb.com/title/%s' % imdb
                t = client.source(t, headers={'Accept-Language':'ar-AR'})
                t = client.parseDOM(t, 'title')[0]
                t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
                t = cleantitle.get(t)

                r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]

            # Reduce to a site-relative '/section/slug/' style path if possible.
            try: url = re.findall('//.+?(/.+)', r[0][0])[0]
            except: url = r[0][0]
            try: url = re.findall('(/.+?/.+?/)', url)[0]
            except: pass
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass
# Exemplo n.º 6
# 0
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        """Resolve *tvshowtitle* on an account-gated provider.

        Signs in lazily (caching the session cookie on self.cookie), scans
        the show index page, and matches on normalized title and year.
        Returns a utf-8 URL path, or None on failure.
        """
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            # Sign in once and cache the resulting session cookie.
            if self.cookie == None: self.cookie = client.source(self.sign, post=self.post, headers=self.headers, cookie=self.lang, output='cookie')

            url = urlparse.urljoin(self.base_link, self.tvsearch_link)

            result = client.source(url, cookie='%s; %s' % (self.cookie, self.lang))

            tvshowtitle = cleantitle.get(tvshowtitle)
            years = ['%s' % str(year)]

            # Each 'index show' div yields (href, show name, year value).
            result = client.parseDOM(result, 'div', attrs = {'class': 'index show'})
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'}), client.parseDOM(i, 'span', attrs = {'class': 'value'})) for i in result]
            result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            result = [i for i in result if tvshowtitle == cleantitle.get(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
# Exemplo n.º 7
# 0
    def sources(self, url, hostDict, hostprDict):
        """Extract Google-video streams from a MiraDeTodo page.

        Tries two extraction paths: (1) a gk-encrypted 'proxy' link that is
        decrypted and resolved via directstream.google, and (2) the site's
        gkpluginsphp player endpoint, mapping numeric labels onto quality
        buckets. Returns a list of source dicts (possibly empty).
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            links = []

            try:
                # Path 1: encrypted proxy link (or a remote proxy list) in the page.
                try: url = re.compile('proxy\.link=([^"&]+)').findall(result)[0]
                except: url = client.source(re.compile('proxy\.list=([^"&]+)').findall(result)[0])

                url = url.split('*', 1)[-1].rsplit('<')[0]

                # Try both known site keys (base64-encoded) for gk decryption.
                dec = self._gkdecrypt(base64.b64decode('aUJocnZjOGdGZENaQWh3V2huUm0='), url)
                if not 'http' in dec: dec = self._gkdecrypt(base64.b64decode('QjZVTUMxUms3VFJBVU56V3hraHI='), url)

                url = directstream.google(dec)

                links += [(i['url'], i['quality']) for i in url]
            except:
                pass

            try:
                # Path 2: POST each player id to the gkpluginsphp endpoint
                # and concatenate whatever JSON comes back.
                url = 'http://miradetodo.com.ar/gkphp/plugins/gkpluginsphp.php'

                post = client.parseDOM(result, 'div', attrs = {'class': 'player.+?'})[0]
                post = post.replace('iframe', 'IFRAME')
                post = client.parseDOM(post, 'IFRAME', ret='.+?')[0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)

                result = ''
                try: result += client.source(url, post=urllib.urlencode({'link': post['id'][0]}))
                except: pass
                try: result += client.source(url, post=urllib.urlencode({'link': post['id1'][0]}))
                except: pass
                try: result += client.source(url, post=urllib.urlencode({'link': post['id2'][0]}))
                except: pass

                # Bucket numeric labels into 1080p / HD / SD.
                result = re.compile('"?link"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
                result = [(i[0].replace('\\/', '/'), i[1])  for i in result]

                links += [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
                links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]
                # Accept 360p only when nothing else qualified as SD.
                if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in result if 360 <= int(i[1]) < 480]
            except:
                pass

            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'MiraDeTodo', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
    def movie(self, imdb, title, year):
        """Resolve *title*/*year* to a site URL path, with two strategies.

        Pass 1: a web-search query (base64 search_link) with an IMDb
        localized-title fallback (Accept-Language: es-ES). Pass 2 (only if
        pass 1 raised): the site's own search endpoint fetched through
        cloudflare. Returns a utf-8 path, or None when both passes fail.
        """
        try:
            t = cleantitle.get(title)

            query = '%s %s' % (title, year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

            result = client.source(query)
            result = json.loads(result)['results']

            # Strip 'Ver ' boilerplate and capture title + year.
            result = [(i['url'], i['titleNoFormatting']) for i in result]
            result = [(i[0], re.findall('(?:^Ver |)(.+?)(?: HD |)\((\d{4})', i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]

            r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]

            if len(r) == 0:
                # No match: retry with the localized title from IMDb.
                t = 'http://www.imdb.com/title/%s' % imdb
                t = client.source(t, headers={'Accept-Language':'es-ES'})
                t = client.parseDOM(t, 'title')[0]
                t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
                t = cleantitle.get(t)

                r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]

            # Reduce to a site-relative '/section/slug/' style path if possible.
            try: url = re.findall('//.+?(/.+)', r[0][0])[0]
            except: url = r[0][0]
            try: url = re.findall('(/.+?/.+?/)', url)[0]
            except: pass
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass

        try:
            # Pass 2: the site's own search page, via the cloudflare helper.
            t = cleantitle.get(title)

            query = self.search3_link % urllib.quote_plus(cleantitle.query(title))
            query = urlparse.urljoin(self.base_link, query)

            result = cloudflare.source(query)
            # Drop non-ASCII bytes so the regex/DOM parsing below is safe.
            result = re.sub(r'[^\x00-\x7F]+','', result)

            # Each <li> holds (href, italicized title, '(year)').
            r = result.split('<li class=')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in r]
            r = [(i[0][0], re.sub('\(|\)','', i[1][0]), i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            try: url = re.findall('//.+?(/.+)', r)[0]
            except: url = r
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass
# Exemplo n.º 9
# 0
    def sources(self, url, hostDict, hostprDict):
        """Extract Google-video streams from a USmovies page.

        A non-http *url* is treated as a query string (title/year) matched
        against the provider's cached movie list. Extraction tries the
        site's gkpluginsphp endpoint first, then a raw iframe player.
        Returns a list of source dicts (possibly empty).
        """
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                # Query-string form: build a '/title-year' slug and look it
                # up in the cached movie index.
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                match = data['title'].replace(':', '').replace('\'', '').replace(' ', '-')
                match = re.sub('\-+', '-', match.lower())
                match = '/%s-%s' % (match, data['year'])

                url = cache.get(self.usmovies_moviecache, 120)

                url = [i for i in url if match in i][-1]
                url = client.replaceHTMLCodes(url)


            r = urlparse.urljoin(self.base_link, url)
            result = client.source(r)

            links = []
            headers = {'Referer': r}
            result = client.parseDOM(result, 'div', attrs = {'class': 'video-embed'})[0]

            try:
                # Path 1: POST the embedded player link to gkpluginsphp.
                post = re.findall('{link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})

                url = urlparse.urljoin(self.base_link, '/plugins/gkpluginsphp.php')
                url = client.source(url, post=post, headers=headers)
                url = json.loads(url)['link']
                links += [i['link'] for i in url if 'link' in i]
            except:
                pass

            try:
                # Path 2: scrape file URLs out of the iframe player's sources array.
                url = client.parseDOM(result, 'iframe', ret='.+?')[0]
                url = client.source(url, headers=headers)
                url = url.replace('\n', '')

                url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
                url = re.findall('"file"\s*:\s*"(.+?)"', url)
                links += [i.split()[0] for i in url]
            except:
                pass

            for i in links:
                # googletag() may raise for non-gvideo URLs; skip those.
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'USmovies', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
# Exemplo n.º 10
# 0
    def sources(self, url, hostDict, hostprDict):
        """Extract Google-video streams from a Pubfilm page.

        Episode URLs carry a '?episode=N' suffix used to select the right
        player link. Each player link is resolved through the site's
        gkpluginsphp endpoint (with an inline-sources fallback). Returns a
        list of source dicts (possibly empty).
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            # '?episode=N' marks a TV episode; otherwise treat as a movie.
            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            result = client.source(url)

            # Collect (href, number) pairs from the player_iframe anchors.
            url = zip(client.parseDOM(result, 'a', ret='href', attrs = {'target': 'player_iframe'}), client.parseDOM(result, 'a', attrs = {'target': 'player_iframe'}))
            url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
            url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]

            if content == 'episode':
                url = [i for i in url if i[1] == '%01d' % int(episode)]

            links = [client.replaceHTMLCodes(i[0]) for i in url]

            for u in links:

                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0', 'Referer': u}

                    # The player link carries the gk 'link' token in its query.
                    post = urlparse.parse_qs(urlparse.urlparse(u).query)['link'][0]
                    post = urllib.urlencode({'link': post})

                    url = 'http://player.pubfilm.com/smplayer/plugins/gkphp/plugins/gkpluginsphp.php'

                    url = client.source(url, post=post, headers=headers)
                    url = json.loads(url)

                    if 'gklist' in url:
                        # Endpoint returned a list marker: scrape the player
                        # page's inline sources array instead.
                        url = client.source(u)
                        url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
                        url = re.findall('"file"\s*:\s*"(.+?)"', url)
                        url = [i.split()[0].replace('\\/', '/') for i in url]
                    else:
                        url = url['link']
                        url = directstream.google(url)
                        url = [i['url'] for i in url]

                    for i in url:
                        # googletag() may raise for non-gvideo URLs; skip those.
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Pubfilm', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            return sources
# Exemplo n.º 11
# 0
    def movie(self, imdb, title, year):
        """Resolve *title*/*year* to an Xmovies8-style URL path.

        Pass 1: a web-search query (base64 search_link) matched on
        normalized title and a +/-1 year window. Pass 2 (only if pass 1
        raised): a slug search against search_link_2. Returns a utf-8
        '/section/slug/' path, or None when both passes fail.
        """
        try:
            query = "%s %s" % (title.replace(":", " "), year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

            result = client.source(query)
            result = json.loads(result)["results"]

            t = cleantitle.get(title)
            # Tolerate off-by-one release years.
            years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

            # Strip 'Watch ...' boilerplate, keep 'Title (year)'.
            result = [(i["url"], i["titleNoFormatting"]) for i in result]
            result = [
                (i[0], re.compile('(^Watch Full "|^Watch |^Xmovies8:|^xmovies8:|)(.+? [(]\d{4}[)])').findall(i[1]))
                for i in result
            ]
            result = [(i[0], i[1][0][-1]) for i in result if len(i[1]) > 0]
            result = [i for i in result if t == cleantitle.get(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            # Keep only '/section/slug/'.
            url = "/".join(url.split("/")[:3]) + "/"
            return url
        except:
            pass

        try:
            # Pass 2: build the site's movie slug and search content snippets.
            t = title.replace("'", "")
            t = re.sub(r"[^a-zA-Z0-9\s]+", " ", t).lower().strip()
            t = re.sub("\s\s+", " ", t)
            t = "/movie/" + t.replace(" ", "-") + "-"

            years = ["-%s" % str(year), "-%s" % str(int(year) + 1), "-%s" % str(int(year) - 1)]

            query = base64.b64decode(self.search_link_2) % t

            result = client.source(query)
            result = json.loads(result)["results"]
            result = [i["contentNoFormatting"] for i in result]
            result = "".join(result)
            result = re.compile("(/movie/.+?)\s").findall(result)
            result = [i for i in result if t in i]
            result = [i for i in result if any(x in i for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = "/".join(url.split("/")[:3]) + "/"
            url = client.replaceHTMLCodes(url)
            url = url.encode("utf-8")
            return url
        except:
            pass
# Exemplo n.º 12
# 0
    def request(self, url, check):
        """Fetch *url* and return the page re-encoded as utf-8.

        A response only counts if it contains the *check* marker (guards
        against block pages / empty proxy responses). Tries a direct fetch
        first, then up to two fetches through a (possibly different) proxy
        from proxy.get(). Returns None when every attempt fails.
        """
        try:
            result = client.source(url)
            if check in str(result): return result.decode('iso-8859-1').encode('utf-8')

            # Retry through a proxy; proxy.get() is called per attempt so a
            # second attempt may use a different proxy endpoint.
            for _ in range(2):
                result = client.source(proxy.get() + urllib.quote_plus(url))
                if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
        except Exception:
            return
# Exemplo n.º 13
# 0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve a show query string plus season/episode to a season URL.

        *url* is a query string carrying 'tvshowtitle'. Finds the season
        page via a web-search query, falling back to the site's own search
        endpoint, then appends '?episode=N'. Returns a utf-8 path or None.
        """
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            tvshowtitle = cleantitle.get(data['tvshowtitle'] )
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            try:
                # Pass 1: web-search for '<title> season N'.
                query = '%s season %01d' % (data['tvshowtitle'], int(season))
                query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

                result = client.source(query)
                result = json.loads(result)['results']

                # Parse 'Title - Season N', dedupe, and match title + season.
                r = [(i['url'], i['titleNoFormatting']) for i in result]
                r = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+)').findall(i[1])) for i in r]
                r = [(i[0], i[1][0][-1]) for i in r if len(i[1]) > 0]
                r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1])) for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/','', i[0]), i[1], i[2]) for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
                r = [x for y,x in enumerate(r) if x not in r[:y]]
                r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
                u = [i[0] for i in r if season == '%01d' % int(i[2])][0]

            except:
                # Pass 2: the site's own search endpoint.
                query = self.search2_link % urllib.quote_plus(data['tvshowtitle'])
                query = urlparse.urljoin(self.base_link, query)

                result = client.source(query)

                r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1])) for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/','', i[0]), i[1], i[2]) for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
                r = [x for y,x in enumerate(r) if x not in r[:y]]
                r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
                u = [i[0] for i in r if season == '%01d' % int(i[2])][0]


            # Season page path plus the episode selector for sources().
            url = urlparse.urljoin(self.base_link, u)
            url = urlparse.urlparse(url).path
            url += '?episode=%01d' % int(episode)
            url = url.encode('utf-8')
            return url
        except:
            return
# Exemplo n.º 14
# 0
    def movie(self, imdb, title, year):
        """Resolve *title*/*year* to an Xmovies8-style URL path.

        Pass 1: web-search query (base64 search_link) matched on normalized
        title and exact year. Pass 2 (only if pass 1 raised): a slug search
        against search_link_2. Returns a utf-8 '/section/slug/' path, or
        None when both passes fail.
        """
        try:
            query = '%s %s' % (title.replace(':', ' '), year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

            result = client.source(query)
            result = json.loads(result)['results']

            t = cleantitle.get(title)
            years = ['(%s)' % str(year)]

            # Strip 'Watch ...' boilerplate, keep 'Title (year)'.
            result = [(i['url'], i['titleNoFormatting']) for i in result]
            result = [(i[0], re.compile('(^Watch Full "|^Watch |^Xmovies8:|^xmovies8:|)(.+? [(]\d{4}[)])').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][-1]) for i in result if len(i[1]) > 0]
            result = [i for i in result if t == cleantitle.get(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            # Keep only '/section/slug/'.
            url = '/'.join(url.split('/')[:3]) + '/'
            return url
        except:
            pass

        try:
            # Pass 2: build the site's movie slug and search content snippets.
            t = title.replace('\'', '')
            t = re.sub(r'[^a-zA-Z0-9\s]+', ' ', t).lower().strip()
            t = re.sub('\s\s+' , ' ', t)
            t = '/movie/' + t.replace(' ' , '-') + '-'

            years = ['-%s' % str(year)]

            query = base64.b64decode(self.search_link_2) % t

            result = client.source(query)
            result = json.loads(result)['results']
            result = [i['contentNoFormatting'] for i in result]
            result = ''.join(result)
            result = re.compile('(/movie/.+?)\s').findall(result)
            result = [i for i in result if t in i]
            result = [i for i in result if any(x in i for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = '/'.join(url.split('/')[:3]) + '/'
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass
# Exemplo n.º 15
# 0
    def sources(self, url, hostDict, hostprDict):
        """Extract Google-video streams from a Pubfilm page (single link).

        Movie pages use the first iframe; episode pages (marked with a
        '?episode=N' suffix) pick the matching player_iframe anchor. The
        chosen player is resolved via gkpluginsphp and labelled links are
        bucketed into 1080p / HD / SD. Returns a list of source dicts.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            # '?episode=N' marks a TV episode; otherwise treat as a movie.
            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            result = client.source(url)

            if content == 'movie':
                url = client.parseDOM(result, 'iframe', ret='src')[0]
            else:
                # Pick the player_iframe anchor whose number matches the episode.
                url = zip(client.parseDOM(result, 'a', ret='href', attrs = {'target': 'player_iframe'}), client.parseDOM(result, 'a', attrs = {'target': 'player_iframe'}))
                url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
                url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]
                url = [i[0] for i in url if i[1] == '%01d' % int(episode)][0]

            url = client.replaceHTMLCodes(url)

            result = client.source(url)

            # Resolve the player's gk 'link' token through gkpluginsphp.
            headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
            url = 'http://player.pubfilm.com/smplayer/plugins/gkphp/plugins/gkpluginsphp.php'
            post = re.compile('link\s*:\s*"([^"]+)').findall(result)[0]
            post = urllib.urlencode({'link': post})

            result = client.source(url, post=post, headers=headers)

            # Links with labels; unlabelled links default to 480 (SD).
            r = re.compile('"?link"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
            if not r: r = [(i, 480) for i in re.compile('"?link"?\s*:\s*"([^"]+)').findall(result)]
            r = [(i[0].replace('\\/', '/'), i[1]) for i in r]

            links = [(i[0], '1080p') for i in r if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in r if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in r if 480 <= int(i[1]) < 720]
            # Accept 360p only when nothing else qualified as SD.
            if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in r if 360 <= int(i[1]) < 480]

            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Pubfilm', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
# Exemplo n.º 16
# 0
    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from a Moviestorm links table.

        Only links hosted on ishared.eu / shared2.me are kept, and rows
        marked 'cam' or 'ts' quality are skipped. Every accepted entry is
        reported as SD. Returns a list of source dicts (possibly empty).
        """
        sources = []
        try:
            if url == None: return sources

            page = client.source(urlparse.urljoin(self.base_link, url))

            # The links live as table rows inside <div class="links">.
            table = client.parseDOM(page, 'div', attrs = {'class': 'links'})[0]
            rows = client.parseDOM(table, 'tr')

            for row in rows:
                try:
                    link = client.parseDOM(row, 'a', ret='href')[-1]
                    link = client.replaceHTMLCodes(link)
                    link = link.encode('utf-8')

                    # Keep only whitelisted hosters (matched on the last two
                    # domain labels of the netloc).
                    netloc = urlparse.urlparse(link.strip().lower()).netloc
                    host = re.findall('([\w]+[.][\w]+)$', netloc)[0]
                    if host not in ['ishared.eu', 'shared2.me']: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    # Skip cam/telesync rips.
                    cells = client.parseDOM(row, 'td', attrs = {'class': 'quality_td'})
                    quality = cells[0].lower().strip() if len(cells) > 0 else ''
                    if quality in ['cam', 'ts']: continue

                    sources.append({'source': host, 'quality': 'SD', 'provider': 'Moviestorm', 'url': link, 'direct': False, 'debridonly': False})
                except:
                    continue

            return sources
        except:
            return sources
# Exemplo n.º 17
# 0
    def movie(self, imdb, title, year):
        """Resolve *title*/*year* to a site URL path via a web-search query.

        Matches on normalized title and exact year, also considering the
        richer breadcrumb titles when the search result carries them.
        Returns a utf-8 path, or None on failure.
        """
        try:
            query = '%s %s' % (title.replace(':', ' '), year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

            result = client.source(query)
            result = json.loads(result)['results']

            t = cleantitle.get(title)

            # Consider both the plain result title and (when present) the
            # breadcrumb title from the rich snippet.
            r = [(i['url'], i['titleNoFormatting']) for i in result]
            r += [(i['url'], i['richSnippet']['breadcrumb']['title']) for i in result if 'richSnippet' in i and 'breadcrumb' in i['richSnippet'] and 'title' in i['richSnippet']['breadcrumb']]
            r = [(i[0], re.findall('(?:^Watch Full "|^Watch |)(.+?)\((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            result = r[0][0]
            result = urllib.unquote_plus(result)

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
# Exemplo n.º 18
# 0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve a show URL plus episode metadata to the episode page path.

        Matches the show page's episode items first by title + air date,
        then by air date alone, and finally by the season/episode slug.
        Returns a utf-8 path, or None on failure.
        """
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')

            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)
            # Re-format the ISO air date ('YYYY-MM-DD') as 'Month D YYYY' to
            # match the site's displayed dates.
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])

            # Each item: (hrefs, optional episode name, optional air date);
            # missing fields are normalized to None so tuples stay uniform.
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            # Match strictness degrades: title+date, date only, then slug.
            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = client.replaceHTMLCodes(url[0][0])
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Exemplo n.º 19
0
    def movie(self, imdb, title, year):
        # Search the site for a movie and return its root-relative page URL.
        # Returns None on any failure (best-effort provider convention).
        try:
            # mirror rotation: pick one of the configured base domains
            self.base_link = random.choice([self.base_link_1, self.base_link_2, self.base_link_3])

            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)

            title = cleantitle.get(title)
            years = ['%s' % str(year)]

            # each search hit is a 'cell_container' div with an anchor
            # carrying both the href and a "Title (YYYY)" title attribute
            result = client.parseDOM(result, 'div', attrs = {'class': 'cell_container'})
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?) [(](\d{4})[)]').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
            # filter by normalised title, then by release year; take first hit
            result = [i for i in result if title == cleantitle.get(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            # strip scheme+host if the href was absolute
            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Exemplo n.º 20
0
    def sources(self, url, hostDict, hostprDict):
        # Resolve a movie page URL into a list of playable gvideo stream
        # dicts. Returns the (possibly empty) sources list on any failure.
        try:
            sources = []

            if url == None: return sources

            # NOTE(review): the random mirror choice on the next line is
            # immediately overwritten by the hard-coded assignment below —
            # likely a debugging leftover; confirm which behavior is intended.
            self.base_link = random.choice([self.base_link_1, self.base_link_2, self.base_link_3])
            self.base_link = self.base_link_1

            referer = urlparse.urljoin(self.base_link, url)

            headers = {'X-Requested-With': 'XMLHttpRequest'}

            # the video id is the first query-string value of the page URL
            post = urlparse.parse_qs(urlparse.urlparse(referer).query).values()[0][0]
            post = urllib.urlencode({'v': post})

            url = urlparse.urljoin(self.base_link, '/video_info/iframe')

            result = client.source(url, post=post, headers=headers, referer=referer)

            # response maps quality ("1080", "720", ...) -> stream URL
            result = re.compile('"(\d+)"\s*:\s*"([^"]+)').findall(result)
            result = [(urllib.unquote(i[1].split('url=')[-1]), i[0])  for i in result]

            # bucket streams by vertical resolution
            links = [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]

            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Afdah', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Exemplo n.º 21
0
    def movie(self, imdb, title, year):
        # Look a movie up via the site's JSON feed (a JSONP 'showResult(...)'
        # payload) and return the matching page path. None on failure.
        try:
            query = self.moviesearch_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            # unwrap the JSONP callback to get the raw JSON feed
            result = re.compile('showResult\((.*)\)').findall(result)[0]
            result = json.loads(result)
            result = result['feed']['entry']

            title = cleantitle.get(title)
            years = ['%s' % str(year)]

            # keep entries categorised as movies, then take each entry's
            # canonical HTML link
            result = [i for i in result if 'movies' in [x['term'].lower() for x in i['category']]]
            result = [[x for x in i['link'] if x['rel'] == 'alternate' and x['type'] == 'text/html'][0] for i in result]
            result = [(i['href'], i['title']) for i in result]
            # entry titles look like "Name YYYY <quality tags>"
            result = [(i[0], re.compile('(.+?) (\d{4})(.+)').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
            # reject low-quality telesync/cam rips
            result = [(i[0], i[1], i[2]) for i in result if not 'TS' in i[3] and not 'CAM' in i[3]]
            result = [i for i in result if title == cleantitle.get(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Exemplo n.º 22
0
    def movie(self, imdb, title, year):
        # Find a movie page via an external JSON search API (base64-encoded
        # endpoint), with a second, slug-based search as fallback.
        # Each phase is best-effort; both fall through silently on failure.
        try:
            query = '%s %s' % (title.replace(':', ' '), year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

            result = client.source(query)
            result = json.loads(result)['results']

            title = cleantitle.get(title)
            # allow the stated year plus one either side (release-date drift)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            # strip search-engine dressing from result titles
            r = [(i['url'], i['titleNoFormatting']) for i in result]
            r = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+? [(]\d{4}[)])').findall(i[1])) for i in r]
            r = [(i[0], i[1][0][-1]) for i in r if len(i[1]) > 0]
            # richSnippet breadcrumb titles are an alternative title source
            r += [(i['url'], i['richSnippet']['breadcrumb']['title']) for i in result if 'richSnippet' in i and 'breadcrumb' in i['richSnippet'] and 'title' in i['richSnippet']['breadcrumb']]
            result = [i for i in r if title == cleantitle.get(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]
            result = urllib.unquote_plus(result)

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass

        try:
            # fallback: build the site's expected "/movie/<slug>-" prefix and
            # search for it via the secondary endpoint
            t = title.replace('\'', '')
            t = re.sub(r'[^a-zA-Z0-9\s]+', ' ', t).lower().strip()
            t = re.sub('\s\s+' , ' ', t)
            t = '/movie/' + t.replace(' ' , '-') + '-'

            query = base64.b64decode(self.search_link_2) % t

            result = client.source(query)
            result = json.loads(result)['results']
            result = [i['contentNoFormatting'] for i in result]
            result = ''.join(result)
            # pull every "/movie/..." path mentioned in the snippets
            result = re.findall('(/movie/.+?)\s', result)
            result = [i for i in result if t in i and year in i]
            result = result[0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            # normalise to the first two path segments (the movie root)
            url = '/'.join(url.split('/')[:3]) + '/'
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass
Exemplo n.º 24
0
    def sources(self, url, hostDict, hostprDict):
        # Collect gvideo streams from the page's embedded players.
        # Only players declaring a captions track are accepted (site marker
        # for working embeds). Returns the sources list even on failure.
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            # strip non-ASCII bytes so the regexes below behave
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = []
            # primary embed iframe
            try:
                r = client.parseDOM(result, 'div', attrs = {'id': 'embed'})[0]
                pages.append(client.parseDOM(r, 'iframe', ret='src')[0])
            except:
                pass
            # alternative player: fetch the first menu item's embed by id
            try:
                r = client.parseDOM(result, 'div', attrs = {'id': 'playerMenu'})[0]
                r = client.parseDOM(r, 'div', ret='data-id', attrs = {'class': 'item'})[0]
                r = client.source(urlparse.urljoin(self.base_link, self.video_link), post=urllib.urlencode( {'id': r} ))
                pages.append(client.parseDOM(r, 'iframe', ret='src')[0])
            except:
                pass

            for page in pages:
                try:
                    result = client.source(page)

                    # skip players without a captions track
                    captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                    if not captions: raise Exception()

                    # jwplayer-style file/label pairs; label holds the height
                    result = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"').findall(result)

                    links = [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
                    links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
                    links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]

                    for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Sezonlukdizi', 'url': i[0], 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Exemplo n.º 25
0
    def movie(self, imdb, title, year):
        # Find a movie page, first via an external JSON search API, then —
        # if that raises anywhere — via the site's own search page.
        # Returns the root-relative page path, or None on total failure.
        try:
            t = cleantitle.get(title)

            try:
                query = '%s %s' % (title, year)
                query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

                result = client.source(query)
                result = json.loads(result)['results']

                # strip search-engine and site dressing from result titles
                r = [(i['url'], i['titleNoFormatting']) for i in result]
                r = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+)').findall(i[1])) for i in r]
                r = [(i[0], i[1][0][-1]) for i in r if len(i[1]) > 0]
                r = [(i[0], i[1].rsplit(' For Free On 123Movies', 1)[0].rsplit('On 123Movies', 1)[0]) for i in r]
                # reduce each URL to its first path segment and de-duplicate
                r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
                r = [x for y,x in enumerate(r) if x not in r[:y]]
                r = [i for i in r if t == cleantitle.get(i[1])]
                u = [i[0] for i in r][0]

            except:
                # fallback: on-site search, same normalisation pipeline
                query = self.search2_link % urllib.quote_plus(title)
                query = urlparse.urljoin(self.base_link, query)

                result = client.source(query)

                r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
                r = [x for y,x in enumerate(r) if x not in r[:y]]
                r = [i for i in r if t == cleantitle.get(i[1])]
                u = [i[0] for i in r][0]


            url = urlparse.urljoin(self.base_link, u)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Exemplo n.º 26
0
    def sources(self, url, hostDict, hostprDict):
        # Resolve a show page to ok.ru / vk.com streams. Picks the subtitled
        # ('Altyaz...' — Turkish for subtitle) variant of the episode, digs
        # out the player iframe, then hands off to directstream resolvers.
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            path = urlparse.urlparse(url).path

            result = client.source(url, close=False)
            # strip non-ASCII bytes so regex/DOM parsing behaves
            result = re.sub(r'[^\x00-\x7F]+','', result)
            result = client.parseDOM(result, 'li')
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
            # first list item that links back to this page AND is the
            # subtitled version
            result = [i[0] for i in result if len(i[0]) > 0 and path in i[0][0] and len(i[1]) > 0 and 'Altyaz' in i[1][0]][0][0]

            url = urlparse.urljoin(self.base_link, result)

            result = client.source(url, close=False)
            result = re.sub(r'[^\x00-\x7F]+','', result)
            result = client.parseDOM(result, 'div', attrs = {'class': 'video-player'})[0]
            result = client.parseDOM(result, 'iframe', ret='src')[-1]

            try:
                # iframe src may carry the real URL base64-encoded in ?id=
                url = base64.b64decode(urlparse.parse_qs(urlparse.urlparse(result).query)['id'][0])
                if not url.startswith('http'): raise Exception()
            except:
                # otherwise fetch the iframe and scan it for ok.ru/vk links
                url = client.source(result)
                url = urllib.unquote_plus(url.decode('string-escape'))
                url = re.compile('"(.+?)"').findall(url)
                url = [i for i in url if 'ok.ru' in i or 'vk.com' in i][0]

            # normalise ok.ru embed links to the canonical /video/<mid> form
            try: url = 'http://ok.ru/video/%s' % urlparse.parse_qs(urlparse.urlparse(url).query)['mid'][0]
            except: pass

            if 'ok.ru' in url: host = 'vk' ; url = directstream.odnoklassniki(url)
            elif 'vk.com' in url: host = 'vk' ; url = directstream.vk(url)
            else: raise Exception()

            for i in url: sources.append({'source': host, 'quality': i['quality'], 'provider': 'Onlinedizi', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Exemplo n.º 27
0
    def sources(self, url, hostDict, hostprDict):
        # Resolve a movie or episode page (episodes are encoded as
        # '<path>?episode=N') into gvideo streams scraped from the
        # player_iframe links on the page.
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            # an '?episode=N' suffix marks an episode request
            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            result = client.source(url)

            # (iframe href, link text); link text carries the episode number
            url = zip(client.parseDOM(result, 'a', ret='href', attrs = {'target': 'player_iframe'}), client.parseDOM(result, 'a', attrs = {'target': 'player_iframe'}))
            url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
            url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]

            if content == 'episode':
                url = [i for i in url if i[1] == '%01d' % int(episode)]

            links = [client.replaceHTMLCodes(i[0]) for i in url]

            for u in links:

                try:
                    # each player page embeds a jwplayer sources array
                    result = client.source(u)
                    result = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    result = re.findall('"file"\s*:\s*"(.+?)".+?"label"\s*:\s*"(.+?)"', result)

                    url = [{'url': i[0], 'quality': '1080p'} for i in result if '1080' in i[1]]
                    url += [{'url': i[0], 'quality': 'HD'} for i in result if '720' in i[1]]

                    for i in url:
                        sources.append({'source': 'gvideo', 'quality': i['quality'], 'provider': 'Pubfilm', 'url': i['url'], 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Exemplo n.º 28
0
 def usmovies_moviecache(self):
     """Fetch the site's attachment sitemap and return a list of
     root-relative movie paths (one per <loc> entry).

     Returns None on any failure, per the provider's best-effort style.
     """
     try:
         sitemap_url = urlparse.urljoin(self.base_link, '/attachment-sitemap.xml')
         page = str(client.source(sitemap_url))
         paths = []
         for loc in client.parseDOM(page, 'loc'):
             # drop scheme+host, keep a root-relative path, and cut the
             # trailing '/attachment/...' segment
             relative = re.sub('http.+?//.+?/','/', loc)
             paths.append(relative.split('/attachment/')[0])
         return paths
     except:
         return
Exemplo n.º 29
0
 def request(self, url, post=None, cookie=None, referer=None, output='', close=True):
     """Thin wrapper over client.source: attaches optional Cookie/Referer
     headers, re-encodes the latin-1 response body as utf-8, and
     URL-unquotes it.

     Returns the processed page text, or None on any failure.
     """
     try:
         headers = {'Accept': '*/*'}
         if cookie is not None:
             headers['Cookie'] = cookie
         if referer is not None:
             headers['Referer'] = referer
         page = client.source(url, post=post, headers=headers, output=output, close=close)
         page = page.decode('iso-8859-1').encode('utf-8')
         return urllib.unquote_plus(page)
     except:
         return
Exemplo n.º 30
0
    def ninemovies_cache(self):
        """Scrape the 'Movies and TV-Shows' sub-menu and return a list of
        (root-relative path, label) pairs, with HTML numeric entities
        stripped from the labels. Returns None on any failure.
        """
        try:
            menu_url = urlparse.urljoin(self.base_link, self.search_link)

            html = client.source(menu_url)
            # only markup after the section header is relevant
            html = html.split('>Movies and TV-Shows<')[-1]
            menu = client.parseDOM(html, 'ul', attrs = {'class': 'sub-menu'})[0]

            entries = []
            for href, label in re.compile('href="(.+?)">(.+?)<').findall(menu):
                href = re.sub('http.+?//.+?/','/', href)
                label = re.sub('&#\d*;','', label)
                entries.append((href, label))
            return entries
        except:
            return
Exemplo n.º 31
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Resolve a tvshow query-string `url` (carries 'tvshowtitle' etc.)
        # to '<season-page-path>?episode=N'. First tries an external JSON
        # search API; on any failure falls back to the on-site search.
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            tvshowtitle = cleantitle.get(data['tvshowtitle'])
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            try:
                query = '%s season %01d' % (data['tvshowtitle'], int(season))
                query = base64.b64decode(
                    self.search_link) % urllib.quote_plus(query)

                result = client.source(query)
                result = json.loads(result)['results']

                # strip search-engine dressing, then parse
                # "<Title> - Season <N>" out of each result title
                r = [(i['url'], i['titleNoFormatting']) for i in result]
                r = [(i[0],
                      re.compile('(^Watch Full "|^Watch |)(.+)').findall(i[1]))
                     for i in r]
                r = [(i[0], i[1][0][-1]) for i in r if len(i[1]) > 0]
                r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1]))
                     for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                # reduce each URL to its first path segment, de-duplicate
                r = [(re.sub('http.+?//.+?/', '', i[0]), i[1], i[2])
                     for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
                r = [x for y, x in enumerate(r) if x not in r[:y]]
                r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
                u = [i[0] for i in r if season == '%01d' % int(i[2])][0]

            except:
                # fallback: the site's own search page, same pipeline
                query = self.search2_link % urllib.quote_plus(
                    data['tvshowtitle'])
                query = urlparse.urljoin(self.base_link, query)

                result = client.source(query)

                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][-1]) for i in r
                     if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1]))
                     for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/', '', i[0]), i[1], i[2])
                     for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
                r = [x for y, x in enumerate(r) if x not in r[:y]]
                r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
                u = [i[0] for i in r if season == '%01d' % int(i[2])][0]

            url = urlparse.urljoin(self.base_link, u)
            url = urlparse.urlparse(url).path
            # encode the episode number in the query string; sources() later
            # splits it back out
            url += '?episode=%01d' % int(episode)
            url = url.encode('utf-8')
            return url
        except:
            return
Exemplo n.º 32
0
    def sources(self, url, hostDict, hostprDict):
        # Resolve an episode (query-string form) or direct URL into gvideo
        # streams. Episode URLs are reconstructed from a cached tv index,
        # then the page's gkplugins player and iframe embeds are scraped.
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                # query-string form: rebuild the episode URL from the cached
                # show index (title/season -> show path)
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = cleantitle.get(data['tvshowtitle'])
                season = data['season']
                episode = data['episode']

                year = data['year']
                years = ['%s' % str(int(year)+1), '%s' % str(int(year)-1)]

                url = cache.get(self.usseries_tvcache, 120)

                url = [i[0] for i in url if title == i[1] and season == i[2]][-1]
                url = [i for i in url.split('/') if not i == ''][-1]
                url = '/%s-episode-%01d' % (url.replace('/', ''), int(episode))
                url = urlparse.urljoin(self.base_link, url)


            try:
                result = client.source(url)
                r = client.parseDOM(result, 'link', ret='href', attrs = {'rel': 'canonical'})[0]
            except:
                # retry with 'the-' stripped from the slug (site quirk)
                url = url.replace('/the-', '/').replace('-the-', '-')
                result = client.source(url)
                r = client.parseDOM(result, 'link', ret='href', attrs = {'rel': 'canonical'})[0]


            links = []
            # canonical URL doubles as the Referer for player requests
            headers = {'Referer': r}
            result = client.parseDOM(result, 'div', attrs = {'class': 'video-embed'})[0]

            # player 1: gkpluginsphp endpoint returns JSON link list
            try:
                post = re.findall('{link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})

                url = urlparse.urljoin(self.base_link, '/plugins/gkpluginsphp.php')
                url = client.source(url, post=post, headers=headers)
                url = json.loads(url)['link']
                links += [i['link'] for i in url if 'link' in i]
            except:
                pass

            # player 2: iframe embedding a jwplayer sources array
            try:
                url = client.parseDOM(result, 'iframe', ret='.+?')[0]
                url = client.source(url, headers=headers)
                url = url.replace('\n', '')

                url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
                url = re.findall('"file"\s*:\s*"(.+?)"', url)
                links += [i.split()[0] for i in url]
            except:
                pass

            for i in links:
                # quality is inferred from the google-video itag
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'USseries', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
Exemplo n.º 33
0
    def movie(self, imdb, title, year):
        # Find a (Spanish-language) movie page. Phase 1: external JSON
        # search; if the title doesn't match, retry with the localised
        # title scraped from IMDb. Phase 2 (on any failure): on-site search.
        try:
            t = cleantitle.get(title)

            query = '%s %s' % (title, year)
            query = base64.b64decode(
                self.search_link) % urllib.quote_plus(query)

            result = client.source(query)
            result = json.loads(result)['results']

            # result titles look like "Ver <Title> HD (YYYY)..."
            result = [(i['url'], i['titleNoFormatting']) for i in result]
            result = [(i[0],
                       re.findall('(?:^Ver |)(.+?)(?: HD |)\((\d{4})', i[1]))
                      for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result
                      if len(i[1]) > 0]

            r = [
                i for i in result if t == cleantitle.get(i[1]) and year == i[2]
            ]

            if len(r) == 0:
                # no match on the English title: fetch the Spanish title
                # from IMDb and retry the comparison
                t = 'http://www.imdb.com/title/%s' % imdb
                t = client.source(t, headers={'Accept-Language': 'es-ES'})
                t = client.parseDOM(t, 'title')[0]
                t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
                t = cleantitle.get(t)

                r = [
                    i for i in result
                    if t == cleantitle.get(i[1]) and year == i[2]
                ]

            # strip scheme+host, then keep the first two path segments
            try:
                url = re.findall('//.+?(/.+)', r[0][0])[0]
            except:
                url = r[0][0]
            try:
                url = re.findall('(/.+?/.+?/)', url)[0]
            except:
                pass
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass

        try:
            # fallback: site search (behind cloudflare), list items carry
            # the title in <i> and the year in parentheses
            t = cleantitle.get(title)

            query = self.search3_link % urllib.quote_plus(
                cleantitle.query(title))
            query = urlparse.urljoin(self.base_link, query)

            result = cloudflare.source(query)
            result = re.sub(r'[^\x00-\x7F]+', '', result)

            r = result.split('<li class=')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'),
                  re.findall('\((\d{4})\)', i)) for i in r]
            r = [(i[0][0], re.sub('\(|\)', '', i[1][0]), i[2][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            r = [
                i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]
            ][0]

            try:
                url = re.findall('//.+?(/.+)', r)[0]
            except:
                url = r
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass
Exemplo n.º 34
0
    def sources(self, url, hostDict, hostprDict):
        # Scrape Pelispedia player buttons and resolve each of the three
        # known player backends (jwplayer sources array, gkpluginsphp,
        # protected.php) into stream links.
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(r)

            # locate the player iframe on the movie page
            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = cloudflare.source(f, headers={'Referer': r})

            # 'botones' = player selection buttons; keep on-site players only
            r = client.parseDOM(result, 'div', attrs={'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]
            r = [i[0] for i in r if 'pelispedia' in i[1]]

            links = []

            for u in r:
                result = cloudflare.source(u, headers={'Referer': f})

                # backend 1: inline jwplayer sources array of gvideo files
                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('"file"\s*:\s*"(.+?)"', url)
                    url = [i.split()[0].replace('\\/', '/') for i in url]

                    for i in url:
                        try:
                            links.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'url':
                                i
                            })
                        except:
                            pass
                except:
                    pass

                # backend 2: gkpluginsphp endpoint returns a JSON link
                try:
                    headers = {
                        'X-Requested-With': 'XMLHttpRequest',
                        'Referer': u
                    }

                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)',
                                      result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(
                        self.base_link,
                        '/Pe_flv_flsh/plugins/gkpluginsphp.php')
                    url = client.source(url, post=post, headers=headers)
                    url = json.loads(url)['link']

                    links.append({
                        'source': 'gvideo',
                        'quality': 'HD',
                        'url': url
                    })
                except:
                    pass

                # backend 3: protected.php, keyed by the 'pic' query value
                # of the page's "parametros" blob
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)',
                                      result)[0]
                    post = urlparse.parse_qs(
                        urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({
                        'sou': 'pic',
                        'fv': '21',
                        'url': post
                    })

                    url = urlparse.urljoin(
                        self.base_link,
                        '/Pe_Player_Html5/pk/pk/plugins/protected.php')
                    url = cloudflare.source(url, post=post, headers=headers)
                    url = json.loads(url)[0]['url']

                    links.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'url': url
                    })
                except:
                    pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Pelispedia',
                    'url': i['url'],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Exemplo n.º 35
0
    def sources(self, url, hostDict, hostprDict):
        # Authenticated provider: logs in, pulls the first embed from the
        # page, then tries three resolvers in order — an encrypted
        # 'mplanet' gvideo link, a redirect-following CDN download link,
        # and plain jwplayer file/label pairs.
        try:
            sources = []

            if url == None: return sources

            # account credentials are mandatory for this provider
            if (self.user == '' or self.password == ''): raise Exception()

            # opener helper that surfaces 3xx responses instead of
            # following them, so the Location header can be read directly
            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response


            headers = {'X-Requested-With': 'XMLHttpRequest'}
            login = urlparse.urljoin(self.base_link, '/login')
            post = {'username': self.user, 'password': self.password, 'action': 'login'}
            post = urllib.urlencode(post)

            # capture the session cookie from the login response
            cookie = client.source(login, post=post, headers=headers, output='cookie')


            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, cookie=cookie)

            # first embed slot holds the player iframe
            url = re.compile("embeds\[\d+\]\s*=\s*'([^']+)").findall(result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            # resolver 1: 'mplanet'-wrapped, gk-encrypted gvideo link
            try:
                url = re.compile('mplanet\*(.+)').findall(url)[0]
                url = url.rsplit('&')[0]
                dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), url)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            # shared parse for resolvers 2 and 3: jwplayer file/label pairs
            try:
                result = client.source(url)

                result = re.compile('sources\s*:\s*\[(.*?)\]', re.DOTALL).findall(result)[0]
                result = re.compile('''['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*([^'"]+)''', re.DOTALL).findall(result)
            except:
                pass

            # resolver 2: download.php / .live. links — follow one redirect
            # manually (NoRedirection) to get the real CDN URL
            try:
                u = a = result[0][0]
                if not 'download.php' in u and not '.live.' in u: raise Exception()
                o = urllib2.build_opener(NoRedirection)
                o.addheaders = [('User-Agent', client.randomagent()), ('Cookie', cookie)]
                r = o.open(u)
                try: u = r.headers['Location']
                except: pass
                r.close()
                if a == u: raise Exception()
                links += [(u, '1080p', 'cdn')]
            except:
                pass
            # resolver 3: bucket the file/label pairs by numeric height
            try:
                u = [(i[0], re.sub('[^0-9]', '', i[1])) for i in result]
                u = [(i[0], i[1]) for i in u if i[1].isdigit()]
                links += [(i[0], '1080p', 'gvideo') for i in u if int(i[1]) >= 1080]
                links += [(i[0], 'HD', 'gvideo') for i in u if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD', 'gvideo') for i in u if 480 <= int(i[1]) < 720]
            except:
                pass


            for i in links: sources.append({'source': i[2], 'quality': i[1], 'provider': 'Moviesplanet', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Exemplo n.º 36
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve the site path for one TV episode.

        url: show path produced by tvshow() (relative to base_link).
        title/premiered/season/episode: metadata used to match a row on
        the show page.  Returns the episode page path as a utf-8 str, or
        None on any failure (all errors are deliberately swallowed).
        """
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            # Site serves latin-1; re-encode so the parsing below sees utf-8.
            result = result.decode('iso-8859-1').encode('utf-8')

            # One div per episode row on the show page.
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'tv_episode_item'})

            title = cleantitle.get(title)
            # Rewrite the ISO date (YYYY-MM-DD) as "Month D YYYY" so it can be
            # string-compared against the page's displayed air date below.
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                premiered)[0]
            premiered = '%s %01d %s' % (premiered[1].replace(
                '01', 'January').replace('02', 'February').replace(
                    '03', 'March').replace('04', 'April').replace(
                        '05', 'May').replace('06', 'June').replace(
                            '07', 'July').replace('08', 'August').replace(
                                '09',
                                'September').replace('10', 'October').replace(
                                    '11', 'November').replace(
                                        '12', 'December'), int(
                                            premiered[2]), premiered[0])

            # Per row: (href list, episode-name list, air-date-span list).
            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i,
                                       'span',
                                       attrs={'class': 'tv_episode_name'}),
                       client.parseDOM(i,
                                       'span',
                                       attrs={'class': 'tv_num_versions'}))
                      for i in result]
            # Flatten each field, padding a missing name/date with None.
            result = [
                (i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0
            ] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [
                (i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0
            ] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            # Match by title + air date first, then air date alone, then by
            # the season/episode slug embedded in the href.
            url = [
                i for i in result
                if title == cleantitle.get(i[1]) and premiered == i[2]
            ][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1:
                url = [
                    i for i in result if 'season-%01d-episode-%01d' %
                    (int(season), int(episode)) in i[0]
                ]

            url = client.replaceHTMLCodes(url[0][0])
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Exemplo n.º 37
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):
                try:
                    data = urlparse.parse_qs(url)
                    data = dict([(i, data[i][0]) if data[i] else (i, '')
                                 for i in data])

                    title = data[
                        'tvshowtitle'] if 'tvshowtitle' in data else data[
                            'title']

                    year = re.findall(
                        '(\d{4})', data['premiered']
                    )[0] if 'tvshowtitle' in data else data['year']

                    try:
                        episode = data['episode']
                    except:
                        pass

                    query = {'keyword': title}
                    query.update(self.__get_token(query))
                    search_url = urlparse.urljoin(self.base_link, '/search')
                    search_url = search_url + '?' + urllib.urlencode(query)

                    result = client.source(search_url, safe=True)

                    r = client.parseDOM(
                        result, 'div', attrs={'class':
                                              '[^"]*movie-list[^"]*'})[0]
                    r = client.parseDOM(r, 'div', attrs={'class': 'item'})
                    r = [(client.parseDOM(i, 'a', ret='href'),
                          client.parseDOM(i, 'a', attrs={'class': 'name'}))
                         for i in r]
                    r = [(i[0][0], i[1][0]) for i in r
                         if len(i[0]) > 0 and len(i[1]) > 0]
                    r = [(re.sub('http.+?//.+?/', '/',
                                 i[0]), re.sub('&#\d*;', '', i[1])) for i in r]

                    if 'season' in data:
                        url = [(i[0], re.findall('(.+?) (\d*)$', i[1]))
                               for i in r]
                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url
                               if len(i[1]) > 0]
                        url = [
                            i for i in url
                            if cleantitle.get(title) == cleantitle.get(i[1])
                        ]
                        url = [
                            i for i in url if '%01d' %
                            int(data['season']) == '%01d' % int(i[2])
                        ]
                    else:
                        url = [
                            i for i in r
                            if cleantitle.get(title) == cleantitle.get(i[1])
                        ]
                    '''
                    r = cache.get(self.fmovies_cache, 120)

                    if 'season' in data:
                        url = [(i[0], re.findall('(.+?) (\d*)$', i[1]), i[2]) for i in r]
                        url = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in url if len(i[1]) > 0]
                        url = [i for i in url if cleantitle.get(title) == cleantitle.get(i[1])]
                        url = [i for i in url if i[3] == year] + [i for i in url if i[3] == data['year']]
                        url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                    else:
                        url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and i[2] == year]
                    '''

                    url = url[0][0]
                    url = urlparse.urljoin(self.base_link, url)
                except:
                    url == self.base_link

            try:
                url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(
                    url)[0]
            except:
                pass

            referer = url

            #xtoken = self.__get_xtoken()

            #if xtoken == None: raise Exception()

            result = client.source(url, safe=True)

            atr = [
                i for i in client.parseDOM(result, 'dd')
                if len(re.findall('(\d{4})', i)) > 0
            ][-1]
            if 'season' in data:
                result = result if atr == year or atr == data['year'] else None
            else:
                result = result if atr == year else None

            try:
                quality = client.parseDOM(result,
                                          'span',
                                          attrs={'class':
                                                 'quality'})[0].lower()
            except:
                quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
            else: quality = 'SD'

            result = client.parseDOM(result, 'ul', attrs={'id': 'servers'})

            servers = []
            servers = client.parseDOM(result,
                                      'li',
                                      attrs={'data-type': 'direct'})
            servers = zip(client.parseDOM(servers, 'a', ret='data-id'),
                          client.parseDOM(servers, 'a'))
            servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
            servers = [(i[0], ''.join(i[1][:1])) for i in servers]

            try:
                servers = [
                    i for i in servers
                    if '%01d' % int(i[1]) == '%01d' % int(episode)
                ]
            except:
                pass

            for s in servers[:3]:
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    url = urlparse.urljoin(self.base_link, self.hash_link)

                    query = {'id': s[0], 'update': '0', '_xtoken': xtoken}
                    query.update(self.__get_token(query))
                    url = url + '?' + urllib.urlencode(query)

                    result = client.source(url,
                                           headers=headers,
                                           referer=referer,
                                           safe=True)
                    result = json.loads(result)

                    query = result['params']
                    query['mobile'] = '0'
                    query.update(self.__get_token(query))
                    grabber = result['grabber'] + '?' + urllib.urlencode(query)

                    result = client.source(grabber,
                                           headers=headers,
                                           referer=url,
                                           safe=True)
                    result = json.loads(result)

                    result = result['data']
                    result = [i['file'] for i in result if 'file' in i]

                    for i in result:
                        try:
                            sources.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'provider':
                                'Ninemovies',
                                'url':
                                i,
                                'direct':
                                True,
                                'debridonly':
                                False
                            })
                        except:
                            pass
                except:
                    pass

            if quality == 'CAM':
                for i in sources:
                    i['quality'] = 'CAM'

            return sources
        except:
            return sources
Exemplo n.º 38
0
    def sources(self, url, hostDict, hostprDict):
        """Collect a cdn source from an account-protected streaming site.

        `url` is either a plugin query string with title metadata or a
        direct page URL.  The query path requires self.user/self.password.
        Returns a list with at most one source dict; empty on failure.

        NOTE: once the page is fetched, the variable `url` holds page
        HTML rather than a URL — the regexes below operate on it as text.
        """
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']

                year = data['year']

                # The site only works with a logged-in session.
                if (self.user == '' or self.password == ''): raise Exception()

                query = urlparse.urljoin(self.base_link, '/login.html')
                post = urllib.urlencode({
                    'username': self.user,
                    'password': self.password,
                    'submit': 'Login'
                })

                # Log in; fall back to the cloudflare-aware client when the
                # plain request fails.
                try:
                    r, headers, content, cookie = client.source(
                        query, post=post, output='extended')
                    headers = {
                        'Cookie': cookie,
                        'User-Agent': headers['User-Agent']
                    }
                except:
                    cookie, agent, url = cloudflare.request(query,
                                                            post=post,
                                                            output='extended')
                    headers = {'Cookie': cookie, 'User-Agent': agent}

                query = urlparse.urljoin(self.base_link, self.search_link)
                post = urllib.urlencode({'search': title})

                r = cloudflare.source(query, post=post, headers=headers)

                # Result-link pattern differs for TV shows vs movies.
                if 'tvshowtitle' in data:
                    r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i))
                         for i in r]
                else:
                    r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i))
                         for i in r]

                r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                r = [
                    i for i in r
                    if cleantitle.get(title) == cleantitle.get(i[1])
                ]
                r = [i[0] for i in r][0]

                r = urlparse.urljoin(self.base_link, r)

                # From here on `url` is page HTML (see docstring).
                url = cloudflare.source(r, headers=headers)

                if 'season' in data and 'episode' in data:
                    # Drill down from the show page to the episode page.
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', url)
                    r = [
                        i for i in r if '-s%02de%02d-' %
                        (int(data['season']),
                         int(data['episode'])) in i.lower()
                    ][0]
                    r = urlparse.urljoin(self.base_link, r)

                    url = cloudflare.source(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)
                cookie, agent, url = cloudflare.request(r, output='extended')
                headers = {'Cookie': cookie, 'User-Agent': agent}

            quality = 'HD' if '-movie-' in url else 'SD'

            # The stream URL is obfuscated: a JS function on the page builds
            # it by concatenating an array literal, a named variable and the
            # text of a hidden span.  Recover each piece in turn.
            func = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', url)[0]
            func = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', func)[0]

            u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % func, url)[0]
            u = re.findall(
                '\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]

            a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], url)[0]
            b = client.parseDOM(url, 'span', {'id': u[2]})[0]

            # Reassemble and strip the JS punctuation to recover the URL.
            url = u[0] + a + b
            url = url.replace('"', '').replace(',', '').replace('\/', '/')
            # '|<urlencoded headers>' suffix — presumably the Kodi convention
            # for passing request headers to the player; verify downstream.
            url += '|' + urllib.urlencode(headers)

            sources.append({
                'source': 'cdn',
                'quality': quality,
                'provider': 'Streamlord',
                'url': url,
                'direct': True,
                'debridonly': False,
                'autoplay': False
            })

            return sources
        except:
            return sources
Exemplo n.º 40
0
    def sources(self, url, hostDict, hostprDict):
        """Collect debrid-only hoster links from two DirectDL backends.

        Phase 1 queries a JSON search API (TV shows only) and keeps
        releases matching the SxxExx tag; phase 2 scrapes the mirror
        site's cached a-z index (endpoints are base64-obscured in
        self.*_link).  Requires an active debrid account.  Returns a
        list of source dicts; empty on failure.
        """
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            # --- Phase 1: JSON search API (episodes only). ---
            try:
                if not 'tvshowtitle' in data: raise Exception()

                links = []

                f = [
                    'S%02dE%02d' % (int(data['season']), int(data['episode']))
                ]
                t = data['tvshowtitle']

                q = base64.b64decode(self.search_link) + urllib.quote_plus(
                    '%s %s' % (t, f[0]))
                q = urlparse.urljoin(self.base_link, q)

                result = client.source(q)
                result = json.loads(result)
            except:
                links = result = []

            for i in result:
                try:
                    if not cleantitle.get(t) == cleantitle.get(i['showName']):
                        raise Exception()

                    # Take the last year-or-SxxExx token from the release
                    # name and require it to equal the requested episode tag.
                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]'
                                   ).findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f): raise Exception()

                    quality = i['quality']

                    # `size` appears to be MB (divided by 1024, labelled GB).
                    size = i['size']
                    size = float(size) / 1024
                    size = '%.2f GB' % size

                    if 'X265' in quality: info = '%s | HEVC' % size
                    else: info = size

                    if '1080P' in quality: quality = '1080p'
                    elif quality in ['720P', 'WEBDL']: quality = 'HD'
                    else: quality = 'SD'

                    url = i['links']
                    for x in url.keys():
                        links.append({
                            'url': url[x],
                            'quality': quality,
                            'info': info
                        })
                except:
                    pass

            for i in links:
                try:
                    url = i['url']
                    # Skip multi-part releases; only single-file links pass.
                    if len(url) > 1: raise Exception()
                    url = url[0].encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': i['quality'],
                        'provider': 'DirectDL',
                        'url': url,
                        'info': i['info'],
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            # --- Phase 2: scrape the mirror site's a-z index. ---
            try:
                # (name-without-tld, full host) pairs for host resolution.
                hostDict2 = [(i.rsplit('.', 1)[0], i) for i in hostDict]

                # Index page keyed by the title's first letter.
                q = ('/tv/a-z/%s',
                     data['tvshowtitle']) if 'tvshowtitle' in data else (
                         '/movies/a-z/%s', data['title'])
                q = q[0] % re.sub('^THE\s+|^A\s+', '', q[1].strip().upper())[0]

                url = cache.get(self.directdl_cache, 120, q)
                url = [i[0] for i in url if data['imdb'] == i[1]][0]
                url = urlparse.urljoin(base64.b64decode(self.b_link), url)

                try:
                    v = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]
                except:
                    v = None

                # Without a 'v' id we are on a show page: locate the
                # NxNN episode link first.
                if v == None:
                    result = self.request(url)
                    url = re.compile('(/ip[.]php.+?>)%01dx%02d' %
                                     (int(data['season']), int(
                                         data['episode']))).findall(result)[0]
                    url = re.compile('(/ip[.]php.+?)>').findall(url)[-1]
                    url = urlparse.urljoin(base64.b64decode(self.b_link), url)

                url = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]

                # URL templates obscured as base64 on the instance.
                u = base64.b64decode(self.u_link) % url
                r = base64.b64decode(self.r_link) % url
                j = base64.b64decode(self.j_link)
                p = base64.b64decode(self.p_link)

                result = self.request(u, referer=r)

                # Scrape the anti-bot tokens embedded in the page scripts;
                # their exact semantics are server-side (not visible here).
                secret = re.compile(
                    'lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?').findall(
                        result)[0]
                secret = ''.join(secret)

                t = re.compile('"&t=([^"]+)').findall(result)[0]

                s_start = re.compile('(?:\s+|,)s\s*=(\d+)').findall(result)[0]
                m_start = re.compile('(?:\s+|,)m\s*=(\d+)').findall(result)[0]

                img = re.compile('<iframe[^>]*src="([^"]+)').findall(result)
                img = img[0] if len(img) > 0 else '0'
                img = urllib.unquote(img)

                # One 'ripdiv' per quality group; split each into entries.
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'ripdiv'})
                result = [(re.compile('<b>(.*?)</b>').findall(i), i)
                          for i in result]
                result = [(i[0][0], i[1].split('<p>')) for i in result
                          if len(i[0]) > 0]
                result = [[(i[0], x) for x in i[1]] for i in result]
                result = sum(result, [])
            except:
                result = []

            for i in result:
                try:
                    quality = i[0]
                    if any(x in quality for x in ['1080p', '720p', 'HD']):
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    host = client.parseDOM(i[1], 'a')[-1]
                    host = re.sub('\s|<.+?>|</.+?>|.+?#\d*:', '', host)
                    host = host.strip().rsplit('.', 1)[0].lower()
                    host = [x[1] for x in hostDict2 if host == x[0]][0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    # Randomised offsets on the scraped counters — mimics the
                    # page's own JS, presumably to satisfy server checks.
                    s = int(s_start) + random.randint(3, 1000)
                    m = int(m_start) + random.randint(21, 1000)
                    id = client.parseDOM(i[1], 'a', ret='onclick')[-1]
                    id = re.compile('[(](.+?)[)]').findall(id)[0]
                    url = j % (id, t) + '|' + p % (id, s, m, secret, t)
                    url += '|%s' % urllib.urlencode({'Referer': u, 'Img': img})
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'DirectDL',
                        'url': url,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Exemplo n.º 41
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape gvideo links for a movie page and return source dicts.

        `url` is either a plugin query string (title/year) or a direct
        page URL.  Returns a list of source dicts; empty on failure.
        """
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):
                # Plugin query string: rebuild the site slug from title/year
                # and look the page path up in the cached movie index.
                params = urlparse.parse_qs(url)
                params = dict((k, params[k][0] if params[k] else '')
                              for k in params)

                slug = params['title'].replace(':', '').replace('\'', '')
                slug = slug.replace(' ', '-')
                slug = re.sub('\-+', '-', slug.lower())
                slug = '/%s-%s' % (slug, params['year'])

                cached = cache.get(self.usmovies_moviecache, 120)
                matches = [i for i in cached if slug in i]
                url = client.replaceHTMLCodes(matches[-1])

            page = urlparse.urljoin(self.base_link, url)
            headers = {'Referer': page}

            html = client.source(page)
            html = client.parseDOM(html,
                                   'div',
                                   attrs={'class': 'video-embed'})[0]

            found = []

            # Provider 1: gkpluginsphp endpoint, POSTed the embedded link id.
            try:
                link = re.findall('{link\s*:\s*"([^"]+)', html)[0]
                endpoint = urlparse.urljoin(self.base_link,
                                            '/plugins/gkpluginsphp.php')
                resp = client.source(endpoint,
                                     post=urllib.urlencode({'link': link}),
                                     headers=headers)
                for item in json.loads(resp)['link']:
                    if 'link' in item:
                        found.append(item['link'])
            except:
                pass

            # Provider 2: iframe embed exposing a jwplayer sources array.
            try:
                frame = client.parseDOM(html, 'iframe', ret='.+?')[0]
                body = client.source(frame, headers=headers).replace('\n', '')
                body = re.findall('sources\s*:\s*\[(.+?)\]', body)[0]
                for f in re.findall('"file"\s*:\s*"(.+?)"', body):
                    found.append(f.split()[0])
            except:
                pass

            for link in found:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality':
                        directstream.googletag(link)[0]['quality'],
                        'provider': 'USmovies',
                        'url': link,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Exemplo n.º 42
0
    def sources(self, url, hostDict, hostprDict):
        """Collect gvideo sources for a title resolved via the cached index.

        `url` is either a plugin query string (title/year/season/episode
        metadata) or a site path.  Returns a list of source dicts; empty
        on failure (errors are swallowed by design).

        FIX: removed a leftover Python-2 `print url` debug statement
        (debug noise, and a syntax error under Python 3).
        """
        try:
            sources = []

            if url == None: return sources

            try:
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']
                title = cleantitle.get(title)

                # Cached (path, title) catalogue of the site.
                url = cache.get(self.ninemovies_cache, 120)

                url = [(i[0], i[1], cleantitle.get(i[1])) for i in url]
                # Also match with any trailing season number stripped.
                url = [(i[0], i[1], i[2], re.sub('\d*$', '', i[2]))
                       for i in url]
                url = [i for i in url if title == i[2]
                       ] + [i for i in url if title == i[3]]

                if 'season' in data and int(data['season']) > 1:
                    # Seasons past the first are listed as "Title N".
                    url = [(i[0], re.compile('\s+(\d*)$').findall(i[1]))
                           for i in url]
                    url = [(i[0], i[1][0]) for i in url if len(i[1]) > 0]
                    url = [
                        i for i in url
                        if '%01d' % int(data['season']) == '%01d' % int(i[1])
                    ]

                url = url[0][0]
            except:
                pass

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            # Accept the page only if its year is within +/-1 of the request.
            years = re.findall('(\d{4})', data['premiered']
                               )[0] if 'tvshowtitle' in data else data['year']
            years = [
                '%s' % str(years),
                '%s' % str(int(years) + 1),
                '%s' % str(int(years) - 1)
            ]
            year = re.compile('<dd>(\d{4})</dd>').findall(result)[0]
            if not year in years: raise Exception()

            try:
                quality = client.parseDOM(result,
                                          'dd',
                                          attrs={'class':
                                                 'quality'})[0].lower()
            except:
                quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
            else: quality = 'SD'

            # (hash id, first number in the label) per episode entry.
            result = client.parseDOM(result, 'ul', attrs={'class': 'episodes'})
            result = zip(client.parseDOM(result, 'a', ret='data-id'),
                         client.parseDOM(result, 'a'))
            result = [(i[0], re.findall('(\d+)', i[1])) for i in result]
            result = [(i[0], ''.join(i[1][:1])) for i in result]

            if 'episode' in data:
                result = [
                    i for i in result
                    if '%01d' % int(i[1]) == '%01d' % int(data['episode'])
                ]

            # Defer host resolution: pack the hash id + referer for resolve().
            links = [
                urllib.urlencode({
                    'hash_id': i[0],
                    'referer': url
                }) for i in result
            ]

            for i in links:
                sources.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'provider': 'Ninemovies',
                    'url': i,
                    'direct': True,
                    'debridonly': False
                })

            try:
                # Spot-check one link: if an advertised-HD page actually
                # resolves to an SD stream, downgrade every source.
                if not quality == 'HD': raise Exception()
                quality = directstream.googletag(self.resolve(
                    links[0]))[0]['quality']
                if not quality == 'SD': raise Exception()
                for i in sources:
                    i['quality'] = 'SD'
            except:
                pass

            return sources
        except:
            return sources
Exemplo n.º 43
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape DDLvalley post listings for debrid-only hoster links.

        ``url`` is a urlencoded query string carrying title/year (movies)
        or tvshowtitle/season/episode (shows).  Returns a list of source
        dicts; empty on any failure (errors are swallowed by design).
        """
        try:
            sources = []

            if url == None: return sources

            # This provider only lists premium-hoster links, so skip it
            # entirely unless a debrid service is active.
            if debrid.status() == False: raise Exception()

            # Today's date as an int YYYYMMDD, used below to drop old posts.
            dt = int(datetime.datetime.now().strftime('%Y%m%d'))
            # Month-name prefix -> month number, for parsing post dates.
            mt = {
                'jan': '1',
                'feb': '2',
                'mar': '3',
                'apr': '4',
                'may': '5',
                'jun': '6',
                'jul': '7',
                'aug': '8',
                'sep': '9',
                'oct': '10',
                'nov': '11',
                'dec': '12'
            }

            # Flatten parse_qs' value lists into single strings.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            # Tokens a matching release name must contain: SxxExx for
            # episodes, or the year (+/- 1) for movies.
            hdlr = [
                'S%02dE%02d' % (int(data['season']), int(data['episode']))
            ] if 'tvshowtitle' in data else [
                '%s' % str(data['year']),
                '%s' % str(int(data['year']) + 1),
                '%s' % str(int(data['year']) - 1)
            ]

            # Build the site search URL (punctuation stripped from the query).
            query = data[
                'tvshowtitle'] if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            query = self.search_link % urllib.quote_plus(query)
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)

            # Each search hit becomes (links, titles, dates, category tags).
            result = client.parseDOM(result, 'div', attrs={'id': 'post-\d+'})
            result = [(client.parseDOM(i,
                                       'a',
                                       ret='href',
                                       attrs={'rel': 'nofollow'}),
                       client.parseDOM(i,
                                       'a',
                                       ret='title',
                                       attrs={'rel': 'nofollow'}),
                       client.parseDOM(i, 'span', attrs={'class': 'date'}),
                       client.parseDOM(i, 'a', attrs={'rel': 'category tag'}))
                      for i in result]
            result = [(i[0][-1], i[1][-1], i[2][-1], i[3]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0
                      and len(i[3]) > 0]

            # Keep only '1-Click' posts; exclude low-quality rips for movies
            # and season packs ('Tv-Pack') for shows.
            result = [(i[0], i[1], i[2], i[3]) for i in result
                      if '1-Click' in i[3]]
            if not 'tvshowtitle' in data:
                result = [(
                    i[0], i[1], i[2]
                ) for i in result if 'Movies' in i[3] and not any(
                    x in ['BDRip', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'TS']
                    for x in i[3])]
            else:
                result = [(i[0], i[1], i[2]) for i in result
                          if 'Tv Shows' in i[3] and not 'Tv-Pack' in i[3]]

            # Parse the post date (month name, day, year) into YYYYMMDD and
            # keep only reasonably recent posts.
            result = [(i[0], i[1],
                       re.compile('(\w+).+?(\d+).+?(\d{4})').findall(i[2]))
                      for i in result]
            result = [(i[0], i[1], '%04d%02d%02d' % (int(
                i[2][0][2]), int(mt[i[2][0][0][:3].lower()]), int(i[2][0][1])))
                      for i in result if len(i[2]) > 0]
            result = [(i[0], i[1],
                       (abs(dt - int(i[2])) < control.integer * 10))
                      for i in result]
            result = [(i[0], i[1]) for i in result if i[2] == True]

            # Strip a leading 'Download ' from the post title, then split the
            # release name into a clean title and its year/SxxExx token.
            result = [(i[0], re.compile('(^Download |)(.+)').findall(i[1]))
                      for i in result]
            result = [(i[0], i[1][0][-1], i[1][0][-1].upper()) for i in result
                      if len(i[1]) > 0]
            result = [
                (i[0], i[1],
                 re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', i[2]),
                 re.compile(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s|]').findall(
                         i[2])) for i in result
            ]
            # Require the year/episode token, then an exact title match.
            result = [(i[0], i[1], i[2]) for i in result
                      if len(i[3]) > 0 and any(x in i[3][0] for x in hdlr)]
            result = [(i[0], i[1]) for i in result
                      if cleantitle.get(title) == cleantitle.get(i[2])]

            # Keep at most one 1080p post and one 720p ('HD') post; if no
            # 1080p exists fall back to 720p only.
            try:
                result = [[
                    (i[0], '1080p') for i in result if '1080p' in i[1]
                ][0]] + [[(i[0], 'HD') for i in result if '720p' in i[1]][0]]
            except:
                result = [[(i[0], 'HD') for i in result if '720p' in i[1]][0]]

            links = []

            # Fetch each post page and collect the hoster URLs from the
            # second "info2" span block.
            for i in result:
                try:
                    result = client.replaceHTMLCodes(i[0])
                    result = client.source(result)
                    result = result.replace('\n', '')
                    result = re.sub('\s\s+', ' ', result)
                    result = re.compile("<span class='info2'(.+)").findall(
                        result)[0]
                    result = result.split("<span class='info2'")[-1].split(
                        '<span')[0]
                    result = client.parseDOM(result, 'a', ret='href')
                    for url in result:
                        links.append({'url': url, 'quality': i[1]})
                except:
                    pass

            # Keep only links whose host is supported by the configured
            # debrid service (hostprDict).
            for i in links:
                try:
                    url = i['url']
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': i['quality'],
                        'provider': 'DDLvalley',
                        'url': url,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Exemplo n.º 44
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve an Onlinedizi episode page into playable video sources.

        Picks the subtitled ('Altyaz...') alternative for the episode,
        unwraps the embedded player iframe, and resolves the final URL via
        the matching directstream helper.  Returns a list of source dicts;
        empty on any failure.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            path = urlparse.urlparse(url).path

            # Strip non-ASCII (Turkish) characters so the DOM/regex helpers
            # below match reliably.
            result = cloudflare.source(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            result = client.parseDOM(result, 'li')
            result = [(client.parseDOM(i, 'a',
                                       ret='href'), client.parseDOM(i, 'a'))
                      for i in result]
            # Pick the alternative link for this episode whose label
            # mentions 'Altyaz' (subtitled version).
            result = [
                i[0] for i in result if len(i[0]) > 0 and path in i[0][0]
                and len(i[1]) > 0 and 'Altyaz' in i[1][0]
            ][0][0]

            url = urlparse.urljoin(self.base_link, result)

            result = cloudflare.source(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'video-player'})[0]
            result = client.parseDOM(result, 'iframe', ret='src')[-1]

            # The iframe src either carries a base64-encoded target URL in
            # its 'id' query parameter, or must be fetched and scraped.
            try:
                url = base64.b64decode(
                    urlparse.parse_qs(
                        urlparse.urlparse(result).query)['id'][0])
                if not url.startswith('http'): raise Exception()
            except:
                url = cloudflare.source(result)
                url = urllib.unquote_plus(url.decode('string-escape'))

                frame = client.parseDOM(url, 'iframe', ret='src')

                if len(frame) > 0:
                    # Nested iframe: follow the redirect to its final URL.
                    url = [client.source(frame[-1], output='geturl')]
                else:
                    url = re.compile('"(.+?)"').findall(url)
                # Only ok.ru / vk.com / openload.co embeds are supported.
                url = [
                    i for i in url
                    if 'ok.ru' in i or 'vk.com' in i or 'openload.co' in i
                ][0]

            # Normalise ok.ru embed URLs that carry the video id in 'mid'.
            try:
                url = 'http://ok.ru/video/%s' % urlparse.parse_qs(
                    urlparse.urlparse(url).query)['mid'][0]
            except:
                pass

            # Per-host resolution: openload stays an indirect link; ok.ru
            # and vk are resolved to direct stream lists.
            if 'openload.co' in url:
                host = 'openload.co'
                direct = False
                url = [{
                    'url': url,
                    'quality': 'HD'
                }]
            elif 'ok.ru' in url:
                host = 'vk'
                direct = True
                url = directstream.odnoklassniki(url)
            elif 'vk.com' in url:
                host = 'vk'
                direct = True
                url = directstream.vk(url)
            else:
                raise Exception()

            for i in url:
                sources.append({
                    'source': host,
                    'quality': i['quality'],
                    'provider': 'Onlinedizi',
                    'url': i['url'],
                    'direct': direct,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Exemplo n.º 45
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape MVlinks movie posts for debrid hoster links.

        ``url`` is a urlencoded query string with 'title' and 'year'.
        Returns a list of source dicts (carrying a size 'info' string when
        the post states one); empty on any failure.
        """
        try:
            sources = []

            if url == None: return sources

            # Links are premium-hoster only; require an active debrid
            # service.
            if debrid.status() == False: raise Exception()

            # Today's date as an int YYYYMMDD for the freshness check below.
            dt = int(datetime.datetime.now().strftime('%Y%m%d'))

            # Flatten parse_qs' value lists into single strings.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            hdlr = ['%s' % str(data['year'])]

            # Build the search URL with punctuation stripped from the title.
            query = re.sub('(\\\|/|:|;|\*|\?|"|\'|<|>|\|)', '', title)
            query = self.search_link % urllib.quote_plus(query)
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, 'ul', attrs={'class': 'posts'})[0]

            # Each hit becomes (href, title, href-duplicated-for-the-date,
            # category tags).
            result = client.parseDOM(result, 'li')
            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'a', attrs={'title': '.+?'}),
                       client.parseDOM(i, 'a', attrs={'rel': 'category tag'}))
                      for i in result]
            result = [(i[0][0], i[1][0], i[0][0], i[2]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]

            result = [(i[0], i[1], i[2]) for i in result if 'MOVIES' in i[3]]

            # Post URLs embed their date as /YYYY/MM/DD/; keep recent posts
            # only.
            result = [(i[0], i[1],
                       re.compile('/(\d{4})/(\d+)/(\d+)/').findall(i[2]))
                      for i in result]
            result = [(i[0], i[1], '%04d%02d%02d' %
                       (int(i[2][0][0]), int(i[2][0][1]), int(i[2][0][2])))
                      for i in result if len(i[2]) > 0]
            result = [(i[0], i[1],
                       (abs(dt - int(i[2])) < control.integer * 30))
                      for i in result]
            result = [(i[0], i[1]) for i in result if i[2] == True]

            # Split each release name into (clean title, 1080p/720p tag,
            # year token), then keep entries matching the requested
            # title and year.
            result = [
                (i[0],
                 re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', i[1]),
                 re.compile('[\.|\(|\[|\s](1080p|720p)[\.|\)|\]|\s|]').findall(
                     i[1]),
                 re.compile(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s|]').findall(
                         i[1])) for i in result
            ]
            result = [(i[0], i[1], i[2][0], i[3][0]) for i in result
                      if len(i[2]) > 0 and len(i[3]) > 0]
            result = [(i[0], i[2]) for i in result
                      if cleantitle.get(title) == cleantitle.get(i[1]) and any(
                          x in i[3] for x in hdlr)]

            # Keep at most one 1080p post and one 720p ('HD') post; fall
            # back to 720p only when no 1080p exists.
            try:
                result = [[
                    (i[0], '1080p') for i in result if i[1] == '1080p'
                ][0]] + [[(i[0], 'HD') for i in result if i[1] == '720p'][0]]
            except:
                result = [[(i[0], 'HD') for i in result if i[1] == '720p'][0]]

            links = []

            # Fetch each post and collect hoster URLs plus the stated size.
            for i in result:
                try:
                    result = client.replaceHTMLCodes(i[0])
                    result = client.source(result)
                    result = client.parseDOM(result,
                                             'div',
                                             attrs={'class':
                                                    'post_content'})[0]
                    result = re.sub('\s\s+', ' ', result)

                    # Parse 'Size : x MB/GB' into a GB figure for the info
                    # field; posts without one get an empty info string.
                    try:
                        size = re.compile('Size\s*:\s*(.+? [M|G]B) ').findall(
                            result)[-1]
                        div = 1 if size.endswith(' GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        info = '%.2f GB' % size
                    except:
                        info = ''

                    result = client.parseDOM(result, 'ul')[0]
                    result = client.parseDOM(result, 'a', ret='href')
                    for url in result:
                        links.append({
                            'url': url,
                            'quality': i[1],
                            'info': info
                        })
                except:
                    pass

            for i in links:
                try:
                    url = i['url']
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    # NOTE(review): this filters against hostDict but marks
                    # the source debridonly=True; sibling providers filter
                    # debrid-only links against hostprDict -- confirm which
                    # dict is intended here.
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': i['quality'],
                        'provider': 'MVlinks',
                        'url': url,
                        'info': i['info'],
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Exemplo n.º 46
0
    def sources(self, url, hostDict, hostprDict):
        """Extract direct gvideo streams from a Pubfilm movie/episode page.

        Episode URLs carry a '?episode=N' suffix selecting the right player
        link; the page's player link is then resolved through the site's
        gkpluginsphp endpoint.  Returns a list of source dicts; empty on
        any failure.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            # A trailing '?episode=N' marks an episode; otherwise a movie.
            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try:
                url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(
                    url)[0]
            except:
                pass

            result = client.source(url)

            if content == 'movie':
                url = client.parseDOM(result, 'iframe', ret='src')[0]
            else:
                # Pick the player link whose label's last number equals the
                # requested episode.
                url = zip(
                    client.parseDOM(result,
                                    'a',
                                    ret='href',
                                    attrs={'target': 'player_iframe'}),
                    client.parseDOM(result,
                                    'a',
                                    attrs={'target': 'player_iframe'}))
                url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
                url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]
                url = [i[0] for i in url if i[1] == '%01d' % int(episode)][0]

            url = client.replaceHTMLCodes(url)

            result = client.source(url)

            # POST the embedded player link to the gkplugins resolver to get
            # the JSON list of stream URLs and quality labels.
            headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
            url = 'http://player.pubfilm.com/smplayer/plugins/gkphp/plugins/gkpluginsphp.php'
            post = re.compile('link\s*:\s*"([^"]+)').findall(result)[0]
            post = urllib.urlencode({'link': post})

            result = client.source(url, post=post, headers=headers)

            r = re.compile(
                '"?link"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"'
            ).findall(result)
            # No labels at all: assume every stream is 480p.
            if not r:
                r = [(i, 480) for i in re.compile(
                    '"?link"?\s*:\s*"([^"]+)').findall(result)]
            r = [(i[0].replace('\\/', '/'), i[1]) for i in r]

            # Bucket numeric labels into quality tiers; accept 360p as 'SD'
            # only when nothing >= 480p exists.
            links = [(i[0], '1080p') for i in r if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in r if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in r if 480 <= int(i[1]) < 720]
            if not 'SD' in [i[1] for i in links]:
                links += [(i[0], 'SD') for i in r if 360 <= int(i[1]) < 480]

            for i in links:
                sources.append({
                    'source': 'gvideo',
                    'quality': i[1],
                    'provider': 'Pubfilm',
                    'url': i[0],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Exemplo n.º 47
0
def resolve(url):
    """Resolve a Google-hosted video URL into a playable stream URL.

    Supports docs/drive, photos, picasaweb and plus URLs.  Collects the
    quality variants available on the page, then returns the single stream
    directly or pops a quality-selection dialog when several exist.
    Returns None on failure or when the dialog is cancelled.
    """
    try:
        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc == 'docs' or netloc == 'drive':
            # Normalise preview/drive links onto docs.google.com, then pull
            # the 'fmt_stream_map' embedded in the page.
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

            result = client.request(url,
                                    headers={'User-Agent': client.agent()})

            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]

            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([tag(i) for i in result], [])

        elif netloc == 'photos':
            result = client.request(url,
                                    headers={'User-Agent': client.agent()})

            result = result.replace('\r', '').replace('\n',
                                                      '').replace('\t', '')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            # The page escapes '=' and '&' as unicode literals; undo that
            # before extracting the stream URLs.
            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [tag(i)[0] for i in result]

        elif netloc == 'picasaweb':
            # Was 'id'; renamed to avoid shadowing the builtin.
            media_id = re.compile('#(\d*)').findall(url)[0]

            result = client.request(url,
                                    headers={'User-Agent': client.agent()})

            result = re.search('feedPreload:\s*(.*}]}})},', result,
                               re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            # Multiple feed entries: pick the one whose link matches the
            # URL fragment id.
            if len(result) > 1:
                result = [
                    i for i in result if str(media_id) in i['link'][0]['href']
                ][0]
            elif len(result) == 1:
                result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([tag(i) for i in result], [])

        elif netloc == 'plus':
            result = client.source(url, headers={'User-Agent': client.agent()})

            media_id = (urlparse.urlparse(url).path).split('/')[-1]
            result = result.replace('\r', '').replace('\n',
                                                      '').replace('\t', '')
            result = result.split('"%s"' % media_id)[-1].split(']]')[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [tag(i)[0] for i in result]

        # Keep at most one variant per quality, best first.  (For an
        # unrecognised netloc 'result' is unbound; the resulting NameError
        # is swallowed here exactly as the old per-quality try/excepts did,
        # so an empty list / None return is preserved.)
        url = []
        for quality in ('1080p', '720p', '480p', '360p', '240p'):
            try:
                url += [[i for i in result if i['quality'] == quality][0]]
            except:
                pass

        if len(url) == 0:
            return
        elif len(url) == 1:
            return url[0]['url']
        else:
            # Several qualities available: let the user choose.
            # (Fixed dialog-title typo: 'Choose a linkz' -> 'Choose a link'.)
            q = ['GoogleVideo - %s' % i['quality'] for i in url]
            u = [i['url'] for i in url]
            select = xbmcgui.Dialog().select('Choose a link', q)
            if select == -1: return
            return u[select]
    except:
        return
Exemplo n.º 48
0
    def sources(self, url, hostDict, hostprDict):
        """Collect direct gvideo streams from a Sezonlukdizi episode page.

        Gathers up to two embedded player pages (the default embed plus the
        first alternative from the player menu) and scrapes file/label
        pairs from each.  Returns a list of source dicts; empty on any
        failure.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            # Replace non-ASCII (Turkish) characters so the regex/DOM
            # helpers below behave.
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = []
            # Default embedded player, if present.
            try:
                r = client.parseDOM(result, 'div', attrs={'id': 'embed'})[0]
                pages.append(client.parseDOM(r, 'iframe', ret='src')[0])
            except:
                pass
            # First alternative from the player menu, fetched through the
            # site's video endpoint with its data-id.
            try:
                r = client.parseDOM(result, 'div', attrs={'id':
                                                          'playerMenu'})[0]
                r = client.parseDOM(r,
                                    'div',
                                    ret='data-id',
                                    attrs={'class': 'item'})[0]
                r = client.source(urlparse.urljoin(self.base_link,
                                                   self.video_link),
                                  post=urllib.urlencode({'id': r}))
                pages.append(client.parseDOM(r, 'iframe', ret='src')[0])
            except:
                pass

            for page in pages:
                try:
                    result = client.source(page)

                    # Only accept players that declare subtitle captions.
                    captions = re.search(
                        'kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                    if not captions: raise Exception()

                    result = re.compile(
                        '"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"'
                    ).findall(result)

                    # Bucket numeric labels into quality tiers.
                    links = [(i[0], '1080p') for i in result
                             if int(i[1]) >= 1080]
                    links += [(i[0], 'HD') for i in result
                              if 720 <= int(i[1]) < 1080]
                    links += [(i[0], 'SD') for i in result
                              if 480 <= int(i[1]) < 720]

                    for i in links:
                        sources.append({
                            'source': 'gvideo',
                            'quality': i[1],
                            'provider': 'Sezonlukdizi',
                            'url': i[0],
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Exemplo n.º 49
0
    def sources(self, url, hostDict, hostprDict):
        """Fetch the direct MP4 stream for an Ororo title (account needed).

        Signs in with the stored credentials, follows the episode anchor
        ('#season-episode' suffix on ``url``) to the player page, and
        returns a single direct source whose URL carries the auth headers
        appended after '|'.  Returns an empty list on any failure.
        """
        try:
            sources = []

            if url == None: return sources

            # Ororo requires an account; bail out without credentials.
            if (self.user == '' or self.password == ''): raise Exception()

            # Sign in and capture the session cookie, keeping the language
            # cookie alongside it.
            cookie = client.source(self.sign,
                                   post=self.post,
                                   headers=self.headers,
                                   cookie=self.lang,
                                   output='cookie')
            cookie = '%s; %s' % (cookie, self.lang)

            # Episode URLs end in '#season-episode'; when absent (movies),
            # any anchor ('.+?') is accepted below.
            try:
                url, season, episode = re.compile(
                    '(.+?)#(\d*)-(\d*)$').findall(url)[0]
            except:
                pass
            try:
                href = '#%01d-%01d' % (int(season), int(episode))
            except:
                href = '.+?'

            url = referer = urlparse.urljoin(self.base_link, url)

            result = client.source(url, cookie=cookie)

            url = client.parseDOM(result,
                                  'a',
                                  ret='data-href',
                                  attrs={'href': href})[0]
            url = urlparse.urljoin(self.base_link, url)

            headers = {'X-Requested-With': 'XMLHttpRequest'}
            result = client.source(url,
                                   cookie=cookie,
                                   referer=referer,
                                   headers=headers)

            # Player headers appended to the stream URL after '|' (the
            # Kodi URL-with-headers convention).
            headers = '|%s' % urllib.urlencode(
                {
                    'User-Agent': self.headers['User-Agent'],
                    'Cookie': str(cookie)
                })

            # Prefer the explicit video/mp4 <source>; fall back to any
            # video/* source tag.
            url = client.parseDOM(result,
                                  'source',
                                  ret='src',
                                  attrs={'type': 'video/mp4'})
            url += client.parseDOM(result,
                                   'source',
                                   ret='src',
                                   attrs={'type': 'video/.+?'})
            url = url[0] + headers

            sources.append({
                'source': 'ororo',
                'quality': 'HD',
                'provider': 'Ororo',
                'url': url,
                'direct': True,
                'debridonly': False
            })

            return sources
        except:
            return sources