Example #1
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return


            ep_url = '/pelicula/%s-season-%01d-episode-%01d/' % (url.strip('/').split('/')[-1], int(season), int(episode))
            ep_url = urlparse.urljoin(self.base_link, ep_url)
            r = client.request(ep_url, limit=1, timeout='10')

            if not r:
                ep_url = '/pelicula/%s-season-%01d-episode-%01d-/' % (url.strip('/').split('/')[-1], int(season), int(episode))
                ep_url = urlparse.urljoin(self.base_link, ep_url)
                r = client.request(ep_url, limit=1, timeout='10')

            if not r:
                url = 'http://www.imdb.com/title/%s' % imdb
                url = client.request(url, headers={'Accept-Language':'es-ES'}, timeout='10')
                url = client.parseDOM(url, 'title')[0]
                url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
                url = cleantitle.geturl(url.encode("utf-8"))
                url = '/pelicula/%s-season-%01d-episode-%01d/' % (url.strip('/').split('/')[-1], int(season), int(episode))
                ep_url = urlparse.urljoin(self.base_link, url)
                r = client.request(ep_url, limit=1, timeout='10')

            if not r:
                raise Exception()
            return ep_url
        except:
            return
Example #2
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            for i in range(3):
                result = client.request(url, timeout=10)
                if result is not None: break
            
            dom = dom_parser.parse_dom(result, 'div', attrs={'class':'links', 'id': 'noSubs'})
            result = dom[0].content
            
            links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',re.DOTALL).findall(result)         
            for link in links[:5]:
                try:
                    url2 = urlparse.urljoin(self.base_link, link[1])
                    for i in range(2):
                        result2 = client.request(url2, timeout=3)
                        if result2 is not None: break
                    r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                    valid, hoster = source_utils.is_host_valid(r, hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(r, hoster)
                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    
                except:
                    #traceback.print_exc()
                    pass           
                    
            return sources
        except:
            return sources
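A note on the shared shape: every sources() in these examples returns a list of dicts with the keys source, quality, language, url, direct and debridonly (sometimes plus info). A minimal consumer sketch, where pick_best and the quality ordering are illustrative assumptions rather than part of these scrapers:

    def pick_best(scraper, url, hostDict, hostprDict):
        # collect candidate streams from one scraper
        candidates = scraper.sources(url, hostDict, hostprDict)
        # prefer direct links first, then the higher advertised quality
        order = {'1080p': 0, '720p': 1, 'HD': 1, 'SD': 2, 'DVD': 2, 'SCR': 3, 'CAM': 4}
        candidates.sort(key=lambda s: (not s['direct'], order.get(s['quality'], 5)))
        return candidates[0] if candidates else None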
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = client.parseDOM(r, 'iframe', ret='src')

            for u in r:
                try:
                    if not u.startswith('http') and not 'vidstreaming' in u: raise Exception()

                    url = client.request(u)
                    url = client.parseDOM(url, 'source', ret='src')

                    for i in url:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('GoGoAnime - Exception: \n' + str(failure))
            return sources
Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources
            html = client.request(url)

            source = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(html)[0]
            if 'consistent.stream' in source:
                html = client.request(source)
                page = re.compile(""":title=["'](.+?)["']\>""").findall(html)[0]
                decode = client.replaceEscapeCodes(page)
                links = re.compile('"sources.+?"(http.+?)"',re.DOTALL).findall(decode)
                for link in links:
                    link = link.replace('\\','')
                    if '1080' in link:
                        quality='1080p'
                    elif '720' in link:
                        quality = '720p'
                    else:
                        quality = 'DVD'
                    host = link.split('//')[1].replace('www.','')
                    host = host.split('/')[0].split('.')[0].title()
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('VexMovies - Exception: \n' + str(failure))
            return sources
Example #5
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            langMap = {'hi':'hindi', 'ta':'tamil', 'te':'telugu', 'ml':'malayalam', 'kn':'kannada', 'bn':'bengali', 'mr':'marathi', 'pa':'punjabi'}

            lang = 'http://www.imdb.com/title/%s/' % imdb
            lang = client.request(lang)
            lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
            lang = [i for i in lang if 'primary_language' in i]
            lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
            lang = [i['primary_language'] for i in lang if 'primary_language' in i]
            lang = langMap[lang[0][0]]

            q = self.search_link % (lang, urllib.quote_plus(title))
            q = urlparse.urljoin(self.base_link, q)

            t = cleantitle.get(title)

            r = client.request(q)

            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs = {'class': 'info'})) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
            r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = str(r)
            return url
        except:
            return
Example #6
	def resolve(self,url):
		html=client.request(url)
		soup=webutils.bs(html)
		try:
			link=soup.find('iframe',{'frameborder':'0'})['src']
		except:    
			sd = re.findall('<source src="(.+?)" type=\'video/mp4\' data-res="360p">',html)[0]
			try:
				hd = re.findall('<source src="(.+?)" type=\'video/mp4\' data-res="720p">',html)[0]
			except:
				hd = sd
			return hd

		if 'http' not in link:
			link = 'http://nbahd.com' + link
		try:
			html = client.request(link)
			urls = re.findall('src="(.+?)" type="video/mp4"',html)
			try: url = urls[1]
			except: url = urls[0]
			return url
		except:
			try:
				import urlresolver
				resolved = urlresolver.resolve(link)
				return resolved
			except:
				return
Example #7
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                r = client.request(url)

                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()
            else:
                r = client.request(url)


            result = re.findall('''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)

            for i in result:
                url = i[0].replace('\/', '/')
                sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #8
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    try:
        if headers is None: headers = {}

        agent = cache.get(cloudflareAgent, 168)

        if 'User-Agent' not in headers: headers['User-Agent'] = agent

        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

        cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)

        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)

        if result[0] == '503':
            agent = cache.get(cloudflareAgent, 0)
            headers['User-Agent'] = agent

            cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)

            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]

        return result
    except:
        return
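Called like client.request; a hypothetical caller (the URL is a placeholder):

    # the first call primes the cached User-Agent and Cloudflare clearance
    # cookie (168-hour TTL); a 503 response forces both to be refreshed once
    html = request('http://example.com/some/page', timeout='30')
    if html is None:
        pass  # challenge unsolved or request failed; fall back or give up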
Example #9
def cloudflareCookie(url, post, headers, mobile, safe, timeout):
    try:
        result = client.request(url, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, error=True)

        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[-1]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
        decryptVal = parseJSString(init)
        lines = builder.split(';')

        for line in lines:
            if len(line)>0 and '=' in line:
                sections=line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))

        answer = decryptVal + len(urlparse.urlparse(url).netloc)

        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (url, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval = re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (url, urllib.quote_plus(passval), jschl, answer)
            time.sleep(5)

        cookie = client.request(query, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='cookie', error=True)
        return cookie
    except:
        pass
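The parseJSString helper used above is not shown here; in the Cloudflare-bypass code this snippet usually travels with, it evaluates the challenge's obfuscated JavaScript arithmetic. A sketch of that common version (assumed, not necessarily this project's exact copy):

    def parseJSString(s):
        try:
            offset = 1 if s[0] == '+' else 0
            # map the obfuscated JS tokens to digits, then eval the arithmetic
            val = int(eval(s.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0').replace('(', 'str(')[offset:]))
            return val
        except:
            pass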
Example #10
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            t = cleantitle.get(title)

            q = '%s %s' % (title, year)
            q = self.search_link.decode('base64') % urllib.quote_plus(q)

            r = client.request(q)
            r = json.loads(r)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in r]
            r = [(i[0].split('%')[0], re.findall('(?:^Watch |)(.+?)(?:\(|)(\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [i for i in r if '/watch/' in i[0] and not '-season-' in i[0]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass

        try:
            url = re.sub('[^A-Za-z0-9]', '-', title).lower()
            url = self.moviesearch_link % (url, year)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, output='geturl')
            if not year in r: raise Exception()

            return url
        except:
            return
Example #11
    def sky_list(self, num, channel, id):
        try:
            url = self.sky_now_link % id
            result = client.request(url, timeout='10')
            result = json.loads(result)
            match = result['listings'][id][0]['url']

            dt1 = (self.uk_datetime).strftime('%Y-%m-%d')
            dt2 = int((self.uk_datetime).strftime('%H'))
            if (dt2 < 6): dt2 = 0
            elif (dt2 >= 6 and dt2 < 12): dt2 = 1
            elif (dt2 >= 12 and dt2 < 18): dt2 = 2
            elif (dt2 >= 18): dt2 = 3

            url = self.sky_programme_link % (id, str(dt1), str(dt2))
            result = client.request(url, timeout='10')
            result = json.loads(result)
            result = result['listings'][id]
            result = [i for i in result if i['url'] == match][0]

            year = result['d']
            year = re.findall('[(](\d{4})[)]', year)[0].strip()
            year = year.encode('utf-8')

            title = result['t']
            title = title.replace('(%s)' % year, '').strip()
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            self.items.append((title, year, channel, num))
        except:
            pass
Example #12
File: movie2z.py Project: mpie/repo
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'mainmenu'})
            r = dom_parser.parse_dom(r, 'li')

            for i in r:
                i = dom_parser.parse_dom(i, 'a')
                i = i[0][0]['href']
                i = client.request(i)
                i = dom_parser.parse_dom(i, 'select', attrs={'id': 'selecthost'})
                i = dom_parser.parse_dom(i, 'option')

                for x in i:
                    hoster = re.search('^\S*', x[1]).group().lower()
                    url = x[0]['value']

                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs = {'class': 'player_wraper'})
            r = client.parseDOM(r, 'iframe', ret='src')[0]
            r = urlparse.urljoin(url, r)
            r = client.request(r, referer=url)
            a = client.parseDOM(r, 'div', ret='value', attrs = {'id': 'k2'})[-1]
            b = client.parseDOM(r, 'div', ret='value', attrs = {'id': 'k1'})[-1]
            c = client.parseDOM(r, 'body', ret='style')[0]
            c = re.findall('(\d+)',  c)[-1]
            r = '/player/%s?s=%s&e=%s' % (a, b, c)
            r = urlparse.urljoin(url, r)
            r = client.request(r, referer=url)
            r = re.findall('"(?:url|src)"\s*:\s*"(.+?)"', r)

            for i in r:
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
Example #14
    def __get_episode_url(self, data):
        try:
            path = self.search_link % urllib.quote_plus(cleantitle.query(data['tvshowtitle']))
            url = urlparse.urljoin(self.base_link, path)

            xbmc.log('__get_episode_url start url: ' + str(url))

            response = client.request(url)

            exp = 'href="([^"]+?)".+?videoHname.+?title="%s - Season %s"' % (data['tvshowtitle'], data['season'])
            get_season = re.findall(exp, response, flags=re.I)[0]
            url = urlparse.urljoin(self.base_link, get_season + '/season')

            xbmc.log('__get_episode_url season url: ' + str(url))

            response = client.request(url)

            exp = 'href="([^"]+?)" title="(.+?Episode (?:%02d|%s):.+?)".+?videoHname' % (int(data['episode']), data['episode'])
            episode = re.findall(exp, response)[0][0]
            url = urlparse.urljoin(self.base_link, episode)

            xbmc.log('__get_episode_url episode url: ' + str(url))

            return url

        except Exception:
            return
Example #15
    def __get_movie_url(self, data):
        try:
            query = data['title'].lower().replace(' ', '+')
            path = self.movie_search % query
            url = urlparse.urljoin(self.base_link, path)

            response = client.request(url, headers=self.headers)

            movie_id = json.loads(response)[0]['id']

            path = self.movie_details % movie_id
            url = urlparse.urljoin(self.base_link, path)

            response = client.request(url, headers=self.headers)
            token_encrypted = json.loads(response)['langs'][0]['sources'][0]['hash']

            token = self.__decrypt(token_encrypted)

            path = self.fetcher % token
            url = urlparse.urljoin(self.base_link, path)

            return url

        except Exception:
            return
Example #16
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            t = cleantitle.get(tvshowtitle)

            u = urlparse.urljoin(self.base_link, self.search_link)

            p = {'q': tvshowtitle.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
            p = urllib.urlencode(p)

            r = client.request(u, post=p, XHR=True)
            r = json.loads(r)

            r = [i for i in r if i['meta'].strip().split()[0].lower() == 'tv']
            r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
            r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
            r = [(i[0], client.request(i[1])) for i in r]
            r = [(i[0], i[1]) for i in r if not i[1] == None]
            r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
            r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0]) for i in r if i[1]]
            r = [i for i in r if year in i[1]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #17
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                search_results = client.request(search_url)
                parsed = client.parseDOM(search_results, 'div', {'id': 'movie-featured'})
                parsed = [(client.parseDOM(i, 'a', ret='href'), re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
                parsed = [(i[0][0], i[1][0]) for i in parsed if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = parsed[0][0]
            except:
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Example #18
    def movie(self, imdb, title, localtitle, year):
        try:
            q = self.search_link_2.decode('base64') % urllib.quote_plus(title)

            r = client.request(q)
            for _ in range(3):
                if r is None: r = client.request(q)

            r = json.loads(r)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in r]
            r = [(i[0], re.findall('(?:^Watch |)(.+? \(\d{4}\))', i[1])) for i in r]
            r = [(urlparse.urljoin(self.base_link, i[0]), i[1][0]) for i in r if i[1]]

            t = cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            r = [i for i in r if any(x in i[1] for x in years)]

            match = [i[0] for i in r if t in cleantitle.get(i[1]) and '(%s)' % str(year) in i[1] and self.base_link in i[0]]

            match2 = [i[0] for i in r]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            if not match: return

            return match[0]
        except:
            pass
Example #19
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')

            if not r:
                url = 'http://www.imdb.com/title/%s' % imdb
                url = client.request(url, headers={'Accept-Language':'es-ES'})
                url = client.parseDOM(url, 'title')[0]
                url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
                url = cleantitle.normalize(url.encode("utf-8"))
                url = self.tvsearch_link % cleantitle.geturl(url)

                r = urlparse.urljoin(self.base_link, url)
                r = client.request(r, limit='1')
                r = client.parseDOM(r, 'title')

            if not year in r[0]: raise Exception()

            return url
        except:
            return
Example #20
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            title = urldata['title'].replace(':', ' ').replace('-', ' ').lower()
            year  = urldata['year']

            search_id = title.lower()
            start_url = self.search_link % (self.base_link, search_id.replace(' ','%20'))

            headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(start_url,headers=headers)
            Links = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"',re.DOTALL).findall(html)
            for link,name in Links:
                link = link.replace('\\','')
                name = name.replace('&#038;', '')
                if title.lower() in name.lower(): 
                    if year in name:
                        holder = client.request(link,headers=headers)
                        dpage = re.compile('id="main-down".+?href="(.+?)"',re.DOTALL).findall(holder)[0]
                        sources = self.scrape_results(dpage, title, year)
                        return sources
            return sources
        except:
            return sources
Example #21
File: alltube.py Project: mpie/repo
    def resolve(self, url):
        try:
            myurl = url.split('?')
            mycookie = client.request(myurl[0], output='cookie', error=True)

            tmp = 'ZGVmIGFiYyhpbl9hYmMpOg0KICAgIGRlZiByaGV4KGEpOg0KICAgICAgICBoZXhfY2hyID0gJzAxMjM0NTY3ODlhYmNkZWYnDQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKDQpOg0KICAgICAgICAgICAgcmV0ICs9IGhleF9jaHJbKGEgPj4gKGkgKiA4ICsgNCkpICYgMHgwRl0gKyBoZXhfY2hyWyhhID4+IChpICogOCkpICYgMHgwRl0NCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiBoZXgodGV4dCk6DQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKGxlbih0ZXh0KSk6DQogICAgICAgICAgICByZXQgKz0gcmhleCh0ZXh0W2ldKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGFkZDMyKGEsIGIpOg0KICAgICAgICByZXR1cm4gKGEgKyBiKSAmIDB4RkZGRkZGRkYNCiAgICBkZWYgY21uKGEsIGIsIGMsIGQsIGUsIGYpOg0KICAgICAgICBiID0gYWRkMzIoYWRkMzIoYiwgYSksIGFkZDMyKGQsIGYpKTsNCiAgICAgICAgcmV0dXJuIGFkZDMyKChiIDw8IGUpIHwgKGIgPj4gKDMyIC0gZSkpLCBjKQ0KICAgIGRlZiBmZihhLCBiLCBjLCBkLCBlLCBmLCBnKToNCiAgICAgICAgcmV0dXJuIGNtbigoYiAmIGMpIHwgKCh+YikgJiBkKSwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgZ2coYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oKGIgJiBkKSB8IChjICYgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGhoKGEsIGIsIGMsIGQsIGUsIGYsIGcpOg0KICAgICAgICByZXR1cm4gY21uKGIgXiBjIF4gZCwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgaWkoYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oYyBeIChiIHwgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGNyeXB0Y3ljbGUodGFiQSwgdGFiQik6DQogICAgICAgIGEgPSB0YWJBWzBdDQogICAgICAgIGIgPSB0YWJBWzFdDQogICAgICAgIGMgPSB0YWJBWzJdDQogICAgICAgIGQgPSB0YWJBWzNdDQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzBdLCA3LCAtNjgwODc2OTM2KTsNCiAgICAgICAgZCA9IGZmKGQsIGEsIGIsIGMsIHRhYkJbMV0sIDEyLCAtMzg5NTY0NTg2KTsNCiAgICAgICAgYyA9IGZmKGMsIGQsIGEsIGIsIHRhYkJbMl0sIDE3LCA2MDYxMDU4MTkpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlszXSwgMjIsIC0xMDQ0NTI1MzMwKTsNCiAgICAgICAgYSA9IGZmKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDcsIC0xNzY0MTg4OTcpOw0KICAgICAgICBkID0gZmYoZCwgYSwgYiwgYywgdGFiQls1XSwgMTIsIDEyMDAwODA0MjYpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQls2XSwgMTcsIC0xNDczMjMxMzQxKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbN10sIDIyLCAtNDU3MDU5ODMpOw0KICAgICAgICBhID0gZmYoYSwgYiwgYywgZCwgdGFiQls4XSwgNywgMTc3MDAzNTQxNik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzldLCAxMiwgLTE5NTg0MTQ0MTcpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE3LCAtNDIwNjMpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlsxMV0sIDIyLCAtMTk5MDQwNDE2Mik7DQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNywgMTgwNDYwMzY4Mik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzEzXSwgMTIsIC00MDM0MTEwMSk7DQogICAgICAgIGMgPSBmZihjLCBkLCBhLCBiLCB0YWJCWzE0XSwgMTcsIC0xNTAyMDAyMjkwKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbMTVdLCAyMiwgMTIzNjUzNTMyOSk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzFdLCA1LCAtMTY1Nzk2NTEwKTsNCiAgICAgICAgZCA9IGdnKGQsIGEsIGIsIGMsIHRhYkJbNl0sIDksIC0xMDY5NTAxNjMyKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTFdLCAxNCwgNjQzNzE3NzEzKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbMF0sIDIwLCAtMzczODk3MzAyKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbNV0sIDUsIC03MDE1NTg2OTEpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxMF0sIDksIDM4MDE2MDgzKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTVdLCAxNCwgLTY2MDQ3ODMzNSk7DQogICAgICAgIGIgPSBnZyhiLCBjLCBkLCBhLCB0YWJCWzRdLCAyMCwgLTQwNTUzNzg0OCk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzldLCA1LCA1Njg0NDY0MzgpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxNF0sIDksIC0xMDE5ODAzNjkwKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE0LCAtMTg3MzYzOTYxKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbOF0sIDIwLCAxMTYzNTMxNTAxKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbMTNdLCA1LCAtMTQ0NDY4MTQ2Nyk7DQogICAgICAgIGQgPSBnZyhkLCBhLCBiLCBjLCB0YWJCWzJdLCA5LCAtNTE0MDM3ODQpOw0KICAgICAgICBjID0gZ2coYywgZCwgYSwgYiwg
dGFiQls3XSwgMTQsIDE3MzUzMjg0NzMpOw0KICAgICAgICBiID0gZ2coYiwgYywgZCwgYSwgdGFiQlsxMl0sIDIwLCAtMTkyNjYwNzczNCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzVdLCA0LCAtMzc4NTU4KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbOF0sIDExLCAtMjAyMjU3NDQ2Myk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzExXSwgMTYsIDE4MzkwMzA1NjIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxNF0sIDIzLCAtMzUzMDk1NTYpOw0KICAgICAgICBhID0gaGgoYSwgYiwgYywgZCwgdGFiQlsxXSwgNCwgLTE1MzA5OTIwNjApOw0KICAgICAgICBkID0gaGgoZCwgYSwgYiwgYywgdGFiQls0XSwgMTEsIDEyNzI4OTMzNTMpOw0KICAgICAgICBjID0gaGgoYywgZCwgYSwgYiwgdGFiQls3XSwgMTYsIC0xNTU0OTc2MzIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxMF0sIDIzLCAtMTA5NDczMDY0MCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzEzXSwgNCwgNjgxMjc5MTc0KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMF0sIDExLCAtMzU4NTM3MjIyKTsNCiAgICAgICAgYyA9IGhoKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE2LCAtNzIyNTIxOTc5KTsNCiAgICAgICAgYiA9IGhoKGIsIGMsIGQsIGEsIHRhYkJbNl0sIDIzLCA3NjAyOTE4OSk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzldLCA0LCAtNjQwMzY0NDg3KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMTJdLCAxMSwgLTQyMTgxNTgzNSk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzE1XSwgMTYsIDUzMDc0MjUyMCk7DQogICAgICAgIGIgPSBoaChiLCBjLCBkLCBhLCB0YWJCWzJdLCAyMywgLTk5NTMzODY1MSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzBdLCA2LCAtMTk4NjMwODQ0KTsNCiAgICAgICAgZCA9IGlpKGQsIGEsIGIsIGMsIHRhYkJbN10sIDEwLCAxMTI2ODkxNDE1KTsNCiAgICAgICAgYyA9IGlpKGMsIGQsIGEsIGIsIHRhYkJbMTRdLCAxNSwgLTE0MTYzNTQ5MDUpOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQls1XSwgMjEsIC01NzQzNDA1NSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNiwgMTcwMDQ4NTU3MSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzNdLCAxMCwgLTE4OTQ5ODY2MDYpOw0KICAgICAgICBjID0gaWkoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE1LCAtMTA1MTUyMyk7DQogICAgICAgIGIgPSBpaShiLCBjLCBkLCBhLCB0YWJCWzFdLCAyMSwgLTIwNTQ5MjI3OTkpOw0KICAgICAgICBhID0gaWkoYSwgYiwgYywgZCwgdGFiQls4XSwgNiwgMTg3MzMxMzM1OSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzE1XSwgMTAsIC0zMDYxMTc0NCk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzZdLCAxNSwgLTE1NjAxOTgzODApOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQlsxM10sIDIxLCAxMzA5MTUxNjQ5KTsNCiAgICAgICAgYSA9IGlpKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDYsIC0xNDU1MjMwNzApOw0KICAgICAgICBkID0gaWkoZCwgYSwgYiwgYywgdGFiQlsxMV0sIDEwLCAtMTEyMDIxMDM3OSk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzJdLCAxNSwgNzE4Nzg3MjU5KTsNCiAgICAgICAgYiA9IGlpKGIsIGMsIGQsIGEsIHRhYkJbOV0sIDIxLCAtMzQzNDg1NTUxKTsNCiAgICAgICAgdGFiQVswXSA9IGFkZDMyKGEsIHRhYkFbMF0pOw0KICAgICAgICB0YWJBWzFdID0gYWRkMzIoYiwgdGFiQVsxXSk7DQogICAgICAgIHRhYkFbMl0gPSBhZGQzMihjLCB0YWJBWzJdKTsNCiAgICAgICAgdGFiQVszXSA9IGFkZDMyKGQsIHRhYkFbM10pDQogICAgZGVmIGNyeXB0YmxrKHRleHQpOg0KICAgICAgICByZXQgPSBbXQ0KICAgICAgICBmb3IgaSBpbiByYW5nZSgwLCA2NCwgNCk6DQogICAgICAgICAgICByZXQuYXBwZW5kKG9yZCh0ZXh0W2ldKSArIChvcmQodGV4dFtpKzFdKSA8PCA4KSArIChvcmQodGV4dFtpKzJdKSA8PCAxNikgKyAob3JkKHRleHRbaSszXSkgPDwgMjQpKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGpjc3lzKHRleHQpOg0KICAgICAgICB0eHQgPSAnJzsNCiAgICAgICAgdHh0TGVuID0gbGVuKHRleHQpDQogICAgICAgIHJldCA9IFsxNzMyNTg0MTkzLCAtMjcxNzMzODc5LCAtMTczMjU4NDE5NCwgMjcxNzMzODc4XQ0KICAgICAgICBpID0gNjQNCiAgICAgICAgd2hpbGUgaSA8PSBsZW4odGV4dCk6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgY3J5cHRibGsodGV4dFsnc3Vic3RyaW5nJ10oaSAtIDY0LCBpKSkpDQogICAgICAgICAgICBpICs9IDY0DQogICAgICAgIHRleHQgPSB0ZXh0W2kgLSA2NDpdDQogICAgICAgIHRtcCA9IFswLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwXQ0KICAgICAgICBpID0gMA0KICAgICAgICB3aGlsZSBpIDwgbGVuKHRleHQpOg0KICAgICAgICAgICAgdG1wW2kgPj4gMl0gfD0gb3JkKHRleHRbaV0pIDw8ICgoaSAlIDQpIDw8IDMpDQogICAgICAgICAgICBpICs9IDENCiAgICAgICA
gdG1wW2kgPj4gMl0gfD0gMHg4MCA8PCAoKGkgJSA0KSA8PCAzKQ0KICAgICAgICBpZiBpID4gNTU6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgICAgIGZvciBpIGluIHJhbmdlKDE2KToNCiAgICAgICAgICAgICAgICB0bXBbaV0gPSAwDQogICAgICAgIHRtcFsxNF0gPSB0eHRMZW4gKiA4Ow0KICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiByZXplZG93YSh0ZXh0KToNCiAgICAgICAgcmV0dXJuIGhleChqY3N5cyh0ZXh0KSkNCiAgICByZXR1cm4gcmV6ZWRvd2EoaW5fYWJjKQ0K'
            tmp = base64.b64decode(tmp)
            _myFun = compile(tmp, '', 'exec')
            vGlobals = {"__builtins__": None, 'len': len, 'list': list, 'ord': ord, 'range': range}
            vLocals = {'abc': ''}
            exec _myFun in vGlobals, vLocals
            myFun1 = vLocals['abc']

            data = client.request(urlparse.urljoin(self.base_link, '/jsverify.php?op=tag'), cookie=mycookie)
            data = byteify(json.loads(data))
            d = {}
            for i in range(len(data['key'])):
                d[data['key'][i]] = data['hash'][i]
            tmp = ''
            for k in sorted(d.keys()):
                tmp += d[k]
            mycookie = 'tmvh=%s;%s' % (myFun1(tmp), mycookie)

            link = client.request(myurl[-1].decode('base64') + '&width=673&height=471.09999999999997', cookie=mycookie)
            match = re.search('<iframe src="(.+?)"', link)
            if match:
                linkVideo = match.group(1)
                return linkVideo
            return
        except:
            return
Example #22
    def play_list(self, url):
        items = []
        try:
            result = client.request(url)
            result = json.loads(result)
            items = result['items']
        except:
            pass

        for i in range(1, 5):
            try:
                if not 'nextPageToken' in result: raise Exception()
                next = url + '&pageToken=' + result['nextPageToken']
                result = client.request(next)
                result = json.loads(result)
                items += result['items']
            except:
                pass

        for item in items:
            try:
                title = item['snippet']['title']
                title = title.encode('utf-8')

                url = item['id']
                url = url.encode('utf-8')

                image = item['snippet']['thumbnails']['high']['url']
                if '/default.jpg' in image: raise Exception()
                image = image.encode('utf-8')

                self.list.append({'title': title, 'url': url, 'image': image})
            except:
                pass

        return self.list
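play_list() pages through a JSON feed via nextPageToken, which matches the YouTube Data API v3 playlistItems response shape; a hypothetical call (playlist id and key are placeholders, not values from this code):

    base = 'https://www.googleapis.com/youtube/v3/playlistItems'
    url = '%s?part=snippet&maxResults=50&playlistId=%s&key=%s' % (base, 'PLxxxxxxxx', 'API_KEY')
    videos = self.play_list(url)
    # -> [{'title': ..., 'url': <video id>, 'image': <high-res thumbnail url>}, ...]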
Example #23
    def __search(self, titles, year):
        try:
            query = self.search_link % (cleantitle.getsearch(titles[0].replace(' ','%20')))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = client.parseDOM(r, 'li', attrs={'class': 'item everyone-item over_online haveTooltip'})

            for i in r:
                title = client.parseDOM(i, 'a', ret='title')[0]
                url = client.parseDOM(i, 'a', ret='href')[0]
                data = client.request(url)
                y = re.findall('<p><span>Año:</span>(\d{4})', data)[0]
                original_t = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>',data, re.DOTALL)[0]
                original_t, title = cleantitle.get(original_t), cleantitle.get(title)

                if (t in title or t in original_t) and y == year :
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
Example #24
    def __search(self, titles, year, imdb):
        try:
            query = self.search_link % (urllib.quote_plus(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}), dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
            r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']), re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t]

            if len(r) > 1:
                for i in r:
                    data = client.request(urlparse.urljoin(self.base_link, i))
                    data = dom_parser.parse_dom(data, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                    data = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in data]
                    data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]

                    if len(data) >= 1:
                        url = i
            else:
                url = r[0]

            if url:
                return source_utils.strip_domain(url)
        except:
            return
Example #25
    def sources(self, url, hostDict, locDict):
        sources = []

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            #query = urlparse.urljoin(self.base_link, self.ajax_link)            
            #post = urllib.urlencode({'action':'sufi_search', 'search_string': title})
            
            result = client.request(query)
            r = client.parseDOM(result, 'div', attrs={'id':'showList'})
            r = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])     
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
            url = r[0]                     
            result = client.request(url)
            r = re.findall(r'video\s+id="\w+.*?src="([^"]+)".*?data-res="([^"]+)',result,re.DOTALL)
            
            for i in r:
                try:
                    # direct <video> links; quality comes from the page's data-res label
                    q = source_utils.label_to_quality(i[1])
                    sources.append({'source': 'cdn', 'quality': q, 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except Exception:
            return sources
Example #26
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            q = client.parseDOM(r, 'ul', attrs={'class': 'tabs'})[0]

            matches = re.compile('re">\d+.+?class="(\w{2})".+?c">([^>]+)<', re.DOTALL).findall(q)

            urls_id = re.compile('<div id="tab\d+"\s*class="tab_content"><script>(\w+)\("([^"]+)"\)</script>',re.DOTALL).findall(r)

            for i in range(0,len(urls_id)):

                lang, info = self.get_lang_by_type(matches[i][0])

                qual = matches[i][1]
                qual = 'HD' if 'HD' in qual or 'BR' in qual else 'SD'

                url, host = self.url_function(urls_id[i][1], urls_id[i][0])
                if 'goo' in url:
                    data = client.request(url)
                    url = re.findall('var\s*videokeyorig\s*=\s*"(.+?)"', data, re.DOTALL)[0]
                    url, host = 'http://hqq.tv/player/embed_player.php?vid=%s'%(url), 'netu.tv'

                sources.append({'source': host, 'quality': qual, 'language': lang, 'url': url, 'info': info, 'direct': False,'debridonly': False})

            return sources
        except:
            return sources
Example #27
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []

			if url == None: return sources

			url = urlparse.urljoin(self.base_link, url)

			h = {'User-Agent': client.agent()}

			r = client.request(url, headers=h, output='extended')

			s = client.parseDOM(r[0], 'ul', attrs = {'class': 'episodes'})
			s = client.parseDOM(s, 'a', ret='data.+?')
			s = [client.replaceHTMLCodes(i).replace(':', '=').replace(',', '&').replace('"', '').strip('{').strip('}') for i in s]

			for u in s:
				try:
					url = '/io/1.0/stream?%s' % u
					url = urlparse.urljoin(self.base_link, url)

					r = client.request(url)
					r = json.loads(r)

					url = [i['src'] for i in r['streams']]

					for i in url:
						try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
						except: pass
				except:
					pass

			return sources
		except:
			return sources
Example #28
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)
            q = re.findall("'(http://www.elreyxhd.+?)'",r, re.DOTALL)[0]
            links = client.request(q)
            links = client.parseDOM(links, 'a', ret='href')

            for url in links:
                lang, info = 'es', 'LAT'
                qual = 'HD'
                if 'http' not in url: continue
                if 'elrey' in url: continue

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue

                sources.append({'source': host, 'quality': qual, 'language': lang, 'url': url, 'info': info, 'direct':
                    False,'debridonly': False})

            return sources
        except:
            return sources
Example #29
    def __search(self, titles, year, content):
        try:

            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0] #cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})

            if content == 'movies':
                r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
            else:
                r = client.parseDOM(r, 'div', attrs={'id': 'series'})

            data = dom_parser.parse_dom(r, 'figcaption')

            for i in data:
                title = i[0]['title']
                title = cleantitle.get(title)
                if title in t:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])
                else:
                    url = dom_parser.parse_dom(i, 'a', req='href')
                    data = client.request(url[0][0]['href'])
                    data = re.findall('<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                    if titles[0] in data[0] and year == data[1]: return source_utils.strip_domain(url[0][0]['href'])

            return
        except:
            return
Example #30
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            title = urldata['title'].replace(':', ' ').lower()
            year = urldata['year']

            search_id = title.lower()
            start_url = self.search_link % (self.base_link, search_id.replace(' ','%20'))

            headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(start_url,headers=headers)
            Links = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"',re.DOTALL).findall(html)
            for link,name in Links:
                link = link.replace('\\','')
                if title.lower() in name.lower(): 
                    if year in name:
                        holder = client.request(link,headers=headers)
                        new = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(holder)[0]
                        end = client.request(new,headers=headers)
                        final_url = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(end)[0]
                        valid, host = source_utils.is_host_valid(final_url, hostDict)
                        sources.append({'source':host,'quality':'1080p','language': 'en','url':final_url,'info':[],'direct':False,'debridonly':False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('1080PMovies - Exception: \n' + str(failure))
            return sources
Example #31
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            s = client.request(self.base_link)
            s = re.findall('\'(http.+?)\'', s) + re.findall('\"(http.+?)\"', s)
            s = [i for i in s if urlparse.urlparse(self.base_link).netloc in i and len(i.strip('/').split('/')) > 3]
            s = s[0] if s else urlparse.urljoin(self.base_link, 'posts')
            s = s.strip('/')

            url = s + self.search_link % urllib.quote_plus(query)

            r = client.request(url)

            r = client.parseDOM(r, 'h2', attrs = {'class': 'post-title'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]), re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in r]
            r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]]
            r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r]
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[2]) and data['year'] == i[3]]
            r = [i for i in r if not any(x in i[4] for x in ['HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D'])]
            r = [i for i in r if '1080p' in i[4]][:1] + [i for i in r if '720p' in i[4]][:1]

            posts = [(i[1], i[0]) for i in r]

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = post[0]

                    u = client.request(post[1])
                    u = re.findall('\'(http.+?)\'', u) + re.findall('\"(http.+?)\"', u)
                    u = [i for i in u if not '/embed/' in i]
                    u = [i for i in u if not 'youtube' in i]

                    items += [(t, i) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #32
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client.request(r)

            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = client.request(f, headers={'Referer': r})

            r = client.parseDOM(result, 'div', attrs = {'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]
            r = [i[0] for i in r if 'pelispedia' in i[1]]

            links = []

            for u in r:
                result = client.request(u, headers={'Referer': f})

                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('"file"\s*:\s*"(.+?)"', url)
                    url = [i.split()[0].replace('\\/', '/') for i in url]

                    for i in url:
                        try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                        except: pass
                except:
                    pass

                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}

                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_flv_flsh/plugins/gkpluginsphp.php')
                    url = client.request(url, post=post, headers=headers)
                    url = json.loads(url)['link']

                    links.append({'source': 'gvideo', 'quality': 'HD', 'url': url})
                except:
                    pass

                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]
                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({'sou': 'pic', 'fv': '21', 'url': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk/plugins/protected.php')
                    url = client.request(url, post=post, headers=headers)
                    url = json.loads(url)[0]['url']

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
                except:
                    pass

            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Pelispedia', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #33
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None:
                raise Exception()

            if not self.api:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = int(
                data['year']
            ) if 'year' in data and not data['year'] == None else None
            season = int(
                data['season']
            ) if 'season' in data and not data['season'] == None else None
            episode = int(
                data['episode']
            ) if 'episode' in data and not data['episode'] == None else None
            query = '%s S%02dE%02d' % (
                title, season,
                episode) if 'tvshowtitle' in data else '%s %d' % (title, year)

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query += ' lang:%s' % self.language[0]
            query = urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, self.search_link)

            hostDict = hostprDict + hostDict

            iterations = self.streamLimit / self.streamIncrease
            last = self.streamLimit - (iterations * self.streamIncrease)
            if not last:
                iterations = iterations - 1
                last = self.streamIncrease
            iterations = iterations + 1

            seen_urls = set()
            for type in self.types:
                searchFrom = 0
                searchCount = self.streamIncrease
                for offset in range(iterations):
                    if iterations == offset + 1: searchCount = last
                    urlNew = url % (type, self.api, query, searchCount,
                                    searchFrom)
                    searchFrom = searchFrom + self.streamIncrease

                    results = client.request(urlNew)
                    results = json.loads(results)

                    apistatus = results['status']
                    if apistatus != 'success': break

                    results = results['result']

                    added = False
                    for result in results:
                        jsonName = result['title']
                        jsonSize = result['sizeinternal']
                        jsonExtension = result['extension']
                        jsonLanguage = result['lang']
                        jsonHoster = result['hostername'].lower()
                        jsonLink = result['hosterurls'][0]['url']

                        if jsonLink in seen_urls: continue
                        seen_urls.add(jsonLink)

                        if not hdlr in jsonName.upper(): continue

                        if not self.releaseValid(title, jsonName):
                            continue  # filter non en releases

                        if not jsonHoster in hostDict: continue

                        if jsonExtension == 'rar': continue

                        quality, info = source_utils.get_release_quality(
                            jsonName)
                        info.append(self.formatSize(jsonSize))
                        info.append(jsonName)
                        info = '|'.join(info)

                        sources.append({
                            'source': jsonHoster,
                            'quality': quality,
                            'language': jsonLanguage,
                            'url': jsonLink,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
                        added = True

                    if not added:
                        break

            return sources
        except:
            return sources
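
The paging arithmetic in Example #33 is easy to get wrong, so here is a minimal standalone sketch of the same loop shape (the streamLimit/streamIncrease values are hypothetical, and a print stands in for the actual request):

# Sketch of the paging math above: 25 results, 10 per page -> (0,10), (10,10), (20,5).
streamLimit = 25
streamIncrease = 10

iterations = streamLimit // streamIncrease           # full pages
last = streamLimit - (iterations * streamIncrease)   # remainder for the final page
if not last:
    iterations = iterations - 1
    last = streamIncrease
iterations = iterations + 1

searchFrom = 0
for offset in range(iterations):
    searchCount = last if offset + 1 == iterations else streamIncrease
    print('offset=%d count=%d' % (searchFrom, searchCount))
    searchFrom = searchFrom + streamIncrease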
Example #34
0
def authTrakt():
    try:
        if getTraktCredentialsInfo() == True:
            if control.yesnoDialog(
                    control.lang(32511).encode('utf-8'),
                    control.lang(32512).encode('utf-8'), '', 'Trakt'):
                control.setSetting(id='trakt.user', value='')
                control.setSetting(id='trakt.token', value='')
                control.setSetting(id='trakt.refresh', value='')
            raise Exception()

        result = getTraktAsJson('/oauth/device/code',
                                {'client_id': V2_API_KEY})
        verification_url = (control.lang(32513) %
                            result['verification_url']).encode('utf-8')
        user_code = (control.lang(32514) % result['user_code']).encode('utf-8')
        expires_in = int(result['expires_in'])
        device_code = result['device_code']
        interval = result['interval']

        progressDialog = control.progressDialog
        progressDialog.create('Trakt', verification_url, user_code)

        for i in range(0, expires_in):
            try:
                if progressDialog.iscanceled(): break
                time.sleep(1)
                if not float(i) % interval == 0: raise Exception()
                r = getTraktAsJson(
                    '/oauth/device/token', {
                        'client_id': V2_API_KEY,
                        'client_secret': CLIENT_SECRET,
                        'code': device_code
                    })
                if 'access_token' in r: break
            except:
                pass

        try:
            progressDialog.close()
        except:
            pass

        token, refresh = r['access_token'], r['refresh_token']

        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': V2_API_KEY,
            'trakt-api-version': 2,
            'Authorization': 'Bearer %s' % token
        }

        result = client.request(urlparse.urljoin(BASE_URL, '/users/me'),
                                headers=headers)
        result = utils.json_loads_as_str(result)

        user = result['username']

        control.setSetting(id='trakt.user', value=user)
        control.setSetting(id='trakt.token', value=token)
        control.setSetting(id='trakt.refresh', value=refresh)
        # Deliberate: raising here drops into the except handler below, which opens the settings dialog.
        raise Exception()
    except:
        control.openSettings('3.1')
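
Example #34 is Trakt's OAuth device-code flow: fetch a user code, show it to the user, then poll /oauth/device/token at the server-supplied interval until the user approves or the code expires. The same handshake, sketched with plain requests (the client credentials are hypothetical placeholders):

import time
import requests

API = 'https://api.trakt.tv'
CLIENT_ID, CLIENT_SECRET = 'your-client-id', 'your-client-secret'  # hypothetical

code = requests.post(API + '/oauth/device/code', json={'client_id': CLIENT_ID}).json()
print('Visit %s and enter %s' % (code['verification_url'], code['user_code']))

token = None
deadline = time.time() + int(code['expires_in'])
while time.time() < deadline:
    time.sleep(code['interval'])  # poll no faster than the server asks
    r = requests.post(API + '/oauth/device/token', json={
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
        'code': code['device_code'],
    })
    if r.status_code == 200:  # still-pending polls come back as errors
        token = r.json()['access_token']
        break
print('access token: %s' % token)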
Example #35
0
def __getTrakt(url, post=None):
    try:
        url = urlparse.urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': V2_API_KEY,
            'trakt-api-version': 2
        }

        if getTraktCredentialsInfo():
            headers.update({
                'Authorization':
                'Bearer %s' % control.setting('trakt.token')
            })

        result = client.request(url,
                                post=post,
                                headers=headers,
                                output='extended',
                                error=True)

        resp_code = result[1]
        resp_header = result[2]
        result = result[0]

        if resp_code in [
                '500', '502', '503', '504', '520', '521', '522', '524'
        ]:
            log_utils.log('Temporary Trakt Error: %s' % resp_code,
                          log_utils.LOGWARNING)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % resp_code,
                          log_utils.LOGWARNING)
            return

        if resp_code not in ['401', '405']:
            return result, resp_header

        # Reaching here means a 401/405: the access token has expired, so
        # exchange the refresh token for a new one and retry the call once.
        oauth = urlparse.urljoin(BASE_URL, '/oauth/token')
        opost = {
            'client_id': V2_API_KEY,
            'client_secret': CLIENT_SECRET,
            'redirect_uri': REDIRECT_URI,
            'grant_type': 'refresh_token',
            'refresh_token': control.setting('trakt.refresh')
        }

        result = client.request(oauth, post=json.dumps(opost), headers=headers)
        result = utils.json_loads_as_str(result)

        token, refresh = result['access_token'], result['refresh_token']

        control.setSetting(id='trakt.token', value=token)
        control.setSetting(id='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        result = client.request(url,
                                post=post,
                                headers=headers,
                                output='extended',
                                error=True)
        return result[0], result[2]
    except Exception as e:
        log_utils.log('Unknown Trakt Error: %s' % e, log_utils.LOGWARNING)
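
The refresh-and-retry shape of __getTrakt generalises well; here is a minimal sketch of the same pattern with the requests library (trakt_get and its parameters are hypothetical names, not part of the code above):

import requests

def trakt_get(path, token, refresh_token, client_id, client_secret):
    base = 'https://api.trakt.tv'
    headers = {'trakt-api-key': client_id, 'trakt-api-version': '2',
               'Authorization': 'Bearer %s' % token}
    resp = requests.get(base + path, headers=headers)
    if resp.status_code not in (401, 405):
        return resp  # success, or an error a fresh token would not fix

    # Token expired: trade the refresh token for a new access token, retry once.
    # (The real exchange in __getTrakt also sends redirect_uri; omitted here.)
    oauth = requests.post(base + '/oauth/token', json={
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
    }).json()
    headers['Authorization'] = 'Bearer %s' % oauth['access_token']
    return requests.get(base + path, headers=headers)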
Example #36
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            pack = None

            if 'exact' in data and data['exact']:
                query = title = data[
                    'tvshowtitle'] if 'tvshowtitle' in data else data['title']
                year = None
                season = None
                episode = None
                pack = False
                packCount = None
            else:
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']
                year = int(
                    data['year']
                ) if 'year' in data and not data['year'] == None else None
                season = int(
                    data['season']
                ) if 'season' in data and not data['season'] == None else None
                episode = int(
                    data['episode']) if 'episode' in data and not data[
                        'episode'] == None else None
                pack = data['pack'] if 'pack' in data else False
                packCount = data['packcount'] if 'packcount' in data else None

                if 'tvshowtitle' in data:
                    if pack: query = '%s %d' % (title, season)
                    else: query = '%s S%02dE%02d' % (title, season, episode)
                else:
                    query = '%s %d' % (title, year)
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.search_link)
            post = self.post_link % urllib.quote_plus(query)
            html = BeautifulSoup(client.request(url, post=post))

            htmlTable = html.find_all('table')[0]
            htmlRows = htmlTable.find_all('tr', recursive=False)
            for i in range(1, len(htmlRows)):
                try:
                    htmlColumns = htmlRows[i].find_all('td', recursive=False)

                    # Name
                    htmlName = htmlColumns[6].getText().strip()

                    # Link
                    htmlLink = htmlColumns[1].find_all('a')[0]['href']

                    # Seeds
                    try:
                        htmlSeeds = int(htmlColumns[4].getText().strip())
                    except:
                        htmlSeeds = None

                    # Metadata
                    meta = metadata.Metadata(name=htmlName,
                                             title=title,
                                             year=year,
                                             season=season,
                                             episode=episode,
                                             pack=pack,
                                             packCount=packCount,
                                             link=htmlLink,
                                             seeds=htmlSeeds)
                    meta.mIgnoreLength = 8  # Relax this, otherwise too many links are filtered out (eg: Avatar 2009).

                    # Ignore
                    if meta.ignore(size=False):
                        continue

                    # Add
                    sources.append({
                        'url': htmlLink,
                        'debridonly': False,
                        'direct': False,
                        'source': 'torrent',
                        'language': self.language[0],
                        'quality': meta.videoQuality(),
                        'metadata': meta,
                        'file': htmlName,
                        'pack': pack
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #37
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                if 'tvshowtitle' in data:
                    url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                    year = re.findall('(\d{4})', data['premiered'])[0]

                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()

                    r = client.request(url)

                    y = client.parseDOM(r, 'span', attrs = {'class': 'date'})
                    y += [i for i in client.parseDOM(r, 'div', attrs = {'class': 'metadatac'}) if 'date' in i]
                    y = re.findall('(\d{4})', y[0])[0]
                    if not y == year: raise Exception()

                else:
                    url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])

                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()

                    r = client.request(url)

            else:
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)


            links = client.parseDOM(r, 'iframe', ret='src')

            for link in links:
                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')

                    if not '.php' in url: raise Exception()

                    r = client.request(url, timeout='10')

                    s = re.compile('<script>(.+?)</script>', re.DOTALL).findall(r)

                    for i in s:
                        try: r += jsunpack.unpack(i)
                        except: pass

                    r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)

                    for i in r:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            return sources
Example #38
0
    def sources(self, url, hostDict, hostprDict):
        try:

            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            h = {'User-Agent': client.randomagent()}

            result = client.request(url, output='extended', headers=h)
            cookie = result[4]
            ajax_prov = client.parseDOM(result[0],
                                        'meta',
                                        attrs={'property': 'provision'},
                                        ret='content')[0]

            ajax_url = urlparse.urljoin(self.base_link,
                                        self.ajax_link) % ajax_prov
            h['X-CSRFToken'] = re.findall('csrftoken=(.*?);', cookie)[0]
            result = client.request(ajax_url,
                                    cookie=cookie,
                                    XHR=True,
                                    headers=h)

            r = client.parseDOM(result,
                                'div',
                                attrs={'class': 'host-container pull-left'})
            r = [(
                client.parseDOM(i,
                                'div',
                                attrs={'class': 'url'},
                                ret='data-url'),
                client.parseDOM(i,
                                'span',
                                attrs={'class': 'label label-default'}),
                client.parseDOM(i, 'img', attrs={'class': 'ttip'},
                                ret='title'),
                client.parseDOM(
                    i,
                    'span',
                    attrs={'class': 'glyphicon glyphicon-hd-video ttip'},
                    ret='title'),
            ) for i in r]

            r = [(self.html_parser.unescape(i[0][0]), i[1][0], i[2][0],
                  len(i[3]) > 0) for i in r]
            r = [(client.parseDOM(i[0], 'iframe', ret='src'), i[1], i[2], i[3])
                 for i in r]
            r = [(i[0][0], i[1], i[2], i[3]) for i in r if len(i[0]) > 0]

            for i in r:
                try:

                    host = urlparse.urlparse(i[0]).netloc
                    host = host.replace('www.', '').replace('embed.', '')
                    host = host.lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    lang, info = self.get_lang_by_type(i[1])

                    q = 'SD'
                    if 'Wysoka' in i[2]: q = 'HD'
                    if i[3] == True: q = '1080p'

                    sources.append({
                        'source': host,
                        'quality': q,
                        'language': lang,
                        'url': i[0],
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
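
Example #38 rides a Django-style CSRF handshake: the first page response sets a csrftoken cookie, and the follow-up AJAX call must echo it back in an X-CSRFToken header. The same dance with requests (both URLs are hypothetical):

import requests

s = requests.Session()
s.get('https://example.com/title/123')             # hypothetical page; sets csrftoken
token = s.cookies.get('csrftoken')
r = s.get('https://example.com/ajax/links/123',    # hypothetical XHR endpoint
          headers={'X-CSRFToken': token, 'X-Requested-With': 'XMLHttpRequest'})
print(r.status_code)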
Example #39
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                ep = data['episode']
                url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                    self.base_link, cleantitle.geturl(
                        data['tvshowtitle']), int(data['season']), ep)
                # output='geturl' returns the resolved episode URL (or None),
                # so assign it back to url instead of an unused variable.
                url = client.request(url,
                                     headers=headers,
                                     timeout='10',
                                     output='geturl')

                if url == None:
                    url = self.searchShow(data['tvshowtitle'], data['season'],
                                          aliases, headers)

            else:
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            if url == None: raise Exception()

            r = client.request(url, headers=headers, timeout='10')
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r,
                                        'a',
                                        attrs={'episode-data': ep},
                                        ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = client.request(link, headers=headers, timeout='10')
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try:
                            sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except:
                            pass
                else:
                    try:
                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if not host in hostDict: raise Exception()
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass

            return sources
        except:
            return sources
Example #40
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('[\\\\:;*?"<>|/ \+\']+', '-', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            # grab the (only?) relevant div and cut off the footer
            r = client.parseDOM(r, "div", attrs={'class': 'entry-content'})[0]
            r = re.sub('shareaholic-canvas.+', '', r, flags=re.DOTALL)

            # gather actual <a> links then clear all <a>/<img> to prep for naked-url scan
            # inner text could be useful if url looks like http://somehost.com/ugly_hash_377cbc738eff
            a_txt = ''
            a_url = ''
            a_txt = client.parseDOM(r, "a", attrs={'href': '.+?'})
            a_url = client.parseDOM(r, "a", ret="href")
            r = re.sub('<a .+?</a>', '', r, flags=re.DOTALL)
            r = re.sub('<img .+?>', '', r, flags=re.DOTALL)

            # check pre blocks for size and gather naked-urls
            size = ''
            pre_txt = []
            pre_url = []
            pres = client.parseDOM(r, "pre", attrs={'style': '.+?'})
            for pre in pres:
                try:
                    size = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', pre)[0]
                except:
                    pass

                url0 = re.findall(
                    'https?://[^ <"\'\s]+', pre,
                    re.DOTALL)  # bad form but works with this site
                txt0 = [size] * len(url0)
                pre_url = pre_url + url0
                pre_txt = pre_txt + txt0  # we're just grabbing raw urls so there's no other info

            r = re.sub('<pre .+?</pre>', '', r, flags=re.DOTALL)

            # assume info at page top is true for all movie links, and only movie links
            #  (and that otherwise, only <pre>'s have scrapable sizes)
            size = ''
            if not 'tvshowtitle' in data:
                try:
                    size = " " + re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))',
                                            r)[0]
                except:
                    pass

            # get naked urls (after exhausting <a>'s and <pre>'s)
            # note: all examples use full titles in links, so we can be careful
            raw_url = re.findall(
                'https?://[^ <"\'\s]+', r,
                re.DOTALL)  # bad form but works with this site
            raw_txt = [size] * len(
                raw_url
            )  # we're just grabbing raw urls so there's no other info

            # combine the 3 types of scrapes
            pairs = zip(a_url + pre_url + raw_url, a_txt + pre_txt + raw_txt)

            # merge the host lists once, outside the loop below
            hostDict = hostDict + hostprDict

            for pair in pairs:
                try:
                    url = str(pair[0])
                    info = re.sub(
                        '<.+?>', '',
                        pair[1])  #+ size  # usually (??) no <span> inside

                    # immediately abandon pairs with undesired traits
                    #  (if they stop using urls w/ titles, would need to accomodate here)
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    if not query.lower() in re.sub('[\\\\:;*?"<>|/ \+\'\.]+',
                                                   '-', url + info).lower():
                        raise Exception()

                    # establish size0 for this pair: 'size' is pre-loaded for movies only...
                    #  ...but prepend 'info' to lead with more-specific sizes (from a <pre>)
                    size0 = info + " " + size

                    # grab first reasonable data size from size0 string
                    try:
                        size0 = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))',
                                           size0)[0]
                        div = 1 if size0.endswith(('GB', 'GiB')) else 1024
                        size0 = float(re.sub('[^0-9\.]', '', size0)) / div
                        size0 = '%.2f GB' % size0
                    except:
                        size0 = ''
                        pass

                    # process through source_tools and hint with size0
                    quality, info = source_utils.get_release_quality(url, info)
                    info.append(size0)
                    info = ' | '.join(info)

                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

                except:
                    pass

            return sources
        except:
            return sources
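
The size normalisation used above (find a "1,433 MiB"-style string, convert to GB, format to two decimals) recurs in several of these scrapers; as a standalone sketch:

import re

def normalize_size(text):
    # Return 'X.XX GB' for the first size found in text, or '' if none.
    found = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', text)
    if not found: return ''
    size = found[0]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub('[^0-9\.]', '', size)) / div
    return '%.2f GB' % value

print(normalize_size('archive, single link, 1,433 MiB'))  # -> 1.40 GB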
Example #41
0
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         data_sources = eval(data['sources'])
         for i, s in data_sources:
             token = str(
                 self.___token(
                     {
                         'id': i,
                         'server': s,
                         'update': 0,
                         'ts': data['ts']
                     }, 'iQDWcsGqN'))
             query = (self.info_path % (data['ts'], token, i, s))
             url = urlparse.urljoin(self.base_link, query)
             for r in range(1, 3):
                 info_response = client.request(url, XHR=True, timeout=10)
                 if info_response != None: break
             grabber_dict = json.loads(info_response)
             try:
                 if grabber_dict['type'] == 'direct':
                     token64 = grabber_dict['params']['token']
                     randint = random.randint(1000000, 2000000)
                     query = (self.grabber_path %
                              (data['ts'], randint, i, token64))
                     url = urlparse.urljoin(self.base_link, query)
                     for r in range(1, 3):
                         response = client.request(url,
                                                   XHR=True,
                                                   timeout=10)
                         if response != None: break
                     sources_list = json.loads(response)['data']
                     for j in sources_list:
                         quality = j[
                             'label'] if not j['label'] == '' else 'SD'
                         quality = source_utils.label_to_quality(quality)
                         urls = None
                         if 'googleapis' in j['file']:
                             sources.append({
                                 'source': 'gvideo',
                                 'quality': quality,
                                 'language': 'en',
                                 'url': j['file'],
                                 'direct': True,
                                 'debridonly': False
                             })
                             continue
                         if 'lh3.googleusercontent' in j[
                                 'file'] or 'bp.blogspot' in j['file']:
                             try:
                                 newheaders = {
                                     'User-Agent':
                                     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                                     'Accept': '*/*',
                                     'Host': 'lh3.googleusercontent.com',
                                     'Accept-Language':
                                     'en-US,en;q=0.8,de;q=0.6,es;q=0.4',
                                     'Accept-Encoding':
                                     'identity;q=1, *;q=0',
                                     'Referer': self.film_url,
                                     'Connection': 'Keep-Alive',
                                     'X-Client-Data':
                                     'CJK2yQEIo7bJAQjEtskBCPqcygEIqZ3KAQjSncoBCKijygE=',
                                     'Range': 'bytes=0-'
                                 }
                                 resp = client.request(j['file'],
                                                       headers=newheaders,
                                                       redirect=False,
                                                       output='extended',
                                                       timeout='10')
                                 loc = resp[2]['Location']
                                 c = resp[2]['Set-Cookie'].split(';')[0]
                                 j['file'] = '%s|Cookie=%s' % (loc, c)
                                 urls, host, direct = [{
                                     'quality': quality,
                                     'url': j['file']
                                 }], 'gvideo', True
                             except:
                                 pass
                         valid, hoster = source_utils.is_host_valid(
                             j['file'], hostDict)
                         if not urls or urls == []:
                             urls, host, direct = source_utils.check_directstreams(
                                 j['file'], hoster)
                         for x in urls:
                             sources.append({
                                 'source': 'gvideo',
                                 'quality': x['quality'],
                                 'language': 'en',
                                 'url': x['url'],
                                 'direct': True,
                                 'debridonly': False
                             })
                 elif not grabber_dict['target'] == '':
                     url = 'https:' + grabber_dict[
                         'target'] if not grabber_dict['target'].startswith(
                             'http') else grabber_dict['target']
                     valid, hoster = source_utils.is_host_valid(
                         url, hostDict)
                     if not valid: continue
                     urls, host, direct = source_utils.check_directstreams(
                         url, hoster)
                     sources.append({
                         'source': hoster,
                         'quality': urls[0]['quality'],
                         'language': 'en',
                         'url': urls[0]['url'],
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('PLocker - Exception: \n' + str(failure))
         return sources
Example #42
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            try:
                if not 'tvshowtitle' in data: raise Exception()

                links = []

                f = [
                    'S%02dE%02d' % (int(data['season']), int(data['episode']))
                ]
                t = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                           data['tvshowtitle'])
                t = t.replace("&", "")

                q = self.search_link + urllib.quote_plus('%s %s' % (t, f[0]))

                q = urlparse.urljoin(self.base_link, q)
                result = client.request(q)
                result = json.loads(result)

                result = result['results']
            except:
                links = result = []

            for i in result:
                try:
                    if not cleantitle.get(t) == cleantitle.get(i['showName']):
                        raise Exception()

                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]'
                                   ).findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f): raise Exception()

                    quality = i['quality']

                    quality = quality.upper()

                    size = i['size']
                    size = float(size) / 1024
                    size = '%.2f GB' % size

                    if any(x in quality for x in ['HEVC', 'X265', 'H265']):
                        info = '%s | HEVC' % size
                    else:
                        info = size

                    if '1080P' in quality: quality = '1080p'
                    elif '720P' in quality: quality = 'HD'
                    else: quality = 'SD'

                    url = i['links']

                    links = []

                    for x in url.keys():
                        links.append({'url': url[x], 'quality': quality})

                    for link in links:
                        try:
                            url = link['url']
                            quality2 = link['quality']
                            if len(url) > 1: raise Exception()
                            url = url[0].encode('utf-8')

                            host = re.findall(
                                '([\w]+[.][\w]+)$',
                                urlparse.urlparse(
                                    url.strip().lower()).netloc)[0]
                            if not host in hostprDict: raise Exception()
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality2,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                        except:
                            pass

                except:
                    pass

            return sources
        except:
            return sources
Example #43
0
 def resolve(self, url):
     try:
         url = client.request(url, output='geturl')
         return url
     except:
         return
Example #44
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle']
            season = '%01d' % int(data['season'])
            episode = '%02d' % int(data['episode'])

            r = cache.get(self.ddlseries_tvcache, 120)

            r = [(i[0], i[3]) for i in r
                 if cleantitle.get(title) == cleantitle.get(i[1])
                 and season == i[2]]

            links = []

            for url, quality in r:
                try:
                    link = client.request(url)
                    vidlinks = client.parseDOM(link,
                                               'span',
                                               attrs={'class': 'overtr'})[0]
                    match = re.compile('href="([^"]+)[^>]*>\s*Episode\s+(\d+)<'
                                       ).findall(vidlinks)
                    match = [(i[0], quality) for i in match if episode == i[1]]
                    links += match
                except:
                    pass

            for url, quality in links:
                try:
                    if "protect-links" in url:
                        redirect = client.request(url)
                        url = re.findall('<a href="(.*?)" target="_blank">',
                                         redirect)
                        url = url[0]

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #45
0
    def __search(self, imdb, titles, year):
        try:
            q = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            q = urlparse.urljoin(self.base_link, q)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(q)

            r = dom_parser.parse_dom(
                r, 'tr', attrs={'id': re.compile('coverPreview.+?')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i,
                                       'div',
                                       attrs={'style': re.compile('.+?')}),
                  dom_parser.parse_dom(i, 'img', req='src')) for i in r]
            r = [(i[0][0].attrs['href'].strip(), i[0][0].content.strip(), i[1],
                  i[2]) for i in r if i[0] and i[2]]
            r = [(i[0], i[1], [
                x.content for x in i[2]
                if x.content.isdigit() and len(x.content) == 4
            ], i[3]) for i in r]
            r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r]
            r = [
                i for i in r if any('us_flag' in x.attrs['src'] for x in i[3])
            ]
            r = [(i[0], i[1], i[2], [
                re.findall('(\d+)', x.attrs['src']) for x in i[3]
                if 'smileys' in x.attrs['src']
            ]) for i in r]
            r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r]
            r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r]
            r = sorted(r, key=lambda x: x[3])[::-1]
            r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r]
            r = [(i[0], i[1], i[2]) for i in r if not i[3]]
            r = [i for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year

            r = [(client.replaceHTMLCodes(i[0]), i[1], i[2]) for i in r]

            match = [
                i[0] for i in r if cleantitle.get(i[1]) in t and year == i[2]
            ]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if match:
                        url = match[0]
                        break
                    r = client.request(urlparse.urljoin(self.base_link, i))
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            return source_utils.strip_domain(url)
        except:
            return
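
The __search above keeps a candidate when its title matches and its year is the exact year, a year either side, or '0' (unknown), preferring dated entries. That year-window filter as a minimal sketch, with candidates as hypothetical (url, title, year) tuples:

def match_by_year(candidates, titles, year):
    t = set(titles)
    y = [str(year), str(int(year) + 1), str(int(year) - 1), '0']
    hits = [c for c in candidates if c[1] in t and c[2] in y]
    hits.sort(key=lambda c: int(c[2]), reverse=True)  # dated entries before '0'
    return [c[0] for c in hits]

print(match_by_year([('/a', 'heat', '1995'), ('/b', 'heat', '0')], ['heat'], 1995))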
Example #46
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         data = urlparse.parse_qs(url)
         data = dict((i, data[i][0]) for i in data)
         clean_title = cleantitle.geturl(data['tvshowtitle'])
         query = (self.movie_search_path % clean_title)
         url = urlparse.urljoin(self.base_link, query)
         for r in range(1, 3):
             search_response = client.request(url, timeout=10)
             if search_response != None: break
         results_list = client.parseDOM(search_response,
                                        'div',
                                        attrs={'class': 'items'})[0]
         film_id = []
         film_tries = [
             '\/' + (clean_title + '-0' + season) + '[^-0-9](.+?)\"',
             '\/' + (clean_title + '-' + season) + '[^-0-9](.+?)\"',
             '\/' + clean_title + '[^-0-9](.+?)\"'
         ]
         for i in range(len(film_tries)):
             if not film_id:
                 film_id = re.findall(film_tries[i], results_list)
             else:
                 break
         film_id = film_id[0]
         query = (self.film_path % film_id)
         url = urlparse.urljoin(self.base_link, query)
         self.film_url = url
         for r in range(1, 3):
             film_response = client.request(url, timeout=10)
             if film_response != None: break
         ts = re.findall('(data-ts=\")(.*?)(\">)', film_response)[0][1]
         server_ids = client.parseDOM(film_response,
                                      'div',
                                      ret='data-id',
                                      attrs={'class': 'server row'})
         sources_dom_list = client.parseDOM(
             film_response, 'ul', attrs={'class': 'episodes range active'})
         if not re.findall('([^\/]*)\">' + episode + '[^0-9]',
                           sources_dom_list[0]):
             episode = '%02d' % int(episode)
         sources_list = []
         for i in sources_dom_list:
             try:
                 source_id = re.findall(
                     ('([^\/]*)\">' + episode + '[^0-9]'), i)[0]
                 sources_list.append(source_id)
             except:
                 pass
         sources_list = zip(sources_list, server_ids)
         data.update({
             'title': title,
             'premiered': premiered,
             'season': season,
             'episode': episode,
             'ts': ts,
             'sources': sources_list
         })
         url = urllib.urlencode(data)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('PLocker - Exception: \n' + str(failure))
         return
Example #47
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user != '' and self.password != ''):

                login = urlparse.urljoin(self.base_link, '/login.html')

                post = urllib.urlencode({
                    'username': self.user,
                    'password': self.password,
                    'submit': 'Login'
                })

                cookie = client.request(login,
                                        post=post,
                                        output='cookie',
                                        close=False)

                r = client.request(login,
                                   post=post,
                                   cookie=cookie,
                                   output='extended')

                headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
                headers = {}

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                query = urlparse.urljoin(
                    self.base_link, self.search_link %
                    urllib.quote_plus(cleantitle.getsearch(title)))
                query2 = urlparse.urljoin(
                    self.base_link,
                    self.search_link % re.sub('\s', '+', title))
                r = client.request(query)
                r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
                if len(r) == 0:
                    r = client.request(query2)
                    r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a', ret='title'),
                        client.parseDOM(r, 'a', ret='data-url'))

                if 'tvshowtitle' in data:
                    cltitle = cleantitle.get(title + 'season' + season)
                    cltitle2 = cleantitle.get(title +
                                              'season%02d' % int(season))
                else:
                    cltitle = cleantitle.get(title)
                    # keep the filter below from raising NameError for movies
                    cltitle2 = cltitle

                r = [
                    i for i in r if cltitle == cleantitle.get(i[1])
                    or cltitle2 == cleantitle.get(i[1])
                ]
                id = [re.findall('/(\d+)$', i[2])[0] for i in r][0]

                ajx = urlparse.urljoin(self.base_link,
                                       '/ajax/movie_episodes/' + id)

                r = client.request(ajx)
                if 'episode' in data:
                    eids = re.findall(
                        r'title=\\"Episode\s+%02d.*?data-id=\\"(\d+)' %
                        int(episode), r)
                else:
                    eids = re.findall(r'title=.*?data-id=\\"(\d+)', r)

                for eid in eids:
                    try:
                        ajx = 'ajax/movie_token?eid=%s&mid=%s&_=%d' % (
                            eid, id, int(time.time() * 1000))
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        [x, y] = re.findall(r"_x='([^']+)',\s*_y='([^']+)'",
                                            r)[0]
                        ajx = 'ajax/movie_sources/%s?x=%s&y=%s' % (eid, x, y)
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        r = json.loads(r)
                        r = r['playlist'][0]['sources']
                        for i in r:
                            try:
                                label = source_utils.label_to_quality(
                                    i['label'])
                            except:
                                label = 'SD'
                            sources.append({
                                'source': 'cdn',
                                'quality': label,
                                'language': 'en',
                                'url': i['file'],
                                'direct': True,
                                'debridonly': False
                            })
                    except:
                        pass

            return sources
        except:
            return sources
Example #48
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    c = client.parseDOM(post, 'content.+?')[0]

                    u = re.findall('>Single Link(.+?)p>\s*<span', c.replace('\n', ''))[0]

                    u = client.parseDOM(u, 'a', ret='href')

                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'

                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()
                    quality, info = source_utils.get_release_quality(name, item[1])

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url,hostDict)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #49
0
def request(url,
            check,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30'):
    try:
        r = client.request(url,
                           close=close,
                           redirect=redirect,
                           proxy=proxy,
                           post=post,
                           headers=headers,
                           mobile=mobile,
                           XHR=XHR,
                           limit=limit,
                           referer=referer,
                           cookie=cookie,
                           compression=compression,
                           output=output,
                           timeout=timeout)
        if r is not None and error is not False: return r
        if check in str(r) or str(r) == '': return r

        proxies = sorted(get(), key=lambda x: random.random())
        proxies = proxies[:3]

        for p in proxies:
            p += urllib.quote_plus(url)
            if post is not None:
                if isinstance(post, dict):
                    post = utils.byteify(post)
                    post = urllib.urlencode(post)
                p += urllib.quote_plus('?%s' % post)
            r = client.request(p,
                               close=close,
                               redirect=redirect,
                               proxy=proxy,
                               headers=headers,
                               mobile=mobile,
                               XHR=XHR,
                               limit=limit,
                               referer=referer,
                               cookie=cookie,
                               compression=compression,
                               output=output,
                               timeout='20')
            if check in str(r) or str(r) == '': return r
    except:
        pass
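
Example #49 wraps client.request with proxy failover: try the URL directly, and if the page does not contain the expected check marker, retry through up to three randomly chosen proxy prefixes. The shape of that fallback, sketched with hypothetical fetch/get_proxies callables:

import random
import urllib

def fetch_with_failover(url, fetch, get_proxies, check):
    # Try directly first; the check marker's presence signals a good page.
    r = fetch(url)
    if check in str(r): return r

    proxies = sorted(get_proxies(), key=lambda x: random.random())[:3]
    for p in proxies:
        r = fetch(p + urllib.quote_plus(url))  # Python 2; urllib.parse.quote_plus on 3
        if check in str(r): return r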
Example #50
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                if 'tvshowtitle' in data:
                    url = '%s/drama/%s/episode-%01d/' % (
                        self.base_link, cleantitle.geturl(
                            data['tvshowtitle']), int(data['episode']))
                else:
                    url = '%s/movie/%s/' % (self.base_link,
                                            cleantitle.geturl(data['title']))

                url = client.request(url, timeout='10', output='geturl')
                if url == None: raise Exception()

            else:
                url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, timeout='10')
            links = client.parseDOM(r, 'iframe', ret='src')

            for link in links:
                if 'vidnow' in link:
                    r = client.request(link, timeout='10')
                    s = re.findall('window\.atob\(\"(.*?)\"\)', r)
                    r = re.findall('(https:.*?(openload|redirector).*?)[\'\"]',
                                   r)

                    for i in s:
                        i = base64.b64decode(i)
                        try:
                            sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'ko', 'url': i, 'direct': True, 'debridonly': False})
                        except:
                            pass

                    # r holds (url, host) tuples from the regex above, where the
                    # captured host is either 'openload' or a google 'redirector'.
                    for i in r:
                        if 'openload' in i[0]:
                            try:
                                sources.append({'source': 'openload', 'quality': 'SD', 'language': 'ko', 'url': i[0], 'direct': False, 'debridonly': False})
                            except:
                                pass
                        elif 'redirector' in i[0]:
                            try:
                                sources.append({'source': 'gvideo', 'quality': directstream.googletag(i[0])[0]['quality'], 'language': 'ko', 'url': i[0], 'direct': True, 'debridonly': False})
                            except:
                                pass
                else:
                    pass

            return sources
        except:
            return sources
Example #51
def authTrakt():
    try:
        if getTraktCredentialsInfo() == True:
            if control.yesnoDialog(
                    control.lang(32511).encode("utf-8"),
                    control.lang(32512).encode("utf-8"),
                    "",
                    "Trakt",
            ):
                control.setSetting(id="trakt.user", value="")
                control.setSetting(id="trakt.token", value="")
                control.setSetting(id="trakt.refresh", value="")
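            # already authorised: optionally clear the stored credentials,
            # then jump to openSettings() via the outer except handler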
            raise Exception()

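        # device-code OAuth flow: ask Trakt for a user code, display it,
        # then poll /oauth/device/token until the user approves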
        result = getTraktAsJson("/oauth/device/code",
                                {"client_id": V2_API_KEY})
        verification_url = (control.lang(32513) %
                            result["verification_url"]).encode("utf-8")
        user_code = (control.lang(32514) % result["user_code"]).encode("utf-8")
        expires_in = int(result["expires_in"])
        device_code = result["device_code"]
        interval = result["interval"]

        progressDialog = control.progressDialog
        progressDialog.create("Trakt", verification_url, user_code)

        for i in range(0, expires_in):
            try:
                if progressDialog.iscanceled():
                    break
                time.sleep(1)
                if not float(i) % interval == 0:
                    raise Exception()
                r = getTraktAsJson(
                    "/oauth/device/token",
                    {
                        "client_id": V2_API_KEY,
                        "client_secret": CLIENT_SECRET,
                        "code": device_code,
                    },
                )
                if "access_token" in r:
                    break
            except:
                pass

        try:
            progressDialog.close()
        except:
            pass

        token, refresh = r["access_token"], r["refresh_token"]

        headers = {
            "Content-Type": "application/json",
            "trakt-api-key": V2_API_KEY,
            "trakt-api-version": 2,
            "Authorization": "Bearer %s" % token,
        }

        result = client.request(urlparse.urljoin(BASE_URL, "/users/me"),
                                headers=headers)
        result = utils.json_loads_as_str(result)

        user = result["username"]

        control.setSetting(id="trakt.user", value=user)
        control.setSetting(id="trakt.token", value=token)
        control.setSetting(id="trakt.refresh", value=refresh)
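        # deliberately raise so the outer handler reopens the settings
        # dialog with the freshly stored credentials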
        raise Exception()
    except:
        control.openSettings("3.1")
Example #52
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            if 'tvshowtitle' in data:
                title = data['tvshowtitle']
                hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
                query = '%s S%02dE%02d' % (title, int(data['season']),
                                           int(data['episode']))
            else:
                title = data['title']
                hdlr = data['year']
                query = '%s %s' % (title, data['year'])

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            items = []

            try:
                feed = True

                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)
                if r == None: feed = False

                posts = client.parseDOM(r, 'item')
                if not posts: feed = False

                items = []

                for post in posts:
                    try:
                        u = client.parseDOM(post, 'enclosure', ret='url')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            try:
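                # the RSS feed already produced items; skip this HTML fallback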
                if feed == True: raise Exception()

                url = self.search_link_2 % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []
                dupes = []

                for post in posts:
                    try:
                        t = client.parseDOM(post, 'a')[0]
                        t = re.sub('<.+?>|</.+?>', '', t)

                        x = re.sub(
                            '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                            '', t)
                        if not cleantitle.get(title) in cleantitle.get(x):
                            raise Exception()
                        y = re.findall(
                            '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                            t)[-1].upper()
                        if not y == hdlr: raise Exception()

                        fmt = re.sub(
                            '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                            '', t.upper())
                        fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                        fmt = [i.lower() for i in fmt]
                        if not any(i in ['1080p', '720p'] for i in fmt):
                            raise Exception()

                        if len(dupes) > 2: raise Exception()
                        dupes += [x]

                        u = client.parseDOM(post, 'a', ret='href')[0]

                        r = client.request(u)
                        u = client.parseDOM(r, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SceneRls - Exception: \n' + str(failure))
            return sources
Example #53
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        '''
        Takes episode information, finds the ts value and the list of
        sources, encodes them as name/value pairs, and returns a string
        of url params

        Keyword arguments:

        url -- string - url params
        imdb -- string - imdb tv show id
        tvdb -- string - tvdb tv show id
        title -- string - episode title
        premiered -- string - date the episode aired (format: year-month-day)
        season -- string - the episode's season
        episode -- string - the episode number

        Returns:

        url -- string - url encoded params

        '''
        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            clean_title = cleantitle.geturl(data['tvshowtitle'])
            query = (self.episode_search_path % clean_title)
            url = urlparse.urljoin(self.base_link, query)

            search_response = client.request(url)

            results_list = client.parseDOM(search_response,
                                           'div',
                                           attrs={'class':
                                                  'row movie-list'})[0]

            film_id = ''

            film_tries = [
                '\/' + (clean_title + '-0' + season) + '[^-0-9](.+?)\"',
                '\/' + (clean_title + '-' + season) + '[^-0-9](.+?)\"',
                '\/' + clean_title + '[^-0-9](.+?)\"'
            ]

            for film_try in film_tries:
                film_id = re.findall(film_try, results_list)
                if film_id: break

            film_id = film_id[0]

            query = (self.film_path % film_id)
            url = urlparse.urljoin(self.base_link, query)

            film_response = client.request(url)

            ts = re.findall('(data-ts=\")(.*?)(\">)', film_response)[0][1]

            sources_dom_list = client.parseDOM(
                film_response, 'ul', attrs={'class': 'episodes range active'})

            if not re.findall('([^\/]*)\">' + episode + '[^0-9]',
                              sources_dom_list[0]):
                episode = '%02d' % int(episode)

            sources_list = []

            for i in sources_dom_list:
                source_id = re.findall(('([^\/]*)\">' + episode + '[^0-9]'),
                                       i)[0]
                sources_list.append(source_id)

            data.update({
                'title': title,
                'premiered': premiered,
                'season': season,
                'episode': episode,
                'ts': ts,
                'sources': sources_list
            })

            url = urllib.urlencode(data)

            return url

        except Exception:
            return
Example #54
def __getTrakt(url, post=None):
    try:
        url = urlparse.urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {
            "Content-Type": "application/json",
            "trakt-api-key": V2_API_KEY,
            "trakt-api-version": 2,
        }

        if getTraktCredentialsInfo():
            headers.update({
                "Authorization":
                "Bearer %s" % control.setting("trakt.token")
            })

        result = client.request(url,
                                post=post,
                                headers=headers,
                                output="extended",
                                error=True)

        resp_code = result[1]
        resp_header = result[2]
        result = result[0]

        if resp_code in [
                "500", "502", "503", "504", "520", "521", "522", "524"
        ]:
            log_utils.log("Temporary Trakt Error: %s" % resp_code,
                          log_utils.LOGWARNING)
            return
        elif resp_code in ["404"]:
            log_utils.log("Object Not Found : %s" % resp_code,
                          log_utils.LOGWARNING)
            return
        elif resp_code in ["429"]:
            log_utils.log("Trakt Rate Limit Reached: %s" % resp_code,
                          log_utils.LOGWARNING)
            return

        if resp_code not in ["401", "405"]:
            return result, resp_header

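        # a 401/405 response means the access token expired: refresh it,
        # persist the new pair and retry the original request once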
        oauth = urlparse.urljoin(BASE_URL, "/oauth/token")
        opost = {
            "client_id": V2_API_KEY,
            "client_secret": CLIENT_SECRET,
            "redirect_uri": REDIRECT_URI,
            "grant_type": "refresh_token",
            "refresh_token": control.setting("trakt.refresh"),
        }

        result = client.request(oauth, post=json.dumps(opost), headers=headers)
        result = utils.json_loads_as_str(result)

        token, refresh = result["access_token"], result["refresh_token"]

        control.setSetting(id="trakt.token", value=token)
        control.setSetting(id="trakt.refresh", value=refresh)

        headers["Authorization"] = "Bearer %s" % token

        result = client.request(url,
                                post=post,
                                headers=headers,
                                output="extended",
                                error=True)
        return result[0], result[2]
    except Exception as e:
        log_utils.log("Unknown Trakt Error: %s" % e, log_utils.LOGWARNING)
Example #55
    def sourcesResolve(self, item, info=False):
        try:
            self.url = None

            u = url = item['url']

            d = item['debrid']
            direct = item['direct']
            local = item.get('local', False)

            provider = item['provider']
            call = [i[1] for i in self.sourceDict if i[0] == provider][0]
            u = url = call.resolve(url)

            if url == None or (not '://' in str(url) and not local):
                raise Exception()

            if not local:
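                # 'stack://' urls bundle multi-part streams; resolve each
                # part on its own, then re-join them below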
                url = url[8:] if url.startswith('stack:') else url

                urls = []
                for part in url.split(' , '):
                    u = part
                    if not d == '':
                        part = debrid.resolver(part, d)

                    elif not direct == True:
                        hmf = resolveurl.HostedMediaFile(
                            url=u,
                            include_disabled=True,
                            include_universal=False)
                        if hmf.valid_url() == True: part = hmf.resolve()
                    urls.append(part)

                url = 'stack://' + ' , '.join(urls) if len(urls) > 1 else urls[0]

            if url == False or url == None: raise Exception()

            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit(
                '.')[-1].replace('/', '').lower()
            if ext == 'rar': raise Exception()

            try:
                headers = url.rsplit('|', 1)[1]
            except:
                headers = ''
            headers = urllib.quote_plus(headers).replace(
                '%3D', '=') if ' ' in headers else headers
            headers = dict(urlparse.parse_qsl(headers))

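            # probe the final url once so dead or unplayable links are
            # filtered out before reaching the player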
            if url.startswith('http') and '.m3u8' in url:
                result = client.request(url.split('|')[0],
                                        headers=headers,
                                        output='geturl',
                                        timeout='20')
                if result == None: raise Exception()

            elif url.startswith('http'):
                result = client.request(url.split('|')[0],
                                        headers=headers,
                                        output='chunk',
                                        timeout='20')
                if result == None: raise Exception()

            self.url = url
            return url
        except:
            if info == True: self.errorForSources()
            return
Example #56
    def sources(self, url, hostDict, hostprDict):
        '''
        Loops over site sources and returns a list of dictionaries with
        corresponding file locker sources and information

        Keyword arguments:

        url -- string - url params

        Returns:

        sources -- list - a list of dictionaries of source information

        '''

        sources = []

        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
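            # 'sources' arrives as a stringified list in the url params;
            # pull the individual source ids back out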
            data['sources'] = re.findall("[^', u\]\[]+", data['sources'])

            for i in data['sources']:
                token = str(
                    self.__token({
                        'id': i,
                        'update': '0',
                        'ts': data['ts']
                    }))
                query = (self.info_path % (data['ts'], token, i))
                url = urlparse.urljoin(self.base_link, query)

                info_response = client.request(url, XHR=True)

                grabber_dict = json.loads(info_response)

                if grabber_dict['type'] == 'direct':
                    token64 = grabber_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i, token64))
                    url = urlparse.urljoin(self.base_link, query)

                    response = client.request(url, XHR=True)

                    sources_list = json.loads(response)['data']

                    for j in sources_list:
                        source = directstream.googletag(j['file'])[0]

                        sources.append({
                            'source': 'gvideo',
                            'quality': source['quality'],
                            'language': 'en',
                            'url': source['url'],
                            'direct': True,
                            'debridonly': False
                        })

            return sources

        except Exception:
            return sources
Example #57
    def resolve(self, url):
        return client.request(url, output='geturl')
Example #58
def evaluate(host):
    try:
        if 'animeshd' in host:
            host = client.request(host, output='geturl')

        xbmc.log('@#@HOST:%s' % host, xbmc.LOGNOTICE)
        if 'openload' in host:
            from resources.lib.modules import openload
            if openload.test_video(host):
                host = openload.get_video_openload(host)
            else:
                host = resolveurl.resolve(host)
            return host

        elif 'animehdpro' in host:
            data = client.request(host)
            host = re.compile('''file['"]:['"]([^'"]+)''',
                              re.DOTALL).findall(data)[0]
            host = requests.get(host).headers['location']
            xbmc.log('@#@HDPRO:%s' % host, xbmc.LOGNOTICE)
            return host + '|User-Agent=%s' % urllib.quote(client.agent())

        elif 'tiwi' in host:
            from resources.lib.modules import jsunpack
            data = client.request(host)
            if jsunpack.detect(data):
                data = jsunpack.unpack(data)
                link = re.compile('''\{file:['"]([^'"]+)''',
                                  re.DOTALL).findall(data)[0]
                xbmc.log('@#@HDPRO:%s' % link, xbmc.LOGNOTICE)
            else:
                #link = re.compile('''video\/mp4.+?src:['"](.+?)['"]''', re.DOTALL).findall(data)[0]
                link = re.compile('''dash\+xml.+?src:['"](.+?)['"]''',
                                  re.DOTALL).findall(data)[0]
                xbmc.log('@#@HDPRO:%s' % link, xbmc.LOGNOTICE)
            return link + '|User-Agent=%s&Referer=%s' % (urllib.quote(
                client.agent()), host)

        elif 'www.pelisplus.net' in host:
            res_quality = []
            stream_url = []

            headers = {'User-Agent': client.agent(), 'Referer': host}
            cj = requests.get(host, headers=headers).cookies
            cj = '__cfduid=%s' % str(cj['__cfduid'])
            vid_id = host.split('/')[-1]
            headers['Cookie'] = cj
            data = requests.post('https://www.pelisplus.net/api/sources/%s' %
                                 vid_id,
                                 headers=headers).json()
            streams = data['data']
            for stream in streams:
                url = stream['file']
                qual = stream['label']
                res_quality.append(qual)
                stream_url.append(url)
            if len(res_quality) > 1:
                dialog = xbmcgui.Dialog()
                ret = dialog.select('Ver en', res_quality)
                if ret == -1:
                    return
                host = stream_url[ret]
            else:
                # a single stream needs no quality dialog
                host = stream_url[0]
            xbmc.log('@#@HDPRO:%s' % host, xbmc.LOGNOTICE)
            return host + '|User-Agent=%s' % urllib.quote(client.agent())

        else:
            host = resolveurl.resolve(host)
            return host
    except:
        return host
Example #59
def google(url):
    try:
        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc == 'docs' or netloc == 'drive':
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

        headers = {'User-Agent': client.agent()}

        result = client.request(url, output='extended', headers=headers)

        try: headers['Cookie'] = result[2]['Set-Cookie']
        except: pass

        result = result[0]


        if netloc == 'docs' or netloc == 'drive':
            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i) for i in result], [])


        elif netloc == 'photos':
            result = result.replace('\r','').replace('\n','').replace('\t','')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            result = result.replace('\\u003d','=').replace('\\u0026','&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [googletag(i)[0] for i in result]


        elif netloc == 'picasaweb':
            id = re.compile('#(\d*)').findall(url)[0]

            result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            if len(result) > 1: result = [i for i in result if str(id) in i['link'][0]['href']][0]
            elif len(result) == 1: result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i) for i in result], [])


        elif netloc == 'plus':
            id = (urlparse.urlparse(url).path).split('/')[-1]

            result = result.replace('\r','').replace('\n','').replace('\t','')
            result = result.split('"%s"' % id)[-1].split(']]')[0]

            result = result.replace('\\u003d','=').replace('\\u0026','&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [googletag(i)[0] for i in result]


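        # keep the best available link per quality tier,
        # in preference order 1080p > HD > SD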
        url = []
        try: url += [[i for i in result if i['quality'] == '1080p'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'HD'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'SD'][0]]
        except: pass

        for i in url: i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})

        if url == []: return
        return url
    except:
        return
Example #60
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            if 'episode' in data:
                url = self.__get_episode_url(data)
                get_body = 'type=episode&%s=%s&imd_id=%s&seasonsNo=%02d&episodesNo=%02d'
            else:
                url = self.__get_movie_url(data)

            response = client.request(url)
            url = re.findall('<iframe .+? src="(.+?)"', response)[0]

            response = client.request(url)

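            # scrape the token, the slice seeds and the name/value pair
            # used to derive the x-token header and the grabber body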
            token = re.findall('var tc = \'(.+?)\'', response)[0]
            seeds = re.findall(
                '_tsd_tsd\(s\) .+\.slice\((.+?),(.+?)\).+ return .+? \+ \"(.+?)\"\+\"(.+?)";',
                response)[0]
            pair = re.findall('\'type\': \'.+\',\s*\'(.+?)\': \'(.+?)\'',
                              response)[0]

            header_token = self.__xtoken(token, seeds)
            body = 'tokenCode=' + token

            headers = {
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'x-token': header_token
            }

            url = urlparse.urljoin(self.source_link, self.decode_file)
            response = client.request(url,
                                      XHR=True,
                                      post=body,
                                      headers=headers)

            sources_dict = json.loads(response)

            for source in sources_dict:
                try:
                    if 'vidushare.com' in source:
                        sources.append({
                            'source': 'CDN',
                            'quality': 'HD',
                            'language': 'en',
                            'url': source,
                            'direct': True,
                            'debridonly': False
                        })
                except Exception:
                    pass

            # only episode lookups define a grabber body above; guard so
            # movie lookups do not raise and lose the sources already found
            sources_dict = []
            if 'episode' in data:
                body = get_body % (pair[0], pair[1], data['imdb'],
                                   int(data['season']), int(data['episode']))

                url = urlparse.urljoin(self.source_link, self.grabber_file)
                response = client.request(url, XHR=True, post=body,
                                          headers=headers)

                sources_dict = json.loads(response)

            for source in sources_dict:
                try:
                    quality = source_utils.label_to_quality(source['label'])
                    link = source['file']

                    if 'lh3.googleusercontent' in link:
                        link = directstream.googleredirect(link)

                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'direct': True,
                        'debridonly': False
                    })

                except Exception:
                    pass

            return sources

        except:
            return sources