Example #1
    def movie(self, imdb, title, year):
        try:
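            # Search the site, parse the result tiles into (link, title)
            # pairs, reduce the links to site-relative paths, dedupe, then
            # probe each cleaned-title match against the requested year.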
            t = cleantitle.get(title)

            try:
                query = self.search2_link % urllib.quote_plus(title)
                query = urlparse.urljoin(self.base_link, query)

                result = client.request(query)

                r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
                r = [x for y,x in enumerate(r) if x not in r[:y]]
                r = [i[0] for i in r if t == cleantitle.get(i[1])]

                for i in r:
                    url = self._info(i, year)
                    if not url == None: return url
            except:
                pass

        except:
            return
Example #2
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
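            # Rebuild the show metadata from the url query string, search for
            # the "<title>-season-N" page, then pick the link whose anchor
            # text is the episode number.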
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = cache.get(client.request, 1, search_url)
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Example #3
    def __prepare_links(self,links):
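        # Extract bitrate, language, stream health and the show_webplayer()
        # arguments from each raw table row; only sopcast/acestream streams
        # are kept.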
        new = []
        for link in links:
            try:
                try:
                    bitrate = client.parseDOM(link, "td", attrs={"class":"bitrate"})[0]
                    bitrate = ' ' + re.sub("<[^>]*>","",bitrate) 
                except:
                    bitrate = ' '

                try:
                    lang = client.parseDOM(link,"img",ret="title")[0]
                    lang = ' ' + re.sub("<[^>]*>","",lang)
                except:
                    lang = ' '


                health = re.findall('&nbsp;(\d+)',link)[0]
                streamer_tmp,video,eid,lid,ci,si,jj,url = re.findall('show_webplayer\(\'(\w+)\',\s*\'(\w+)\',\s*(\w+),\s*(\w+),\s*(\w+),\s*(\w+),\s*\'(\w+)\'\).+?href="(.+?)">',link)[0]
                title = "%s (health %s%%) %s %s"%(streamer_tmp,health,lang,bitrate)
                if 'sopcast' in streamer_tmp or 'acestream' in streamer_tmp:
                    new.append((url,title))
            except:
                pass
        return new
Example #4
File: alltube.py Project: mpie/repo
    def sources(self, url, hostDict, hostprDict):
        try:
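            # Each result row carries a data-iframe token, a host logo (alt
            # text) and a type cell; the token is appended to the page URL as
            # the source link.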
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)

            links = client.parseDOM(result, 'tr')
            links = [(client.parseDOM(i, 'a', attrs={'class': 'watch'}, ret='data-iframe')[0],
                    client.parseDOM(i, 'img', ret='alt')[0],
                    client.parseDOM(i, 'td', attrs={'class':'text-center'})[0]) for i in links]

            for i in links:
                try:
                    url1 = '%s?%s' % (url, i[0])
                    url1 = url1.encode('utf-8')
                    language, info = self.get_language_by_type(i[2])
                    
                    sources.append({'source': i[1].encode('utf-8'), 'quality': 'SD', 'language': language, 'url': url1, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #5
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
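            # Look up the film's primary_language on its IMDb page so the
            # search can be issued with the matching language slug.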
            langMap = {'hi':'hindi', 'ta':'tamil', 'te':'telugu', 'ml':'malayalam', 'kn':'kannada', 'bn':'bengali', 'mr':'marathi', 'pa':'punjabi'}

            lang = 'http://www.imdb.com/title/%s/' % imdb
            lang = client.request(lang)
            lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
            lang = [i for i in lang if 'primary_language' in i]
            lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
            lang = [i['primary_language'] for i in lang if 'primary_language' in i]
            lang = langMap[lang[0][0]]

            q = self.search_link % (lang, urllib.quote_plus(title))
            q = urlparse.urljoin(self.base_link, q)

            t = cleantitle.get(title)

            r = client.request(q)

            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs = {'class': 'info'})) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
            r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = str(r)
            return url
        except:
            return
Example #6
    def sources(self, url, hostDict, hostprDict):
        try:
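            # Collect the embedded iframes, request each one and read its
            # <source> tags; each stream is resolved to a quality via
            # directstream.googletag.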
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = client.parseDOM(r, 'iframe', ret='src')

            for u in r:
                try:
                    if not u.startswith('http') and not 'vidstreaming' in u: raise Exception()

                    url = client.request(u)
                    url = client.parseDOM(url, 'source', ret='src')

                    for i in url:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('GoGoAnime - Exception: \n' + str(failure))
            return sources
Example #7
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
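            # Match the episode by name and air date; if that fails, fall
            # back to the season-N-episode-N slug in the link itself.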
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = client.replaceHTMLCodes(url[0][0])
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #8
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = proxy.request(url, 'tv shows')
            r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception() 

            url = url[0][0]
            url = proxy.parse(url)

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #9
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
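            # Resolve the TVDb id to the show's name through TVMaze, then
            # search the site's listing for a title/year match.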
            tv_maze = tvmaze.tvMaze()
            tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
            tvshowtitle = tvshowtitle['name']

            t = cleantitle.get(tvshowtitle)

            q = urlparse.urljoin(self.base_link, self.search_link)
            q = q % urllib.quote_plus(tvshowtitle)

            r = client.request(q)

            r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('GoGoAnime - Exception: \n' + str(failure))
            return
Example #10
    def sources(self, url, hostDict, hostprDict):
        try:
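            # Build a "<title> SxxExx" (or "<title> <year>") query, fetch the
            # RSS search feed and collect the <enclosure> URL of every item.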
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')

            for post in posts:
                Links = client.parseDOM(post, 'enclosure', ret='url')
                if len(Links) > 0:
                    for vid_url in Links:
                        quality,info = source_utils.get_release_quality(url, vid_url)
                        host = vid_url.split('//')[1].replace('www.','')
                        host = host.split('/')[0].lower()
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return sources
Example #11
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
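            # Try the site's XHR autocomplete endpoint first; if it returns
            # no usable JSON, fall back to scraping the plain search page.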
            t = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle.query(tvshowtitle))
            p = urllib.urlencode({'term': q})

            r = client.request(self.search_link, post=p, XHR=True)
            try: r = json.loads(r)
            except: r = None

            if r:
                r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
            else:
                r = requests.get(self.search_link_2 % q).text
                r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
                r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = r[0][0]
            url = proxy.parse(url)

            url = url.strip('/').split('/')[-1]
            url = url.encode('utf-8')
            return url
        except:
            return
Example #12
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
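            # hostDict is remapped to (name-without-tld, full name) pairs so
            # the page's bare host labels can be mapped back to real hosters.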
            if url == None: return sources

            hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
            locDict = [i[0] for i in hostDict]

            print urlparse.urljoin(self.base_link, url)

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = client.parseDOM(r, 'div', attrs={'class': 'filmicerik'})
            r = client.parseDOM(r, 'p')
            r = [(client.parseDOM(i, 'iframe', ret='src'), client.parseDOM(i, 'b'), client.parseDOM(i, 'span', attrs={'class': 'lg'})) for i in r]  # parse the span from each row, not from the whole list
            r = [(i[0], [x.lower().replace('lecteur','').strip() for x in i[1]], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            r = [(i[0], [[y[1] for y in hostDict if y[0] == x][0] for x in i[1] if x in locDict], i[2], re.findall('\((.+?)\)$', i[2])) for i in r]
            r = [(dict(zip(i[0], i[1])), i[3][0] if len(i[2]) > 0 else i[2]) for i in r]

            for links, lang in r:
                for link, host in links.iteritems():
                    sources.append({'source': host, 'quality': 'SD', 'language': 'fr', 'info': lang, 'url': link, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #13
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
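            # The site prints air dates as "Month D YYYY", so the ISO
            # premiered date is reformatted before comparing.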
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'tv_episode_item')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = url[0][0]
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #14
    def __search(self, title, season):
        try:
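            # Result titles look like "<title> saison <n>"; both the cleaned
            # title and the season number must match.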
            query = self.search_link % (urllib.quote_plus(cleantitle.query(title)))
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(title)

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
            r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
            r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(i[2]) == int(season)][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #15
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
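            # Search by the show title, then by the local title, and pick the
            # link whose <span> carries the wanted episode number.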
            if url == None: return

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            url = self.__search(data['tvshowtitle'], season)
            if not url and data['tvshowtitle'] is not data['localtvshowtitle']: url = self.__search(data['localtvshowtitle'], season)
            if not url: return

            print urlparse.urljoin(self.base_link, url)

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = client.parseDOM(r, 'div', attrs={'class': 'keremiya_part'})
            r = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(r))
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span')) for i in r]
            r = [(i[0][0], re.findall('(\d+)', i[1][0])) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [i[0] for i in r if len(i[1]) > 0 and int(i[1][0]) == int(episode)][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #16
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
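            # Only anime/animation shows are handled: anything else fails the
            # Trakt genre check and returns '0'.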
            r = 'search/tvdb/%s?type=show&extended=full' % tvdb
            r = json.loads(trakt.getTrakt(r))
            if not r: return '0'

            d = r[0]['show']['genres']
            if not ('anime' in d or 'animation' in d): return '0'

            tv_maze = tvmaze.tvMaze()
            tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
            tvshowtitle = tvshowtitle['name']

            t = cleantitle.get(tvshowtitle)

            q = urlparse.urljoin(self.base_link, self.search_link)
            q = q % urllib.quote_plus(tvshowtitle)

            r = client.request(q)

            r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #17
    def sources(self, url, hostDict, hostprDict):
        
        sources = []
        try:
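            # Links live in each row's data-bind attribute; "Wysoka" (Polish
            # for "high") in the quality cell marks an HD source.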

            if url == None: return sources
            result = client.request(url)
            
            rows = client.parseDOM(result, 'tr', attrs={'data-id':'.*?'}) 
            

            for row in rows:
                try:
                    link = client.parseDOM(row, 'td', attrs={'class':'name hover'}, ret='data-bind')[0]
                    link = re.findall(r"'(.*?)'", link, re.DOTALL)[0] 
                                
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    if not valid: continue                                   
                    
                    found_quality = client.parseDOM(row, 'td')[1] 
                    q = 'SD'
                    if 'Wysoka' in found_quality: q = 'HD'
                    
                    type_desc= client.parseDOM(row, 'font')[0]
                    lang, info = self.get_lang_by_type(type_desc)

                    sources.append({'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #18
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
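            # Probe the direct /title-year URL first and only fall back to
            # the search page when the proxy lookup returns nothing.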
            url = self.search_link % (cleantitle.geturl(title), year)

            q = urlparse.urljoin(self.base_link, url)

            r = proxy.geturl(q)
            if not r == None: return url

            t = cleantitle.get(title)

            q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
            q = urlparse.urljoin(self.base_link, q)

            r = client.request(q)

            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1])) for i in r]
            r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]]) for i in r]
            r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = re.findall('(?://.+?|)(/.+)', r[0])[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #19
    def __search(self, titles, year):
        try:
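            # Search with "<title> <year>" and scan each result card's <h3>
            # for a matching title/year pair.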

            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0]+' '+year)))

            query = urlparse.urljoin(self.base_link, query)

            t =  cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})

            r = client.parseDOM(r, 'h3')

            for i in r:
                data = re.findall('<span.*?>(.+?)</span>.+?date">\s*\((\d{4}).*?</span>', i, re.DOTALL)
                for found_title, found_year in data:
                    # compare the scraped title/year against the requested ones
                    # (the original shadowed the year argument, making the check a no-op)
                    if cleantitle.get(found_title) in t and found_year == year:
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        return source_utils.strip_domain(url)

            return
        except:
            return
Example #20
    def sources(self, url, hostDict, hostprDict):
        
        sources = []
        try:
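            # The video tab lists one <li> per mirror; host, language, info
            # and quality all come from the link's description text.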

            if url == None: return sources
            url = url + self.video_tab
            result = client.request(url)
                                    
            rows = client.parseDOM(result, 'ul', attrs={'class':'player_ul'})[0]
            rows = client.parseDOM(rows, 'li')        

            for row in rows:
                try:
                    desc = client.parseDOM(row, 'a')[0]
                    link = client.parseDOM(row, 'a', ret='href')[0]            
                    
                    host, lang, info, q = self.get_info_from_desc(desc)

                    sources.append({'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #21
    def movie(self, imdb, title, year):
        try:
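            # The year match tolerates off-by-one release years; the final
            # URL is unwrapped from q=/u= redirect parameters when present.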
            query = self.search_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            result = self.request(query, 'movie_table')
            result = client.parseDOM(result, 'div', attrs = {'class': 'movie_table'})

            title = cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='alt')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[0] for i in result if title == cleantitle.get(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #22
    def sources(self, url, hostDict, hostprDict):
        try:
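            # Each row hides the real link base64-encoded in a url= query
            # parameter; the <span> class doubles as the quality label.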
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')

            links = client.parseDOM(result, 'tbody')

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality,info = source_utils.get_release_quality(quality, url)

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #23
    def imdb_person_list(self, url):
        try:
            result = client.request(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
        except:
            return

        for item in items:
            try:
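                # Pull the name, the nmXXXXXXX person id (rebuilt via
                # person_link) and the headshot, which is rewritten to a
                # 500px-wide ._SX500 variant.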
                name = client.parseDOM(item, 'a', ret='title')[0]
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = re.findall('(nm\d*)', url, re.I)[0]
                url = self.person_link % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                image = client.parseDOM(item, 'img', ret='src')[0]
                if not ('._SX' in image or '._SY' in image): raise Exception()
                image = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', image)
                image = client.replaceHTMLCodes(image)
                image = image.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'image': image})
            except:
                pass

        return self.list
Example #24
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
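            # The first 'mtos' block is skipped (range starts at 1); rows
            # whose flag image src contains 'in.' are skipped as well.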
            if not url:
                return sources
            r = client.request(url)
            links = client.parseDOM(r, 'div', attrs={'class': 'mtos'})

            for i in range(1, len(links)):
                idioma = client.parseDOM(links[i], 'img', ret= 'src')[0]
                if 'in.' in idioma: continue
                quality = client.parseDOM(links[i], 'div', attrs={'class': 'dcalidad'})[0]
                servidor = re.findall("src='.+?'\s*/>(.+?)</div>", links[i])[0]

                lang, info = self.get_lang_by_type(idioma)
                quality = self.quality_fixer(quality)

                link = dom_parser.parse_dom(links[i], 'a', req='href')[0][0]['href']
                url = link
                if 'streamcloud' in url: quality = 'SD'
                valid, host = source_utils.is_host_valid(servidor, hostDict)

                sources.append({'source': host, 'quality': quality, 'language': lang, 'url': url, 'info': info,
                                'direct':False,'debridonly': False})

            return sources
        except:
            return sources
Example #25
    def sources(self, url, hostDict, hostprDict):
        try:
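            # The player iframe exposes k1/k2 values and a numeric id in the
            # body style; these are recombined into the /player/ URL that
            # returns the stream JSON.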
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs = {'class': 'player_wraper'})
            r = client.parseDOM(r, 'iframe', ret='src')[0]
            r = urlparse.urljoin(url, r)
            r = client.request(r, referer=url)
            a = client.parseDOM(r, 'div', ret='value', attrs = {'id': 'k2'})[-1]
            b = client.parseDOM(r, 'div', ret='value', attrs = {'id': 'k1'})[-1]
            c = client.parseDOM(r, 'body', ret='style')[0]
            c = re.findall('(\d+)',  c)[-1]
            r = '/player/%s?s=%s&e=%s' % (a, b, c)
            r = urlparse.urljoin(url, r)
            r = client.request(r, referer=url)
            r = re.findall('"(?:url|src)"\s*:\s*"(.+?)"', r)

            for i in r:
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
Example #26
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
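            # Probe the title-slug URL directly; when it misses, fetch the
            # Spanish title from IMDb and retry with that slug.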
            url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')

            if not r:
                url = 'http://www.imdb.com/title/%s' % imdb
                url = client.request(url, headers={'Accept-Language':'es-ES'})
                url = client.parseDOM(url, 'title')[0]
                url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
                url = cleantitle.normalize(url.encode("utf-8"))
                url = self.tvsearch_link % cleantitle.geturl(url)

                r = urlparse.urljoin(self.base_link, url)
                r = client.request(r, limit='1')
                r = client.parseDOM(r, 'title')

            if not year in r[0]: raise Exception()

            return url
        except:
            return
Example #27
    def sources(self, url, hostDict, hostprDict):
        try:
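            # The page hides one player behind document.write(Base64.decode())
            # and lists the remaining mirrors as server_line rows.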
            sources = []
            if url == None: return sources

            html = client.request(url)
            try:
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)', html)[0]
                b64 = base64.b64decode(v)
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': True, 'debridonly': False})
                except:
                    pass
            except:
                pass
            parsed = client.parseDOM(html, 'div', {'class': 'server_line'})
            parsed = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in parsed]
            if parsed:
                for i in parsed:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0]
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        if 'other' in host: continue
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            return
Example #28
    def imdb_user_list(self, url):
        try:
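            # Each list row links to /list/<id>/; the id is spliced into the
            # imdblist_link template.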
            result = client.request(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
        except:
            return

        for item in items:
            try:
                name = client.parseDOM(item, 'a')[0]
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = url.split('/list/', 1)[-1].replace('/', '')
                url = self.imdblist_link % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'context': url})
            except:
                pass

        self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
        return self.list
Example #29
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
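            # Build a "<tvshowtitle> SxxExx" search and return every result
            # page whose link contains both the show title and the episode
            # tag.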
            pages = []
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})

            season_base = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', season_base)
            tvshowtitle = data['tvshowtitle']
            tvshowtitle = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', tvshowtitle)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "+")
            tvshowtitle = tvshowtitle.replace("&", "and")
            tvshowtitle = tvshowtitle.replace("  ", " ")
            tvshowtitle = tvshowtitle.replace(" ", "+")

            start_url = urlparse.urljoin(self.base_link, self.search_link % (tvshowtitle, query))

            html = client.request(start_url)
            results = client.parseDOM(html, 'h2', attrs={'class':'entry-title'})
            for content in results:
                found_link = client.parseDOM(content, 'a', ret='href')[0]
                if self.base_link in found_link:
                    if cleantitle.get(data['tvshowtitle']) in cleantitle.get(found_link):
                        if cleantitle.get(season_base) in cleantitle.get(found_link):
                            pages.append(found_link)
            return pages
        except:
            failure = traceback.format_exc()
            log_utils.log('ALLRLS - Exception: \n' + str(failure))
            return pages
Example #30
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
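            # For titles containing a colon, only the part up to (and
            # including) the colon is posted to the title search.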

            query = urlparse.urljoin(self.base_link, self.search_link)
            if ':' in title:
                title2 = title.split(':')[0] + ':'
                post = 'search=%s&what=title' % title2

            else: post = 'search=%s&what=title' % cleantitle.getsearch(title)


            t = cleantitle.get(title)

            r = client.request(query, post=post)
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a',)) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = urlparse.urljoin(self.base_link, re.findall('(?://.+?|)(/.+)', r)[0])
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            return
Example #31
    def imdb_list(self, url):
        try:
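            # date[N] placeholders in the URL become dates N days back;
            # watchlist URLs are first resolved (and cached) to their ls-id
            # export link.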
            for i in re.findall('date\[(\d+)\]', url):
                url = url.replace(
                    'date[%s]' % i,
                    (self.datetime -
                     datetime.timedelta(days=int(i))).strftime('%Y-%m-%d'))

            if url == self.imdbwatchlist_link:

                def imdb_watchlist_id(url):
                    return re.findall('/export[?]list_id=(ls\d*)',
                                      client.request(url))[0]

                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist_link % url

            result = client.request(url)

            result = result.replace('\n', '')
            result = result.decode('iso-8859-1').encode('utf-8')

            items = client.parseDOM(result, 'tr', attrs={'class': '.+?'})
            items += client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'list_item.+?'})
        except:
            return

        try:
            next = client.parseDOM(result,
                                   'span',
                                   attrs={'class': 'pagination'})
            next += client.parseDOM(result,
                                    'div',
                                    attrs={'class': 'pagination'})
            name = client.parseDOM(next[-1], 'a')[-1]
            if 'laquo' in name: raise Exception()
            next = client.parseDOM(next, 'a', ret='href')[-1]
            next = url.replace(
                urlparse.urlparse(url).query,
                urlparse.urlparse(next).query)
            next = client.replaceHTMLCodes(next)
            next = next.encode('utf-8')
        except:
            next = ''

        for item in items:
            try:
                try:
                    title = client.parseDOM(item, 'a')[1]
                except:
                    pass
                try:
                    title = client.parseDOM(item,
                                            'a',
                                            attrs={'onclick': '.+?'})[-1]
                except:
                    pass
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                year = client.parseDOM(item,
                                       'span',
                                       attrs={'class': 'year_type'})[0]
                year = re.compile('(\d{4})').findall(year)[-1]
                year = year.encode('utf-8')

                if int(year) > int((self.datetime).strftime('%Y')):
                    raise Exception()

                imdb = client.parseDOM(item, 'a', ret='href')[0]
                try:
                    imdb = client.parseDOM(item, 'a', ret='href')[1]
                except:
                    pass
                imdb = 'tt' + re.sub('[^0-9]', '', imdb.rsplit('tt', 1)[-1])
                imdb = imdb.encode('utf-8')

                poster = '0'
                try:
                    poster = client.parseDOM(item, 'img', ret='src')[0]
                except:
                    pass
                try:
                    poster = client.parseDOM(item, 'img', ret='loadlate')[0]
                except:
                    pass
                if not ('_SX' in poster or '_SY' in poster): poster = '0'
                poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*',
                                '_SX500', poster)
                poster = client.replaceHTMLCodes(poster)
                poster = poster.encode('utf-8')

                genre = client.parseDOM(item, 'span', attrs={'class': 'genre'})
                genre = client.parseDOM(genre, 'a')
                genre = ' / '.join(genre)
                if genre == '': genre = '0'
                genre = client.replaceHTMLCodes(genre)
                genre = genre.encode('utf-8')

                try:
                    duration = re.compile('(\d+?) mins').findall(item)[-1]
                except:
                    duration = '0'
                duration = client.replaceHTMLCodes(duration)
                duration = duration.encode('utf-8')

                try:
                    rating = client.parseDOM(item,
                                             'span',
                                             attrs={'class':
                                                    'rating-rating'})[0]
                except:
                    rating = '0'
                try:
                    rating = client.parseDOM(rating,
                                             'span',
                                             attrs={'class': 'value'})[0]
                except:
                    rating = '0'
                if rating == '' or rating == '-': rating = '0'
                rating = client.replaceHTMLCodes(rating)
                rating = rating.encode('utf-8')

                try:
                    votes = client.parseDOM(
                        item,
                        'div',
                        ret='title',
                        attrs={'class': 'rating rating-list'})[0]
                except:
                    votes = '0'
                try:
                    votes = re.compile('[(](.+?) votes[)]').findall(votes)[0]
                except:
                    votes = '0'
                if votes == '': votes = '0'
                votes = client.replaceHTMLCodes(votes)
                votes = votes.encode('utf-8')

                try:
                    mpaa = client.parseDOM(item,
                                           'span',
                                           attrs={'class': 'certificate'})[0]
                except:
                    mpaa = '0'
                try:
                    mpaa = client.parseDOM(mpaa, 'span', ret='title')[0]
                except:
                    mpaa = '0'
                if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
                mpaa = mpaa.replace('_', '-')
                mpaa = client.replaceHTMLCodes(mpaa)
                mpaa = mpaa.encode('utf-8')

                director = client.parseDOM(item,
                                           'span',
                                           attrs={'class': 'credit'})
                director += client.parseDOM(item,
                                            'div',
                                            attrs={'class': 'secondary'})
                try:
                    director = [
                        i for i in director if 'Director:' in i or 'Dir:' in i
                    ][0]
                except:
                    director = '0'
                director = director.split('With:', 1)[0].strip()
                director = client.parseDOM(director, 'a')
                director = ' / '.join(director)
                if director == '': director = '0'
                director = client.replaceHTMLCodes(director)
                director = director.encode('utf-8')

                cast = client.parseDOM(item, 'span', attrs={'class': 'credit'})
                cast += client.parseDOM(item,
                                        'div',
                                        attrs={'class': 'secondary'})
                try:
                    cast = [i for i in cast
                            if 'With:' in i or 'Stars:' in i][0]
                except:
                    cast = '0'
                cast = cast.split('With:', 1)[-1].strip()
                cast = client.replaceHTMLCodes(cast)
                cast = cast.encode('utf-8')
                cast = client.parseDOM(cast, 'a')
                if cast == []: cast = '0'

                plot = '0'
                try:
                    plot = client.parseDOM(item,
                                           'span',
                                           attrs={'class': 'outline'})[0]
                except:
                    pass
                try:
                    plot = client.parseDOM(item,
                                           'div',
                                           attrs={'class':
                                                  'item_description'})[0]
                except:
                    pass
                plot = plot.rsplit('<span>', 1)[0].strip()
                if plot == '': plot = '0'
                plot = client.replaceHTMLCodes(plot)
                plot = plot.encode('utf-8')

                self.list.append({
                    'title': title,
                    'originaltitle': title,
                    'year': year,
                    'premiered': '0',
                    'studio': '0',
                    'genre': genre,
                    'duration': duration,
                    'rating': rating,
                    'votes': votes,
                    'mpaa': mpaa,
                    'director': director,
                    'writer': '0',
                    'cast': cast,
                    'plot': plot,
                    'code': imdb,
                    'imdb': imdb,
                    'tmdb': '0',
                    'tvdb': '0',
                    'poster': poster,
                    'banner': '0',
                    'fanart': '0',
                    'next': next
                })
            except:
                pass

        return self.list
Example #32
    def sources(self, url, hostDict, hostprDict):
        try:
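            # Debrid-only RSS scraper: each <item> title is checked against
            # the cleaned title and the SxxExx/year tag before its enclosure
            # URLs are accepted; CAM sources are dropped if anything better
            # exists.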
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'enclosure', ret='url', attrs={'type': 'video.+?'})

                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GiB|MiB))', post)
                    s = s[0] if s else '0'

                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GiB|MiB))', item[2])[-1]
                        div = 1 if size.endswith('GiB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #33
    def sources(self, url, hostDict, hostprDict):
        try:
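            # Log in first when credentials are configured; then look up the
            # movie id, fetch its episode ids via /ajax/movie_episodes, and
            # trade each episode's x/y tokens for the stream JSON.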
            sources = []

            if url == None: return sources

            if self.user != '' and self.password != '':
                login = urlparse.urljoin(self.base_link, '/login.html')

                post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

                cookie = client.request(login, post=post, output='cookie', close=False)

                r = client.request(login, post=post, cookie=cookie, output='extended')

                headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
                headers = {}


            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                query = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
                query2 = urlparse.urljoin(self.base_link, self.search_link % re.sub('\s','+',title))
                r = client.request(query)
                r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
                if len(r)==0:
                    r = client.request(query2)
                    r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'), client.parseDOM(r, 'a', ret='data-url'))
                
                if 'tvshowtitle' in data:                   
                    cltitle = cleantitle.get(title+'season'+season)
                    cltitle2 = cleantitle.get(title+'season%02d'%int(season))
                else:
                    cltitle = cleantitle.get(title)
                    cltitle2 = cltitle  # no season suffix for movies; keeps the comparison below defined

                r = [i for i in r if cltitle == cleantitle.get(i[1]) or cltitle2 == cleantitle.get(i[1])]
                id = [re.findall('/(\d+)$',i[2])[0] for i in r][0]

                ajx = urlparse.urljoin(self.base_link, '/ajax/movie_episodes/'+id)

                r = client.request(ajx)
                if 'episode' in data:
                    eids = re.findall(r'title=\\"Episode\s+%02d.*?data-id=\\"(\d+)'%int(episode),r)
                else:
                    eids = re.findall(r'title=.*?data-id=\\"(\d+)',r)

                for eid in eids:
                    try:
                        ajx = 'ajax/movie_token?eid=%s&mid=%s&_=%d' % (eid, id, int(time.time() * 1000))
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        [x,y] = re.findall(r"_x='([^']+)',\s*_y='([^']+)'",r)[0]
                        ajx = 'ajax/movie_sources/%s?x=%s&y=%s'%(eid,x,y)
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        r = json.loads(r)
                        r = r['playlist'][0]['sources']
                        for i in r:
                            try: label = source_utils.label_to_quality(i['label']) 
                            except: label = 'SD'
                            sources.append({'source': 'cdn', 'quality': label, 'language': 'en', 'url': i['file'], 'direct': True, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Example #34
    def sources(self, url, hostDict, hostprDict):
        try:
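            # Movies are matched by the embedded IMDb id plus a >=720p video
            # line; episodes by title and SxxExx tag. Download links are then
            # collected from each post body.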
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            imdb = data['imdb']

            content = 'episode' if 'tvshowtitle' in data else 'movie'

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)


            try:
                #if feed == True: raise Exception()

                url = self.search_link_2 % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)
                myheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0',
                   'Host': 'scenedown.in',
                   'Connection': 'keep-alive',
                   'Accept-Encoding': 'gzip, deflate, sdch'
                }
                r = client.request(url, headers=myheaders)

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = [] ; dupes = []

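                # first pass: collect (title, link, size) candidates from each search result post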
                for post in posts:
                    try:
                        t = client.parseDOM(post, 'a')[0]

                        if content == 'movie':
                            x = re.findall('/(tt\d+)', post)[0]
                            if not x == imdb: raise Exception()
                            q = re.findall('<strong>\s*Video\s*:\s*</strong>.+?\s(\d+)', post)[0]
                            if not int(q) >= 720: raise Exception()
                            if len(dupes) > 3: raise Exception()
                            dupes += [x]

                        elif content == 'episode':
                            x = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                            if not cleantitle.get(title) in cleantitle.get(x): raise Exception()
                            y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', t)[-1].upper()
                            if not y == hdlr: raise Exception()
                            if len(dupes) > 0: raise Exception()
                            dupes += [x]

                        u = client.parseDOM(post, 'a', ret='href')[0]
                        myheaders['Referer'] = url
                        r = client.request(u, headers=myheaders).replace('\n', '')

                        u = client.parseDOM(r, 'div', attrs={'class': 'postContent'})[0]
                        u = re.split('id\s*=\s*"more-\d+"', u)[-1]

                        if content == 'episode':
                            u = re.compile('(?:<strong>|)(.+?)</strong>(.+?)(?:<strong>|$)', re.MULTILINE|re.DOTALL).findall(u)
                            u = [(re.sub('<.+?>|</.+?>|>', '', i[0]), i[1]) for i in u]
                            u = [i for i in u if '720p' in i[0].lower()][0]
                            u, r, t = u[1], u[1], u[0]

                        u = client.parseDOM(u, 'p')
                        u = [client.parseDOM(i, 'a', ret='href') for i in u]
                        u = [i[0] for i in u if len(i) == 1]
                        if not u: raise Exception()

                        s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', r)
                        s = s[0] if s else '0'

                        items += [(t, i, s) for i in u]
                    except:
                        pass
            except:
                pass


            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

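                    # tokenize everything after the release tag to read the format flags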
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #35
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            scraper = cfscrape.create_scraper()

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            premDate = ''
            
            query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "-")
            
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            url = "http://proxybb.com/" + query
            if 'tvshowtitle' not in data: url = url + "-1080p"

            r = scraper.get(url).content
            
            if r == None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = "http://proxybb.com/" + query
                r = scraper.get(url).content

            
            for loopCount in range(0,2):
                if loopCount == 1 or (r == None and 'tvshowtitle' in data):
                    # retry with an episode-by-premiere-date query
                    premDate = re.sub('[ \.]','-',data['premiered'])
                    query = re.sub('[\\\\:;*?"<>|/\-\']', '', data['tvshowtitle'])
                    query = query.replace("&", " and ").replace("  ", " ").replace(" ", "-")
                    query = query + "-" + premDate

                    url = "http://proxybb.com/" + query
                    url = url.replace('The-Late-Show-with-Stephen-Colbert','Stephen-Colbert')

                    r = scraper.get(url).content
                    
                posts = client.parseDOM(r, "div", attrs={"class": "content"})
                hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                name = str(i)
                                if hdlr in name.upper(): items.append(name)
                                elif len(premDate) > 0 and premDate in name.replace(".","-"): items.append(name)
                                
                            except:
                                pass
                    except:
                        pass
                        
                if len(items) > 0: break

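            # de-duplicate hoster links across both search passes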
            seen_urls = set()

            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls: continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(host2.strip().lower()).netloc)[0]

                    if not host in hostDict: raise Exception()
                    if any(x in host2 for x in ['.rar', '.zip', '.iso']): continue

                    if '720p' in host2:
                        quality = 'HD'
                    elif '1080p' in host2:
                        quality = '1080p'
                    elif '2160p' in host2:
                        quality = '4K'
                    else:
                        quality = 'SD'

                    info = ' | '.join(info)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': host2, 'info': info, 'direct': False, 'debridonly': True})
                    
                except:
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check
            return sources
        except:
            return sources
Example #36
    def sources(self, url, hostDict, hostprDict):

        hostDict = hostDict + hostprDict

        sources = []

        # they use a peculiar full-word url scheme for tv eps query
        # also prepare here a by-date scheme query
        if 'tvshowtitle' in url:
            request2 = '%s %s' % (url['tvshowtitle'],
                                  re.sub('\D+', '-', url['premiered']))
            request2 = self.base_link + self.search_link % re.sub(
                '\W+', '-', request2)
            log_utils.log('*** request2: %s' % request2)

            request = '%s season %s episode %s' % (
                url['tvshowtitle'], int(url['season']), int(url['episode']))
        else:
            request = '%s %s' % (url['title'], url['year'])

        request = self.base_link + self.search_link % re.sub(
            '\W+', '-', request)
        log_utils.log('***	request: %s' % request)

        # pull html but if ep got a 404, try ep by premiere date
        html = client.request(request)
        if html == None and 'tvshowtitle' in url:
            html = client.request(request2)

        # grab the relevant div and chop off the footer
        html = client.parseDOM(html, "div", attrs={"id": "content"})[0]
        html = re.sub('class="crp_related.+', '', html, flags=re.DOTALL)

        # pre-load *some* size (for movies, mostly)
        try:
            size0 = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', html)[0]
        except:
            size0 = ''

        # this split is based on TV shows, soooo... might screw up movies
        sects = html.split('<strong>')

        for sect in sects:
            hrefs = client.parseDOM(sect,
                                    "a",
                                    attrs={"class": "autohyperlink"},
                                    ret='href')
            if not hrefs: continue

            # filenames (with useful info) seem predictably located
            try:
                fn = re.match('(.+?)</strong>', sect).group(1)
            except:
                fn = ''
            log_utils.log('*** fn: %s' % fn)

            # sections under filenames usually have sizes (for tv at least)
            try:
                size = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', sect)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9\.]', '', size)) / div
                size = '%.2f GB' % size
            except:
                size = size0

            for url in hrefs:
                quality, info = source_utils.get_release_quality(url, fn)
                info.append(size)
                info = ' | '.join(info)
                log_utils.log(' ** (%s %s) url=%s' %
                              (quality, info, url))  #~~~~~~~~~~~

                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue

                log_utils.log(' ** VALID! (host=%s)' % host)  #~~~~~~~~~~~~~~~
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': False})

        return sources
Example #37
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

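                    # parseDOM treats the tag name as a regex, so 'content.+?'
                    # also matches RSS <content:encoded> elements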
                    c = client.parseDOM(post, 'content.+?')[0]

                    u = re.findall('>article class=(.+?)p>\s*<span', c.replace('\n', ''))[0]

                    u = client.parseDOM(u, 'a', ret='href')

                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'

                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()
                    quality, info = source_utils.get_release_quality(name, item[1])

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url,hostDict)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #38
File: debrid.py Project: shannah/exodus
def resolver(url, debrid):
    u = url
    u = u.replace('filefactory.com/stream/', 'filefactory.com/file/')

    try:
        if not debrid == 'realdebrid' and not debrid == True: raise Exception()

        if '' in credentials()['realdebrid'].values(): raise Exception()
        id, secret, token, refresh = credentials()['realdebrid'][
            'id'], credentials()['realdebrid']['secret'], credentials(
            )['realdebrid']['token'], credentials()['realdebrid']['refresh']

        USER_AGENT = 'Kodi Exodus/3.0'

        post = urllib.urlencode({'link': u})
        headers = {
            'Authorization': 'Bearer %s' % token,
            'User-Agent': USER_AGENT
        }
        url = 'https://api.real-debrid.com/rest/1.0/unrestrict/link'

        result = client.request(url, post=post, headers=headers, error=True)
        result = json.loads(result)

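        # a bad_token response triggers the oauth device-flow refresh, then one retry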
        if 'error' in result and result['error'] == 'bad_token':
            result = client.request(
                'https://api.real-debrid.com/oauth/v2/token',
                post=urllib.urlencode({
                    'client_id':
                    id,
                    'client_secret':
                    secret,
                    'code':
                    refresh,
                    'grant_type':
                    'http://oauth.net/grant_type/device/1.0'
                }),
                headers={'User-Agent': USER_AGENT},
                error=True)
            result = json.loads(result)
            if 'error' in result: return

            headers['Authorization'] = 'Bearer %s' % result['access_token']
            result = client.request(url, post=post, headers=headers)
            result = json.loads(result)

        url = result['download']
        return url
    except:
        pass

    try:
        if not debrid == 'premiumize' and not debrid == True: raise Exception()

        if '' in credentials()['premiumize'].values(): raise Exception()
        user, password = credentials()['premiumize']['user'], credentials(
        )['premiumize']['pass']

        url = 'http://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s' % (
            user, password, urllib.quote_plus(u))
        result = client.request(url, close=False)
        url = json.loads(result)['result']['location']
        return url
    except:
        pass

    try:
        if not debrid == 'alldebrid' and not debrid == True: raise Exception()

        if '' in credentials()['alldebrid'].values(): raise Exception()
        user, password = credentials()['alldebrid']['user'], credentials(
        )['alldebrid']['pass']

        login_data = urllib.urlencode({
            'action': 'login',
            'login_login': user,
            'login_password': password
        })
        login_link = 'http://alldebrid.com/register/?%s' % login_data
        cookie = client.request(login_link, output='cookie', close=False)

        url = 'http://www.alldebrid.com/service.php?link=%s' % urllib.quote_plus(
            u)
        result = client.request(url, cookie=cookie, close=False)
        url = client.parseDOM(result,
                              'a',
                              ret='href',
                              attrs={'class': 'link_dl'})[0]
        url = client.replaceHTMLCodes(url)
        url = '%s|Cookie=%s' % (url, urllib.quote_plus(cookie))
        return url
    except:
        pass

    try:
        if not debrid == 'rpnet' and not debrid == True: raise Exception()

        if '' in credentials()['rpnet'].values(): raise Exception()
        user, password = credentials()['rpnet']['user'], credentials(
        )['rpnet']['pass']

        login_data = urllib.urlencode({
            'username': user,
            'password': password,
            'action': 'generate',
            'links': u
        })
        login_link = 'http://premium.rpnet.biz/client_api.php?%s' % login_data
        result = client.request(login_link, close=False)
        result = json.loads(result)
        url = result['links'][0]['generated']
        return url
    except:
        return
Example #39
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle']
            season = '%01d' % int(data['season'])
            episode = '%02d' % int(data['episode'])

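            # cached show index rows: (url, title, season, quality)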
            r = cache.get(self.ddlseries_tvcache, 120)

            r = [(i[0], i[3]) for i in r
                 if cleantitle.get(title) == cleantitle.get(i[1])
                 and season == i[2]]

            links = []

            for url, quality in r:
                try:
                    link = client.request(url)
                    vidlinks = client.parseDOM(link,
                                               'span',
                                               attrs={'class': 'overtr'})[0]
                    match = re.compile('href="([^"]+)[^>]*>\s*Episode\s+(\d+)<'
                                       ).findall(vidlinks)
                    match = [(i[0], quality) for i in match if episode == i[1]]
                    links += match
                except:
                    pass

            for url, quality in links:
                try:
                    if "protect-links" in url:
                        redirect = client.request(url)
                        url = re.findall('<a href="(.*?)" target="_blank">',
                                         redirect)
                        url = url[0]

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #40
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = data['year']

            if 'tvshowtitle' in data:
                episode = data['episode']
                season = data['season']
                url = self._search(data['tvshowtitle'], data['year'], aliases,
                                   headers)
                url = url.replace(
                    'online-free',
                    'season-%s-episode-%s-online-free' % (season, episode))

            else:
                episode = None
                year = data['year']
                url = self._search(data['title'], data['year'], aliases,
                                   headers)

            url = url if 'http' in url else urlparse.urljoin(
                self.base_link, url)
            result = client.request(url)

            result = client.parseDOM(result,
                                     'li',
                                     attrs={'class': 'link-button'})
            links = client.parseDOM(result, 'a', ret='href')
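            # cap at ten hoster (non-google) sources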
            i = 0
            for l in links:
                if i == 10: break
                try:
                    l = l.split('=')[1]
                    l = urlparse.urljoin(self.base_link, self.video_link % l)
                    result = client.request(l,
                                            post={},
                                            headers={'Referer': url})
                    u = result if 'http' in result else 'http:' + result
                    if 'google' in u:
                        valid, hoster = source_utils.is_host_valid(u, hostDict)
                        urls, host, direct = source_utils.check_directstreams(
                            u, hoster)
                        for x in urls:
                            sources.append({
                                'source': host,
                                'quality': x['quality'],
                                'language': 'en',
                                'url': x['url'],
                                'direct': direct,
                                'debridonly': False
                            })

                    else:
                        valid, hoster = source_utils.is_host_valid(u, hostDict)
                        if not valid: continue
                        try:
                            u.decode('utf-8')
                            sources.append({
                                'source': hoster,
                                'quality': 'SD',
                                'language': 'en',
                                'url': u,
                                'direct': False,
                                'debridonly': False
                            })
                            i += 1
                        except:
                            pass

                except:
                    pass

            return sources
        except:
            return sources
Example #41
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

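            # the site publishes rotating mirror urls on its landing page; fall back to /index2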
            s = client.request(self.base_link)
            s = re.findall('\'(http.+?)\'', s) + re.findall('\"(http.+?)\"', s)
            s = [i for i in s if urlparse.urlparse(self.base_link).netloc in i and len(i.strip('/').split('/')) > 3]
            s = s[0] if s else urlparse.urljoin(self.base_link, 'index2')
            s = s.strip('/')

            url = s + self.search_link % urllib.quote_plus(query)

            r = client.request(url)

            r = client.parseDOM(r, 'h2', attrs = {'class': 'post-title'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]), re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in r]
            r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]]
            r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r]
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[2]) and data['year'] == i[3]]
            r = [i for i in r if not any(x in i[4] for x in ['HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D'])]
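            # keep at most one 1080p and one 720p release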
            r = [i for i in r if '1080p' in i[4]][:1] + [i for i in r if '720p' in i[4]][:1]

            posts = [(i[1], i[0]) for i in r]

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = post[0]

                    u = client.request(post[1])
                    u = re.findall('\'(http.+?)\'', u) + re.findall('\"(http.+?)\"', u)
                    u = [i for i in u if not '/embed/' in i]
                    u = [i for i in u if not 'youtube' in i]

                    items += [(t, i) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        # items are (name, url) pairs here, so look for a size
                        # token in the release name itself
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #42
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            try:
                url = url.rsplit('.', 1)[0] + '/watching.html'
                r = client.request(url)

                qual = client.parseDOM(r, 'span', attrs={'class':
                                                         'quality'})[0]
                if any(x in qual.lower() for x in ['ts', 'cam']):
                    quality = 'CAM'
                elif any(x in qual.lower() for x in ['hd', '720p']):
                    quality = '720p'
                else:
                    quality = 'SD'
                if 'hdtv' in qual.lower(): quality = 'SD'

                if 'tvshowtitle' in data:
                    r = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'series-list'})[0]
                else:
                    r = client.parseDOM(r, 'div', attrs={'class':
                                                         'pas-list'})[0]

                items = client.parseDOM(r, 'li')

                if 'tvshowtitle' in data:
                    items = [(client.parseDOM(i,
                                              'span',
                                              attrs={'class': 'epinum'})[0], i)
                             for i in items]
                    items = [(re.sub('<[^<>]+>', '', i[0]), i[1])
                             for i in items]
                    items = [
                        i[1] for i in items
                        if episode == int(i[0].split(' #')[-1])
                    ]
                    if not len(items) == 1: raise Exception()
                    items = items[0].split('\n')

                for item in items:
                    try:
                        link = client.parseDOM(item, 'a', ret='data-link')[0]
                        if not link.startswith('http'): link = 'http:' + link
                        if 'megadrive.co' in link:
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': True,
                                'debridonly': False
                            })
                        else:
                            valid, hoster = source_utils.is_host_valid(
                                link, hostDict)
                            urls, host, direct = source_utils.check_directstreams(
                                link, hoster)
                            if valid:
                                for x in urls:
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': x['url'],
                                        'direct': direct,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
Example #43
 def searchShow(self, title, season, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         t = cleantitle.get(title)
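         # three search passes: "<title> Sxx", "<title> Season x", then "<title> <year>"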
         url = urlparse.urljoin(
             self.search_base, self.search_link % urllib.quote_plus(
                 cleantitle.query('%s S%02d' %
                                  (title.replace('\'', '-'), int(season)))))
         #sr = client.request(url, headers=headers, timeout='10')
         sr = self.scraper.get(url).content
         if sr:
             r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
             r = [(client.parseDOM(i, 'a', ret='href'),
                   client.parseDOM(i, 'a', ret='title')) for i in r]
             r = [(i[0][0], i[1][0]) for i in r
                  if len(i[0]) > 0 and len(i[1]) > 0]
             r = [(i[0], re.findall('(.+?)\s+-\s+S(\d+)', i[1])) for i in r]
             r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                  if len(i[1]) > 0]
             r = [
                 i[0] for i in r
                 if t == cleantitle.get(i[1]) and int(season) == int(i[2])
             ][0]
         else:
             url = urlparse.urljoin(
                 self.search_base, self.search_link % urllib.quote_plus(
                     cleantitle.query(
                         '%s Season %01d' %
                         (title.replace('\'', '-'), int(season)))))
             #sr = client.request(url, headers=headers, timeout='10')
             sr = self.scraper.get(url).content
             if sr:
                 r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                 r = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'a', ret='title')) for i in r]
                 r = [(i[0][0], i[1][0]) for i in r
                      if len(i[0]) > 0 and len(i[1]) > 0]
                 r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1]))
                      for i in r]
                 r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                      if len(i[1]) > 0]
                 r = [
                     i[0] for i in r if t == cleantitle.get(i[1])
                     and int(season) == int(i[2])
                 ][0]
             else:
                 url = urlparse.urljoin(
                     self.search_base, self.search_link % urllib.quote_plus(
                         cleantitle.query(
                             '%s %01d' %
                             (title.replace('\'', '-'), int(year)))))
                 #sr = client.request(url, headers=headers, timeout='10')
                 sr = self.scraper.get(url).content
                 if sr:
                     r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                     r = [(client.parseDOM(i, 'a', ret='href'),
                           client.parseDOM(i, 'a', ret='title')) for i in r]
                     r = [(i[0][0], i[1][0]) for i in r
                          if len(i[0]) > 0 and len(i[1]) > 0]
                     r = [(i[0], re.findall('(.+?) \((\d{4})', i[1]))
                          for i in r]
                     r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                          if len(i[1]) > 0]
                     r = [
                         i[0] for i in r
                         if t == cleantitle.get(i[1]) and year == i[2]
                     ][0]
         url = re.findall('(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         return url.encode('utf-8')
     except:
         return
Example #44
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return
            data = parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            title = data['title']
            year = data['year']

            tit = cleantitle.geturl(title + ' ' + year)
            query = urljoin(self.base_link, tit)

            r = client.request(query, referer=self.base_link, redirect=True)
            if not data['imdb'] in r:
                return sources

            links = []

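            # gather candidate links from the download tab, then the stream tab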
            try:
                down = client.parseDOM(r, 'div', attrs={'id':
                                                        'tab-download'})[0]
                down = client.parseDOM(down, 'a', ret='href')[0]
                data = client.request(down)
                frames = client.parseDOM(data,
                                         'div',
                                         attrs={'class': 'single-link'})
                frames = [
                    client.parseDOM(i, 'a', ret='href')[0] for i in frames if i
                ]
                for i in frames:
                    links.append(i)

            except Exception:
                pass
            try:
                streams = client.parseDOM(r, 'div', attrs={'id':
                                                           'tab-stream'})[0]
                streams = re.findall(r'''iframe src=(.+?) frameborder''',
                                     streams.replace('&quot;', ''),
                                     re.I | re.DOTALL)
                for i in streams:
                    links.append(i)
            except Exception:
                pass

            for url in links:
                try:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        valid, host = source_utils.is_host_valid(
                            url, hostprDict)
                        if not valid:
                            continue
                        else:
                            rd = True
                    else:
                        rd = False
                    #quality, _ = source_utils.get_release_quality(url, url)
                    quality = '720p'
                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)
                    if rd:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': True
                        })
                    else:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                except Exception:
                    pass
            return sources
        except:
            return sources
Example #45
	def tvmaze_list(self, url):
		try:
			result = client.request(url) # not json request
			next = ''
			if getSetting('tvshows.networks.view') == '0':
				result = client.parseDOM(result, 'section', attrs = {'id': 'this-seasons-shows'})
				items = client.parseDOM(result, 'span', attrs = {'class': 'title .*'})
				list_count = 60
			if getSetting('tvshows.networks.view') == '1':
				result = client.parseDOM(result, 'div', attrs = {'id': 'w1'})
				items = client.parseDOM(result, 'span', attrs = {'class': 'title'})
				list_count = 25
				page = int(str(url.split('&page=', 1)[1]))
				next = '%s&page=%s' % (url.split('&page=', 1)[0], page+1)
				last = []
				last = client.parseDOM(result, 'li', attrs = {'class': 'last disabled'})
				if last != []: next = ''
			items = [client.parseDOM(i, 'a', ret='href') for i in items]
			items = [i[0] for i in items if len(i) > 0]
			items = [re.findall(r'/(\d+)/', i) for i in items] #https://www.tvmaze.com/networks/645/tlc pulls tvmaze_id from link
			items = [i[0] for i in items if len(i) > 0]
			items = items[:list_count]
			sortList = items
		except:
			log_utils.error()
			return

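		# worker: resolve one tvmaze_id to full show metadata (one thread per id)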
		def items_list(tvmaze_id):
			# if i['metacache']: return # not possible with only a tvmaze_id
			try:
				values = {}
				values['next'] = next
				values['tvmaze'] = tvmaze_id
				url = self.tvmaze_info_link % tvmaze_id
				item = get_request(url) 
				values['content'] = item.get('type', '').lower()
				values['mediatype'] = 'tvshow'
				values['title'] = item.get('name')
				values['originaltitle'] = values['title']
				values['tvshowtitle'] = values['title']
				values['premiered'] = str(item.get('premiered', '')) if item.get('premiered') else ''
				try: values['year'] = values['premiered'][:4]
				except: values['year'] = ''
				ids = item.get('externals')
				imdb = str(ids.get('imdb', '')) if ids.get('imdb') else ''
				tvdb = str(ids.get('thetvdb', '')) if ids.get('thetvdb') else ''
				tmdb = '' # TVMaze does not have tmdb_id in api
				studio = item.get('network', {}) or item.get('webChannel', {})
				values['studio'] = studio.get('name', '')
				values['genre'] = []
				for i in item['genres']: values['genre'].append(i.title())
				if values['genre'] == []: values['genre'] = 'NA'
				values['duration'] = int(item.get('runtime', '')) * 60 if item.get('runtime') else ''
				values['rating'] = str(item.get('rating').get('average', '')) if item.get('rating').get('average') else ''
				values['plot'] = client.cleanHTML(item['summary'])
				values['status'] = item.get('status', '')
				values['castandart'] = []
				for person in item['_embedded']['cast']:
					try: values['castandart'].append({'name': person['person']['name'], 'role': person['character']['name'], 'thumbnail': (person['person']['image']['medium'] if person['person']['image']['medium'] else '')})
					except: pass
					if len(values['castandart']) == 150: break
				image = item.get('image', {}) or ''
				values['poster'] = image.get('original', '') if image else ''
				values['fanart'] = '' ; values['banner'] = ''
				values['mpaa'] = '' ; values['votes'] = ''
				try: values['airday'] = item['schedule']['days'][0]
				except: values['airday'] = ''
				values['airtime'] = item['schedule']['time'] or ''
				try: values['airzone'] = item['network']['country']['timezone']
				except: values['airzone'] = ''
				values['metacache'] = False 

#### -- Missing id's lookup -- ####
				if not tmdb and (imdb or tvdb):
					try:
						result = cache.get(tmdb_indexer.TVshows().IdLookup, 96, imdb, tvdb)
						tmdb = str(result.get('id', '')) if result.get('id') else ''
					except: tmdb = ''
				if not imdb or not tmdb or not tvdb:
					try:
						trakt_ids = trakt.SearchTVShow(quote_plus(values['tvshowtitle']), values['year'], full=False)
						if not trakt_ids: raise Exception
						ids = trakt_ids[0].get('show', {}).get('ids', {})
						if not imdb: imdb = str(ids.get('imdb', '')) if ids.get('imdb') else ''
						if not tmdb: tmdb = str(ids.get('tmdb', '')) if ids.get('tmdb') else ''
						if not tvdb: tvdb = str(ids.get('tvdb', '')) if ids.get('tvdb') else ''
					except:
						log_utils.error()
#################################
				if not tmdb:
					return log_utils.log('tvshowtitle: (%s) missing tmdb_id: ids={imdb: %s, tmdb: %s, tvdb: %s}' % (values['tvshowtitle'], imdb, tmdb, tvdb), __name__, log_utils.LOGDEBUG) # log TMDb shows that they do not have
				# self.list = metacache.fetch(self.list, self.lang, self.user)
				# if self.list['metacache'] is True: raise Exception()

				showSeasons = cache.get(tmdb_indexer.TVshows().get_showSeasons_meta, 96, tmdb)
				if not showSeasons: return
				showSeasons = dict((k, v) for k, v in iter(showSeasons.items()) if v is not None and v != '') # removes empty keys so .update() doesn't over-write good meta
				values.update(showSeasons)
				if not values.get('imdb'): values['imdb'] = imdb
				if not values.get('tmdb'): values['tmdb'] = tmdb
				if not values.get('tvdb'): values['tvdb'] = tvdb
				for k in ('seasons',): values.pop(k, None) # pop() keys from showSeasons that are not needed anymore
				if self.enable_fanarttv:
					extended_art = fanarttv_cache.get(fanarttv.get_tvshow_art, 168, tvdb)
					if extended_art: values.update(extended_art)
				meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'lang': self.lang, 'user': self.user, 'item': values} # DO NOT move this after "values = dict()" below or it becomes the same object and "del meta['item']['next']" removes it from both
				values = dict((k,v) for k, v in iter(values.items()) if v is not None and v != '')
				self.list.append(values)
				if 'next' in meta.get('item'): del meta['item']['next'] # next can not exist in metacache
				self.meta.append(meta)
				self.meta = [i for i in self.meta if i.get('tmdb')] # without this ui removed missing tmdb but it still writes these cases to metacache?
				metacache.insert(self.meta)
			except:
				log_utils.error()
		try:
			threads = []
			for tvmaze_id in items:
				threads.append(Thread(target=items_list, args=(tvmaze_id,)))
			[i.start() for i in threads]
			[i.join() for i in threads]
			sorted_list = []
			self.list = [i for i in self.list if i.get('tmdb') and i.get('tmdb') != '0'] # to rid missing tmdb_id's because season list can not load without
			for i in sortList:
				sorted_list += [item for item in self.list if str(item['tvmaze']) == str(i)]
			return sorted_list
		except:
			log_utils.error()
			return
Example #46
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i,
                                        'div',
                                        attrs={'class': 'news-title'}))
                 for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r
                 if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'
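                    # open the post and pull (release name, link) pairs from its body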
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data,
                                                 'div',
                                                 attrs={'id': 'r-content'})
                    data = re.findall(
                        '\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                        data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in name.lower()
                                     for i in ['dvdscr', 'r5', 'r6']):
                                quality = 'SCR'
                            elif any(i in name.lower() for i in [
                                    'camrip', 'tsrip', 'hdcam', 'hdts',
                                    'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'
                            ]):
                                quality = 'CAM'
                            else:
                                quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in url:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in name.lower()
                                   for i in ['hevc', 'h265', 'x265']):
                                info.append('HEVC')
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                    size)[-1]
                                div = 1 if size.endswith(
                                    ('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url
                                   for x in ['.rar', '.zip', '.iso', 'turk']):
                                continue

                            if 'ftp' in url:
                                host = 'COV'
                                direct = True
                            else:
                                direct = False
                                host = 'turbobit.net'
                            #if not host in hostDict: continue

                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': direct,
                                'debridonly': True
                            })

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Example #47
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            tvshowtitle = cleantitle.get(data['tvshowtitle'])
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            try:
                query = '%s season %01d' % (data['tvshowtitle'], int(season))
                query = base64.b64decode(
                    self.search_link) % urllib.quote_plus(query)

                result = client.source(query)
                result = json.loads(result)['results']

                r = [(i['url'], i['titleNoFormatting']) for i in result]
                r = [(i[0],
                      re.compile('(^Watch Full "|^Watch |)(.+)').findall(i[1]))
                     for i in r]
                r = [(i[0], i[1][0][-1]) for i in r if len(i[1]) > 0]
                r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1]))
                     for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/', '', i[0]), i[1], i[2])
                     for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
                r = [x for y, x in enumerate(r) if x not in r[:y]]
                r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
                u = [i[0] for i in r if season == '%01d' % int(i[2])][0]

            except:
                query = self.search2_link % urllib.quote_plus(
                    data['tvshowtitle'])
                query = urlparse.urljoin(self.base_link, query)

                result = client.source(query)

                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][-1]) for i in r
                     if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1]))
                     for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/', '', i[0]), i[1], i[2])
                     for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
                r = [x for y, x in enumerate(r) if x not in r[:y]]
                r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
                u = [i[0] for i in r if season == '%01d' % int(i[2])][0]

            url = urlparse.urljoin(self.base_link, u)
            url = urlparse.urlparse(url).path
            url += '?episode=%01d' % int(episode)
            url = url.encode('utf-8')
            return url
        except:
            return
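
The size-string normalisation in the example above recurs throughout these
scrapers: take the last size token in the release name, then convert
megabytes to gigabytes. A standalone sketch of the idiom, with hypothetical
release strings (the original's character class [^0-9|/.|/,] also admits
'|' and '/'; the sketch tightens it to digits, dots and commas):

import re

def parse_size_gb(text):
    # Grab the last size token, e.g. "1.4 GB" or "700 MB".
    size = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                      text)[-1]
    # Gigabyte units pass through; megabyte units are divided by 1024.
    div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
    value = float(re.sub(r'[^0-9.,]', '', size).replace(',', '.')) / div
    return '%.2f GB' % value

print(parse_size_gb('Some.Movie.2016.1080p.BluRay [1.4 GB]'))  # 1.40 GB
print(parse_size_gb('Some.Movie.2016.HDTV 700 MB'))            # 0.68 GB
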
Example #48
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      data['year'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)
            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            url = re.sub('/watching.html$', '', url.strip('/'))
            url = url + '/watching.html'

            #p = client.request(url, headers=headers, timeout='10')
            p = self.scraper.get(url).content

            if episode > 0:
                r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a'))
                r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0]) for i in r]
                r = [i[0] for i in r if int(i[1]) == episode][0]
                #p = client.request(r, headers=headers, timeout='10')
                p = self.scraper.get(r).content

            referer = url
            id = re.findall('load_player\(.+?(\d+)', p)[0]
            r = urlparse.urljoin(self.base_link,
                                 '/ajax/movie/load_player_v3?id=%s' % id)
            #r = client.request(r, headers=headers, referer=referer, XHR=True, timeout='10')
            r = self.scraper.get(r).content
            url = json.loads(r)['value']
            if (url.startswith('//')):
                url = 'https:' + url
            url = client.request(url,
                                 headers=headers,
                                 XHR=True,
                                 output='geturl',
                                 timeout='10')
            if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
                sources.append({
                    'source': 'openload.co',
                    'quality': 'HD',
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
                raise Exception()

            r = client.request(url, headers=headers, timeout='10')
            try:
                src = json.loads(r)['playlist'][0]
                links = re.findall('''file['"]:\s*u['"]([^'"]+)''', str(src))
                for i in links:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': 'SD',
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
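
The url parameter threaded through all of these sources() methods is a
URL-encoded metadata dictionary; the parse_qs dance at the top of Example #48
decodes it. A round-trip sketch (Python 2 stdlib, matching the imports these
examples assume):

import urllib
import urlparse

# Encode the metadata the way callers of sources() do ...
url = urllib.urlencode({'tvshowtitle': 'Some Show', 'year': '2016'})

# ... and decode it back, collapsing parse_qs's one-element lists.
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print(data)  # e.g. {'tvshowtitle': 'Some Show', 'year': '2016'}
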
Example #49
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response


            headers = {'X-Requested-With': 'XMLHttpRequest'}
            login = urlparse.urljoin(self.base_link, '/login')
            post = {'username': self.user, 'password': self.password, 'action': 'login'}
            post = urllib.urlencode(post)

            cookie = client.source(login, post=post, headers=headers, output='cookie')


            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, cookie=cookie)

            url = re.compile("embeds\[\d+\]\s*=\s*'([^']+)").findall(result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            try:
                url = re.compile('mplanet\*(.+)').findall(url)[0]
                url = url.rsplit('&')[0]
                dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), url)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            try:
                result = client.source(url)

                result = re.compile('sources\s*:\s*\[(.*?)\]', re.DOTALL).findall(result)[0]
                result = re.compile('''['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*([^'"]+)''', re.DOTALL).findall(result)
            except:
                pass

            try:
                u = result[0][0]
                if not 'download.php' in u and not '.live.' in u: raise Exception()
                o = urllib2.build_opener(NoRedirection)
                o.addheaders = [('User-Agent', client.randomagent()), ('Cookie', cookie)]
                r = o.open(u)
                try: u = r.headers['Location']
                except: pass
                r.close()
                links += [(u, '1080p', 'cdn')]
            except:
                pass
            try:
                u = [(i[0], re.sub('[^0-9]', '', i[1])) for i in result]
                u = [(i[0], i[1]) for i in u if i[1].isdigit()]
                links += [(i[0], '1080p', 'gvideo') for i in u if int(i[1]) >= 1080]
                links += [(i[0], 'HD', 'gvideo') for i in u if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD', 'gvideo') for i in u if 480 <= int(i[1]) < 720]
            except:
                pass


            for i in links: sources.append({'source': i[2], 'quality': i[1], 'provider': 'Moviesplanet', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
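
Example #49 buckets links into quality tiers by the numeric part of each
label: 1080 and up is '1080p', 720-1079 'HD', 480-719 'SD', and anything
lower is dropped. The same logic as a small helper, a sketch only:

import re

def quality_tier(label):
    digits = re.sub('[^0-9]', '', str(label))
    if not digits:
        return None
    n = int(digits)
    if n >= 1080:
        return '1080p'
    if n >= 720:
        return 'HD'
    if n >= 480:
        return 'SD'
    return None  # below 480: skipped, as in the example

print(quality_tier('1080'))  # 1080p
print(quality_tier('720p'))  # HD
print(quality_tier('360'))   # None
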
Example #50
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None:
                return sources
            if debrid.status() is False:
                raise Exception()
            if debrid.tor_enabled() is False:
                raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            category = '+category%3ATV' if 'tvshowtitle' in data else '+category%3AMovies'
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url) + str(category)
            html = client.request(url)
            html = html.replace('&nbsp;', ' ')
            try:
                results = client.parseDOM(
                    html,
                    'table',
                    attrs={
                        'class': 'table table-condensed table-torrents vmiddle'
                    })[0]
            except:
                return sources
            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return sources
            for entry in rows:
                try:
                    try:
                        name = re.findall('<a class=".+?>(.+?)</a>', entry,
                                          re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name).replace(
                            '<hl>', '').replace('</hl>', '')
                        if not cleantitle.get(title) in cleantitle.get(name):
                            continue
                    except:
                        continue
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr:
                        continue
                    try:
                        seeders = int(
                            re.findall(
                                'class="progress prog trans90" title="Seeders: (.+?) \|',
                                entry, re.DOTALL)[0])
                    except:
                        continue
                    if self.min_seeders > seeders:
                        continue
                    try:
                        link = 'magnet:%s' % (re.findall(
                            'href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        link = str(
                            client.replaceHTMLCodes(link).split('&tr')[0])
                        if link in str(sources):
                            continue
                    except:
                        continue
                    quality, info = source_utils.get_release_quality(
                        name, name)
                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            entry)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    info = ' | '.join(info)
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    continue
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check
            return sources
        except:
            return sources
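
The magnet handling in Example #50 strips tracker parameters before
de-duplicating, since the same torrent is listed with varying '&tr=' lists.
A sketch with a hypothetical magnet href:

def clean_magnet(href):
    # Keep only the infohash/name portion of the magnet URI; trackers
    # differ between listings and would defeat de-duplication.
    return 'magnet:' + href.split('&tr')[0]

m = clean_magnet('?xt=urn:btih:ABC123&dn=Some.Movie&tr=udp%3A%2F%2Ftracker')
print(m)  # magnet:?xt=urn:btih:ABC123&dn=Some.Movie
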
Example #51
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            try:
                if 'tvshowtitle' in data:
                    epi = 'EP%d' % int(data['episode'])
                    links = self.searchShow(data['tvshowtitle'],
                                            data['season'])
                    url = [i[1] for i in links if epi.lower() == i[0].lower()]

                else:
                    url = self.searchMovie(data['title'], data['year'])
                    try:
                        url = client.parseDOM(url,
                                              'iframe',
                                              ret='src',
                                              attrs={'id': 'advanced_iframe'})
                    except:
                        url = re.findall(
                            '''<h4>server\d+</h4>.+?src=['"]([^'"]+)''', url,
                            re.I | re.DOTALL)

            except:
                pass

            for u in url:
                if 'entervideo' in u:
                    r = client.request(u)
                    url = client.parseDOM(r, 'source', ret='src')[0]
                    quality, info = source_utils.get_release_quality(url, url)
                    sources.append({
                        'source': 'CDN',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })
                elif 'vidnode' in u:
                    headers = {
                        'Host': 'vidnode.net',
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
                        'Upgrade-Insecure-Requests': '1',
                        'Accept-Language': 'en-US,en;q=0.9'
                    }
                    r = client.request(u, headers=headers)
                    links = re.findall(
                        '''\{file:\s*['"]([^'"]+).*?label:\s*['"](\d+\s*P)['"]''',
                        r, re.DOTALL | re.I)
                    for u, qual in links:
                        quality, info = source_utils.get_release_quality(
                            qual, u)
                        url = u
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })

            return sources
        except:
            return sources
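
The vidnode branch above lifts file/label pairs straight out of an embedded
JW Player-style setup block. A self-contained sketch against a hypothetical
player snippet:

import re

html = """jwplayer('player').setup({sources:[
    {file: 'http://cdn.example.com/ep1-360.mp4', label: '360 P'},
    {file: 'http://cdn.example.com/ep1-720.mp4', label: '720 P'}]});"""

links = re.findall(
    r'''\{file:\s*['"]([^'"]+).*?label:\s*['"](\d+\s*P)['"]''',
    html, re.DOTALL | re.I)
print(links)
# [('http://cdn.example.com/ep1-360.mp4', '360 P'),
#  ('http://cdn.example.com/ep1-720.mp4', '720 P')]
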
Example #52
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            url = url.replace('/watching.html', '')

            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try:
                url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(
                    url)[0]
            except:
                pass

            url = urlparse.urljoin(self.base_link, url) + '/watching.html'

            result = cloudflare.source(url)
            movie = client.parseDOM(result,
                                    'div',
                                    ret='movie-id',
                                    attrs={'id': 'media-player'})[0]

            try:
                quality = client.parseDOM(result,
                                          'span',
                                          attrs={'class':
                                                 'quality'})[0].lower()
            except:
                quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd': quality = 'HD'
            else: quality = 'SD'

            url = '/movie/loadepisodes/%s' % movie
            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'les-content'})
            result = zip(client.parseDOM(result, 'a', ret='onclick'),
                         client.parseDOM(result, 'a', ret='episode-id'),
                         client.parseDOM(result, 'a'))
            result = [(re.sub('[^0-9]', '', i[0].split(',')[0]),
                       re.sub('[^0-9]', '', i[0].split(',')[-1]), i[1],
                       ''.join(re.findall('(\d+)', i[2])[:1])) for i in result]
            result = [(i[0], i[1], i[2], i[3]) for i in result]

            if content == 'episode':
                result = [i for i in result if i[3] == '%01d' % int(episode)]

            links = [('movie/load_episode/%s/%s' % (i[2], i[1]), 'gvideo')
                     for i in result if 2 <= int(i[0]) <= 11]

            for i in links:
                sources.append({
                    'source': i[1],
                    'quality': quality,
                    'provider': 'Onemovies',
                    'url': i[0],
                    'direct': True,
                    'debridonly': False
                })

            links = []
            links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'openload.co')
                      for i in result if i[0] == '14']
            #links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'videomega.tv') for i in result if i[0] == '13']
            #links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'videowood.tv') for i in result if i[0] == '12']

            for i in links:
                sources.append({
                    'source': i[1],
                    'quality': quality,
                    'provider': 'Onemovies',
                    'url': i[0],
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #53
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'link')[0]
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', t)
                    s = s[0] if s else '0'

                    items += [(t, u, s) ]

                except:
                    pass

            urls = []
            for item in items:

                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, item[1])
                    if any(x in quality for x in ['CAM', 'SD']): continue

                    try:
                        size = re.sub('i', '', item[2])
                        div = 1 if size.endswith('GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    links = self.links(url)
                    urls += [(i, quality, info) for i in links]

                except:
                    pass

            for item in urls:

                if 'earn-money' in item[0]: continue
                if any(x in item[0] for x in ['.rar', '.zip', '.iso']): continue
                url = client.replaceHTMLCodes(item[0])
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': item[1], 'language': 'en', 'url': url, 'info': item[2], 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
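
Several of these torrent scrapers gate on an 'hdlr' string: a release is
accepted only when its last year or SxxExx token equals the expected one.
A sketch of that check (the pipes inside the original character classes are
redundant and dropped here):

import re

def release_matches(name, season=None, episode=None, year=None):
    hdlr = 'S%02dE%02d' % (season, episode) if season else str(year)
    tokens = re.findall(r'[\.\(\[\s](\d{4}|S\d*E\d*|S\d*)[\.\)\]\s]', name)
    return bool(tokens) and tokens[-1].upper() == hdlr

print(release_matches('Show.Name.S01E02.720p.WEB.x264', season=1, episode=2))  # True
print(release_matches('Movie.Name.2016.1080p.BluRay.x264', year=2016))         # True
print(release_matches('Movie.Name.2017.1080p.BluRay.x264', year=2016))         # False
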
Example #54
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not debrid.status(): raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'h2')
            r = [
                re.findall('''<a.+?href=["']([^"']+)["']>(.+?)</a>''', i,
                           re.DOTALL) for i in r
            ]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[0][1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    t1 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(
                        urlparse.urljoin(self.base_link, item[0][0]))
                    data = dom_parser2.parse_dom(data,
                                                 'a',
                                                 attrs={'target': '_blank'})
                    u = [(t, i.content) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            name)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if not url.startswith('http'): continue
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
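
Example #54 trims release names down to a bare title before comparing with
cleantitle.get(): everything from the first year / SxxExx / 3D token onward
is cut. As a standalone helper, sketched with hypothetical names:

import re

def bare_title(name):
    return re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                  '', name)

print(bare_title('Some.Movie.2016.1080p.BluRay.x264'))  # Some.Movie
print(bare_title('Show Name S02E05 720p HDTV'))         # Show Name
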
Example #55
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                ep = data['episode']
                url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                    self.base_link, cleantitle.geturl(
                        data['tvshowtitle']), int(data['season']), ep)
                url = client.request(url,
                                     headers=headers,
                                     timeout='10',
                                     output='geturl')

                if url == None:
                    url = self.searchShow(data['tvshowtitle'], data['season'],
                                          aliases, headers)

            else:
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            if url == None: raise Exception()

            r = client.request(url, headers=headers, timeout='10')
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r,
                                        'a',
                                        attrs={'episode-data': ep},
                                        ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = client.request(link, headers=headers, timeout='10')
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try:
                            sources.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'language':
                                'en',
                                'url':
                                i,
                                'direct':
                                True,
                                'debridonly':
                                False
                            })
                        except:
                            pass
                else:
                    try:
                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if not host in hostDict: raise Exception()
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass

            return sources
        except:
            return sources
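
The host whitelisting above reduces each link to the last two labels of its
hostname before the hostDict lookup. A sketch (Python 2 urlparse, as in the
examples):

import re
import urlparse

def host_of(link):
    # "http://www.Example.com/embed/x" -> "example.com"
    netloc = urlparse.urlparse(link.strip().lower()).netloc
    return re.findall(r'([\w]+[.][\w]+)$', netloc)[0]

print(host_of('http://www.Example.com/embed/abc123'))  # example.com
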
Example #56
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']

                if 'tvshowtitle' in data:
                    y = str((int(data['year']) + int(data['season'])) - 1)
                    url = '/watch/%s-s%02d-%s-online.html' % (
                        cleantitle.geturl(title), int(data['season']), y)
                    episode = '%01d' % int(data['episode'])
                else:
                    url = '/watch/%s-%s-online.html' % (
                        cleantitle.geturl(title), data['year'])
                    year = data['year']
                    episode = None

                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url, output='geturl')

                if 'error.html' in r: raise Exception()
            else:
                try:
                    url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
                except:
                    episode = None
                try:
                    episode = '%01d' % int(data['episode'])
                except:
                    pass

            r = client.request(url)

            h = {
                'User-Agent': client.agent(),
                'X-Requested-With': 'XMLHttpRequest'
            }

            ip = client.parseDOM(r,
                                 'input',
                                 ret='value',
                                 attrs={'name': 'phimid'})[0]
            ep = episode if not episode == None else '1'

            p = {
                'ip_film': ip,
                'ip_name': ep,
                'ipplugins': '1',
                'ip_server': '11'
            }
            p = urllib.urlencode(p)

            u = '/ip.file/swf/plugins/ipplugins.php'
            u = urlparse.urljoin(self.base_link, u)

            r = client.request(u, post=p, headers=h, referer=url)
            r = json.loads(r)

            u = '/ip.file/swf/ipplayer/ipplayer.php'
            u = urlparse.urljoin(self.base_link, u)

            p = {'u': r['s'], 's': r['v'], 'w': '100%', 'h': '360', 'n': '0'}
            p = urllib.urlencode(p)

            r = client.request(u, post=p, headers=h, referer=url)
            r = json.loads(r)['data']

            u = [i['files'] for i in r if 'files' in i]

            for i in u:
                try:
                    sources.append({
                        'source':
                        'gvideo',
                        'quality':
                        directstream.googletag(i)[0]['quality'],
                        'provider':
                        'Sockshare',
                        'url':
                        i,
                        'direct':
                        True,
                        'debridonly':
                        False
                    })
                except:
                    pass

            return sources
        except:
            return sources
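
Example #56's two-step player handshake is plain URL-encoded POSTs sent with
an XHR header. A sketch of the payload construction, with hypothetical field
values (Python 2 urllib):

import urllib

headers = {
    'User-Agent': 'Mozilla/5.0',
    'X-Requested-With': 'XMLHttpRequest',
}
post = urllib.urlencode({
    'ip_film': '12345',  # the hidden "phimid" input scraped from the page
    'ip_name': '1',      # episode number; '1' when there is no episode
    'ipplugins': '1',
    'ip_server': '11',
})
print(post)  # e.g. ip_film=12345&ipplugins=1&ip_server=11&ip_name=1
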
Example #57
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.scraper.get(u).content
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                u = urlparse.urljoin(self.base_link, self.info_link % mid)
                quality = self.scraper.get(u).content
                quality = dom_parser.parse_dom(quality,
                                               'div',
                                               attrs={'class': 'jtip-quality'
                                                      })[0].content
                if quality == "HD":
                    quality = "720p"
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except Exception:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            if eid[1] != '6':
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % eid[0])
                                link = self.scraper.get(url).content
                                link = json.loads(link)['src']
                                valid, host = source_utils.is_host_valid(
                                    link, hostDict)
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'info': [],
                                    'direct': False,
                                    'debridonly': False
                                })
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid))
                                script = self.scraper.get(url).content
                                if '$_$' in script:
                                    params = self.uncensored1(script)
                                elif script.startswith(
                                        '[]') and script.endswith('()'):
                                    params = self.uncensored2(script)
                                elif '_x=' in script:
                                    x = re.search('''_x=['"]([^"']+)''',
                                                  script).group(1)
                                    y = re.search('''_y=['"]([^"']+)''',
                                                  script).group(1)
                                    params = {'x': x, 'y': y}
                                else:
                                    raise Exception()

                                u = urlparse.urljoin(
                                    self.base_link, self.source_link %
                                    (eid[0], params['x'], params['y']))
                                r = self.scraper.get(u).content
                                url = json.loads(r)['playlist'][0]['sources']
                                url = [i['file'] for i in url if 'file' in i]
                                url = [directstream.googletag(i) for i in url]
                                url = [i[0] for i in url if i]

                                for s in url:
                                    if 'lh3.googleusercontent.com' in s['url']:
                                        s['url'] = directstream.googleredirect(
                                            s['url'])

                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': s['quality'],
                                        'language': 'en',
                                        'url': s['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except Exception:
                        pass
            except Exception:
                pass

            return sources
        except Exception:
            return sources
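
When Example #57 meets the '_x='/'_y=' variant, it lifts both tokens out of
the returned script with two regex searches. The same extraction against a
hypothetical token blob:

import re

script = "var _x='abc123xyz'; var _y='def456uvw'; load(_x, _y);"

x = re.search(r'''_x=['"]([^"']+)''', script).group(1)
y = re.search(r'''_y=['"]([^"']+)''', script).group(1)
print({'x': x, 'y': y})  # {'x': 'abc123xyz', 'y': 'def456uvw'}
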
Example #58
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url).replace('%3A+', '+')

            r = client.request(url)
            if r is None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                url = title

                r = client.request(url)

            for loopCount in range(0, 2):
                if loopCount == 1 or (r is None and 'tvshowtitle' in data):

                    r = client.request(url)

                posts = client.parseDOM(r, "h2", attrs={"class": "postTitle"})
                hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            name = str(i)
                            items.append(name)
                    except:
                        pass

                if len(items) > 0:
                    break

            for item in items:
                try:
                    i = str(item)
                    r = client.request(i)
                    u = client.parseDOM(r, "div", attrs={"class": "postContent"})
                    for t in u:
                        r = client.parseDOM(t, 'a', ret='href')
                        for url in r:
                            quality, info = source_utils.get_release_quality(url)
                            if 'SD' in quality:
                                continue
                            valid, host = source_utils.is_host_valid(url, hostDict)
                            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass
            return sources
        except:
            return sources
Example #59
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user != '' and self.password != ''): #raise Exception()

                login = urlparse.urljoin(self.base_link, '/login.html')

                post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

                cookie = client.request(login, post=post, output='cookie', close=False)

                r = client.request(login, post=post, cookie=cookie, output='extended')

                headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
                headers = {}


            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                year = data['year']

                query = urlparse.urljoin(self.base_link, self.search_link)

                post = urllib.urlencode({'searchapi2': title})

                r = client.request(query, post=post, headers=headers)

                if 'tvshowtitle' in data:
                    r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                else:
                    r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]

                r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                r = [i[0] for i in r][0]

                u = urlparse.urljoin(self.base_link, r)
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if not 'failed' in r: break

                if 'season' in data and 'episode' in data:
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower()][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                r = client.request(r, post=post, headers=headers)
            quality = 'HD' if '-movie-' in r else 'SD'

            try:
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall('\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                try:
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    url = re.findall(r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',r,re.DOTALL)[0][1]

            sources.append({'source': 'cdn', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False, 'autoplay': True})

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('StreamLord - Exception: \n' + str(failure))
            return sources
Example #60
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                    data['premiered'])[0][0]
                episode = '%01d' % int(data['episode'])
                url = '%s/tv-series/%s-season-%01d/watch/' % (
                    self.base_link, cleantitle.geturl(
                        data['tvshowtitle']), int(data['season']))
                url = client.request(url,
                                     headers=headers,
                                     timeout='10',
                                     output='geturl')

                if url == None:
                    url = self.searchShow(data['tvshowtitle'], data['season'],
                                          aliases, headers)

            else:
                episode = None
                year = data['year']
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            referer = url
            r = client.request(url)
            if episode == None:
                y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
                if not year == y: raise Exception()

            r = client.parseDOM(r, 'div', attrs={'class': 'sli-name'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            if not episode == None:
                r = [
                    i[0] for i in r
                    if i[1].lower().startswith('episode %02d:' %
                                               int(data['episode']))
                ]
            else:
                r = [i[0] for i in r]

            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')
                    t = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                    if t == 'embed': raise Exception()
                    headers = {'Origin': self.base_link}
                    eid = client.parseDOM(p,
                                          'input',
                                          ret='value',
                                          attrs={'name': 'episodeID'
                                                 })[0].encode('utf-8')
                    r = client.request(self.token_link,
                                       post=urllib.urlencode({'id': eid}),
                                       headers=headers,
                                       referer=referer,
                                       timeout='10',
                                       XHR=True)
                    isV2 = False

                    try:
                        js = json.loads(r)
                        hash = js['hash']
                        token = js['token']
                        _ = js['_']
                        url = self.grabber_link % (eid, hash, token, _)
                        u = client.request(url,
                                           headers=headers,
                                           referer=referer,
                                           timeout='10',
                                           XHR=True)
                        js = json.loads(u)
                    except:
                        isV2 = True
                        pass

                    if isV2:
                        mid = re.compile('.?id:\s+"(\d+)"').findall(
                            p)[0].encode('utf-8')
                        timestamp = str(int(time.time() * 1000))
                        url = self.token_v2_link % (eid, mid, timestamp)
                        script = client.request(url,
                                                headers=headers,
                                                referer=referer,
                                                timeout='10',
                                                XHR=True)
                        script = self.aadecode(script)
                        if 'hash' in script and 'token' in script:
                            hash = re.search('''hash\s+=\s+['"]([^"']+)''',
                                             script).group(1).encode('utf-8')
                            token = re.search('''token\s+=\s+['"]([^"']+)''',
                                              script).group(1).encode('utf-8')
                            _ = re.search('''_\s+=\s+['"]([^"']+)''',
                                          script).group(1).encode('utf-8')
                            url = self.grabber_link % (eid, hash, token, _)
                            u = client.request(url,
                                               headers=headers,
                                               referer=referer,
                                               timeout='10',
                                               XHR=True)
                            js = json.loads(u)

                    try:
                        u = js['playlist'][0]['sources']
                        u = [i['file'] for i in u if 'file' in i]

                        for i in u:
                            try:
                                sources.append({
                                    'source':
                                    'gvideo',
                                    'quality':
                                    directstream.googletag(i)[0]['quality'],
                                    'language':
                                    'en',
                                    'url':
                                    i,
                                    'direct':
                                    True,
                                    'debridonly':
                                    False
                                })
                            except:
                                pass
                    except:
                        pass

                    try:
                        u = js['backup']
                        u = urlparse.parse_qs(urlparse.urlsplit(u).query)
                        u = dict([(i, u[i][0]) if u[i] else (i, '')
                                  for i in u])
                        eid = u['eid']
                        mid = u['mid']

                        if isV2:
                            p = client.request(self.backup_token_link_v2 %
                                               (eid, mid, _),
                                               XHR=True,
                                               referer=referer,
                                               timeout='10')
                            x = re.search('''_x=['"]([^"']+)''', p).group(1)
                            y = re.search('''_y=['"]([^"']+)''', p).group(1)
                            u = client.request(self.backup_link_v2 %
                                               (eid, x, y),
                                               referer=referer,
                                               XHR=True,
                                               timeout='10')
                            js = json.loads(u)
                        else:
                            p = client.request(self.backup_token_link %
                                               (eid, mid, _),
                                               XHR=True,
                                               referer=referer,
                                               timeout='10')
                            x = re.search('''_x=['"]([^"']+)''', p).group(1)
                            y = re.search('''_y=['"]([^"']+)''', p).group(1)
                            u = client.request(self.backup_link % (eid, x, y),
                                               referer=referer,
                                               XHR=True,
                                               timeout='10')
                            js = json.loads(u)

                        try:
                            u = js['playlist'][0]['sources']
                            u = [i['file'] for i in u if 'file' in i]

                            for i in u:
                                try:
                                    sources.append({
                                        'source':
                                        'gvideo',
                                        'quality':
                                        directstream.googletag(i)[0]
                                        ['quality'],
                                        'language':
                                        'en',
                                        'url':
                                        i,
                                        'direct':
                                        True,
                                        'debridonly':
                                        False
                                    })
                                except:
                                    pass
                        except:
                            pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
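
All of these sources() methods return the same record shape; the field list
below is inferred from the examples above (some older ones substitute
'provider' for 'language', and 'info' or 'autoplay' appear optionally).
A minimal validator sketch:

REQUIRED = ('source', 'quality', 'language', 'url', 'direct', 'debridonly')

def valid_source(item):
    # True when a scraper result carries every required field.
    return all(k in item for k in REQUIRED)

print(valid_source({'source': 'gvideo', 'quality': 'SD', 'language': 'en',
                    'url': 'http://cdn.example.com/v.mp4', 'direct': True,
                    'debridonly': False}))  # True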