Code example #1
    def movie(self, imdb, title, year):
        try:
            t = cleantitle.get(title)

            try:
                query = self.search2_link % urllib.quote_plus(title)
                query = urlparse.urljoin(self.base_link, query)

                result = client.request(query)

                r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
                r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
                r = [x for y,x in enumerate(r) if x not in r[:y]]
                r = [i[0] for i in r if t == cleantitle.get(i[1])]

                for i in r:
                    url = self._info(i, year)
                    if not url == None: return url
            except:
                pass

        except:
            return
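Nearly every example on this page compares candidates with cleantitle.get, which comes from the addon's shared library and is not shown here. A minimal stand-in, assuming the helper simply lowercases the title and strips every non-alphanumeric character:

    import re

    # Hypothetical stand-in for the shared cleantitle.get helper (assumed
    # behavior: lowercase, then drop anything that is not a letter or digit).
    def get(title):
        if title is None:
            return ''
        return re.sub('[^a-z0-9]', '', title.lower())

    # Under this normalization, get('The X-Files (1993)') == get('the x files 1993').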
Code example #2
    def __search(self, titles, year, season='0'):
        try:
            query = self.search_link % (urllib.quote_plus(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'list_movies'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item_movie'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'tit'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:\s*-?\s*(?:season|s))\s*(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # entries with a year sort before those without
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            return source_utils.strip_domain(r)
        except:
            return
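source_utils.strip_domain is also imported from the shared library. Judging by the regex (?://.+?|)(/.+) that later examples apply inline, it reduces an absolute URL to its path so the result can be re-joined against base_link; a sketch under that assumption:

    import re

    # Assumed behavior of source_utils.strip_domain: keep only the path part,
    # so a result like '/serie/x_files' survives a domain change.
    def strip_domain(url):
        match = re.findall('(?://.+?|)(/.+)', url)
        return match[0] if match else url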
Code example #3
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = cache.get(client.request, 1, search_url)
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
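The cache.get(client.request, 1, search_url) call in example #3 means "call client.request(search_url) and memoize the result", with the second argument presumably a lifetime in hours. The real addon persists to a database; an in-memory sketch of the same call shape (all names assumed):

    import time

    _store = {}

    # Hypothetical in-memory version of cache.get(function, duration, *args).
    def get(function, duration_hours, *args):
        key = (function.__name__,) + args
        hit = _store.get(key)
        if hit and time.time() - hit[0] < duration_hours * 3600:
            return hit[1]
        result = function(*args)
        _store[key] = (time.time(), result)
        return result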
Code example #4
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            pages = []
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})

            season_base = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', season_base)
            tvshowtitle = data['tvshowtitle']
            tvshowtitle = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', tvshowtitle)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "+")
            tvshowtitle = tvshowtitle.replace("&", "and")
            tvshowtitle = tvshowtitle.replace("  ", " ")
            tvshowtitle = tvshowtitle.replace(" ", "+")

            start_url = urlparse.urljoin(self.base_link, self.search_link % (tvshowtitle, query))

            html = client.request(start_url)
            results = client.parseDOM(html, 'h2', attrs={'class':'entry-title'})
            for content in results:
                found_link = client.parseDOM(content, 'a', ret='href')[0]
                if self.base_link in found_link:
                    if cleantitle.get(data['tvshowtitle']) in cleantitle.get(found_link):
                        if cleantitle.get(season_base) in cleantitle.get(found_link):
                            pages.append(found_link)
            return pages
        except:
            failure = traceback.format_exc()
            log_utils.log('ALLRLS - Exception: \n' + str(failure))
            return pages
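The sanitization in example #4 can be exercised on its own; a small demo of the same strip-and-plus-join passes (helper name hypothetical):

    import re

    def to_query(text):
        # Same cleanup as above: drop search metacharacters, spell out '&',
        # collapse double spaces, then plus-join for use in a URL.
        text = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', text)
        return text.replace('&', 'and').replace('  ', ' ').replace(' ', '+')

    # to_query("Marvel's Agents of S.H.I.E.L.D.") -> 'Marvels+Agents+of+S.H.I.E.L.D.'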
Code example #5
    def matchAlias(self, title, aliases):
        try:
            for alias in aliases:
                if cleantitle.get(title) == cleantitle.get(alias['title']):
                    return True
            # No alias matched after normalization.
            return False
        except:
            return False
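Callers pass aliases as a list of dicts with a 'title' key, the shape the other scrapers on this page use. With the cleantitle.get stand-in sketched under example #1:

    aliases = [{'title': 'The Office (US)'}, {'title': 'The Office'}]
    any(get('the office us') == get(a['title']) for a in aliases)  # -> True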
Code example #6
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            langMap = {'hi':'hindi', 'ta':'tamil', 'te':'telugu', 'ml':'malayalam', 'kn':'kannada', 'bn':'bengali', 'mr':'marathi', 'pa':'punjabi'}

            lang = 'http://www.imdb.com/title/%s/' % imdb
            lang = client.request(lang)
            lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
            lang = [i for i in lang if 'primary_language' in i]
            lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
            lang = [i['primary_language'] for i in lang if 'primary_language' in i]
            lang = langMap[lang[0][0]]

            q = self.search_link % (lang, urllib.quote_plus(title))
            q = urlparse.urljoin(self.base_link, q)

            t = cleantitle.get(title)

            r = client.request(q)

            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs = {'class': 'info'})) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
            r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = str(r)
            return url
        except:
            return
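Example #6 detects the film's language by scraping IMDb for hrefs containing primary_language and reading the value out of the query string. With a hypothetical href, the extraction step behaves like this:

    import urlparse  # Python 2 module; urllib.parse in Python 3

    # Hypothetical href of the kind collected from the IMDb title page:
    href = '/search/title?primary_language=hi&sort=num_votes,desc'
    query = urlparse.parse_qs(urlparse.urlparse(href).query)
    query['primary_language']  # ['hi'] -> langMap maps this to 'hindi'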
Code example #7
File: filmyto.py (project: CYBERxNUKE/xbmc-addon)
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)
            result = client.parseDOM(result, 'div', attrs={'class': 'movie clearfix'})
            result = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'span', attrs={'class': 'title-pl'}),
                  client.parseDOM(i, 'span', attrs={'class': 'title-en'}),
                  client.parseDOM(i, 'img', ret='src'),
                  client.parseDOM(i, 'p'),
                  client.parseDOM(i, 'p', attrs={'class': 'plot'})) for i in result ]

            result = [(i[0][0], u" ".join(i[1] + i[2]), re.findall('(\d{4})', i[4][0])) for i in result]

            result = [i for i in result if cleantitle.get(title) in cleantitle.get(i[1])]
            years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]


            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except :
            return
Code example #8
    def __search(self, titles, type, year, season=0, episode=False):
        try:
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            years = ['&veroeffentlichung[]=%s' % i for i in years]

            query = self.search_link % (type, urllib.quote_plus(cleantitle.query(titles[0])))
            query += ''.join(years)
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = self.__proceed_search(query)
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and int(i[2]) == int(season)][0]

            url = source_utils.strip_domain(r)
            if episode:
                r = client.request(urlparse.urljoin(self.base_link, url))
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'season-list'})
                r = dom_parser.parse_dom(r, 'li')
                r = dom_parser.parse_dom(r, 'a', req='href')
                r = [i.attrs['href'] for i in r if i and int(i.content) == int(episode)][0]

                url = source_utils.strip_domain(r)
            return url
        except:
            return
Code example #9
File: primewire.py (project: CYBERxNUKE/xbmc-addon)
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = client.replaceHTMLCodes(url[0][0])
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
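Examples #9, #10 and #12 share the same three-stage fallback when picking an episode row. Isolated as a plain function over rows shaped (href, episode_name, air_date), reusing the get stand-in from example #1's note:

    def pick_episode(rows, t, premiered, season, episode):
        # 1) normalized title plus air date, 2) air date alone,
        # 3) the 'season-X-episode-Y' slug embedded in the href.
        matches = [i for i in rows if t == get(i[1]) and premiered == i[2]][:1]
        if not matches:
            matches = [i for i in rows if premiered == i[2]]
        if not matches or len(matches) > 1:
            slug = 'season-%01d-episode-%01d' % (int(season), int(episode))
            matches = [i for i in rows if slug in i[0]]
        return matches[0][0] if matches else None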
Code example #10
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = proxy.request(url, 'tv shows')
            r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception() 

            url = url[0][0]
            url = proxy.parse(url)

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #11
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            tv_maze = tvmaze.tvMaze()
            tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
            tvshowtitle = tvshowtitle['name']

            t = cleantitle.get(tvshowtitle)

            q = urlparse.urljoin(self.base_link, self.search_link)
            q = q % urllib.quote_plus(tvshowtitle)

            r = client.request(q)

            r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('GoGoAnime - Exception: \n' + str(failure))
            return
Code example #12
File: watchfree.py (project: vphuc81/MyRepository)
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'tv_episode_item')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            months = {'01': 'January', '02': 'February', '03': 'March', '04': 'April',
                      '05': 'May', '06': 'June', '07': 'July', '08': 'August',
                      '09': 'September', '10': 'October', '11': 'November', '12': 'December'}
            premiered = '%s %01d %s' % (months[premiered[1]], int(premiered[2]), premiered[0])

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = url[0][0]
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #13
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            t = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle.query(tvshowtitle))
            p = urllib.urlencode({'term': q})

            r = client.request(self.search_link, post=p, XHR=True)
            try: r = json.loads(r)
            except: r = None

            if r:
                r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
            else:
                r = requests.get(self.search_link_2 % q).text
                r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
                r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = r[0][0]
            url = proxy.parse(url)

            url = url.strip('/').split('/')[-1]
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #14
	def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
		try:
			query = self.search_link % urllib.quote_plus(cleantitle.query(tvshowtitle))

			# request the page up to four times to work around the site's random 404s;
			# responses (legit and bogus 404s) are fast, so the short timeout is fine
			for i in range(4):
				result = client.request(query, timeout=3)
				if not result == None: break
			

			t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
			t = [cleantitle.get(i) for i in set(t) if i]
			result = re.compile('itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)', re.DOTALL).findall(result)
			for i in result:
				if cleantitle.get(cleantitle.normalize(i[1])) in t and year in i[1]: url = i[0]

			url = url.encode('utf-8')
			
			#log_utils.log('\n\n~~~ outgoing tvshow() url')
			#log_utils.log(url)
			
			# returned 'url' format like: /serie/x_files 
			return url
		except:
			return
Code example #15
    def __search(self, title, season):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.query(title)))
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(title)

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
            r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
            r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(i[2]) == int(season)][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #16
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0] + ' ' + year)))
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})
            r = client.parseDOM(r, 'h3')

            for i in r:
                data = re.findall('<span.*?>(.+?)</span>.+?date">\s*\((\d{4}).*?</span>', i, re.DOTALL)
                for found_title, found_year in data:
                    # Do not shadow the year argument: binding the loop
                    # variable to 'year' and comparing it to itself meant
                    # the year filter never actually applied.
                    if cleantitle.get(found_title) in t and found_year == year:
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        return source_utils.strip_domain(url)

            return
        except:
            return
Code example #17
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            r = 'search/tvdb/%s?type=show&extended=full' % tvdb
            r = json.loads(trakt.getTrakt(r))
            if not r: return '0'

            d = r[0]['show']['genres']
            if not ('anime' in d or 'animation' in d): return '0'

            tv_maze = tvmaze.tvMaze()
            tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
            tvshowtitle = tvshowtitle['name']

            t = cleantitle.get(tvshowtitle)

            q = urlparse.urljoin(self.base_link, self.search_link)
            q = q % urllib.quote_plus(tvshowtitle)

            r = client.request(q)

            r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #18
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = (urllib.quote_plus(title))
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)
            checkseason = cleantitle.get(title) + "season" + season
            html = BeautifulSoup(OPEN_URL(r).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for link in containers:
                link_title = link('a')[0]['title'].encode('utf-8')
                href = link('a')[0]['href'].encode('utf-8')
                if cleantitle.get(link_title) == checkseason:
                    url = {'url': href, 'type': 'tv_shows', 'episode': episode}
                    url = urllib.urlencode(url)
                    return url

        except:
            return
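OPEN_URL is not defined in examples #18 and #19; the code reads .content off its return value, so it evidently returns a requests-style response. A stand-in under that assumption:

    import requests
    from bs4 import BeautifulSoup

    # Assumed shape of the missing OPEN_URL helper: a plain GET whose
    # response .content the snippets feed to BeautifulSoup.
    def OPEN_URL(url):
        return requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})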
Code example #19
    def movie(self, imdb, title, year):
        try:
            self.elysium = []

            cleaned_title = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            q = self.search_link % (urllib.quote_plus(title))
            r = urlparse.urljoin(self.base_link, q)
            html = BeautifulSoup(OPEN_URL(r).content)
            print ("ONEMOVIES EPISODES", html)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for link in containers:
                    link_title = link('a')[0]['title'].encode('utf-8')
                    href = link('a')[0]['href'].encode('utf-8')
                    info = link('a')[0]['data-url'].encode('utf-8')
                    if cleantitle.get(link_title) == cleaned_title:
                        info = urlparse.urljoin(self.base_link, info)
                        html = OPEN_URL(info).content
                        pattern = '<div class="jt-info">%s</div>' % year
                        match = re.findall(pattern, html)
                        if match:
							url = client.replaceHTMLCodes(href)
							url = {'url': url, 'type': 'movie' }
							url = urllib.urlencode(url)
							
							print("SOLARMOVIE PASSED", url) 
							return url
        except:
            return
Code example #20
    def movie(self, imdb, title, year):
        try:
            query = self.search_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            result = self.request(query, 'movie_table')
            result = client.parseDOM(result, 'div', attrs = {'class': 'movie_table'})

            title = cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='alt')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[0] for i in result if title == cleantitle.get(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
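Example #20's post-processing tries two redirector parameter names before falling back to the URL's own path. The same unwrapping as a standalone helper (the parameter names q and u come from the snippet; the redirector shape is assumed):

    import urlparse  # Python 2; urllib.parse in Python 3

    def unwrap(url):
        # If the link is a redirector like /out.php?q=<real-url>, recover
        # the wrapped target, then keep only its path.
        query = urlparse.parse_qs(urlparse.urlparse(url).query)
        for key in ('q', 'u'):
            if key in query:
                url = query[key][0]
                break
        return urlparse.urlparse(url).path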
Code example #21
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            url = self.search_link % (cleantitle.geturl(title), year)

            q = urlparse.urljoin(self.base_link, url)

            r = proxy.geturl(q)
            if not r == None: return url

            t = cleantitle.get(title)

            q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
            q = urlparse.urljoin(self.base_link, q)

            r = client.request(q)

            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1])) for i in r]
            r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]]) for i in r]
            r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = re.findall('(?://.+?|)(/.+)', r[0])[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #22
File: afdah.py (project: CYBERxNUKE/xbmc-addon)
    def movie(self, imdb, title, localtitle, aliases, year):
        try:

            query = urlparse.urljoin(self.base_link, self.search_link)
            if ':' in title:
                title2 = title.split(':')[0] + ':'
                post = 'search=%s&what=title' % title2

            else: post = 'search=%s&what=title' % cleantitle.getsearch(title)


            t = cleantitle.get(title)

            r = client.request(query, post=post)
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a',)) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = urlparse.urljoin(self.base_link, re.findall('(?://.+?|)(/.+)', r)[0])
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            return
Code example #23
File: mzmovies.py (project: CYBERxNUKE/xbmc-addon)
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])
            scraper = cfscrape.create_scraper()
            data = scraper.get(query).content
            #data = client.request(query, referer=self.base_link)
            data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
            r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
            r = zip(dom_parser.parse_dom(r, 'a'), dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))

            url = []
            for i in range(len(r)):
                title = cleantitle.get(r[i][0][1])
                title = re.sub('(\d+p|4k|3d|hd|season\d+)','',title)
                y = r[i][1][1]
                link = r[i][0][0]['href']
                if 'season' in title: continue
                if t == title and y == year:
                    if 'season' in link:
                        url.append(source_utils.strip_domain(link))
                        return url[0]
                    else:
                        url.append(source_utils.strip_domain(link))

            return url
        except:
            return
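Example #23 fetches through cfscrape (the cloudflare-scrape package) instead of client.request, presumably because the site sits behind Cloudflare's JavaScript challenge. The call shape is an ordinary requests session (URL hypothetical):

    import cfscrape

    # create_scraper() returns a requests.Session subclass that solves the
    # Cloudflare anti-bot challenge before handing back the page.
    scraper = cfscrape.create_scraper()
    html = scraper.get('https://example.com/?s=some+title').content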
Code example #24
    def movie(self, imdb, title, year):
        try:
            t = cleantitle.get(title)

            headers = {'X-Requested-With': 'XMLHttpRequest'}

            query = urllib.urlencode({'keyword': title})

            url = urlparse.urljoin(self.base_link, self.search_link)

            r = client.request(url, post=query, headers=headers)

            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
            r = [i[0] for i in r if t == cleantitle.get(i[1])][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.onemovies_info, 9000, i[1])
                    if not y == year: raise Exception()
                    return urlparse.urlparse(i[0]).path
                except:
                    pass
        except:
            return
Code example #25
File: moviesplanet.py (project: CYBERxNUKE/xbmc-addon)
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            t = cleantitle.get(tvshowtitle)

            u = urlparse.urljoin(self.base_link, self.search_link)

            p = {'q': tvshowtitle.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
            p = urllib.urlencode(p)

            r = client.request(u, post=p, XHR=True)
            r = json.loads(r)

            r = [i for i in r if i['meta'].strip().split()[0].lower() == 'tv']
            r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
            r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
            r = [(i[0], client.request(i[1])) for i in r]
            r = [(i[0], i[1]) for i in r if not i[1] == None]
            r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
            r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0]) for i in r if i[1]]
            r = [i for i in r if year in i[1]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #26
    def movie(self, imdb, title, year):
        try:
            if not debridstatus == 'true': raise Exception()

            t = cleantitle.get(title)

            headers = {'X-Requested-With': 'XMLHttpRequest'}

            query = self.search_link + urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            r = client.request(query, headers=headers)
            r = json.loads(r)

            r = [i for i in r if 'category' in i and 'movie' in i['category'].lower()]
            r = [(i['url'], i['label']) for i in r if 'label' in i and 'url' in i]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #27
File: rlshd.py (project: vphuc81/MyRepository)
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = episodecheck.lower()
            titlecheck = cleanmovie + episodecheck
            query = '%s+S%02dE%02d' % (urllib.quote_plus(title), int(data['season']), int(data['episode']))
            movielink = self.search_link + query
            link = client.request(movielink, timeout="10")
            match = re.compile('<h2 class="entry-title"><a href="(.+?)" rel="bookmark">(.+?)</a></h2>').findall(link)
            for movielink, title2 in match:
                c_title = cleantitle.get(title2)
                if titlecheck in c_title:
                    self.zen_url.append([movielink, title])
            return self.zen_url
        except:
            return
Code example #28
File: movies14.py (project: CYBERxNUKE/xbmc-addon)
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            t = cleantitle.get(title)

            q = '%s %s' % (title, year)
            q = self.search_link.decode('base64') % urllib.quote_plus(q)

            r = client.request(q)
            r = json.loads(r)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in r]
            r = [(i[0].split('%')[0], re.findall('(?:^Watch |)(.+?)(?:\(|)(\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [i for i in r if '/watch/' in i[0] and not '-season-' in i[0]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass

        try:
            url = re.sub('[^A-Za-z0-9]', '-', title).lower()
            url = self.moviesearch_link % (url, year)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, output='geturl')
            if not year in r: raise Exception()

            return url
        except:
            return
Code example #29
File: movie25.py (project: azumimuo/family-xbmc-addon)
    def movie(self, imdb, title, localtitle, year):
        try:
            q = self.search_link_2.decode('base64') % urllib.quote_plus(title)

            r = client.request(q)
            # retry a null response up to three more times
            for _ in range(3):
                if not r == None: break
                r = client.request(q)

            r = json.loads(r)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in r]
            r = [(i[0], re.findall('(?:^Watch |)(.+? \(\d{4}\))', i[1])) for i in r]
            r = [(urlparse.urljoin(self.base_link, i[0]), i[1][0]) for i in r if i[1]]

            t = cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            r = [i for i in r if any(x in i[1] for x in years)]

            match = [i[0] for i in r if t in cleantitle.get(i[1]) and '(%s)' % str(year) in i[1] and self.base_link in i[0]]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            # Only the strict title/year/base_link match is ever used.
            if not match: return
            return match[0]
        except:
            pass
Code example #30
    def __search(self, titles, year, season='0'):
        try:
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            post = {'story': utils.uni2cp(titles[0]), 'titleonly': 3, 'do': 'search', 'subaction': 'search', 'search_start': 1, 'full_search': 0, 'result_from': 1}
            html = client.request(self.base_link, post=post)

            html = html.decode('cp1251').encode('utf-8')

            r = dom_parser.parse_dom(html, 'div', attrs={'id': re.compile('news-id-\d+')})
            r = [(i.attrs['id'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
            r = [(re.sub('[^\d]+', '', i[0]), dom_parser.parse_dom(i[1], 'img', req='title')) for i in r]
            r = [(i[0], i[1][0].attrs['title'], '') for i in r if i[1]]
            r = [(i[0], i[1], i[2], re.findall(u'(.+?)\s+(\d+)\s+(?:сезон)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1]), i[3]) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0', i[3]) for i in r]
            r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # entries with a year sort before those without
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
            r = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('.*/%s-' % r)}, req='href')[0].attrs['href']

            return source_utils.strip_domain(r)
        except:
            return
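Example #30 targets a Windows-1251 site: the POST body goes through utils.uni2cp and the response is transcoded back with html.decode('cp1251').encode('utf-8'). A plausible sketch of the missing uni2cp helper (Python 2, behavior assumed):

    # Hypothetical equivalent of utils.uni2cp for a cp1251 site.
    def uni2cp(text, encoding='cp1251'):
        if isinstance(text, str):
            text = text.decode('utf-8')
        return text.encode(encoding, 'ignore')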
Code example #31
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            try:
                if not 'tvshowtitle' in data: raise Exception()

                links = []

                f = [
                    'S%02dE%02d' % (int(data['season']), int(data['episode']))
                ]
                t = data['tvshowtitle']

                q = base64.b64decode(self.search_link) + urllib.quote_plus(
                    '%s %s' % (t, f[0]))
                q = urlparse.urljoin(self.base_link, q)

                result = client.request(q)
                result = json.loads(result)
            except:
                links = result = []

            for i in result:
                try:
                    if not cleantitle.get(t) == cleantitle.get(i['showName']):
                        raise Exception()

                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]'
                                   ).findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f): raise Exception()

                    quality = i['quality']

                    size = i['size']
                    size = float(size) / 1024
                    size = '%.2f GB' % size

                    if 'X265' in quality: info = '%s | HEVC' % size
                    else: info = size

                    if '1080P' in quality: quality = '1080p'
                    elif quality in ['720P', 'WEBDL']: quality = 'HD'
                    else: quality = 'SD'

                    url = i['links']
                    for x in url.keys():
                        links.append({
                            'url': url[x],
                            'quality': quality,
                            'info': info
                        })
                except:
                    pass

            for i in links:
                try:
                    url = i['url']
                    if len(url) > 1: raise Exception()
                    url = url[0].encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': i['quality'],
                        'provider': 'DirectDL',
                        'url': url,
                        'info': i['info'],
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            try:
                hostDict2 = [(i.rsplit('.', 1)[0], i) for i in hostDict]

                q = ('/tv/a-z/%s',
                     data['tvshowtitle']) if 'tvshowtitle' in data else (
                         '/movies/a-z/%s', data['title'])
                q = q[0] % re.sub('^THE\s+|^A\s+', '', q[1].strip().upper())[0]

                url = cache.get(self.directdl_cache, 120, q)
                url = [i[0] for i in url if data['imdb'] == i[1]][0]
                url = urlparse.urljoin(base64.b64decode(self.b_link), url)

                try:
                    v = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]
                except:
                    v = None

                if v == None:
                    result = self.request(url)
                    url = re.compile('(/ip[.]php.+?>)%01dx%02d' %
                                     (int(data['season']), int(
                                         data['episode']))).findall(result)[0]
                    url = re.compile('(/ip[.]php.+?)>').findall(url)[-1]
                    url = urlparse.urljoin(base64.b64decode(self.b_link), url)

                url = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]

                u = base64.b64decode(self.u_link) % url
                r = base64.b64decode(self.r_link) % url
                j = base64.b64decode(self.j_link)
                p = base64.b64decode(self.p_link)

                result = self.request(u, referer=r)

                secret = re.compile(
                    'lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?').findall(
                        result)[0]
                secret = ''.join(secret)

                t = re.compile('"&t=([^"]+)').findall(result)[0]

                s_start = re.compile('(?:\s+|,)s\s*=(\d+)').findall(result)[0]
                m_start = re.compile('(?:\s+|,)m\s*=(\d+)').findall(result)[0]

                img = re.compile('<iframe[^>]*src="([^"]+)').findall(result)
                img = img[0] if len(img) > 0 else '0'
                img = urllib.unquote(img)

                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'ripdiv'})
                result = [(re.compile('<b>(.*?)</b>').findall(i), i)
                          for i in result]
                result = [(i[0][0], i[1].split('<p>')) for i in result
                          if len(i[0]) > 0]
                result = [[(i[0], x) for x in i[1]] for i in result]
                result = sum(result, [])
            except:
                result = []

            for i in result:
                try:
                    quality = i[0]
                    if any(x in quality for x in ['1080p', '720p', 'HD']):
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    host = client.parseDOM(i[1], 'a')[-1]
                    host = re.sub('\s|<.+?>|</.+?>|.+?#\d*:', '', host)
                    host = host.strip().rsplit('.', 1)[0].lower()
                    host = [x[1] for x in hostDict2 if host == x[0]][0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    s = int(s_start) + random.randint(3, 1000)
                    m = int(m_start) + random.randint(21, 1000)
                    id = client.parseDOM(i[1], 'a', ret='onclick')[-1]
                    id = re.compile('[(](.+?)[)]').findall(id)[0]
                    url = j % (id, t) + '|' + p % (id, s, m, secret, t)
                    url += '|%s' % urllib.urlencode({'Referer': u, 'Img': img})
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'DirectDL',
                        'url': url,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Code example #32
    def __search(self, imdb, titles, year):
        try:
            q = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            q = urlparse.urljoin(self.base_link, q)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(q)

            r = dom_parser.parse_dom(
                r, 'tr', attrs={'id': re.compile('coverPreview.+?')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i,
                                       'div',
                                       attrs={'style': re.compile('.+?')}),
                  dom_parser.parse_dom(i, 'img', req='src')) for i in r]
            r = [(i[0][0].attrs['href'].strip(), i[0][0].content.strip(), i[1],
                  i[2]) for i in r if i[0] and i[2]]
            r = [(i[0], i[1], [
                x.content for x in i[2]
                if x.content.isdigit() and len(x.content) == 4
            ], i[3]) for i in r]
            r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r]
            r = [
                i for i in r if any('us_flag' in x.attrs['src'] for x in i[3])
            ]
            r = [(i[0], i[1], i[2], [
                re.findall('(\d+)', x.attrs['src']) for x in i[3]
                if 'smileys' in x.attrs['src']
            ]) for i in r]
            r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r]
            r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r]
            r = sorted(r, key=lambda x: x[3])[::-1]
            r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r]
            r = [(i[0], i[1], i[2]) for i in r if not i[3]]
            r = [i for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # entries with a year sort before those without

            r = [(client.replaceHTMLCodes(i[0]), i[1], i[2]) for i in r]

            match = [
                i[0] for i in r if cleantitle.get(i[1]) in t and year == i[2]
            ]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if match:
                        url = match[0]
                        break
                    r = client.request(urlparse.urljoin(self.base_link, i))
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            return source_utils.strip_domain(url)
        except:
            return
Code example #33
    def sources(self, url, hostDict, locDict):
        sources = []

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query,
                                    mobile=True,
                                    timeout=20,
                                    output='extended')
            r = json.loads(result[0])
            r = r['data']['films']

            years = [
                str(data['year']),
                str(int(data['year']) + 1),
                str(int(data['year']) - 1)
            ]

            #print r
            if 'episode' in data:
                r = [
                    i for i in r
                    if cleantitle.get(title) == cleantitle.get(i['title'])
                ]
                r = [(i, re.sub('[^0-9]', '', str(i['publishDate'])))
                     for i in r]
                r = [i[0] for i in r if any(x in i[1] for x in years)][0]
                result = client.request(urlparse.urljoin(
                    self.base_link, self.sources_link % r['id']),
                                        mobile=True,
                                        headers=result[4],
                                        output='extended')
                r = json.loads(result[0])
                r = [
                    i for i in r['data']['chapters']
                    if i['title'].replace('0', '').lower() == 's%se%s' %
                    (data['season'], data['episode'])
                ][0]

            else:
                r = [
                    i for i in r
                    if cleantitle.get(title) == cleantitle.get(i['title'])
                ]
                r = [
                    i for i in r if any(x in i['publishDate'] for x in years)
                ][0]
                #print r
                result = client.request(urlparse.urljoin(
                    self.base_link, self.sources_link % r['id']),
                                        mobile=True,
                                        headers=result[4],
                                        output='extended')
                r = json.loads(result[0])
                r = r['data']['chapters'][0]

            result = client.request(urlparse.urljoin(
                self.base_link, self.stream_link % r['id']),
                                    mobile=True,
                                    headers=result[4],
                                    output='extended')
            r = json.loads(result[0])

            r = [(i['quality'], i['server'], self._decrypt(i['stream']))
                 for i in r['data']]
            sources = []
            for i in r:
                try:
                    valid, hoster = source_utils.is_host_valid(i[2], hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(
                        i[2], hoster)
                    for x in urls:
                        q = x[
                            'quality'] if host == 'gvideo' else source_utils.label_to_quality(
                                i[0])
                        u = x['url'] if host == 'gvideo' else i[2]
                        sources.append({
                            'source': host,
                            'quality': q,
                            'language': 'en',
                            'url': u,
                            'direct': direct,
                            'debridonly': False
                        })

                except:
                    pass

            return sources
        except Exception as e:
            return sources
Code example #34
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not control.setting(
                    'pmcached.providers') == 'true' and not control.setting(
                        'rdcached.providers') == 'true':
                return sources
            if self.pm_api_key == '' and self.rd_api_key == '': return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            category = '4' if 'tvshowtitle' in data else '3'

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            result = client.parseDOM(r, 'div', attrs={'id': 'container'})[0]
            result = client.parseDOM(result, 'tr', attrs={'class': 'bls-row'})
            result = [(re.findall('href="(magnet[^"]+)"', i)[0],
                       client.parseDOM(i, 'span', attrs={'class': 'title'})[0],
                       client.parseDOM(i, 'td', attrs={'class': 'size'})[0])
                      for i in result]

            items = []

            for item in result:
                try:
                    name = item[1]
                    magnetlink = item[0]

                    size = ''
                    try:
                        size = item[2]
                        if not len(str(size)) < 3:
                            size = float(int(size)) / 1024
                            size = '%.2f GB' % size
                        else:
                            size = ''
                    except:
                        pass

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|(?:S|s)\d*(?:E|e)\d*|(?:S|s)\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr: raise Exception()
                    u = [(name, magnetlink, size)]
                    items += u
                except:
                    pass

            if control.setting('pmcached.providers'
                               ) == 'true' and not self.pm_api_key == '':
                for item in items:
                    try:
                        _hash = re.findall('btih:(.*?)\W', item[1])[0]
                        checkurl = urlparse.urljoin(
                            self.pm_base_link, self.pm_checkcache_link %
                            (self.pm_api_key, _hash, self.pm_api_key))
                        r = client.request(checkurl)
                        if not 'finished' in r: raise Exception()

                        name = client.replaceHTMLCodes(item[0])
                        quality, info = source_utils.get_release_quality(
                            name, None)
                        filetype = source_utils.getFileType(name)
                        info += [filetype.strip(), name]
                        info = filter(None, info)
                        info = ' | '.join(info)
                        if not item[2] == '':
                            info = '%s | %s' % (item[2], info)
                        url = 'magnet:?xt=urn:btih:%s' % _hash

                        sources.append({
                            'source': 'PMCACHED',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False,
                            'cached': True
                        })
                    except:
                        pass

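            # Real-Debrid: reuse links from the account's torrent list when possible, else probe instant availability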
            if (control.setting('rdcached.providers') == 'true'
                    and self.rd_api_key != ''):
                checktorr_r = self.checkrdcache()
                checktorr_result = json.loads(checktorr_r)

                for item in items:
                    try:
                        _hash = re.findall('btih:(.*?)\W', item[1])[0]
                        _hash = _hash.lower()

                        url = ''
                        for i in checktorr_result:
                            try:
                                if _hash == i['hash'] and i['status'] == 'downloaded':
                                    url = i['links'][0]
                                    break
                            except:
                                pass

                        if url == '':
                            checkurl = urlparse.urljoin(
                                self.rd_base_link, self.rd_checkcache_link %
                                (_hash, self.rd_api_key))
                            r = client.request(checkurl)
                            checkinstant = json.loads(r)
                            checkinstant = checkinstant[_hash]

                            checkinstant_num = 0
                            try:
                                checkinstant_num = len(checkinstant['rd'])
                            except:
                                pass

                            if checkinstant_num == 0: raise Exception()
                            url = 'rdmagnet:?xt=urn:btih:%s' % _hash

                        if url == '': raise Exception()

                        name = client.replaceHTMLCodes(item[0])
                        quality, info = source_utils.get_release_quality(
                            name, None)
                        filetype = source_utils.getFileType(name)
                        info += [filetype.strip(), name]
                        info = filter(None, info)
                        info = ' | '.join(info)
                        if item[2] != '':
                            info = '%s | %s' % (item[2], info)

                        sources.append({
                            'source': 'RDCACHED',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False,
                            'cached': True
                        })
                    except:
                        pass

            return sources
        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
Code example #35
File: magnetdl.py  Project: fadifadisadi/LilacTV
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         if debrid.status() is False:
             raise Exception()
         if debrid.torrent_enabled() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = urlparse.urljoin(
             self.base_link,
             self.search_link.format(query[0].lower(),
                                     cleantitle.geturl(query)))
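         # magnetdl shards its listings by the first character of the query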
         r = client.request(url)
         r = client.parseDOM(r, 'tbody')[0]
         posts = client.parseDOM(r, 'tr')
         posts = [i for i in posts if 'magnet:' in i]
         for post in posts:
             post = post.replace('&nbsp;', ' ')
             name = client.parseDOM(post, 'a', ret='title')[1]
             t = name.split(hdlr)[0]
             # strip literal parentheses before comparing titles
             if cleantitle.get(re.sub('[()]', '', t)) != cleantitle.get(title):
                 continue
             try:
                 y = re.findall(
                     '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall(
                     '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if y != hdlr:
                 continue
             links = client.parseDOM(post, 'a', ret='href')
             magnet = [
                 i.replace('&amp;', '&') for i in links if 'magnet:' in i
             ][0]
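             # keep only the bare btih magnet; the tracker list is not needed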
             url = magnet.split('&tr')[0]
             quality, info = source_utils.get_release_quality(name, name)
             try:
                 size = re.findall(
                     '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024
                 size = float(
                     re.sub('[^0-9|/.|/,]', '', size.replace(',',
                                                             '.'))) / div
                 size = '%.2f GB' % size
             except BaseException:
                 size = '0'
             info.append(size)
             info = ' | '.join(info)
             sources.append({
                 'source': 'Torrent',
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': True
             })
         return sources
     except BaseException:
         return sources
Code example #36
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict  # debrid-only source, so only premium hosters are useful

            items = []

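            # each RSS <item> is one post: the release title plus hoster links in its body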
            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    # parseDOM treats the tag name as a regex; 'content.+?' matches <content:encoded>
                    c = client.parseDOM(post, 'content.+?')[0]

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'

                    u = zip(client.parseDOM(c, 'a', ret='href'),
                            client.parseDOM(c, 'a'))

                    u = [(i[1], i[0], s) for i in u]

                    items += u
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if cleantitle.get(t) != cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if y != hdlr: raise Exception()

                    fmt = re.sub(
                        '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                        '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(
                            i.endswith(('subs', 'sub', 'dubbed', 'dub'))
                            for i in fmt):
                        raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                        quality = 'SCR'
                    elif any(i in [
                            'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam',
                            'dvdts', 'cam', 'telesync', 'ts'
                    ] for i in fmt):
                        quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                        info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Code example #37
 def getMeta(self, meta):
     try:
         poster = meta['poster'] if 'poster' in meta else '0'
         thumb = meta['thumb'] if 'thumb' in meta else poster
         if poster == '0':
             poster = control.addonPoster()
         return (poster, thumb, meta)
     except:
         pass
     try:
         if self.content != 'movie':
             raise Exception()
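         # movies: match against the local Kodi library via JSON-RPC, allowing year +/- 1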
         meta = control.jsonrpc(
             '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["title", "originaltitle", "year", "genre", "studio", "country", "runtime", "rating", "votes", "mpaa", "director", "writer", "plot", "plotoutline", "tagline", "thumbnail", "file"]}, "id": 1}'
             %
             (self.year, str(int(self.year) + 1), str(int(self.year) - 1)))
         meta = unicode(meta, 'utf-8', errors='ignore')
         meta = json.loads(meta)['result']['movies']
         t = cleantitle.get(self.title)
         meta = [
             i for i in meta
             if self.year == str(i['year']) and (t == cleantitle.get(
                 i['title']) or t == cleantitle.get(i['originaltitle']))
         ][0]
         for k, v in meta.iteritems():
             if type(v) == list:
                 try:
                     meta[k] = str(' / '.join(
                         [i.encode('utf-8') for i in v]))
                 except:
                     meta[k] = ''
             else:
                 try:
                     meta[k] = str(v.encode('utf-8'))
                 except:
                     meta[k] = str(v)
         if 'plugin' not in control.infoLabel('Container.PluginName'):
             self.DBID = meta['movieid']
         poster = thumb = meta['thumbnail']
         return (poster, thumb, meta)
     except:
         pass
     try:
         if self.content != 'episode':
             raise Exception()
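         # episodes: resolve the show id first, then fetch the matching episode entry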
         meta = control.jsonrpc(
             '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["title", "year", "thumbnail", "file"]}, "id": 1}'
             %
             (self.year, str(int(self.year) + 1), str(int(self.year) - 1)))
         meta = unicode(meta, 'utf-8', errors='ignore')
         meta = json.loads(meta)['result']['tvshows']
         t = cleantitle.get(self.title)
         meta = [
             i for i in meta if self.year == str(i['year'])
             and t == cleantitle.get(i['title'])
         ][0]
         tvshowid = meta['tvshowid']
         poster = meta['thumbnail']
         meta = control.jsonrpc(
             '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params":{ "tvshowid": %d, "filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["title", "season", "episode", "showtitle", "firstaired", "runtime", "rating", "director", "writer", "plot", "thumbnail", "file"]}, "id": 1}'
             % (tvshowid, self.season, self.episode))
         meta = unicode(meta, 'utf-8', errors='ignore')
         meta = json.loads(meta)['result']['episodes'][0]
         for k, v in meta.iteritems():
             if type(v) == list:
                 try:
                     meta[k] = str(' / '.join(
                         [i.encode('utf-8') for i in v]))
                 except:
                     meta[k] = ''
             else:
                 try:
                     meta[k] = str(v.encode('utf-8'))
                 except:
                     meta[k] = str(v)
         if 'plugin' not in control.infoLabel('Container.PluginName'):
             self.DBID = meta['episodeid']
         thumb = meta['thumbnail']
         return (poster, thumb, meta)
     except:
         pass
     poster, thumb, meta = '', '', {'title': self.name}
     return (poster, thumb, meta)
Code example #38
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            try:
                if 'tvshowtitle' not in data: raise Exception()

                links = []

                f = [
                    'S%02dE%02d' % (int(data['season']), int(data['episode']))
                ]
                t = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                           data['tvshowtitle'])
                t = t.replace("&", "")

                q = self.search_link + urllib.quote_plus('%s %s' % (t, f[0]))

                q = urlparse.urljoin(self.base_link, q)
                result = client.request(q)
                result = json.loads(result)

                result = result['results']
            except:
                links = result = []

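            # each API hit exposes showName / release / quality / size / links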
            for i in result:
                try:
                    if cleantitle.get(t) != cleantitle.get(i['showName']):
                        raise Exception()

                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]'
                                   ).findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f): raise Exception()

                    quality = i['quality']

                    quality = quality.upper()

                    size = i['size']
                    size = float(size) / 1024
                    size = '%.2f GB' % size

                    if any(x in quality for x in ['HEVC', 'X265', 'H265']):
                        info = '%s | HEVC' % size
                    else:
                        info = size

                    if '1080P' in quality: quality = '1080p'
                    elif '720P' in quality: quality = 'HD'
                    else: quality = 'SD'

                    url = i['links']

                    links = []

                    for x in url.keys():
                        links.append({'url': url[x], 'quality': quality})

                    for link in links:
                        try:
                            url = link['url']
                            quality2 = link['quality']
                            if len(url) > 1: raise Exception()
                            url = url[0].encode('utf-8')

                            host = re.findall(
                                '([\w]+[.][\w]+)$',
                                urlparse.urlparse(
                                    url.strip().lower()).netloc)[0]
                            if host not in hostprDict: raise Exception()
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality2,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                        except:
                            pass

                except:
                    pass

            return sources
        except:
            return sources
Code example #39
File: myvideolinks.py  Project: csu-xiao-an/LilacTV
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            result = client.request(self.base_link)
            result = re.findall('<meta http-equiv = "refresh" .+? url = (http://myvideolinks.net/...)', result)
            for url in result:
                link = url

                url = link + self.search_link
                url = url % urllib.quote_plus(query)
            r = client.request(url)

            r = client.parseDOM(r, 'h2')
            l = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]), re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in l]
            r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]]
            r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r]
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[2]) and data['year'] == i[3]]
            r = [i for i in r if not any(x in i[4] for x in ['HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D'])]
            r = [i for i in r if '1080p' in i[4]][:1] + [i for i in r if '720p' in i[4]][:1]

            # posts are (title, href) pairs for movies and shows alike
            posts = [(i[1], i[0]) for i in l]
            hostDict = hostprDict + hostDict

            items = []
            for post in posts:
                try:
                    t = post[0]

                    u = client.request(post[1])
                    u = re.findall('"(http.+?)"', u)
                    u = [i for i in u if '/embed/' not in i]
                    u = [i for i in u if 'youtube' not in i]

                    items += [(t, i) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if cleantitle.get(t) != cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if y != hdlr:
                        raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '2160p' in fmt:
                        quality = '4K'
                    elif '4K' in fmt:
                        quality = '4K'
                    elif '1080p' in fmt:
                        quality = '1080p'
                    elif '720p' in fmt:
                        quality = '720p'
                    else:
                        quality = '720p'

                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt:
                        info.append('3D')

                    try:
                        # NOTE: items here are (name, url) pairs, so item[2] always
                        # raises IndexError and the size is silently skipped
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except:
            return sources
Code example #40
    def sources(self, url, hostDict, hostprDict):
        try:
            print '-------------------------------    -------------------------------'
            sources = []

            print url

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            print data

            title = data['title']
            year = data['year']
            season = data['season'] if 'season' in data else False
            episode = data['episode'] if 'episode' in data else False
            localtitle = data['localtitle'] if 'localtitle' in data else False

            if season and episode:
                localtitle = data[
                    'localtvshowtitle'] if 'localtvshowtitle' in data else False

            t = cleantitle.get(title)
            tq = cleantitle.query(localtitle)
            tq2 = re.sub(' ', '', cleantitle.query(localtitle).lower())
            tq = re.sub(' ', '%20', tq)
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            query = 'http://www.fullmoviz.org'

            r = client.request('http://www.fullmoviz.org/?s=%s' % tq)
            print 'http://www.fullmoviz.org/?s=%s' % tq
            r = client.parseDOM(r, 'div', attrs={'class': 'post-thumbnail'})
            r0 = client.parseDOM(r, 'a', ret='href')[0]
            r2 = client.parseDOM(r, 'a', ret='title')[0]
            r1 = re.sub('(\([0-9]{4}\)|streaming|\s+)', '', r2)

            # only the first search hit is used, so a single (href, title) pair suffices
            r = [(r0, r1)]
            r = [i[0] for i in r if tq2 == cleantitle.get(i[1])][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            r = client.request('http://www.fullmoviz.org' + url)
            print 'http://www.fullmoviz.org' + url

            r = client.parseDOM(r,
                                'div',
                                attrs={'class': 'tab-me-content-wrapper'})
            r = client.parseDOM(r, 'iframe', ret='src')

            for i in r:

                url = i

                host = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                if host not in hostDict: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'FR',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Code example #41
    def sources(self, url, hostDict, hostprDict):
        try:
            hostDict = hostDict + hostprDict

            sources = []
            query_bases = []
            options = []

            if url is None:
                return sources

            if not debrid.status():
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = (data['tvshowtitle']
                     if 'tvshowtitle' in data else data['title'])
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            # tvshowtitle
            if 'tvshowtitle' in data:
                query_bases.append('%s ' %
                                   (data['tvshowtitle'].replace("-", "")))
                if 'year' in data:
                    query_bases.append('%s %s ' %
                                       (data['tvshowtitle'], data['year']))
                options.append('S%02dE%02d' %
                               (int(data['season']), int(data['episode'])))
                options.append('S%02d' % (int(data['season'])))
            else:
                query_bases.append('%s %s ' % (data['title'], data['year']))
                query_bases.append('%s ' % (data['title']))
                query_bases.append('2160p')
                query_bases.append('')

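            # try the most specific query variant first and stop as soon as one yields sources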
            for option in options:
                for query_base in query_bases:
                    q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                               query_base + option)
                    q = q.replace("  ", " ").replace(" ", "+")
                    url = self.search_link % q
                    html = self.scraper.get(url)
                    if html.status_code == 200:
                        links = client.parseDOM(html.content,
                                                "div",
                                                attrs={"class": "list_items"})
                        if len(links) == 0:
                            continue
                        items = client.parseDOM(links, "a", ret='href')
                        if len(items) == 0:
                            continue
                        for url in items:
                            if len(url) == 0:
                                continue
                            html = self.scraper.get(self.base_link + url)
                            if html.status_code == 200:
                                link_blocks = client.parseDOM(
                                    html.content,
                                    "pre",
                                    attrs={"class": "links"})
                                for linkBlock in link_blocks:
                                    for href in linkBlock.splitlines():
                                        quality = source_utils.check_sd_url(
                                            href)
                                        href = href.encode('utf-8')
                                        valid, host = source_utils.is_host_valid(
                                            href, hostDict)
                                        if not valid:
                                            continue
                                        if any(x in href for x in
                                               ['.rar', '.zip', '.iso']):
                                            continue
                                        if (hdlr in href.upper()
                                                and cleantitle.get(title) in cleantitle.get(href)):
                                            sources.append({
                                                'source': host,
                                                'quality': quality,
                                                'language': 'en',
                                                'url': href,
                                                'direct': False,
                                                'debridonly': False
                                            })
                if len(sources) > 0:
                    return sources
            return sources
        except:
            return sources
Code example #42
    def __search(self, titles, year, season=0, episode=False):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'ml-item-content'})

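            # collect (url, year) candidates whose title, year and season all match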
            f = []
            for i in r:
                _url = dom_parser.parse_dom(i,
                                            'a',
                                            attrs={'class': 'ml-image'},
                                            req='href')[0].attrs['href']

                _title = re.sub('<.+?>|</.+?>', '',
                                dom_parser.parse_dom(i,
                                                     'h6')[0].content).strip()
                try:
                    _title = re.search('(.*?)\s(?:staf+el|s)\s*(\d+)', _title,
                                       re.I).group(1)
                except:
                    pass

                _season = '0'

                _year = re.findall(
                    'calendar.+?>.+?(\d{4})', ''.join([
                        x.content for x in dom_parser.parse_dom(
                            i, 'ul', attrs={'class': 'item-params'})
                    ]))
                _year = _year[0] if len(_year) > 0 else '0'

                if season > 0:
                    s = dom_parser.parse_dom(i,
                                             'span',
                                             attrs={'class': 'season-label'})
                    s = dom_parser.parse_dom(s,
                                             'span',
                                             attrs={'class': 'el-num'})
                    if s: _season = s[0].content.strip()

                if cleantitle.get(_title) in t and _year in y and int(
                        _season) == int(season):
                    f.append((_url, _year))
            r = f
            r = sorted(r, key=lambda i: int(i[1]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if i[0]][0]

            url = source_utils.strip_domain(r)
            if episode:
                r = client.request(urlparse.urljoin(self.base_link, url))
                r = dom_parser.parse_dom(r,
                                         'div',
                                         attrs={'class': 'season-list'})
                r = dom_parser.parse_dom(r, 'li')
                r = dom_parser.parse_dom(r, 'a', req='href')
                r = [(i.attrs['href'], i.content) for i in r]
                r = [i[0] for i in r if i[1] and int(i[1]) == int(episode)][0]
                url = source_utils.strip_domain(r)
            return url
        except:
            return
Code example #43
File: scenerls.py  Project: csu-xiao-an/LilacTV
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            try:
                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = self.scraper.get(url).content

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []
                dupes = []

                for post in posts:
                    try:
                        u = client.parseDOM(post,
                                            "div",
                                            attrs={"class": "postContent"})
                        u = client.parseDOM(u, "h2")
                        u = client.parseDOM(u, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if cleantitle.get(t) != cleantitle.get(title): continue

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Code example #44
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            items = []

            for post in posts:

                try:
                    t = client.parseDOM(post, 'title')[0]
                    t2 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)

                    if cleantitle.get_simple(t2.replace(
                            'Watch Online', '')) != cleantitle.get(title):
                        raise Exception()

                    l = client.parseDOM(post, 'link')[0]

                    p = client.parseDOM(post, 'pubDate')[0]

                    if data['year'] in p: items += [(t, l)]

                except:
                    pass

            print items
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    u = client.request(item[1])
                    if 'http://www.imdb.com/title/%s/' % data['imdb'] in u:

                        # the hoster iframe is loaded lazily, so read its data-lazy-src
                        l = client.parseDOM(u, 'iframe',
                                            ret='data-lazy-src')[0]

                        quality, info = source_utils.get_release_quality(
                            name, l)
                        info = ' | '.join(info)

                        url = l

                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(url, hostDict)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Code example #45
File: 300mbfilms.py  Project: bopopescu/fuzzybritches
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'link')[0]
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', t)
                    s = s[0] if s else '0'

                    items += [(t, u, s) ]

                except:
                    pass

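            # filter posts by title/year, expanding each match into hoster links via self.links()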
            urls = []
            for item in items:

                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if cleantitle.get(t) != cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if y != hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, item[1])
                    if any(x in quality for x in ['CAM', 'SD']): continue

                    try:
                        size = re.sub('i', '', item[2])
                        div = 1 if size.endswith('GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    links = self.links(url)
                    urls += [(i, quality, info) for i in links]

                except:
                    pass

            for item in urls:

                if 'earn-money' in item[0]: continue
                if any(x in item[0] for x in ['.rar', '.zip', '.iso']): continue
                url = client.replaceHTMLCodes(item[0])
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': item[1], 'language': 'en', 'url': url, 'info': item[2], 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
Code example #46
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'tv_episode_item'})

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                premiered)[0]
            # e.g. ('2018', '05', '03') -> 'May 3 2018', matching the site's episode list
            months = ['January', 'February', 'March', 'April', 'May', 'June',
                      'July', 'August', 'September', 'October', 'November',
                      'December']
            premiered = '%s %01d %s' % (months[int(premiered[1]) - 1],
                                        int(premiered[2]), premiered[0])

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i,
                                       'span',
                                       attrs={'class': 'tv_episode_name'}),
                       client.parseDOM(i,
                                       'span',
                                       attrs={'class': 'tv_num_versions'}))
                      for i in result]
            result = [
                (i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0
            ] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [
                (i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0
            ] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [
                i for i in result
                if title == cleantitle.get(i[1]) and premiered == i[2]
            ][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1:
                url = [
                    i for i in result if 'season-%01d-episode-%01d' %
                    (int(season), int(episode)) in i[0]
                ]

            url = client.replaceHTMLCodes(url[0][0])
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #47
    def sources(self, url, hostDict, hostprDict):
        try:
            print '-------------------------------    -------------------------------'
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            year = data['year']
            season = data['season'] if 'season' in data else False
            episode = data['episode'] if 'episode' in data else False
            localtitle = data['localtitle'] if 'localtitle' in data else False

            if season and episode:
                localtitle  = data['localtvshowtitle'] if 'localtvshowtitle' in data else False

            t = cleantitle.get(title)
            tq = cleantitle.get(localtitle)
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            query = self.base_link

            r = client.request(query, post='s=%s' % tq)

            r = client.parseDOM(r, 'div', attrs={'class': 'title'})
            r = [(client.parseDOM(i, 'a', ret='href'), re.compile('title=\"Permanent Link to(.+?) \[').findall(i)) for i in r]

            r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:saison|s)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], re.sub(' \&\#[0-9]{4,6};', '', i[1]), i[2], i[3]) for i in r]
            r = [i[0] for i in r if tq == cleantitle.get(i[1]) and i[2] in y and int(i[3]) == int(season)][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            r = client.request('http://www.dpstreaming.me' + url)

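            # series pages list every episode in one block; isolate the requested one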
            if season and episode:
                # the page zero-pads single-digit episode headings, e.g. 'Épisode 05'
                r = re.compile('<p align="center">Épisode %02d(.+?)</p>' % int(episode)).findall(r)[0]

                r = re.compile('<a href=\"(.+?)\"', re.MULTILINE|re.DOTALL).findall(r)
            else:
                r = client.parseDOM(r, 'div', attrs={'id': 'light'})
                r = client.parseDOM(r, 'a', ret='href')

            for url in r:

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if host not in hostDict: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': 'SD', 'language': 'VF', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Code example #48
    def sources(self, url, hostDict, hostprDict):
        try:
            print '-------------------------------    -------------------------------'
            sources = []

            print url

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            print data

            title = data['title']
            year = data['year']
            season = data['season'] if 'season' in data else False
            episode = data['episode'] if 'episode' in data else False
            localtitle = data['localtitle'] if 'localtitle' in data else False

            if season and episode:
                localtitle = data['localtvshowtitle'] if 'localtvshowtitle' in data else False

            t = cleantitle.get(title)
            tq = cleantitle.query(localtitle)
            tq2 = re.sub(' ', '', cleantitle.query(localtitle).lower())
            tq = re.sub(' ', '%20', tq)
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            query = 'http://www.cinemay.com'

            r = client.request('http://www.cinemay.com/?s=%s' % tq)
            print 'http://www.cinemay.com/?s=%s' % tq
            r = client.parseDOM(r, 'div', attrs={'class': 'unfilm'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], re.sub('(film| en streaming vf| en streaming vostfr|&rsquo;| )', '', i[1][0]).lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:saison|s)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], re.sub(' \&\#[0-9]{4,6};', '', i[1]), i[2], i[3]) for i in r]
            r = [i[0] for i in r if tq2 == cleantitle.get(i[1])][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            r = client.request('http://www.cinemay.com' + url)
            print 'http://www.cinemay.com' + url
            r = client.parseDOM(r, 'div', attrs={'class': 'module-actionbar'})
            r = client.parseDOM(r, 'a', ret='href')

            for i in r:
                if i == '#':
                    continue

                url = client.request('http://www.cinemay.com' + i)
                url = client.parseDOM(url, 'div', attrs={'class': 'wbox2 video dark'})
                url = client.parseDOM(url, 'iframe', ret='src')[0]

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if host not in hostDict: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': 'SD', 'language': 'FR', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Code example #49
File: filmenstreaminghd.py  Project: uguer30/Project
    def sources(self, url, hostDict, hostprDict):
        try:
            print '-------------------------------    -------------------------------'
            sources = []

            print url

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            season = data['season'] if 'season' in data else False
            episode = data['episode'] if 'episode' in data else False

            self.search_link = 'query=%s&submit=Submit+Query'

            if season and episode:
                aTitle = data['tvshowtitle']
            else:
                aTitle = data['title']


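            # POST the search query via XHR, then pick the result whose normalised title matches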
            post = self.search_link % (urllib.quote_plus(cleantitle.query(aTitle)))
            url = 'http://www.filmenstreaminghd.com/recherche/'


            t = cleantitle.get(aTitle)

            r = client.request(url, XHR=True, referer=url, post=post)

            r = client.parseDOM(r, 'div', attrs={'class': 'film-k kutu-icerik kat'})

            if season and episode:
                t = t + 'saison0' + season

            r = client.parseDOM(r, 'div', attrs={'class': 'play fa fa-play-circle'})
            r = sorted(set(r))
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1])][0]

            url0 = '%s%s' % ('http://www.filmenstreaminghd.com', r)
            url = client.replaceHTMLCodes(url0)
            url = url.encode('utf-8')

            r = client.request(url, XHR=True, referer=url)
            r = re.sub('(\n|\t)', '', r)

            langue = re.compile('<b class=\"fa fa-cc\"></b><span>(.+?)</span>', re.MULTILINE | re.DOTALL).findall(r)[0]
            if langue == 'VF':
                langue = 'FR'

            quality2 = re.compile('<div class=\"kalite\">(.+?)</div>', re.MULTILINE | re.DOTALL).findall(r)[0]
            quality2 = re.sub('-', '', quality2)

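            # TV episodes live inside a per-season block; films list their links directly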
            if season and episode:
                unLien0a = client.parseDOM(r, 'div', attrs={'class': 'dizi-bolumleri'})[0]
                r = re.compile('Saison\s+0%s\s+\-\s+Episode\s+0%s(.+?)class=\"dropit-trigger\">' % (season, episode), re.MULTILINE | re.DOTALL).findall(unLien0a)[0]
                unLien0b = client.parseDOM(r, 'li', ret='id')
            else:
                r = client.parseDOM(r, 'div', attrs={'class': 'dizi-bolumleri film'})
                unLien0b = client.parseDOM(r, 'span', ret='id')

            for unLienUrl in unLien0b:

                if 'gf-' in unLienUrl:
                    continue

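                # Each player id resolves to an iframe through an XHR POST with its pid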
                dataUrl = urllib.urlencode({'pid': unLienUrl[1:]})
                dataUrl = client.request(url0, post=dataUrl, XHR=True, referer=url0)

                try:
                    url = client.parseDOM(dataUrl, 'iframe', ret='src')[1]
                except:
                    url = client.parseDOM(dataUrl, 'iframe', ret='src')[0]

                if url.startswith('//'):
                    url = 'http:' + url

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if host not in hostDict: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = url.encode('utf-8')

                if '1080p' in quality2:
                    quality = '1080p'
                elif '720p' in quality2 or 'bdrip' in quality2 or 'hdrip' in quality2:
                    quality = 'HD'
                else:
                    quality = 'SD'

                if 'dvdscr' in quality2 or 'r5' in quality2 or 'r6' in quality2:
                    quality = 'SCR'
                elif any(x in quality2 for x in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
                    quality = 'CAM'

                sources.append({'source': host, 'quality': quality, 'language': langue, 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
コード例 #50
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'post-\d+.+?'})

            items = []

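            # First pass: collect (title, link) pairs from the search results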
            for post in posts:
                try:
                    r = dom_parser2.parse_dom(post, 'a', req='href')
                    t = r[0].content
                    u = r[0].attrs['href']
                    items += [(t, u)]

                except:
                    pass

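            # Second pass: open each matching post and harvest its download links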
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D|Free Movie Downloads)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(item[1])
                    data = client.parseDOM(data,
                                           'div',
                                           attrs={'class': 'postcont'})
                    data = client.parseDOM(data, 'p')[-1]
                    data = dom_parser2.parse_dom(data, 'a')
                    data = [(i.attrs['href'], i.content) for i in data]
                    for i in data:
                        quality, info = source_utils.get_release_quality(
                            i[1], i[0])
                        url = i[0]
                        if any(x in url for x in ['.rar', '.zip', '.iso']):
                            raise Exception()
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        sources.append({
                            'source': 'DL',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
コード例 #51
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|\.|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs={'class': 'blocks'})[0]
            r = client.parseDOM(r, 'div', attrs={'id': 'post.+?'})
            r = [
                re.findall(
                    '<a href="(.+?)" rel=".+?" title="Permanent Link: (.+?)"',
                    i, re.DOTALL) for i in r
            ]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[0][1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    t1 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(item[0][0])
                    data = client.parseDOM(
                        data,
                        'div',
                        attrs={'class': 'post-content clear-block'})[0]
                    data = dom_parser.parse_dom(data, 'a', req='href')

                    u = [(t, i.attrs['href']) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if 'https://www.extmatrix.com/files/' not in url:
                        raise Exception()
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('---Crazyhdsource Testing - Exception: \n' +
                          str(failure))
            return sources
コード例 #52
0
ファイル: library.py プロジェクト: vanhung1710/MyRepository
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            content_type = 'episode' if 'tvshowtitle' in data else 'movie'

            years = (data['year'], str(int(data['year']) + 1),
                     str(int(data['year']) - 1))

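            # Look the item up in the local Kodi library over JSON-RPC, matching by id or normalised title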
            if content_type == 'movie':
                title = cleantitle.get(data['title'])
                localtitle = cleantitle.get(data['localtitle'])
                ids = [data['imdb']]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle", "file"]}, "id": 1}'
                    % years)
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['movies']

                r = [
                    i for i in r if str(i['imdbnumber']) in ids or title in [
                        cleantitle.get(i['title'].encode('utf-8')),
                        cleantitle.get(i['originaltitle'].encode('utf-8'))
                    ]
                ]
                r = [
                    i for i in r
                    if not i['file'].encode('utf-8').endswith('.strm')
                ][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails", "file"], "movieid": %s }, "id": 1}'
                    % str(r['movieid']))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['moviedetails']
            elif content_type == 'episode':
                title = cleantitle.get(data['tvshowtitle'])
                localtitle = cleantitle.get(data['localtvshowtitle'])
                season, episode = data['season'], data['episode']
                ids = [data['imdb'], data['tvdb']]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle"]}, "id": 1}'
                    % years)
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['tvshows']

                r = [
                    i for i in r if str(i['imdbnumber']) in ids or title in [
                        cleantitle.get(i['title'].encode('utf-8')),
                        cleantitle.get(i['originaltitle'].encode('utf-8'))
                    ]
                ][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["file"], "tvshowid": %s }, "id": 1}'
                    % (str(season), str(episode), str(r['tvshowid'])))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodes']

                r = [
                    i for i in r
                    if not i['file'].encode('utf-8').endswith('.strm')
                ][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails", "file"], "episodeid": %s }, "id": 1}'
                    % str(r['episodeid']))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodedetails']

            url = r['file'].encode('utf-8')

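            # Derive a quality label from the stored video width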
            try:
                quality = int(r['streamdetails']['video'][0]['width'])
            except:
                quality = -1

            if quality >= 2160: quality = '4K'
            elif quality >= 1440: quality = '1440p'
            elif quality >= 1080: quality = '1080p'
            elif quality >= 720: quality = 'HD'
            else: quality = 'SD'

            info = []
            try:
                f = control.openFile(url)
                s = f.size()
                f.close()
                s = '%.2f GB' % (float(s) / 1024 / 1024 / 1024)
                info.append(s)
            except:
                pass
            try:
                e = urlparse.urlparse(url).path.split('.')[-1].upper()
                info.append(e)
            except:
                pass
            info = ' | '.join(info)
            info = info.encode('utf-8')

            sources.append({
                'source': '0',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'local': True,
                'direct': True,
                'debridonly': False
            })

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Library - Exception: \n' + str(failure))
            return sources
コード例 #53
0
ファイル: streamlord.py プロジェクト: enursha101/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if self.user != '' and self.password != '':

                login = urlparse.urljoin(self.base_link, '/login.html')

                post = urllib.urlencode({
                    'username': self.user,
                    'password': self.password,
                    'submit': 'Login'
                })

                cookie = client.request(login,
                                        post=post,
                                        output='cookie',
                                        close=False)

                r = client.request(login,
                                   post=post,
                                   cookie=cookie,
                                   output='extended')

                headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
                headers = {}

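            # Bare keys still need a search; full URLs point straight at a watch page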
            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']

                year = data['year']

                query = urlparse.urljoin(self.base_link, self.search_link)

                post = urllib.urlencode({'searchapi2': title})

                r = client.request(query, post=post, headers=headers)

                if 'tvshowtitle' in data:
                    r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i))
                         for i in r]
                else:
                    r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i))
                         for i in r]

                r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                r = [
                    i for i in r
                    if cleantitle.get(title) == cleantitle.get(i[1])
                ]
                r = [i[0] for i in r][0]

                u = urlparse.urljoin(self.base_link, r)
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if 'failed' not in r: break

                if 'season' in data and 'episode' in data:
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [
                        i for i in r if '-s%02de%02d-' %
                        (int(data['season']),
                         int(data['episode'])) in i.lower()
                    ][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                r = client.request(r, headers=headers)

            quality = 'HD' if '-movie-' in r else 'SD'

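            # The stream URL is assembled client-side; recover it from the inline JS, the packed JS, or a regex fallback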
            try:
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall(
                    '\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)',
                    u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                try:
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    url = re.findall(
                        r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',
                        r, re.DOTALL)[0][1]

            sources.append({
                'source': 'cdn',
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': True,
                'debridonly': False,
                'autoplay': True
            })

            return sources
        except:
            return sources
コード例 #54
0
ファイル: tvshows.py プロジェクト: MoRgUiJu/morguiju.repo
    def super_info(self, i):
        try:
            if self.list[i]['metacache'] == True: raise Exception()

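            # Resolve missing ids: IMDb from a title/year query, then TVDb via the IMDb id or a title search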
            imdb = self.list[i]['imdb'] if 'imdb' in self.list[i] else '0'
            tvdb = self.list[i]['tvdb'] if 'tvdb' in self.list[i] else '0'

            if imdb == '0':
                url = self.imdb_by_query % (urllib.quote_plus(self.list[i]['title']), self.list[i]['year'])

                imdb = client.request(url, timeout='10')
                try: imdb = json.loads(imdb)['imdbID']
                except: imdb = '0'

                if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'


            if tvdb == '0' and not imdb == '0':
                url = self.tvdb_by_imdb % imdb

                result = client.request(url, timeout='10')

                try: tvdb = client.parseDOM(result, 'seriesid')[0]
                except: tvdb = '0'

                try: name = client.parseDOM(result, 'SeriesName')[0]
                except: name = '0'
                dupe = re.findall('\*\*\*Duplicate (\d*)\*\*\*', name)
                if dupe: tvdb = str(dupe[0])

                if tvdb == '': tvdb = '0'


            if tvdb == '0':
                url = self.tvdb_by_query % (urllib.quote_plus(self.list[i]['title']))

                years = [str(self.list[i]['year']), str(int(self.list[i]['year'])+1), str(int(self.list[i]['year'])-1)]

                tvdb = client.request(url, timeout='10')
                tvdb = re.sub(r'[^\x00-\x7F]+', '', tvdb)
                tvdb = client.replaceHTMLCodes(tvdb)
                tvdb = client.parseDOM(tvdb, 'Series')
                tvdb = [(x, client.parseDOM(x, 'SeriesName'), client.parseDOM(x, 'FirstAired')) for x in tvdb]
                tvdb = [(x, x[1][0], x[2][0]) for x in tvdb if len(x[1]) > 0 and len(x[2]) > 0]
                tvdb = [x for x in tvdb if cleantitle.get(self.list[i]['title']) == cleantitle.get(x[1])]
                tvdb = [x[0][0] for x in tvdb if any(y in x[2] for y in years)][0]
                tvdb = client.parseDOM(tvdb, 'seriesid')[0]

                if tvdb == '': tvdb = '0'


            url = self.tvdb_info_link % tvdb
            item = client.request(url, timeout='10')
            if item == None: raise Exception()

            if imdb == '0':
                try: imdb = client.parseDOM(item, 'IMDB_ID')[0]
                except: pass
                if imdb == '': imdb = '0'
                imdb = imdb.encode('utf-8')


            try: title = client.parseDOM(item, 'SeriesName')[0]
            except: title = ''
            if title == '': title = '0'
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            try: year = client.parseDOM(item, 'FirstAired')[0]
            except: year = ''
            try: year = re.compile('(\d{4})').findall(year)[0]
            except: year = ''
            if year == '': year = '0'
            year = year.encode('utf-8')

            try: premiered = client.parseDOM(item, 'FirstAired')[0]
            except: premiered = '0'
            if premiered == '': premiered = '0'
            premiered = client.replaceHTMLCodes(premiered)
            premiered = premiered.encode('utf-8')

            try: studio = client.parseDOM(item, 'Network')[0]
            except: studio = ''
            if studio == '': studio = '0'
            studio = client.replaceHTMLCodes(studio)
            studio = studio.encode('utf-8')

            try: genre = client.parseDOM(item, 'Genre')[0]
            except: genre = ''
            genre = [x for x in genre.split('|') if not x == '']
            genre = ' / '.join(genre)
            if genre == '': genre = '0'
            genre = client.replaceHTMLCodes(genre)
            genre = genre.encode('utf-8')

            try: duration = client.parseDOM(item, 'Runtime')[0]
            except: duration = ''
            if duration == '': duration = '0'
            duration = client.replaceHTMLCodes(duration)
            duration = duration.encode('utf-8')

            try: rating = client.parseDOM(item, 'Rating')[0]
            except: rating = ''
            if 'rating' in self.list[i] and not self.list[i]['rating'] == '0':
                rating = self.list[i]['rating']
            if rating == '': rating = '0'
            rating = client.replaceHTMLCodes(rating)
            rating = rating.encode('utf-8')

            try: votes = client.parseDOM(item, 'RatingCount')[0]
            except: votes = ''
            if 'votes' in self.list[i] and not self.list[i]['votes'] == '0':
                votes = self.list[i]['votes']
            if votes == '': votes = '0'
            votes = client.replaceHTMLCodes(votes)
            votes = votes.encode('utf-8')

            try: mpaa = client.parseDOM(item, 'ContentRating')[0]
            except: mpaa = ''
            if mpaa == '': mpaa = '0'
            mpaa = client.replaceHTMLCodes(mpaa)
            mpaa = mpaa.encode('utf-8')

            try: cast = client.parseDOM(item, 'Actors')[0]
            except: cast = ''
            cast = [x for x in cast.split('|') if not x == '']
            try: cast = [(x.encode('utf-8'), '') for x in cast]
            except: cast = []
            if cast == []: cast = '0'

            try: plot = client.parseDOM(item, 'Overview')[0]
            except: plot = ''
            if plot == '': plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')

            try: poster = client.parseDOM(item, 'poster')[0]
            except: poster = ''
            if not poster == '': poster = self.tvdb_image + poster
            else: poster = '0'
            if 'poster' in self.list[i] and poster == '0': poster = self.list[i]['poster']
            poster = client.replaceHTMLCodes(poster)
            poster = poster.encode('utf-8')

            try: banner = client.parseDOM(item, 'banner')[0]
            except: banner = ''
            if not banner == '': banner = self.tvdb_image + banner
            else: banner = '0'
            banner = client.replaceHTMLCodes(banner)
            banner = banner.encode('utf-8')

            try: fanart = client.parseDOM(item, 'fanart')[0]
            except: fanart = ''
            if not fanart == '': fanart = self.tvdb_image + fanart
            else: fanart = '0'
            fanart = client.replaceHTMLCodes(fanart)
            fanart = fanart.encode('utf-8')


            artmeta = True
            art = client.request(self.fanart_tv_art_link % tvdb, headers=self.fanart_tv_headers, timeout='10', error=True)
            try: art = json.loads(art)
            except: artmeta = False

            try:
                poster2 = art['tvposter']
                poster2 = [x for x in poster2 if x.get('lang') == 'en'][::-1] + [x for x in poster2 if x.get('lang') == '00'][::-1]
                poster2 = poster2[0]['url'].encode('utf-8')
            except:
                poster2 = '0'

            try:
                fanart2 = art['showbackground']
                fanart2 = [x for x in fanart2 if x.get('lang') == 'en'][::-1] + [x for x in fanart2 if x.get('lang') == '00'][::-1]
                fanart2 = fanart2[0]['url'].encode('utf-8')
            except:
                fanart2 = '0'

            try:
                banner2 = art['tvbanner']
                banner2 = [x for x in banner2 if x.get('lang') == 'en'][::-1] + [x for x in banner2 if x.get('lang') == '00'][::-1]
                banner2 = banner2[0]['url'].encode('utf-8')
            except:
                banner2 = '0'

            try:
                if 'hdtvlogo' in art: clearlogo = art['hdtvlogo']
                else: clearlogo = art['clearlogo']
                clearlogo = [x for x in clearlogo if x.get('lang') == 'en'][::-1] + [x for x in clearlogo if x.get('lang') == '00'][::-1]
                clearlogo = clearlogo[0]['url'].encode('utf-8')
            except:
                clearlogo = '0'

            try:
                if 'hdclearart' in art: clearart = art['hdclearart']
                else: clearart = art['clearart']
                clearart = [x for x in clearart if x.get('lang') == 'en'][::-1] + [x for x in clearart if x.get('lang') == '00'][::-1]
                clearart = clearart[0]['url'].encode('utf-8')
            except:
                clearart = '0'

            item = {'title': title, 'year': year, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'poster2': poster2, 'banner': banner, 'banner2': banner2, 'fanart': fanart, 'fanart2': fanart2, 'clearlogo': clearlogo, 'clearart': clearart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'cast': cast, 'plot': plot}
            item = dict((k,v) for k, v in item.iteritems() if not v == '0')
            self.list[i].update(item)

            if artmeta == False: raise Exception()

            meta = {'imdb': imdb, 'tvdb': tvdb, 'lang': self.lang, 'item': item}
            self.meta.append(meta)
        except:
            pass
コード例 #55
0
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            try:
                if 'tvshowtitle' not in data:
                    raise Exception()

                links = []

                f = [
                    'S%02dE%02d' % (int(data['season']), int(data['episode']))
                ]
                t = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                           data['tvshowtitle'])
                t = t.replace("&", "")

                q = self.search_link + urllib.quote_plus('%s %s' % (t, f[0]))

                q = urlparse.urljoin(self.base_link, q)

                timer = control.Time(start=True)

                result = client.request(q)
                result = json.loads(result)

                result = result['results']
            except Exception:
                links = result = []

            for i in result:
                # Stop searching 8 seconds before the provider timeout, otherwise might continue searching, not complete in time, and therefore not returning any links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('DirectDL - Timeout Reached')
                    break

                try:
                    if not cleantitle.get(t) == cleantitle.get(i['showName']):
                        raise Exception()

                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]'
                                   ).findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f):
                        raise Exception()

                    quality = i['quality']

                    quality = quality.upper()

                    size = i['size']
                    size = float(size) / 1024
                    size = '%.2f GB' % size

                    if any(x in quality for x in ['HEVC', 'X265', 'H265']):
                        info = '%s | HEVC' % size
                    else:
                        info = size

                    if '1080P' in quality:
                        quality = '1080p'
                    elif '720P' in quality:
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = i['links']

                    links = []

                    for x in url.keys():
                        links.append({'url': url[x], 'quality': quality})

                    for link in links:
                        try:
                            url = link['url']
                            quality2 = link['quality']
                            if len(url) > 1:
                                raise Exception()
                            url = url[0].encode('utf-8')

                            host = re.findall(
                                '([\w]+[.][\w]+)$',
                                urlparse.urlparse(
                                    url.strip().lower()).netloc)[0]
                            if host not in hostprDict:
                                raise Exception()
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality2,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': debrid.status()
                            })
                        except Exception:
                            pass

                except Exception:
                    pass

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DirectDL - Exception: \n' + str(failure))
            return sources
コード例 #56
0
ファイル: 2ddl.py プロジェクト: southpaw99/houseatreides
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() == False: raise Exception()

            scraper = cfscrape.create_scraper()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'],
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'],
                data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            html = scraper.get(url).content
            posts = client.parseDOM(html, 'item')

            hostDict = hostprDict + hostDict

            items = []

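            # Parse the RSS <item> entries into (title, link) pairs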
            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = re.findall('<link>(.+?)</link>', post, re.IGNORECASE)[0]
                    items += [(t, u)]
                except Exception:
                    pass

            for item in items:
                try:
                    if title.lower() not in item[0].lower():
                        continue

                    url = item[1]
                    html = scraper.get(url).content

                    try:
                        download_area = client.parseDOM(html, 'div', attrs={'class': 'postpage_movie_download_area'})
                        for box in download_area:
                            if '<span>Single Links</span>' not in box:
                                continue
                            link_boxes = re.findall('anch_multilink">(.+?)</div>', box, re.DOTALL)
                            if link_boxes is None:
                                continue
                            for link_section in link_boxes:
                                url = re.findall('href="(.+?)"', link_section, re.DOTALL)[0]
                                if any(x in url for x in ['.rar', '.zip', '.iso']):
                                    continue
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                valid, host = source_utils.is_host_valid(url, hostDict)
                                if not valid:
                                    continue
                                host = client.replaceHTMLCodes(host)
                                host = host.encode('utf-8')

                                name = item[0]
                                name = client.replaceHTMLCodes(name)

                                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)

                                if not cleantitle.get(t) == cleantitle.get(title):
                                    continue

                                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                                if not y == hdlr:
                                    continue

                                quality, info = source_utils.get_release_quality(name, url)

                                try:
                                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', item[2])[-1]
                                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                                    size = '[B]%.2f GB[/B]' % size
                                    info.append(size)
                                except Exception:
                                    pass

                                info = ' | '.join(info)
                                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                                'direct': False, 'debridonly': True})
                    except Exception:
                        continue
                except Exception:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except Exception:
            return sources
コード例 #57
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None: return sources
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            title = urldata['title'].replace(':', ' ').lower()
            year = urldata['year']

            start_url = urlparse.urljoin(
                self.base_link, self.search_link % (urllib.quote_plus(title)))

            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
            }
            html = self.scraper.get(start_url, headers=headers).content
            r = client.parseDOM(html, 'div', attrs={'class': 'col-lg-8'})[0]
            Links = re.compile('(?s)<a\s*href="([^"]+)"\s*title="([^"]+)"',
                               re.DOTALL).findall(r)
            for link, name in Links:
                link = link.replace('\\', '')
                if cleantitle.get(title) == cleantitle.get(
                        name.split('(' + year, 1)[0]):
                    if year in name:
                        holder = self.scraper.get(link,
                                                  headers=headers).content
                        src_list1 = re.compile(
                            '<button class[^<>]+value="([^"]+)"',
                            re.DOTALL).findall(holder)
                        src_list2 = re.compile(
                            'a class="text-capitalize dropdown-item"[^<>]+href="([^"]+)"',
                            re.DOTALL).findall(holder)
                        source_list = src_list1 + src_list2
                        for url in source_list:
                            try:
                                url = 'http' + url.rsplit('http', 1)[-1]
                                if any(x in url for x in [
                                        'openload', 'oload', 'uptobox',
                                        'userscloud', '1fichier', 'turbobit',
                                        'ok.ru', 'mail.ru'
                                ]):
                                    quality = '1080p'
                                elif any(x in url for x in [
                                        'streamango', 'streamcherry',
                                        'rapidvideo'
                                ]):
                                    quality = '720p'
                                else:
                                    quality = 'SD'
                                valid, host = source_utils.is_host_valid(
                                    url, hostprDict)
                                if valid:
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': url,
                                        'info': [],
                                        'direct': False,
                                        'debridonly': True
                                    })
                                else:
                                    valid, host = source_utils.is_host_valid(
                                        url, hostDict)
                                    if valid:
                                        sources.append({
                                            'source': host,
                                            'quality': quality,
                                            'language': 'en',
                                            'url': url,
                                            'info': [],
                                            'direct': False,
                                            'debridonly': False
                                        })
                            except:
                                pass
            return sources
        except:
            return sources
コード例 #58
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)

            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'Season %d' % int(data['season']) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = self.scraper.get(url).content
            posts = client.parseDOM(r, 'article', attrs={'class': 'latestPost excerpt\s*\w*'})

            for post in posts:
                try:
                    t = re.findall('title="([^"]+)"', post)[0]
                    t2 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                    y = re.findall('[\.|\(|\[|\s](S\d*E\d*|Season\s*\d*|\d{4})[\.|\)|\]|\s]', t)[-1]

                    if not (cleantitle.get_simple(t2.replace('720p / 1080p', '')) == cleantitle.get(title) and y == hdlr): raise Exception()

                    link = client.parseDOM(post, 'a', ret='href')[0]
                    if not 'Episodes' in post: u = self.movie_links(link)
                    else:
                        sep = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
                        u = self.show_links(link, sep)

                    for item in u:
                        quality, info = source_utils.get_release_quality(item[1], None)
                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', item[3])[-1]
                            div = 1 if size.endswith(' GB') else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass

                        info = ' | '.join(info)

                        url = 'http://' + item[2] + item[0].split('//')[-1]
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        sources.append({'source': 'popcorn', 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            log_utils.log('>>>> %s TRACE <<<<\n%s' % (__file__.upper().split('\\')[-1].split('.')[0], traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
コード例 #59
0
ファイル: movies25.py プロジェクト: ItsMYZTIK/tdbaddon
    def movie(self, imdb, title, year):
        try:
            download = True

            data = os.path.join(control.dataPath, 'provider.movie25.db')
            data_link = 'http://offshoregit.com/extest/provider.movie25.zip'

            try:
                download = abs(
                    datetime.datetime.fromtimestamp(os.path.getmtime(data)) -
                    (datetime.datetime.now())) > datetime.timedelta(days=7)
            except:
                pass

            if download == True:
                r = client.request(data_link)
                zip = zipfile.ZipFile(StringIO.StringIO(r))
                zip.extractall(control.dataPath)
                zip.close()

            dbcon = database.connect(data)
            dbcur = dbcon.cursor()
            dbcur.execute("SELECT * FROM movies WHERE imdb = '%s'" % imdb)
            url = dbcur.fetchone()[0]
            dbcon.close()

            return url
        except:
            pass

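        # Fallback: query the search API, match on title and year, and verify by IMDb id if needed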
        try:
            q = self.search_link_2.decode('base64') % urllib.quote_plus(title)

            r = client.request(q)
            for i in range(3):
                if r == None: r = client.request(q)

            r = json.loads(r)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in r]
            r = [(i[0], re.findall('(?:^Watch |)(.+? \(\d{4}\))', i[1]))
                 for i in r]
            r = [(urlparse.urljoin(self.base_link, i[0]), i[1][0]) for i in r
                 if i[1]]

            t = cleantitle.get(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]

            r = [i for i in r if any(x in i[1] for x in years)]

            match = [
                i[0] for i in r
                if t == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]
            ]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    r = proxy.request(urlparse.urljoin(self.base_link, i),
                                      'ovie')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            url = re.findall('(\d+)', url)[-1]
            return url
        except:
            pass
コード例 #60
0
    def get_sources(self, link):
        try:
            url = '%s%s' % (self.base_link, link)
            result = client.request(url)

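            # Build a magnet link from the page's info hash and display name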
            info_hash = re.findall('<kbd>(.+?)<', result, re.DOTALL)[0]
            url = '%s%s' % ('magnet:?xt=urn:btih:', info_hash)
            name = re.findall('<h3 class="card-title">(.+?)<', result,
                              re.DOTALL)[0]
            url = '%s%s%s' % (url, '&dn=', str(name))

            size = re.findall(
                '<div class="col-3">File size:</div><div class="col">(.+?)<',
                result, re.DOTALL)[0]

            if url in str(self.sources):
                return

            if any(x in url.lower() for x in [
                    'french', 'italian', 'spanish', 'truefrench', 'dublado',
                    'dubbed'
            ]):
                return

            t = name.split(self.hdlr)[0].replace(self.year, '').replace(
                '(', '').replace(')', '').replace('&',
                                                  'and').replace('+', ' ')

            if cleantitle.get(t) != cleantitle.get(self.title):
                return

            if self.hdlr not in name:
                return

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    size)[0]
                div = 1 if size.endswith('GB') else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size.replace(
                    ',', '.'))) / div
                size = '%.2f GB' % size
                info.insert(0, size)
            except:
                size = '0'

            info = ' | '.join(info)

            self.sources.append({
                'source': 'torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True
            })

        except:
            source_utils.scraper_error('YOURBITTORRENT')
            pass