Code Example #1
File: primewire.py Project: CYBERxNUKE/xbmc-addon
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = client.replaceHTMLCodes(url[0][0])
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
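Editor's note: the three-stage fallback above (exact title plus air date, then air date alone, then the season/episode URL slug) recurs across these scrapers. A minimal standalone sketch of that logic, with made-up row data and a stand-in for cleantitle.get:

import re

def normalize(t):
    # stand-in for cleantitle.get: lowercase, strip non-alphanumerics
    return re.sub('[^a-z0-9]', '', t.lower())

def pick_episode(rows, title, premiered, season, episode):
    # rows are (href, episode_name, air_date) tuples, as built above
    t = normalize(title)
    url = [i for i in rows if i[1] and t == normalize(i[1]) and premiered == i[2]][:1]
    if len(url) == 0:
        url = [i for i in rows if premiered == i[2]]
    if len(url) != 1:
        slug = 'season-%01d-episode-%01d' % (int(season), int(episode))
        url = [i for i in rows if slug in i[0]]
    return url[0][0] if url else None

rows = [('/tv/show/season-1-episode-2', 'The Pilot, Part 2', '2016-02-09'),
        ('/tv/show/season-1-episode-1', 'The Pilot', '2016-02-02')]
print(pick_episode(rows, 'The Pilot', '2016-02-02', 1, 1))  # -> /tv/show/season-1-episode-1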
Code Example #2
    def pftv_tvcache(self):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)

            r = proxy.request(url, 'A-Z')

            r = client.parseDOM(r, 'li')

            m = []

            for i in r:
                try:
                    title = client.parseDOM(i, 'a')[0]
                    title = client.replaceHTMLCodes(title)
                    title = cleantitle.get(title)
                    title = title.encode('utf-8')

                    url = client.parseDOM(i, 'a', ret='href')[0]
                    url = client.replaceHTMLCodes(url)
                    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                    except: pass
                    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
                    except: pass
                    url = urlparse.urljoin(self.base_link, url)
                    url = re.findall('(?://.+?|)(/.+)', url)[0]
                    url = url.encode('utf-8')

                    m.append((url, title))
                except:
                    pass

            return m
        except:
            return
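Editor's note: the paired 'u'/'q' parse_qs lookups above unwrap links that a proxy hides inside a query parameter. The idiom in isolation (urlparse is the Python 2 module used throughout; on Python 3 it is urllib.parse):

import urlparse

def unwrap(url):
    # the real target hides in a 'u' or 'q' query parameter;
    # fall through silently when the parameter is absent
    for key in ('u', 'q'):
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)[key][0]
        except Exception:
            pass
    return url

print(unwrap('/out.php?u=http%3A%2F%2Fexample.com%2Fshow'))  # -> http://example.com/show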
Code Example #3
File: movies.py Project: freeworldxbmc/maximumTv
    def imdb_user_list(self, url):
        try:
            result = client.request(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
        except:
            return

        for item in items:
            try:
                name = client.parseDOM(item, 'a')[0]
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = url.split('/list/', 1)[-1].replace('/', '')
                url = self.imdblist_link % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'context': url})
            except:
                pass

        self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
        return self.list
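Editor's note: the sort key on the last line files titles under their first significant word by stripping a leading article. A quick demonstration with made-up names:

import re

names = [{'name': 'The Wire'}, {'name': 'A Team'}, {'name': 'Breaking Bad'}]
# strip a leading article before comparing, so "The Wire" files under W
ordered = sorted(names, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
print([i['name'] for i in ordered])  # -> ['Breaking Bad', 'A Team', 'The Wire']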
Code Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            for links in self.zen_url:
                if self.base_link in links:
                    result = client.request(links)
                    match = re.compile("file:\s*'(.*?)',.+?abel:'(.*?)',", re.DOTALL).findall(result)
                    for url, quality in match:
                        if "1080" in quality: quality = "1080p"
                        elif "720" in quality: quality = "HD"
                        else: quality = "SD"
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Watchover', 'url': url, 'direct': True, 'debridonly': False})
                else:
                    url = links
                    try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    except: host = 'Openload'
                    quality = "SD"  # hosted links carry no quality hint; the original left this unset whenever the host lookup failed
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'Watchover', 'url': url, 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources
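Editor's note: the 1080/720/SD branching above appears in nearly every sources() method in this collection. Factored into a helper (a sketch; Code Example #29 below assumes a function of the same name):

def quality_tag(label):
    # map a label or filename onto the quality buckets used throughout these scrapers
    if '1080' in label: return '1080p'
    if '720' in label: return 'HD'
    return 'SD'

print(quality_tag('Movie.2016.1080p.BluRay.x264'))  # -> 1080p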
Code Example #5
    def movie(self, imdb, title, year):
        try:
            query = self.search_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            result = self.request(query, 'movie_table')
            result = client.parseDOM(result, 'div', attrs = {'class': 'movie_table'})

            title = cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='alt')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[0] for i in result if title == cleantitle.get(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code Example #6
File: 123fox.py Project: YourFriendCaspian/dotfiles
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources

            html = client.request(url)
            try:
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)', html)[0]
                b64 = base64.b64decode(v)
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': True, 'debridonly': False})
                except:
                    pass
            except:
                pass
            parsed = client.parseDOM(html, 'div', {'class': 'server_line'})
            parsed = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in parsed]
            if parsed:
                for i in parsed:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0]
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        if 'other' in host: continue
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            return
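Editor's note: the Base64 branch above decodes a player hidden behind document.write(Base64.decode(...)). A self-contained sketch against a fabricated page, with a plain regex standing in for client.parseDOM:

import re, base64

# hypothetical page that hides its iframe behind an encoded document.write
# (Python 2: b64encode takes and returns str)
payload = base64.b64encode('<iframe src="http://host.tld/embed"></iframe>')
html = '<script>document.write(Base64.decode("%s"))</script>' % payload

v = re.findall('document.write\(Base64.decode\("(.+?)"\)', html)[0]
b64 = base64.b64decode(v)
src = re.findall('src="(.+?)"', b64)[0]  # stand-in for client.parseDOM(b64, 'iframe', ret='src')
print(src)  # -> http://host.tld/embed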
Code Example #7
File: movies.py Project: freeworldxbmc/maximumTv
    def imdb_person_list(self, url):
        try:
            result = client.request(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
        except:
            return

        for item in items:
            try:
                name = client.parseDOM(item, 'a', ret='title')[0]
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = re.findall('(nm\d*)', url, re.I)[0]
                url = self.person_link % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                image = client.parseDOM(item, 'img', ret='src')[0]
                if not ('._SX' in image or '._SY' in image): raise Exception()
                image = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', image)
                image = client.replaceHTMLCodes(image)
                image = image.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'image': image})
            except:
                pass

        return self.list
Code Example #8
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            links = client.parseDOM(result, 'div', attrs = {'class': 'links'})[0]
            links = client.parseDOM(links, 'tr')

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[-1]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in ['ishared.eu', 'shared2.me']: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, 'td', attrs = {'class': 'quality_td'})
                    quality = quality[0].lower().strip() if len(quality) > 0 else ''
                    if quality in ['cam', 'ts']: raise Exception()

                    sources.append({'source': host, 'quality': 'SD', 'provider': 'Moviestorm', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
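Editor's note: the `([\w]+[.][\w]+)$` findall on the netloc reduces any link to a bare host name and is used by most examples here. Isolated (Python 2's urlparse module, as above):

import re, urlparse

def host_of(url):
    # reduce a link to its registrable host, e.g. www.shared2.me -> shared2.me
    return re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]

print(host_of('http://www.Shared2.me/file/abc'))  # -> shared2.me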
Code Example #9
File: watchfree_mv_tv.py Project: rofunds/maximumTv
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)
            result = result.decode('iso-8859-1').encode('utf-8')

            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = client.replaceHTMLCodes(url[0][0])
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
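Editor's note: the chained .replace() calls above turn a month number into its English name. A lookup table is a more direct expression of the same conversion; a sketch with a made-up air date:

import re

months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
          'August', 'September', 'October', 'November', 'December']

premiered = '2016-02-09'  # hypothetical air date
y, m, d = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
print('%s %01d %s' % (months[int(m) - 1], int(d), y))  # -> February 9 2016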
Code Example #10
File: movies.py Project: vphuc81/MyRepository
    def imdb_person_list(self, url):
        try:
            result = client.request(url)
            items = client.parseDOM(result, 'div', attrs = {'class': '.+?etail'})
        except:
            return

        for item in items:
            try:
                name = client.parseDOM(item, 'img', ret='alt')[0]
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = re.findall('(nm\d*)', url, re.I)[0]
                url = self.person_link % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                image = client.parseDOM(item, 'img', ret='src')[0]
                # if not ('._SX' in image or '._SY' in image): raise Exception()
                image = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', image)
                image = client.replaceHTMLCodes(image)
                image = image.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'image': image})
            except:
                pass

        return self.list
Code Example #11
File: movies.py Project: vphuc81/MyRepository
    def imdb_user_list(self, url):
        try:
            result = client.request(url)
            items = client.parseDOM(result, 'li', attrs = {'class': 'ipl-zebra-list__item user-list'})
        except:
            return

        for item in items:
            try:
                name = client.parseDOM(item, 'a')[0]
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = url.split('/list/', 1)[-1].strip('/')
                url = self.imdblist_link % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'context': url})
            except:
                pass

        self.list = sorted(self.list, key=lambda k: utils.title_key(k['name']))
        return self.list
Code Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url).decode('iso-8859-1').encode('utf-8')

            if not ('/episode/' in url or 'fullhdbr.png' in r or 'Blu-Ray.gif' in r): raise Exception()

            links = re.findall('url=(.+?)&', r)
            links = [x for y,x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = base64.b64decode(i)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'provider': 'uFlix', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
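Editor's note: the enumerate comprehension above is an order-preserving de-duplication; set() would lose the page order. Isolated:

links = ['a', 'b', 'a', 'c', 'b']
# keep only the first occurrence of each link, preserving order
links = [x for y, x in enumerate(links) if x not in links[:y]]
print(links)  # -> ['a', 'b', 'c']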
Code Example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            for movielink in self.zen_url:
                mylink = client.request(movielink)
                r = client.parseDOM(mylink, 'div', attrs = {'class': 'entry-content'})
                for links in r:
                    try:
                        match = re.compile('href="([^"]+)[^>]+>([^<]+)').findall(links)
                        for url, title in match:
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            title = title.encode('utf-8')
                            if "1080" in title: quality = "1080p"
                            elif "720" in title: quality = "HD"
                            else: quality = "SD"
                            info = ''
                            if "hevc" in title.lower(): info = "HEVC"
                            if "3d" in title.lower(): info = "3D"
                            blacklist = ['uploadkadeh', 'wordpress', 'crazy4tv', 'imdb.com', 'youtube', 'userboard', 'kumpulbagi', 'mexashare', 'myvideolink.xyz', 'myvideolinks.xyz', 'costaction', 'crazydl', '.rar', '.RAR', 'safelinking', 'linx.2ddl.ag', 'upload.so', '.zip', 'go4up', 'adf.ly', '.jpg', '.jpeg']
                            if not any(value in title for value in blacklist):
                                if not any(value in url for value in blacklist):
                                    try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                    except: host = 'Rapidgator'
                                    if "sh.st" in url: host = 'SHST'
                                    url = client.replaceHTMLCodes(url)
                                    url = url.encode('utf-8')
                                    sources.append({'source': host, 'quality': quality, 'provider': 'Crazy', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                    except:
                        pass
            return sources
        except:
            return sources
Code Example #14
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception() 

            url = client.replaceHTMLCodes(url[0][0])
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code Example #15
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            blacklist = ['sendspace', 'imagebam', 'imgserve', 'histat', 'crazy4tv', 'facebook', '.rar', 'subscene', '.jpg', '.RAR', 'postimage', 'safelinking', 'linx.2ddl.ag', 'upload.so', '.zip', 'go4up', 'imdb']
            for movielink, title in self.zen_url:
                mylink = client.request(movielink)
                if "1080" in title: quality = "1080p"
                elif "720" in title: quality = "HD"
                else: quality = "SD"
                for item in parse_dom(mylink, 'div', {'class': 'entry-content'}):
                    match = re.compile('<a href="(.+?)">(.+?)</a>').findall(item)
                    for url, title in match:
                        myurl = str(url)
                        if not any(value in myurl for value in blacklist):
                            if not any(value in title for value in blacklist):
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                except: host = 'Videomega'
                                host = client.replaceHTMLCodes(host)
                                host = host.encode('utf-8')
                                sources.append({'source': host, 'quality': quality, 'provider': 'Bmoviez', 'url': url, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
Code Example #16
File: phtoons.py Project: mpie/repo
    def anime_list_2(self, url, image, fanart):
        try:
            url = urlparse.urljoin(self.anime_link, url)

            result = client.request(url)

            items = client.parseDOM(result, 'ul', attrs={'class': 'cat_page_box'})[-1]
            items = client.parseDOM(items, 'li')
            items = items[::-1]
        except:
            return

        for item in items:
            try:
                title = client.parseDOM(item, 'a')[0]
                title = re.sub('<.+?>|</.+?>|\\\\|\n', ' ', title).strip()
                title = re.sub('Watch$', '', title).strip()
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = urlparse.urljoin(self.anime_link, url)
                url = url.replace(' ','%20')
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'title': title, 'url': url, 'image': image})
            except:
                pass

        return self.list
Code Example #17
File: phtoons.py Project: mpie/repo
    def anime_list_3(self, url):
        try:
            url = urlparse.urljoin(self.anime_link, url)

            result = client.request(url)

            items = client.parseDOM(result, 'div', attrs={'id': 'left_content'})[0]
            items = client.parseDOM(items, 'zi')
        except:
            return

        for item in items:
            try:
                title = client.parseDOM(item, 'a')[0]
                if '>Movie<' in title: raise Exception()
                title = re.sub('<.+?>|</.+?>|\\\\|\n', '', title).strip()
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = urlparse.urljoin(self.anime_link, url)
                url = url.replace(' ','%20')
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                image = client.parseDOM(item, 'img', ret='src')[0]
                image = urlparse.urljoin(self.anime_link, image)
                image = image.encode('utf-8')

                self.list.append({'title': title, 'url': url, 'image': image})
            except:
                pass

        return self.list
Code Example #18
	def __search(self, title, year):
		try:
			r = client.request(self.base_link)
			r = re.findall('sL10n\s*=\s*({.*?});', r)[0]
			r = json.loads(r)['nonce']

			query = self.search_link % (urllib.quote_plus(cleantitle.query(title)), r)
			query = urlparse.urljoin(self.base_link, query)

			t = cleantitle.get(title)
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

			r = client.request(query)
			r = json.loads(r)
			r = [(i, r[i].get('url', ''), r[i].get('title', ''), r[i].get('extra', {}).get('names', ''), r[i].get('extra', {}).get('date', '0')) for i in r]
			r = [(i[0], i[1], client.replaceHTMLCodes(i[2]), client.replaceHTMLCodes(i[3]), i[4]) for i in r]
			r = sorted(r, key=lambda i: int(i[4]), reverse=True)  # with year > no year
			r = [i[1] for i in r if (t == cleantitle.get(i[2]) or t == cleantitle.get(i[3])) and i[4] in y][0]

			url = re.findall('(?://.+?|)(/.+)', r)[0]
			url = client.replaceHTMLCodes(url)
			url = url.encode('utf-8')

			return url
		except:
			return
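Editor's note: this search first scrapes a WordPress nonce out of an inline sL10n JavaScript object before it can call the site's AJAX search endpoint. The extraction step in isolation, against a fabricated page:

import re, json

html = '<script>var sL10n = {"nonce":"abc123","ajaxurl":"/wp-admin/admin-ajax.php"};</script>'
blob = re.findall('sL10n\s*=\s*({.*?});', html)[0]
print(json.loads(blob)['nonce'])  # -> abc123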
Code Example #19
File: phtoons.py Project: mpie/repo
    def cartoon_list_2(self, url, image, fanart):
        try:
            url = urlparse.urljoin(self.cartoons_link, url)

            result = client.request(url)

            items = client.parseDOM(result, 'ul', attrs = {'id': 'episode_related'})[0]
            items = client.parseDOM(items, 'li')
        except:
            return

        for item in items:
            try:
                title = client.parseDOM(item, 'a')[0]
                title = title.strip()
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                url = client.parseDOM(item, 'a', ret='href')[0]
                url = urlparse.urljoin(self.cartoons_link, url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'title': title, 'url': url, 'image': image})
            except:
                pass

        return self.list
Code Example #20
File: phtoons.py Project: azumimuo/family-xbmc-addon
def ACpart(url, image, fanart):
    try:
       
        result = client.request(url)

        items = client.parseDOM(result, 'table', attrs={'class': 'listing'})
        items = client.parseDOM(items, 'td')
        items = zip(client.parseDOM(items, 'a', ret='href'), client.parseDOM(items, 'a'))

        if len(items) == 1: return ACstream(items[0][0])
    except:
        return

    for item in items[::-1]:
        try:
            name = item[1]
            name = name.replace('\n', '').replace('  ','')
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            print url
            addDirectoryItem(name,'ACstream',image,image,fanart,url)
        except:
            pass

    episodeCategory()
Code Example #21
File: phtoons.py Project: azumimuo/family-xbmc-addon
def CartoonCrazy(image, fanart):

    addDirectoryItem('[B]SEARCH[/B]', 'CCsearch', '0', mediaPath+'CCsearch.png', fanart, '')

    try:
        url = 'http://kisscartoon.me/CartoonList/'

        result = cloudflare.request(url)

        items = client.parseDOM(result, 'div', attrs={'id': 'container'})
        items = client.parseDOM(items, 'div', attrs={'id': 'rightside'})
        items = client.parseDOM(items, 'div', attrs={'class': 'barContent'})[1]       
        items = client.parseDOM(items, 'a', ret='href')
    except:
        return

    for item in items:
        try:
            name = '[B]'+ item[7:] +'[/B]'
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            addDirectoryItem(name, 'CCcat', image, image, fanart, url)
        except:
            pass

    endDirectory()
Code Example #22
File: phtoons.py Project: azumimuo/family-xbmc-addon
def CCpart(url, image, fanart):
    try:
        url = urlparse.urljoin('http://kisscartoon.me', url)

        result = cloudflare.request(url)

        items = client.parseDOM(result, 'table', attrs={'class': 'listing'})
        items = client.parseDOM(items, 'td')
        items = zip(client.parseDOM(items, 'a', ret='href'), client.parseDOM(items, 'a'))

        if len(items) == 1: return CCstream(items[0][0])
    except:
        return

    for item in items[::-1]:
        try:
            name = item[1]
            name = name.replace('\n', '')
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            addDirectoryItem(name,'CCstream',image,image,fanart,url)
        except:
            pass

    episodeCategory()
Code Example #23
File: weseries.py Project: vphuc81/MyRepository
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources
            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
            r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
                  dom_parser2.parse_dom(i, 'div', {'class': 'notes'})) \
                  for i in r if i]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].content if i[1] else 'None') for i in r]
            for i in r:
                try:
                    url = i[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    valid, host = source_utils.is_host_valid(i[1], hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    
                    info = []
                    quality, info = source_utils.get_release_quality(i[2], i[2])

                    info = ' | '.join(info)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code Example #24
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            self.movielinks = []

            for movielink, title in self.url:
                quality = "SD"
                if "1080" in title: quality = "1080p"
                elif "720" in title: quality = "HD"

                mylink = client.request(movielink)

                posts = client.parseDOM(mylink, 'div', attrs = {'class': 'postContent'})
                for item in posts:
                    match = re.compile('href="([^"]+)').findall(item)
                    for url in match:
                        print "SCENEDOWN URLS PASSED %s" % url
                        if not any(value in url for value in ['scene-rls.com', 'nfo.', 'sample', '.nfo', 'uploadkadeh', 'wordpress', 'crazy4tv', 'imdb.com', 'youtube', 'userboard', 'kumpulbagi', 'mexashare', 'myvideolink.xyz', 'myvideolinks.xyz', 'costaction', 'crazydl', '.rar', '.RAR', 'safelinking', 'linx.2ddl.ag', 'upload.so', '.zip', 'go4up', 'adf.ly', '.jpg', '.jpeg']):
                            if any(value in url for value in hostprDict):
                                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                except: host = 'Videomega'
                                host = client.replaceHTMLCodes(host)
                                host = host.encode('utf-8')
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')

                                sources.append({'source': host, 'quality': quality, 'provider': 'Scenedown', 'url': url, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
Code Example #25
File: phradios.py Project: rofunds/maximumTv
def radio181fm():
    try:
        url = 'http://www.181.fm/index.php?p=mp3links'

        result = client.request(url)

        index = []
        items = client.parseDOM(result, 'td', attrs={'id': 'rightlinks'})
    except:
        return

    for item in items:
        try:
            if not item.startswith('http://'): raise Exception()

            name = items[:items.index(item)]
            name = [i for i in name if not 'http://' in i][-1]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item.split('<')[0].replace(':', '/').replace('///', '://')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            index.append({'name': name, 'url': url, 'thumb': '0', 'image': radio181fmicon, 'fanart': radio181fmfanart})
        except:
            pass

    index = [i for x, i in enumerate(index) if i not in index[x+1:]]
    index = sorted(index, key=lambda k: k['name'])
    for i in index: addDirectoryItem(i['name'], i['url'], i['thumb'], i['image'], i['fanart'])

    endDirectory()
Code Example #26
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            for movielink, title in self.zen_url:
                quality = "SD"

                mylink = client.request(movielink)

                for item in parse_dom(mylink, 'div', {'class': 'entry-content'}):
                    match = re.compile('href=([^\s]+)').findall(item)
                    for url in match:
                        if not any(value in url for value in ['bankupload', '24uploading', 'crazy4tv', 'facebook', '.rar', 'subscene', '.jpg', '.RAR', 'postimage', 'safelinking', 'linx.2ddl.ag', 'upload.so', '.zip', 'go4up', 'imdb']):
                            try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            except: host = 'Videomega'
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Allrls', 'url': url, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
Code Example #27
File: bmoviez.py Project: vphuc81/MyRepository
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            for movielink, title in self.zen_url:
                mylink = OPEN_URL(movielink).content
                if "1080" in title: quality = "1080p"
                elif "720" in title: quality = "HD"
                else: quality = "SD"
                print ("BMOVIES SOURCES movielink", movielink)
                for item in parse_dom(mylink, 'div', {'class': 'entry-content'}):
                    match = re.compile('<a href="(.+?)">(.+?)</a>').findall(item)
                    for url, title in match:
                        myurl = str(url)
                        if any(value in myurl.lower() for value in hostprDict):
                            if not any(value in myurl.lower() for value in self.blacklist_zips):
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                except: host = 'Videomega'
                                host = client.replaceHTMLCodes(host)
                                host = host.encode('utf-8')
                                sources.append({'source': host, 'quality': quality, 'provider': 'Bmoviez', 'url': url, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
Code Example #28
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            for movielink, title in url:

                if not "=episode" in title:
                    mylink = client.request(movielink)
                    match = re.compile('<a rel="nofollow" href="(.+?)">').findall(mylink)
                    for url in match:
                        if "1080" in url: quality = "1080p"
                        elif "720" in url: quality = "HD"
                        else: quality = "SD"
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        sources.append({'source': 'cdn', 'quality': quality, 'provider': 'Fdl', 'url': url, 'direct': True, 'debridonly': False})

                elif "=episode" in title:
                    if "1080" in title: quality = "1080p"
                    elif "720" in title: quality = "HD"
                    else: quality = "SD"
                    url = movielink
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    sources.append({'source': 'cdn', 'quality': quality, 'provider': 'Fdl', 'url': url, 'direct': False, 'debridonly': False})

            sources = [i for i in sources if i['url'].split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower() in ['avi', 'mkv', 'mov', 'mp4', 'xvid', 'divx']]

            return sources
        except:
            return sources
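Editor's note: the final filter above keeps only links whose path ends in a known video extension after stripping query-string noise. As a reusable predicate (looks_like_video is a hypothetical name):

def looks_like_video(url):
    # strip '?', '&' and '|' suffixes, then test the extension
    ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
    return ext in ['avi', 'mkv', 'mov', 'mp4', 'xvid', 'divx']

print(looks_like_video('http://cdn.tld/file.mkv?token=1'))  # -> True
print(looks_like_video('http://cdn.tld/page.html'))         # -> False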
Code Example #29
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}

            for movielink, title in self.genesisreborn_url:
                quality = quality_tag(title)
                html = BeautifulSoup(requests.get(movielink, headers=headers, timeout=10).content)
                containers = html.findAll('div', attrs={'class': 'txt-block'})
                for result in containers:
                    print("THREEMOVIES LINKS ", result)
                    links = result.findAll('a')

                    for r_href in links:
                        url = r_href['href']
                        myurl = str(url)
                        if any(value in myurl for value in hostprDict):
                            try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            except: host = 'Threemovies'

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Threemovies', 'url': url, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
Code Example #30
File: phtoons.py Project: azumimuo/family-xbmc-addon
def AnimeCrazy(image, fanart):
    thumb = mediaPath + 'ACsearch.png'
    try:
        url = 'http://cartoons8.co/list/'
        result = client.request(url)
        items = client.parseDOM(result, 'div', attrs={'class': 'r_Content'})[0]
        items = client.parseDOM(items, 'li')
    except:
        return

    addDirectoryItem('[B]SEARCH[/B]', 'ACsearch', thumb, image, fanart, '')

    for item in items:
        url = client.parseDOM(item, 'a', ret='href')[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        name = client.parseDOM(item, 'a')[0]
        name = name.replace('\n', '').replace(' ', '').upper()
        name = client.replaceHTMLCodes(name)
        name = name.encode('utf-8')

        addDirectoryItem(name, 'ACcat', image, image, fanart, url + '/?filter=newest&req=anime')

    endDirectory()
Code Example #31
    def trakt_list(self, url, user):
        try:
            q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
            q.update({'extended': 'full'})
            q = (urllib.urlencode(q)).replace('%2C', ',')
            u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q

            result = trakt.getTraktAsJson(u)

            items = []
            for i in result:
                try: items.append(i['movie'])
                except: pass
            if len(items) == 0:
                items = result
        except:
            return

        try:
            q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
            if not int(q['limit']) == len(items): raise Exception()
            q.update({'page': str(int(q['page']) + 1)})
            q = (urllib.urlencode(q)).replace('%2C', ',')
            next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
            next = next.encode('utf-8')
        except:
            next = ''

        for item in items:
            try:
                title = item['title']
                title = client.replaceHTMLCodes(title)

                year = item['year']
                year = re.sub('[^0-9]', '', str(year))

                if int(year) > int((self.datetime).strftime('%Y')): raise Exception()

                imdb = item['ids']['imdb']
                if imdb == None or imdb == '': raise Exception()
                imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))

                tmdb = str(item.get('ids', {}).get('tmdb', 0))

                try: premiered = item['released']
                except: premiered = '0'
                try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
                except: premiered = '0'

                try: genre = item['genres']
                except: genre = '0'
                genre = [i.title() for i in genre]
                if genre == []: genre = '0'
                genre = ' / '.join(genre)

                try: duration = str(item['runtime'])
                except: duration = '0'
                if duration == None: duration = '0'

                try: rating = str(item['rating'])
                except: rating = '0'
                if rating == None or rating == '0.0': rating = '0'

                try: votes = str(item['votes'])
                except: votes = '0'
                try: votes = str(format(int(votes),',d'))
                except: pass
                if votes == None: votes = '0'

                try: mpaa = item['certification']
                except: mpaa = '0'
                if mpaa == None: mpaa = '0'

                try: plot = item['overview']
                except: plot = '0'
                if plot == None: plot = '0'
                plot = client.replaceHTMLCodes(plot)

                try: tagline = item['tagline']
                except: tagline = '0'
                if tagline == None: tagline = '0'
                tagline = client.replaceHTMLCodes(tagline)

                self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'plot': plot, 'tagline': tagline, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': '0', 'next': next})
            except:
                pass

        return self.list
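Editor's note: the second try block above builds the 'next' page URL by bumping the page query parameter, but only when the current page came back full. The same logic as a small helper (next_page is a hypothetical name; parameter order in the rebuilt query may vary):

import urlparse, urllib  # Python 2 modules, as in the snippet above

def next_page(url, fetched):
    q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
    if int(q['limit']) != fetched: return ''  # short page: nothing further
    q.update({'page': str(int(q['page']) + 1)})
    return url.split('?')[0] + '?' + urllib.urlencode(q).replace('%2C', ',')

print(next_page('https://api.trakt.tv/movies/trending?page=1&limit=40', 40))
# -> https://api.trakt.tv/movies/trending?page=2&limit=40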
Code Example #32
File: ytsam.py Project: southpaw99/houseatreides
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s %s' % (data['title'], data['year'])

            url = self.search_link % urllib.quote(query)
            url = urlparse.urljoin(self.base_link, url)
            html = self.scraper.get(url).content
            try:
                results = client.parseDOM(html, 'div', attrs={'class':
                                                              'row'})[2]
            except Exception:
                return sources

            items = re.findall(
                'class="browse-movie-bottom">(.+?)</div>\s</div>', results,
                re.DOTALL)
            if items is None:
                return sources

            for entry in items:
                try:
                    try:
                        link, name = re.findall(
                            '<a href="(.+?)" class="browse-movie-title">(.+?)</a>',
                            entry, re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        if not cleantitle.get(
                                data['title']) in cleantitle.get(name):
                            continue
                    except Exception:
                        continue
                    y = entry[-4:]
                    if not y == data['year']:
                        continue

                    response = self.scraper.get(link).content
                    try:
                        entries = client.parseDOM(
                            response, 'div', attrs={'class': 'modal-torrent'})
                        for torrent in entries:
                            link, name = re.findall(
                                'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                                torrent, re.DOTALL)[0]
                            link = 'magnet:%s' % link
                            link = str(
                                client.replaceHTMLCodes(link).split('&tr')[0])
                            quality, info = source_utils.get_release_quality(
                                name, name)
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                                    torrent)[-1]
                                div = 1 if size.endswith(
                                    ('GB', 'GiB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '[B]%.2f GB[/B]' % size
                                info.append(size)
                            except Exception:
                                pass

                            info = ' | '.join(info)

                            sources.append({
                                'source': 'Torrent',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                    except Exception:
                        continue
                except Exception:
                    continue

            return sources
        except Exception:
            return sources
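Editor's note: the size handling in this example normalises a scraped size token to gigabytes before formatting it for display. Extracted (size_gb is a hypothetical name):

import re

def size_gb(text):
    # take the last size token from the blurb; MB and MiB are divided down to GB
    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    return float(re.sub('[^0-9|/.|/,]', '', size)) / div

print('%.2f GB' % size_gb('720p.BluRay 850 MB'))   # -> 0.83 GB
print('%.2f GB' % size_gb('1080p.BluRay 1.4 GB'))  # -> 1.40 GB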
Code Example #33
File: ultrahd.py Project: fadifadisadi/LilacTV
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i,
                                        'div',
                                        attrs={'class': 'news-title'}))
                 for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r
                 if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[0]
                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data,
                                                 'div',
                                                 attrs={'id': 'r-content'})
                    data = re.findall('\s*<b><a href="(.+?)".+?</a></b>',
                                      data[0].content, re.DOTALL)
                    for url in data:
                        try:
                            qual = client.request(url)
                            quals = re.findall(
                                'span class="file-title" id="file-title">(.+?)</span',
                                qual)
                            for quals in quals:
                                if '4K' in quals:
                                    quality = '4K'
                                elif '2160p' in quals:
                                    quality = '4K'
                                elif '1080p' in quals:
                                    quality = '1080p'
                                elif '720p' in quals:
                                    quality = '720p'
                                elif any(i in quals.lower()
                                         for i in ['dvdscr', 'r5', 'r6']):
                                    quality = 'SCR'
                                elif any(i in quals.lower() for i in [
                                        'camrip', 'tsrip', 'hdcam', 'hdts',
                                        'dvdcam', 'dvdts', 'cam', 'telesync',
                                        'ts'
                                ]):
                                    quality = 'CAM'
                                else:
                                    quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in quals:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in quals.lower()
                                   for i in ['hevc', 'h265', 'x265']):
                                info.append('HEVC')

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url for x in ['.rar', '.zip', '.iso']):
                                raise Exception()
                            if not 'turbobit' in url: continue
                            sources.append({
                                'source': 'turbobit',
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': True,
                                'debridonly': True
                            })

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Code Example #34
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            premDate = ''

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "-")

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            url = "http://rlsbb.ru/" + query  # this overwrites a bunch of previous lines!
            if 'tvshowtitle' not in data:
                url = url + "-1080p"  # NB: I don't think this works anymore! 2b-checked.

            r = client.request(url)  # curl as DOM object

            if r == None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = "http://rlsbb.ru/" + query
                r = client.request(url)

            # looks like some shows have had episodes from the current season released in s00e00 format before switching to YYYY-MM-DD
            # this causes the second fallback search above for just s00 to return results and stops it from searching by date (ex. http://rlsbb.ru/vice-news-tonight-s02)
            # so loop here if no items found on first pass and force date search second time around
            for loopCount in range(0, 2):
                if loopCount == 1 or (
                        r == None and 'tvshowtitle' in data
                ):  # s00e00 serial failed: try again with YYYY-MM-DD
                    # http://rlsbb.ru/the-daily-show-2018-07-24                                 ... example landing urls
                    # http://rlsbb.ru/stephen-colbert-2018-07-24                                ... case and "date dots" get fixed by rlsbb
                    #query= re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)','',data['tvshowtitle'])   # this RE copied from above is just trash

                    premDate = re.sub(
                        '[ \.]', '-', data['premiered']
                    )  # date looks usually YYYY-MM-DD but dunno if always
                    query = re.sub('[\\\\:;*?"<>|/\-\']', '',
                                   data['tvshowtitle']
                                   )  # quadruple backslash = one backslash :p
                    query = query.replace("&", " and ").replace(
                        "  ", " ").replace(
                            " ",
                            "-")  # throw in extra spaces around & just in case
                    query = query + "-" + premDate

                    url = "http://rlsbb.ru/" + query
                    url = url.replace('The-Late-Show-with-Stephen-Colbert',
                                      'Stephen-Colbert')  #
                    #url = url.replace('Some-Specific-Show-Title-No2','Scene-Title2')           # shows I want...
                    #url = url.replace('Some-Specific-Show-Title-No3','Scene-Title3')           #         ...but theTVDB title != Scene release

                    r = client.request(url)

                posts = client.parseDOM(r, "div", attrs={
                    "class": "content"
                })  # get all <div class=content>...</div>
                hostDict = hostprDict + hostDict  # ?
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(
                            post, 'a',
                            ret='href')  # get all <a href=..... </a>
                        for i in u:  # foreach href url
                            try:
                                name = str(i)
                                if hdlr in name.upper(): items.append(name)
                                elif len(premDate
                                         ) > 0 and premDate in name.replace(
                                             ".", "-"):
                                    items.append(
                                        name
                                    )  # s00e00 serial failed: try again with YYYY-MM-DD
                                # NOTE: the vast majority of rlsbb urls are just hashes! Future careful link grabbing would yield 2x or 3x results
                            except:
                                pass
                    except:
                        pass

                if len(items) > 0: break

            seen_urls = set()

            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls: continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(host2.strip().lower()).netloc)[0]

                    if not host in hostDict: raise Exception()
                    if any(x in host2 for x in ['.rar', '.zip', '.iso']):
                        continue

                    if '720p' in host2:
                        quality = 'HD'
                    elif '1080p' in host2:
                        quality = '1080p'
                    else:
                        quality = 'SD'

                    info = ' | '.join(info)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': host2,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                    # debridonly=True: these are premium file-hoster links, which generally require a debrid account to resolve
                except:
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('RLSBB - Exception: \n' + str(failure))
            return sources
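
A trick that recurs in nearly every scraper in this collection is the host check above: rather than whitelisting full URLs, the code reduces a link's netloc to its last two dot-separated labels and looks that name up in hostDict. A minimal standalone version of that step (the sample URLs are made up):

import re
try:
    from urlparse import urlparse             # Python 2
except ImportError:
    from urllib.parse import urlparse         # Python 3

def bare_host(url):
    # netloc of 'http://www.uploaded.net/file/abc' is 'www.uploaded.net';
    # the end-anchored regex keeps only the trailing 'name.tld' pair
    netloc = urlparse(url.strip().lower()).netloc
    m = re.findall(r'([\w]+[.][\w]+)$', netloc)
    return m[0] if m else None

print(bare_host('http://www.uploaded.net/file/abc'))   # uploaded.net
print(bare_host('https://rapidgator.net/file/x'))      # rapidgator.net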
Code example #35
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s season %d' % (title, int(
                data['season'])) if 'tvshowtitle' in data else data['title']
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = urllib.quote_plus(query)

            url = urlparse.urljoin(self.base_link, self.search_link % query)

            self.s = cfscrape.create_scraper()

            self.ua = {'User-Agent': client.agent(), 'Referer': self.base_link}
            r = self.s.get(url, headers=self.ua).text
            posts = client.parseDOM(r, 'div', attrs={'class': 'item'})
            posts = [(client.parseDOM(i, 'a',
                                      ret='href')[1], client.parseDOM(i,
                                                                      'a')[1])
                     for i in posts if i]

            posts = [(i[0], client.parseDOM(i[1], 'i')[0]) for i in posts if i]

            if 'tvshowtitle' in data:
                sep = 'season %d' % int(data['season'])
                sepi = 'season-%1d/episode-%1d.html' % (int(
                    data['season']), int(data['episode']))
                post = [i[0] for i in posts if sep in i[1].lower()][0]
                data = self.s.get(post, headers=self.ua).content
                link = client.parseDOM(data, 'a', ret='href')
                link = [i for i in link if sepi in i][0]
            else:
                link = [
                    i[0] for i in posts
                    if cleantitle.get(i[1]) == cleantitle.get(title)
                    and hdlr in i[1]
                ][0]

            r = self.s.get(link, headers=self.ua).content
            try:
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)',
                               r)[0]
                b64 = base64.b64decode(v)
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url.replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass
            except:
                pass
            r = client.parseDOM(r, 'div', {'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0],
                  client.parseDOM(i, 'p', attrs={'class':
                                                 'server_servername'})[0])
                 for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0]
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        if 'other' in host: continue
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass
            return sources
        except:
            return sources
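
The Base64 branch above handles pages that hide their player behind document.write(Base64.decode("...")). A minimal reproduction of that unwrapping, with a made-up embed URL (the payload is built with b64encode so the round trip is self-checking):

import re, base64

embed = '<iframe src="http://host.example/embed"></iframe>'         # hypothetical target markup
b64 = base64.b64encode(embed.encode('utf-8')).decode('ascii')
html = 'document.write(Base64.decode("%s"))' % b64

payload = re.findall(r'document\.write\(Base64\.decode\("(.+?)"\)', html)[0]
decoded = base64.b64decode(payload).decode('utf-8')
print(re.findall(r'<iframe src="(.+?)"', decoded)[0])                # http://host.example/embed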
Code example #36
    def super_info(self, i):
        try:
            if self.list[i]['metacache'] == True: raise Exception()

            imdb = self.list[i]['imdb']

            item = trakt.getMovieSummary(imdb)

            title = item.get('title')
            title = client.replaceHTMLCodes(title)

            originaltitle = title

            year = item.get('year', 0)
            year = re.sub('[^0-9]', '', str(year))

            imdb = item.get('ids', {}).get('imdb', '0')
            imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))

            tmdb = str(item.get('ids', {}).get('tmdb', 0))

            premiered = item.get('released', '0')
            try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
            except: premiered = '0'

            genre = item.get('genres', [])
            genre = [x.title() for x in genre]
            genre = ' / '.join(genre).strip()
            if not genre: genre = '0'

            duration = str(item.get('runtime', 0))  # Trakt's key is lowercase 'runtime'

            rating = item.get('rating', '0')
            if not rating or rating == '0.0': rating = '0'

            votes = item.get('votes', '0')
            try: votes = str(format(int(votes), ',d'))
            except: pass

            mpaa = item.get('certification', '0')
            if not mpaa: mpaa = '0'

            tagline = item.get('tagline', '0')

            plot = item.get('overview', '0')

            people = trakt.getPeople(imdb, 'movies')

            director = writer = ''
            if 'crew' in people and 'directing' in people['crew']:
                director = ', '.join([director['person']['name'] for director in people['crew']['directing'] if director['job'].lower() == 'director'])
            if 'crew' in people and 'writing' in people['crew']:
                writer = ', '.join([writer['person']['name'] for writer in people['crew']['writing'] if writer['job'].lower() in ['writer', 'screenplay', 'author']])

            cast = []
            for person in people.get('cast', []):
                cast.append({'name': person['person']['name'], 'role': person['character']})
            cast = [(person['name'], person['role']) for person in cast]

            try:
                if self.lang == 'en' or self.lang not in item.get('available_translations', [self.lang]): raise Exception()

                trans_item = trakt.getMovieTranslation(imdb, self.lang, full=True)

                title = trans_item.get('title') or title
                tagline = trans_item.get('tagline') or tagline
                plot = trans_item.get('overview') or plot
            except:
                pass

            artmeta = True
            try:
                #if self.fanart_tv_user == '': raise Exception()
                art = client.request(self.fanart_tv_art_link % imdb, headers=self.fanart_tv_headers, timeout='10', error=True)
                try: art = json.loads(art)
                except: artmeta = False
            except:
                artmeta = False  # request failed and art is undefined; skip caching art metadata

            try:
                poster2 = art['movieposter']
                poster2 = [x for x in poster2 if x.get('lang') == self.lang][::-1] + [x for x in poster2 if x.get('lang') == 'en'][::-1] + [x for x in poster2 if x.get('lang') in ['00', '']][::-1]
                poster2 = poster2[0]['url'].encode('utf-8')
            except:
                poster2 = '0'

            try:
                if 'moviebackground' in art: fanart = art['moviebackground']
                else: fanart = art['moviethumb']
                fanart = [x for x in fanart if x.get('lang') == self.lang][::-1] + [x for x in fanart if x.get('lang') == 'en'][::-1] + [x for x in fanart if x.get('lang') in ['00', '']][::-1]
                fanart = fanart[0]['url'].encode('utf-8')
            except:
                fanart = '0'

            try:
                banner = art['moviebanner']
                banner = [x for x in banner if x.get('lang') == self.lang][::-1] + [x for x in banner if x.get('lang') == 'en'][::-1] + [x for x in banner if x.get('lang') in ['00', '']][::-1]
                banner = banner[0]['url'].encode('utf-8')
            except:
                banner = '0'

            try:
                if 'hdmovielogo' in art: clearlogo = art['hdmovielogo']
                else: clearlogo = art['clearlogo']
                clearlogo = [x for x in clearlogo if x.get('lang') == self.lang][::-1] + [x for x in clearlogo if x.get('lang') == 'en'][::-1] + [x for x in clearlogo if x.get('lang') in ['00', '']][::-1]
                clearlogo = clearlogo[0]['url'].encode('utf-8')
            except:
                clearlogo = '0'

            try:
                if 'hdmovieclearart' in art: clearart = art['hdmovieclearart']
                else: clearart = art['clearart']
                clearart = [x for x in clearart if x.get('lang') == self.lang][::-1] + [x for x in clearart if x.get('lang') == 'en'][::-1] + [x for x in clearart if x.get('lang') in ['00', '']][::-1]
                clearart = clearart[0]['url'].encode('utf-8')
            except:
                clearart = '0'

            try:
                if self.tm_user == '': raise Exception()

                art2 = client.request(self.tm_art_link % imdb, timeout='10', error=True)
                art2 = json.loads(art2)
            except:
                pass

            try:
                poster3 = art2['posters']
                poster3 = [x for x in poster3 if x.get('iso_639_1') == 'hu'] + [x for x in poster3 if not x.get('iso_639_1') == 'hu']
                poster3 = [(x['width'], x['file_path']) for x in poster3]
                poster3 = [(x[0], x[1]) if x[0] < 300 else ('300', x[1]) for x in poster3]
                poster3 = self.tm_img_link % poster3[0]
                poster3 = poster3.encode('utf-8')
            except:
                poster3 = '0'

            try:
                fanart2 = art2['backdrops']
                fanart2 = [x for x in fanart2 if x.get('iso_639_1') == 'hu'] + [x for x in fanart2 if not x.get('iso_639_1') == 'hu']
                fanart2 = [x for x in fanart2 if x.get('width') == 1920] + [x for x in fanart2 if x.get('width') < 1920]
                fanart2 = [(x['width'], x['file_path']) for x in fanart2]
                fanart2 = [(x[0], x[1]) if x[0] < 1280 else ('1280', x[1]) for x in fanart2]
                fanart2 = self.tm_img_link % fanart2[0]
                fanart2 = fanart2.encode('utf-8')
            except:
                fanart2 = '0'

            item = {'title': title, 'originaltitle': originaltitle, 'year': year, 'imdb': imdb, 'tmdb': tmdb, 'poster': '0', 'poster2': poster2, 'poster3': poster3, 'banner': banner, 'fanart': fanart, 'fanart2': fanart2, 'clearlogo': clearlogo, 'clearart': clearart, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline}
            item = dict((k,v) for k, v in item.iteritems() if not v == '0')
            self.list[i].update(item)

            if artmeta == False: raise Exception()

            meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': self.user, 'item': item}
            self.meta.append(meta)
        except:
            pass
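
All of the artwork blocks above repeat one selection idiom: build a preference-ordered candidate list (configured language first, then English, then the language-neutral '00' entries), each group reversed, and take the first hit. Factored out, with made-up fanart.tv-style entries:

def pick_art(entries, lang):
    # entries look like fanart.tv items: {'lang': 'en', 'url': '...'}
    ranked = ([x for x in entries if x.get('lang') == lang][::-1] +
              [x for x in entries if x.get('lang') == 'en'][::-1] +
              [x for x in entries if x.get('lang') in ['00', '']][::-1])
    return ranked[0]['url'] if ranked else '0'

art = [{'lang': '00', 'url': 'neutral.jpg'},
       {'lang': 'en', 'url': 'english-old.jpg'},
       {'lang': 'en', 'url': 'english-new.jpg'},
       {'lang': 'hu', 'url': 'hungarian.jpg'}]
print(pick_art(art, 'hu'))   # hungarian.jpg
print(pick_art(art, 'de'))   # english-new.jpg (no 'de' art, last 'en' entry wins)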
Code example #37
    def imdb_list(self, url):
        try:
            for i in re.findall('date\[(\d+)\]', url):
                url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))

            def imdb_watchlist_id(url):
                return client.parseDOM(client.request(url), 'meta', ret='content', attrs = {'property': 'pageId'})[0]

            if url == self.imdbwatchlist_link:
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist_link % url

            elif url == self.imdbwatchlist2_link:
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist2_link % url

            result = client.request(url)

            result = result.replace('\n', ' ')

            items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item .+?'})
            items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
        except:
            return

        try:
            next = client.parseDOM(result, 'a', ret='href', attrs = {'class': '.+?ister-page-nex.+?'})

            if len(next) == 0:
                next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
                next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
                next = [i[0] for i in next if 'Next' in i[1]]

            next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
            next = client.replaceHTMLCodes(next)
            next = next.encode('utf-8')
        except:
            next = ''

        for item in items:
            try:
                title = client.parseDOM(item, 'a')[1]
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
                year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
                try: year = re.compile('(\d{4})').findall(year)[0]
                except: year = '0'
                year = year.encode('utf-8')

                if int(year) > int((self.datetime).strftime('%Y')): raise Exception()

                imdb = client.parseDOM(item, 'a', ret='href')[0]
                imdb = re.findall('(tt\d*)', imdb)[0]
                imdb = imdb.encode('utf-8')

                try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
                except: poster = '0'
                if '/nopicture/' in poster: poster = '0'
                poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
                poster = client.replaceHTMLCodes(poster)
                poster = poster.encode('utf-8')

                try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
                except: genre = '0'
                genre = ' / '.join([i.strip() for i in genre.split(',')])
                if genre == '': genre = '0'
                genre = client.replaceHTMLCodes(genre)
                genre = genre.encode('utf-8')

                try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
                except: duration = '0'
                duration = duration.encode('utf-8')

                rating = '0'
                try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
                except: pass
                try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
                except: rating = '0'
                try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
                except: pass
                if rating == '' or rating == '-': rating = '0'
                rating = client.replaceHTMLCodes(rating)
                rating = rating.encode('utf-8')

                try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
                except: votes = '0'
                try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
                except: votes = '0'
                if votes == '': votes = '0'
                votes = client.replaceHTMLCodes(votes)
                votes = votes.encode('utf-8')

                try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
                except: mpaa = '0'
                if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
                mpaa = mpaa.replace('_', '-')
                mpaa = client.replaceHTMLCodes(mpaa)
                mpaa = mpaa.encode('utf-8')

                try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
                except: director = '0'
                director = client.parseDOM(director, 'a')
                director = ' / '.join(director)
                if director == '': director = '0'
                director = client.replaceHTMLCodes(director)
                director = director.encode('utf-8')

                try: cast = re.findall('Star(?:s|):(.+?)(?:\||</div>)', item)[0]
                except: cast = '0'
                cast = client.replaceHTMLCodes(cast)
                cast = cast.encode('utf-8')
                cast = client.parseDOM(cast, 'a')
                if cast == []: cast = '0'

                plot = '0'
                try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
                except: pass
                try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
                except: pass
                plot = plot.rsplit('<span>', 1)[0].strip()
                plot = re.sub('<.+?>|</.+?>', '', plot)
                if plot == '': plot = '0'
                plot = client.replaceHTMLCodes(plot)
                plot = plot.encode('utf-8')

                self.list.append({'title': title, 'originaltitle': title, 'year': year, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'cast': cast, 'plot': plot, 'tagline': '0', 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'poster': poster, 'next': next})
            except:
                pass

        return self.list
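
One detail worth isolating from the IMDb parser is the poster rewrite: IMDb image URLs embed resize directives such as _UX67_CR0,0,67,98_, and the re.sub swaps whatever directive is present for _SX500. to request a larger rendition. With a made-up image path:

import re

poster = 'https://m.media-amazon.com/images/M/MV5BabcXYZ._V1_UX67_CR0,0,67,98_AL_.jpg'
poster = re.sub(r'(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
print(poster)   # https://m.media-amazon.com/images/M/MV5BabcXYZ._V1_SX500.jpg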
Code example #38
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    c = client.parseDOM(post, 'content.+?')[0]  # tag name is treated as a regex, so this matches the feed's <content:encoded> block

                    u = re.findall('<singlelink>(.+?)(?:<download>|$)', c.replace('\n', ''))[0]
                    u = client.parseDOM(u, 'a', ret='href')

                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'

                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
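
The size handling above (repeated nearly verbatim in the scrapers that follow) normalizes whatever the post reports, in MB or GB, into a uniform 'X.XX GB' info tag. Pulled out on its own:

import re

def to_gb(text):
    size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024        # MB/MiB -> GB
    # note: a comma size like '1,37 GB' raises at float(); the scraper's
    # enclosing try/except silently drops such entries
    return '%.2f GB' % (float(re.sub('[^0-9|/.|/,]', '', size)) / div)

print(to_gb('Download (745 MB)'))        # 0.73 GB
print(to_gb('BluRay 1080p | 8.2 GB'))    # 8.20 GB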
Code example #39
    def super_info(self, i):
        try:
            if self.list[i]['metacache'] == True: raise Exception()

            imdb = self.list[i]['imdb']


            url = self.imdb_info_link % imdb

            item = client.request(url, timeout='10')
            item = json.loads(item)

            title = item['Title']
            title = title.encode('utf-8')

            originaltitle = title

            year = item['Year']
            year = year.encode('utf-8')

            imdb = item['imdbID']
            if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'
            imdb = imdb.encode('utf-8')

            premiered = item['Released']
            if premiered == None or premiered == '' or premiered == 'N/A': premiered = '0'
            premiered = re.findall('(\d*) (.+?) (\d*)', premiered)
            try: premiered = '%s-%s-%s' % (premiered[0][2], {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06', 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'}[premiered[0][1]], premiered[0][0])
            except: premiered = '0'
            premiered = premiered.encode('utf-8')

            genre = item['Genre']
            if genre == None or genre == '' or genre == 'N/A': genre = '0'
            genre = genre.replace(', ', ' / ')
            genre = genre.encode('utf-8')

            duration = item['Runtime']
            if duration == None or duration == '' or duration == 'N/A': duration = '0'
            duration = re.sub('[^0-9]', '', str(duration))
            duration = duration.encode('utf-8')

            rating = item['imdbRating']
            if rating == None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0'
            rating = rating.encode('utf-8')

            votes = item['imdbVotes']
            try: votes = str(format(int(votes),',d'))
            except: pass
            if votes == None or votes == '' or votes == 'N/A': votes = '0'
            votes = votes.encode('utf-8')

            mpaa = item['Rated']
            if mpaa == None or mpaa == '' or mpaa == 'N/A': mpaa = '0'
            mpaa = mpaa.encode('utf-8')

            director = item['Director']
            if director == None or director == '' or director == 'N/A': director = '0'
            director = director.replace(', ', ' / ')
            director = re.sub(r'\(.*?\)', '', director)
            director = ' '.join(director.split())
            director = director.encode('utf-8')

            writer = item['Writer']
            if writer == None or writer == '' or writer == 'N/A': writer = '0'
            writer = writer.replace(', ', ' / ')
            writer = re.sub(r'\(.*?\)', '', writer)
            writer = ' '.join(writer.split())
            writer = writer.encode('utf-8')

            cast = item['Actors']
            if cast == None or cast == '' or cast == 'N/A': cast = '0'
            cast = [x.strip() for x in cast.split(',') if not x == '']
            try: cast = [(x.encode('utf-8'), '') for x in cast]
            except: cast = []
            if cast == []: cast = '0'

            plot = item['Plot']
            if plot == None or plot == '' or plot == 'N/A': plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')

            poster = item['Poster']
            if poster == None or poster == '' or poster == 'N/A': poster = '0'
            if '/nopicture/' in poster: poster = '0'
            poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
            if 'poster' in self.list[i] and poster == '0': poster = self.list[i]['poster']
            poster = poster.encode('utf-8')


            artmeta = True
            art = client.request(self.fanart_tv_art_link % imdb, headers=self.fanart_tv_headers, timeout='10', error=True)
            try: art = json.loads(art)
            except: artmeta = False

            try:
                poster2 = art['movieposter']
                poster2 = [x for x in poster2 if x.get('lang') == 'en'][::-1] + [x for x in poster2 if x.get('lang') == '00'][::-1]
                poster2 = poster2[0]['url'].encode('utf-8')
            except:
                poster2 = '0'

            try:
                if 'moviebackground' in art: fanart = art['moviebackground']
                else: fanart = art['moviethumb']
                fanart = [x for x in fanart if x.get('lang') == 'en'][::-1] + [x for x in fanart if x.get('lang') == '00'][::-1]
                fanart = fanart[0]['url'].encode('utf-8')
            except:
                fanart = '0'

            try:
                banner = art['moviebanner']
                banner = [x for x in banner if x.get('lang') == 'en'][::-1] + [x for x in banner if x.get('lang') == '00'][::-1]
                banner = banner[0]['url'].encode('utf-8')
            except:
                banner = '0'

            try:
                if 'hdmovielogo' in art: clearlogo = art['hdmovielogo']
                else: clearlogo = art['clearlogo']
                clearlogo = [x for x in clearlogo if x.get('lang') == 'en'][::-1] + [x for x in clearlogo if x.get('lang') == '00'][::-1]
                clearlogo = clearlogo[0]['url'].encode('utf-8')
            except:
                clearlogo = '0'

            try:
                if 'hdmovieclearart' in art: clearart = art['hdmovieclearart']
                else: clearart = art['clearart']
                clearart = [x for x in clearart if x.get('lang') == 'en'][::-1] + [x for x in clearart if x.get('lang') == '00'][::-1]
                clearart = clearart[0]['url'].encode('utf-8')
            except:
                clearart = '0'


            try:
                if self.tm_user == '': raise Exception()

                art2 = client.request(self.tm_art_link % imdb, timeout='10', error=True)
                art2 = json.loads(art2)
            except:
                pass

            try:
                poster3 = art2['posters']
                poster3 = [x for x in poster3 if x.get('iso_639_1') == 'en'] + [x for x in poster3 if not x.get('iso_639_1') == 'en']
                poster3 = [(x['width'], x['file_path']) for x in poster3]
                poster3 = [(x[0], x[1]) if x[0] < 300 else ('300', x[1]) for x in poster3]
                poster3 = self.tm_img_link % poster3[0]
                poster3 = poster3.encode('utf-8')
            except:
                poster3 = '0'

            try:
                fanart2 = art2['backdrops']
                fanart2 = [x for x in fanart2 if x.get('iso_639_1') == 'en'] + [x for x in fanart2 if not x.get('iso_639_1') == 'en']
                fanart2 = [x for x in fanart2 if x.get('width') == 1920] + [x for x in fanart2 if x.get('width') < 1920]
                fanart2 = [(x['width'], x['file_path']) for x in fanart2]
                fanart2 = [(x[0], x[1]) if x[0] < 1280 else ('1280', x[1]) for x in fanart2]
                fanart2 = self.tm_img_link % fanart2[0]
                fanart2 = fanart2.encode('utf-8')
            except:
                fanart2 = '0'


            try:
                if self.lang == 'en': raise Exception()

                url = self.trakt_lang_link % (imdb, self.lang)

                item = trakt.getTrakt(url)
                item = json.loads(item)[0]

                t = item['title']
                if not (t == None or t == ''):
                    try: title = t.encode('utf-8')
                    except: pass

                t = item['overview']
                if not (t == None or t == ''):
                    try: plot = t.encode('utf-8')
                    except: pass
            except:
                pass


            item = {'title': title, 'originaltitle': originaltitle, 'year': year, 'imdb': imdb, 'poster': poster, 'poster2': poster2, 'poster3': poster3, 'banner': banner, 'fanart': fanart, 'fanart2': fanart2, 'clearlogo': clearlogo, 'clearart': clearart, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot}
            item = dict((k,v) for k, v in item.iteritems() if not v == '0')
            self.list[i].update(item)

            if artmeta == False: raise Exception()

            meta = {'imdb': imdb, 'tvdb': '0', 'lang': self.lang, 'user': self.user, 'item': item}
            self.meta.append(meta)
        except:
            pass
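
The OMDb variant rebuilds the premiere date by hand because OMDb returns dates like '08 May 2015' rather than ISO format. The conversion reduces to one regex plus a month lookup:

import re

MONTHS = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06',
          'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}

def omdb_to_iso(released):
    m = re.findall(r'(\d*) (.+?) (\d*)', released)
    try:
        return '%s-%s-%s' % (m[0][2], MONTHS[m[0][1]], m[0][0])
    except Exception:
        return '0'    # 'N/A', empty, or unexpected shapes fall back like the original

print(omdb_to_iso('08 May 2015'))   # 2015-05-08
print(omdb_to_iso('N/A'))           # 0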
Code example #40
File: channels.py Project: bopopescu/repo-1
	def items_list(self, i):
		try:
			item = cache.get(trakt.SearchAll, 3, urllib.quote_plus(i[1]), i[2], True)[0]

			content = item.get('movie')
			if not content: content = item.get('show')
			item = content

			title = item.get('title')
			title = client.replaceHTMLCodes(title)

			originaltitle = title

			year = item.get('year', 0)
			year = re.sub('[^0-9]', '', str(year))

			imdb = item.get('ids', {}).get('imdb', '0')
			imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))

			tmdb = str(item.get('ids', {}).get('tmdb', 0))

			premiered = item.get('released', '0')
			try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
			except: premiered = '0'

			genre = item.get('genres', [])
			genre = [x.title() for x in genre]
			genre = ' / '.join(genre).strip()
			if not genre: genre = '0'

			duration = str(item.get('runtime', 0))  # Trakt's key is lowercase 'runtime'

			rating = item.get('rating', '0')
			if not rating or rating == '0.0': rating = '0'

			votes = item.get('votes', '0')
			try: votes = str(format(int(votes), ',d'))
			except: pass

			mpaa = item.get('certification', '0')
			if not mpaa: mpaa = '0'

			tagline = item.get('tagline', '0')

			plot = item.get('overview', '0')

			people = trakt.getPeople(imdb, 'movies')
			director = writer = ''
			cast = []

			if people:
				if 'crew' in people and 'directing' in people['crew']:
					director = ', '.join([director['person']['name'] for director in people['crew']['directing'] if director['job'].lower() == 'director'])
				if 'crew' in people and 'writing' in people['crew']:
					writer = ', '.join([writer['person']['name'] for writer in people['crew']['writing'] if writer['job'].lower() in ['writer', 'screenplay', 'author']])
				for person in people.get('cast', []):
					cast.append({'name': person['person']['name'], 'role': person['character']})
				cast = [(person['name'], person['role']) for person in cast]

			try:
				if self.lang == 'en' or self.lang not in item.get('available_translations', [self.lang]): raise Exception()

				trans_item = trakt.getMovieTranslation(imdb, self.lang, full = True)

				title = trans_item.get('title') or title
				tagline = trans_item.get('tagline') or tagline
				plot = trans_item.get('overview') or plot
			except:
				pass

			self.list.append({'title': title, 'originaltitle': originaltitle, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'imdb': imdb, 'tmdb': tmdb, 'poster': '0', 'channel': i[0]})
		except:
			pass
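
The crew parsing above simply filters Trakt's people listing by job title. Isolated against a made-up response shaped like Trakt's:

people = {'crew': {'directing': [
    {'job': 'Director', 'person': {'name': 'Jane Doe'}},
    {'job': 'Assistant Director', 'person': {'name': 'John Roe'}},
]}}

director = ', '.join(d['person']['name']
                     for d in people.get('crew', {}).get('directing', [])
                     if d['job'].lower() == 'director')
print(director)   # Jane Doe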
Code example #41
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()
            
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    c = client.parseDOM(post, 'content.+?')

                    u = c[0].split('<h1 ')
                    u = [i for i in u if 'Download Links' in i]
                    u = client.parseDOM(u, 'a', ret='href')

                    try: s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c[0])[0]
                    except: s = '0'

                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, item[1])

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
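
A pattern shared by most of the debrid scrapers in this collection: build an 'hdlr' token (S01E05 for episodes, the year for movies), then require that a release name both reduces to the right title and carries that token. A compact sketch of the two checks, using a plain lowercase comparison as a stand-in for the addon's cleantitle.get:

import re

def matches(name, title, hdlr):
    # strip everything from the year/SxxExx token onward to isolate the title
    t = re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
    if t.replace('.', ' ').lower().strip() != title.lower():    # stand-in for cleantitle.get
        return False
    y = re.findall(r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
    return y == hdlr

print(matches('The.Expanse.S02E05.720p.WEB-DL', 'the expanse', 'S02E05'))   # True
print(matches('The.Expanse.S02E06.720p.WEB-DL', 'the expanse', 'S02E05'))   # False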
Code example #42
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            try:
                feed = True

                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)
                if r == None: feed = False

                posts = client.parseDOM(r, 'item')
                if not posts: feed = False

                items = []

                for post in posts:
                    try:
                        u = client.parseDOM(post, 'enclosure', ret='url')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            try:
                if feed == True: raise Exception()

                url = self.search_link_2 % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []
                dupes = []

                for post in posts:
                    try:
                        t = client.parseDOM(post, 'a')[0]
                        t = re.sub('<.+?>|</.+?>', '', t)

                        x = re.sub(
                            '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                            '', t)
                        if not cleantitle.get(title) in cleantitle.get(x):
                            raise Exception()
                        y = re.findall(
                            '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                            t)[-1].upper()
                        if not y == hdlr: raise Exception()

                        fmt = re.sub(
                            '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                            '', t.upper())
                        fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                        fmt = [i.lower() for i in fmt]
                        if not any(i in ['1080p', '720p'] for i in fmt):
                            raise Exception()

                        if len(dupes) > 2: raise Exception()
                        dupes += [x]

                        u = client.parseDOM(post, 'a', ret='href')[0]

                        r = client.request(u)
                        u = client.parseDOM(r, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SceneRls - Exception: \n' + str(failure))
            return sources
Code example #43
 def searchShow(self, title, season, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         t = cleantitle.get(title)
         url = urlparse.urljoin(
             self.base_link, self.search_link % urllib.quote_plus(
                 cleantitle.query('%s S%02d' %
                                  (title.replace('\'', '-'), int(season)))))
         sr = client.request(url, headers=headers, timeout='10')
         if sr:
             r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
             r = [(client.parseDOM(i, 'a', ret='href'),
                   client.parseDOM(i, 'a', ret='title')) for i in r]
             r = [(i[0][0], i[1][0]) for i in r
                  if len(i[0]) > 0 and len(i[1]) > 0]
             r = [(i[0], re.findall('(.+?)\s+-\s+S(\d+)', i[1])) for i in r]
             r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                  if len(i[1]) > 0]
             r = [
                 i[0] for i in r
                 if t == cleantitle.get(i[1]) and int(season) == int(i[2])
             ][0]
         else:
             url = urlparse.urljoin(
                 self.base_link, self.search_link % urllib.quote_plus(
                     cleantitle.query(
                         '%s Season %01d' %
                         (title.replace('\'', '-'), int(season)))))
             sr = client.request(url, headers=headers, timeout='10')
             if sr:
                 r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                 r = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'a', ret='title')) for i in r]
                 r = [(i[0][0], i[1][0]) for i in r
                      if len(i[0]) > 0 and len(i[1]) > 0]
                 r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1]))
                      for i in r]
                 r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                      if len(i[1]) > 0]
                 r = [
                     i[0] for i in r if t == cleantitle.get(i[1])
                     and int(season) == int(i[2])
                 ][0]
             else:
                 url = urlparse.urljoin(
                     self.base_link, self.search_link % urllib.quote_plus(
                         cleantitle.query(
                             '%s %01d' %
                             (title.replace('\'', '-'), int(year)))))
                 sr = client.request(url, headers=headers, timeout='10')
                 if sr:
                     r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                     r = [(client.parseDOM(i, 'a', ret='href'),
                           client.parseDOM(i, 'a', ret='title')) for i in r]
                     r = [(i[0][0], i[1][0]) for i in r
                          if len(i[0]) > 0 and len(i[1]) > 0]
                     r = [(i[0], re.findall('(.+?) \((\d{4})', i[1]))
                          for i in r]
                     r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                          if len(i[1]) > 0]
                     r = [
                         i[0] for i in r
                         if t == cleantitle.get(i[1]) and year == i[2]
                     ][0]
         url = re.findall('(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         return url.encode('utf-8')
     except:
         return
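
Each fallback round in searchShow reduces to the same parse: pull a (title, season) pair out of the result caption and keep the row that matches. One round, against made-up search results:

import re

entries = [('/serie/the-wire-s03', 'The Wire - S03'),
           ('/serie/the-wire-s01', 'The Wire - S01')]
season, t = 3, 'the wire'

hits = [(u, re.findall(r'(.+?)\s+-\s+S(\d+)', n)) for u, n in entries]
hits = [u for u, m in hits
        if m and m[0][0].lower() == t and int(m[0][1]) == season]
print(hits[0])   # /serie/the-wire-s03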
Code example #44
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            localtitle = data['localtitle']
            year = data['year']

            years = cache.get(self.get_years, 720)
            year_id = [i[0] for i in years if year == i[1]][0]

            query = urllib.urlencode({
                'f[g][text]': localtitle,
                'f[g][tags][]': year_id
            })
            query = urlparse.urljoin(self.base_link, self.search_link % query)
            r = client.request(query)
            result = json.loads(r)
            if result['total'] == 0: raise Exception()

            result = client.parseDOM(result['items'],
                                     'div',
                                     attrs={'class': 'moduleItemIntrotext'})
            result = [
                (client.parseDOM(i, 'img',
                                 ret='alt')[0].split('- online')[0].strip(),
                 client.parseDOM(i, 'a', ret='href')[0]) for i in result
            ]
            result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
            result = [
                i[1] for i in result if cleantitle.get(localtitle) ==
                cleantitle.get(i[0].encode('utf-8')) or cleantitle.get(
                    title) == cleantitle.get(i[0].encode('utf-8'))
            ]
            if len(result) == 0: raise Exception()

            query = urlparse.urljoin(self.base_link, result[0])
            r = client.request(query)

            urlr = client.parseDOM(r, 'div', attrs={'class': 'tovabb'})[0]
            urlr = client.parseDOM(urlr, 'a', ret='href')[0]

            r1 = client.request(urlr)

            result = client.parseDOM(r1, 'table', attrs={'class':
                                                         'tablazat'})[0]
            result = client.parseDOM(result, 'tr')

            locDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]

            for item in result:
                try:
                    host = client.parseDOM(item,
                                           'td',
                                           attrs={'class': 'oszlop2'})[0]
                    host = client.parseDOM(
                        host, 'img',
                        ret='src')[0].rsplit('/')[-1].split('.')[0]
                    host = host.strip().lower()
                    host = [x[1] for x in locDict if host == x[0]][0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    url = 'http:' + client.parseDOM(
                        item, 'a', ret='href')[0].split(':')[-1]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    q = client.parseDOM(item,
                                        'td',
                                        attrs={'class': 'oszlop_minoseg'})[0]
                    q = client.parseDOM(q, 'a', ret='title')[0].lower()
                    if any(x in q for x in ['kamer', 'r5', 'cam']):
                        quality = 'CAM'
                    elif any(x in q for x in ['dvdrip', 'bdrip']):
                        quality = 'SD'
                    else:
                        quality = 'SD'
                    l = client.parseDOM(item, 'td', attrs={'class':
                                                           'oszlop1'})[0]
                    l = client.parseDOM(
                        l, 'img', ret='src')[0].rsplit('/')[-1].split('.')[0]
                    info = 'szinkron' if l == 'hu-hu' else ''
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'hu',
                        'info': info,
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
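
This scraper identifies the hoster from an icon filename rather than from the link itself, so it first builds a short-name lookup out of hostDict. A sketch with made-up host names:

hostDict = ['openload.co', 'rapidgator.net', 'videa.hu']    # hypothetical hoster whitelist
locDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]      # 'openload' -> 'openload.co'

icon = '/images/hosts/openload.png'                         # the row only shows the hoster's icon
key = icon.rsplit('/')[-1].split('.')[0]                    # 'openload'
host = [full for short, full in locDict if key == short][0]
print(host)   # openload.co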
Code example #45
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            imdb = data['imdb']

            content = 'episode' if 'tvshowtitle' in data else 'movie'

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            try:
                #if feed == True: raise Exception()

                url = self.search_link_2 % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)
                myheaders = {
                    'User-Agent':
                    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0',
                    'Host': 'scenedown.in',
                    'Connection': 'keep-alive',
                    'Accept-Encoding': 'gzip, deflate, sdch'
                }
                r = client.request(url, headers=myheaders)

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []
                dupes = []

                for post in posts:
                    try:
                        t = client.parseDOM(post, 'a')[0]

                        if content == 'movie':
                            x = re.findall('/(tt\d+)', post)[0]
                            if not x == imdb: raise Exception()
                            q = re.findall(
                                '<strong>\s*Video\s*:\s*</strong>.+?\s(\d+)',
                                post)[0]
                            if not int(q) >= 720: raise Exception()
                            if len(dupes) > 3: raise Exception()
                            dupes += [x]

                        elif content == 'episode':
                            x = re.sub(
                                '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                                '', t)
                            if not cleantitle.get(title) in cleantitle.get(x):
                                raise Exception()
                            y = re.findall(
                                '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                                t)[-1].upper()
                            if not y == hdlr: raise Exception()
                            if len(dupes) > 0: raise Exception()
                            dupes += [x]

                        u = client.parseDOM(post, 'a', ret='href')[0]
                        myheaders['Referer'] = url
                        r = client.request(u, headers=myheaders).replace(
                            '\n', '')

                        u = client.parseDOM(r,
                                            'div',
                                            attrs={'class': 'postContent'})[0]
                        u = re.split('id\s*=\s*"more-\d+"', u)[-1]

                        if content == 'episode':
                            u = re.compile(
                                '(?:<strong>|)(.+?)</strong>(.+?)(?:<strong>|$)',
                                re.MULTILINE | re.DOTALL).findall(u)
                            u = [(re.sub('<.+?>|</.+?>|>', '', i[0]), i[1])
                                 for i in u]
                            u = [i for i in u if '720p' in i[0].lower()][0]
                            u, r, t = u[1], u[1], u[0]

                        u = client.parseDOM(u, 'p')
                        u = [client.parseDOM(i, 'a', ret='href') for i in u]
                        u = [i[0] for i in u if len(i) == 1]
                        if not u: raise Exception()

                        s = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', r)
                        s = s[0] if s else '0'

                        items += [(t, i, s) for i in u]
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    fmt = re.sub(
                        '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                        '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(
                            i.endswith(('subs', 'sub', 'dubbed', 'dub'))
                            for i in fmt):
                        raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                        quality = 'SCR'
                    elif any(i in [
                            'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam',
                            'dvdts', 'cam', 'telesync', 'ts'
                    ] for i in fmt):
                        quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                        info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
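
The size handling just above repeats a pattern used by most of the examples below: grab the last "<number> <unit>" token, divide MB/MiB values by 1024, and render the result as "%.2f GB". A minimal standalone sketch of that logic (the helper name normalize_size is ours, not the addon's):

import re

def normalize_size(text):
    # Pull the last "<number> <unit>" token, e.g. "8.95 GB" or "700 MB".
    sizes = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)
    if not sizes:
        return None
    size = sizes[-1]
    # GB/GiB pass through unchanged; MB/MiB are converted to GB.
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub(r'[^0-9.,]', '', size).replace(',', '.')) / div
    return '%.2f GB' % value

# normalize_size('Show.S01E01.720p 700 MB') -> '0.68 GB'
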
Code example #46
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)

            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if\
                'tvshowtitle' in data else '%s' % data['title']
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            headers = {
                'Referer':
                'http://www.ddlvalley.me/',
                'User-Agent':
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
            }
            r = self.scraper.get(url, headers=headers).content

            items = dom_parser2.parse_dom(r, 'h2')
            items = [
                dom_parser2.parse_dom(i.content,
                                      'a',
                                      req=['href', 'rel', 'data-wpel-link'])
                for i in items
            ]
            items = [(i[0].content, i[0].attrs['href']) for i in items]

            hostDict = hostprDict + hostDict

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    headers = {
                        'Referer':
                        'http://www.ddlvalley.me/',
                        'User-Agent':
                        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
                    }
                    r = self.scraper.get(item[1], headers=headers).content
                    links = dom_parser2.parse_dom(
                        r, 'a', req=['href', 'rel', 'data-wpel-link'])
                    links = [i.attrs['href'] for i in links]
                    for url in links:
                        try:
                            if hdlr in name:
                                fmt = re.sub(
                                    '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                                    '', name.upper())
                                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                                fmt = [i.lower() for i in fmt]

                                if any(
                                        i.endswith(('subs', 'sub', 'dubbed',
                                                    'dub')) for i in fmt):
                                    raise Exception()
                                if any(i in ['extras'] for i in fmt):
                                    raise Exception()

                                if '2160p' in fmt: quality = '4K'
                                elif '1080p' in fmt: quality = '1080p'
                                elif '720p' in fmt: quality = '720p'
                                else: quality = 'SD'
                                if any(i in ['dvdscr', 'r5', 'r6']
                                       for i in fmt):
                                    quality = 'SCR'
                                elif any(i in [
                                        'camrip', 'tsrip', 'hdcam', 'hdts',
                                        'dvdcam', 'dvdts', 'cam', 'telesync',
                                        'ts'
                                ] for i in fmt):
                                    quality = 'CAM'

                                info = []

                                if '3d' in fmt: info.append('3D')

                                try:
                                    size = re.findall(
                                        '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                                        name)[-1]
                                    div = 1 if size.endswith(
                                        ('GB', 'GiB')) else 1024
                                    size = float(
                                        re.sub('[^0-9|/.|/,]', '', size)) / div
                                    size = '%.2f GB' % size
                                    info.append(size)
                                except:
                                    pass

                                if any(i in ['hevc', 'h265', 'x265']
                                       for i in fmt):
                                    info.append('HEVC')

                                info = ' | '.join(info)

                                if not any(x in url
                                           for x in ['.rar', '.zip', '.iso']):
                                    url = client.replaceHTMLCodes(url)
                                    url = url.encode('utf-8')

                                    host = re.findall(
                                        '([\w]+[.][\w]+)$',
                                        urlparse.urlparse(
                                            url.strip().lower()).netloc)[0]
                                    if host in hostDict:
                                        host = client.replaceHTMLCodes(host)
                                        host = host.encode('utf-8')

                                        sources.append({
                                            'source': host,
                                            'quality': quality,
                                            'language': 'en',
                                            'url': url,
                                            'info': info,
                                            'direct': False,
                                            'debridonly': True
                                        })
                        except:
                            pass
                except:
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
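
The quality ladder in this example (2160p -> 4K, then 1080p, 720p, else SD, with screener and cam tags overriding the result) recurs in nearly every sources() method below. Isolated, under a hypothetical name detect_quality:

import re

SCR_TAGS = ['dvdscr', 'r5', 'r6']
CAM_TAGS = ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts',
            'cam', 'telesync', 'ts']

def detect_quality(name):
    # Tokenise on the same separators these scrapers split on.
    fmt = [i.lower() for i in re.split(r'\.|\(|\)|\[|\]|\s|\-', name) if i]
    if '2160p' in fmt:
        quality = '4K'
    elif '1080p' in fmt:
        quality = '1080p'
    elif '720p' in fmt:
        quality = '720p'
    else:
        quality = 'SD'
    # Screener/cam tags trump whatever the resolution token said.
    if any(i in SCR_TAGS for i in fmt):
        quality = 'SCR'
    elif any(i in CAM_TAGS for i in fmt):
        quality = 'CAM'
    return quality

# detect_quality('Movie.2018.1080p.HDCAM.x264') -> 'CAM'
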
Code example #47
0
File: ddlvalley.py Project: southpaw99/houseatreides
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if\
                'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            url = self.search_link % urllib.quote_plus(query).lower()
            url = urlparse.urljoin(self.base_link, url)

            headers = {
                'Referer':
                'http://www.ddlvalley.me',
                'Accept':
                'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'Accept-Encoding':
                'gzip, deflate',
                'Accept-Language':
                'en-US,en;q=0.9',
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'
            }
            r = self.cfscraper.get(url, headers=headers).content

            items = dom_parser2.parse_dom(r, 'h2')
            items = [
                dom_parser2.parse_dom(i.content,
                                      'a',
                                      req=['href', 'rel', 'data-wpel-link'])
                for i in items
            ]
            items = [(i[0].content, i[0].attrs['href']) for i in items]

            hostDict = hostprDict + hostDict

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    query = query.lower().replace(' ', '-')
                    if query not in item[1]:
                        continue
                    url = item[1]
                    headers = {
                        'Referer':
                        'http://www.ddlvalley.me',
                        'Accept':
                        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                        'Accept-Encoding':
                        'gzip, deflate',
                        'Accept-Language':
                        'en-US,en;q=0.9',
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'
                    }
                    r = self.cfscraper.get(url, headers=headers).content
                    links = dom_parser2.parse_dom(
                        r, 'a', req=['href', 'rel', 'data-wpel-link'])
                    links = [i.attrs['href'] for i in links]
                    for url in links:
                        try:
                            if hdlr in name:
                                fmt = re.sub(
                                    '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                                    '', name.upper())
                                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                                fmt = [i.lower() for i in fmt]

                                if any(
                                        i.endswith(('subs', 'sub', 'dubbed',
                                                    'dub')) for i in fmt):
                                    raise Exception()
                                if any(i in ['extras'] for i in fmt):
                                    raise Exception()

                                if '2160p' in fmt:
                                    quality = '4K'
                                elif '1080p' in fmt:
                                    quality = '1080p'
                                elif '720p' in fmt:
                                    quality = '720p'
                                else:
                                    quality = 'SD'
                                if any(i in ['dvdscr', 'r5', 'r6']
                                       for i in fmt):
                                    quality = 'SCR'
                                elif any(i in [
                                        'camrip', 'tsrip', 'hdcam', 'hdts',
                                        'dvdcam', 'dvdts', 'cam', 'telesync',
                                        'ts'
                                ] for i in fmt):
                                    quality = 'CAM'

                                info = []

                                if '3d' in fmt:
                                    info.append('3D')

                                try:
                                    size = re.findall(
                                        '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                                        name)[-1]
                                    div = 1 if size.endswith(
                                        ('GB', 'GiB')) else 1024
                                    size = float(
                                        re.sub('[^0-9|/.|/,]', '', size)) / div
                                    size = '%.2f GB' % size
                                    info.append(size)
                                except Exception:
                                    pass

                                if any(i in ['hevc', 'h265', 'x265']
                                       for i in fmt):
                                    info.append('HEVC')

                                info = ' | '.join(info)

                                if not any(x in url
                                           for x in ['.rar', '.zip', '.iso']):
                                    url = client.replaceHTMLCodes(url)
                                    url = url.encode('utf-8')

                                    host = re.findall(
                                        '([\w]+[.][\w]+)$',
                                        urlparse.urlparse(
                                            url.strip().lower()).netloc)[0]
                                    if host in hostDict:
                                        host = client.replaceHTMLCodes(host)
                                        host = host.encode('utf-8')

                                        sources.append({
                                            'source':
                                            host,
                                            'quality':
                                            quality,
                                            'language':
                                            'en',
                                            'url':
                                            url,
                                            'info':
                                            info,
                                            'direct':
                                            False,
                                            'debridonly':
                                            debrid.status()
                                        })
                        except Exception:
                            pass
                except Exception:
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DDLValley - Exception: \n' + str(failure))
            return
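
This variant builds the identical header dict twice, once for the search request and once per result page; hoisting it to a single constant removes the duplication. A sketch (the constant name DDL_HEADERS is ours):

DDL_HEADERS = {
    'Referer': 'http://www.ddlvalley.me',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
              'image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-US,en;q=0.9',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:66.0) '
                  'Gecko/20100101 Firefox/66.0',
}

# Both requests can then share it:
#   r = self.cfscraper.get(url, headers=DDL_HEADERS).content
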
Code example #48
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            year = data['year']
            season = data['season'] if 'season' in data else False
            episode = data['episode'] if 'episode' in data else False
            localtitle = data['localtitle'] if 'localtitle' in data else False

            if season and episode:
                localtitle = data[
                    'localtvshowtitle'] if 'localtvshowtitle' in data else False

            t = cleantitle.get(title)
            tq = cleantitle.query(localtitle)
            tq2 = re.sub(' ', '', cleantitle.query(localtitle).lower())
            tq = re.sub(' ', '%20', tq)
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request('http://www.cinemay.com/?s=%s' % tq)
            r = client.parseDOM(r, 'div', attrs={'class': 'unfilm'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0],
                  re.sub(
                      '(film| en streaming vf| en streaming vostfr|&rsquo;| )',
                      '', i[1][0]).lower()) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2],
                  re.findall('(.+?)\s+(?:saison|s)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
                  i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], re.sub(' \&\#[0-9]{4,6};', '', i[1]), i[2], i[3])
                 for i in r]
            r = [i[0] for i in r if tq2 == cleantitle.get(i[1])][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            r = client.request('http://www.cinemay.com' + url)
            r = client.parseDOM(r, 'div', attrs={'class': 'module-actionbar'})
            r = client.parseDOM(r, 'a', ret='href')

            for i in r:
                if i == '#':
                    continue

                url = client.request('http://www.cinemay.com' + i)
                url = client.parseDOM(url,
                                      'div',
                                      attrs={'class': 'wbox2 video dark'})
                url = client.parseDOM(url, 'iframe', ret='src')[0]

                host = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'FR',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
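
The host check this example shares with several others reduces a link's netloc to its last two dot-separated labels and keeps the source only if that domain appears in hostDict. Standalone (the Python 2 urlparse module is what the examples assume; the py3 fallback is ours):

import re
try:
    from urlparse import urlparse          # Python 2, as in the examples
except ImportError:
    from urllib.parse import urlparse      # Python 3 fallback

def valid_host(url, hostDict):
    # 'http://www.example.com/f/abc' -> 'www.example.com' -> 'example.com'
    netloc = urlparse(url.strip().lower()).netloc
    m = re.findall(r'([\w]+[.][\w]+)$', netloc)
    return m[0] if m and m[0] in hostDict else None
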
Code example #49
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            items = []

            for post in posts:

                try:
                    t = client.parseDOM(post, 'title')[0]
                    t2 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)

                    if not cleantitle.get_simple(t2.replace(
                            'Watch Online', '')) == cleantitle.get(title):
                        raise Exception()

                    l = client.parseDOM(post, 'link')[0]

                    p = client.parseDOM(post, 'pubDate')[0]

                    if data['year'] in p: items += [(t, l)]

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    u = client.request(item[1])
                    if 'http://www.imdb.com/title/%s/' % data['imdb'] in u:

                        l = client.parseDOM(u, 'div',
                                            {'class': 'movieplay'})[0]
                        l = client.parseDOM(u, 'iframe',
                                            ret='data-lazy-src')[0]

                        # quality is pinned to 'SD' below; only info is needed
                        _, info = source_utils.get_release_quality(name, l)
                        info = ' | '.join(info)

                        url = l

                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(url, hostDict)
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
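
The search query here goes through the same character-stripping regex as in the other sources() methods before being URL-quoted. In isolation (the py2/py3 import dance is ours; the examples themselves are Python 2):

import re
try:
    from urllib import quote_plus          # Python 2
except ImportError:
    from urllib.parse import quote_plus    # Python 3

def clean_query(query):
    # Strip characters that break the sites' search endpoints.
    query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
    return quote_plus(query)

# clean_query('Mission: Impossible - Fallout 2018')
#   -> 'Mission++Impossible++Fallout+2018'
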
Code example #50
0
File: ultrahd.py Project: uguer30/Project
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i,
                                        'div',
                                        attrs={'class': 'news-title'}))
                 for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r
                 if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data,
                                                 'div',
                                                 attrs={'id': 'r-content'})
                    data = re.findall(
                        '\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                        data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            # Tokenise the release name once; iterating over
                            # the raw string would compare single characters
                            # to multi-character tags and never match.
                            fmt = [x.lower() for x in re.split(
                                '\.|\(|\)|\[|\]|\s|\-', name)]
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in fmt for i in ['dvdscr', 'r5', 'r6']):
                                quality = 'SCR'
                            elif any(i in fmt for i in [
                                    'camrip', 'tsrip', 'hdcam', 'hdts',
                                    'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'
                            ]):
                                quality = 'CAM'
                            else:
                                quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in url:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in fmt for i in ['hevc', 'h265', 'x265']):
                                info.append('HEVC')
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                    size)[-1]
                                div = 1 if size.endswith(
                                    ('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url
                                   for x in ['.rar', '.zip', '.iso', 'turk']):
                                continue

                            if 'ftp' in url:
                                host = 'COV'
                                direct = True
                            else:
                                direct = False
                                host = 'turbobit.net'
                            #if not host in hostDict: continue

                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': direct,
                                'debridonly': True
                            })

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('UltraHD - Exception: \n' + str(failure))
            return sources
Code example #51
0
    def movie(self, imdb, title, year):
        try:
            t = cleantitle.get(title)
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            q = self.search_link % urllib.quote_plus(cleantitle.query(title))
            q = urlparse.urljoin(self.base_link, q)

            r = proxy.request(q, 'flag')
            r = client.parseDOM(r, 'TR', attrs={'id': 'coverPreview.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a'),
                  client.parseDOM(i, 'div', attrs={'style': '.+?'}),
                  client.parseDOM(i, 'img', ret='src')) for i in r]
            r = [(i[0][0].strip(), i[1][0].strip(), i[2], i[3]) for i in r
                 if i[0] and i[1] and i[3]]
            r = [(i[0], i[1], [x for x in i[2]
                               if x.isdigit() and len(x) == 4], i[3])
                 for i in r]
            r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r]
            r = [i for i in r if any('us_flag_' in x for x in i[3])]
            r = [(i[0], i[1], i[2],
                  [re.findall('(\d+)', x) for x in i[3] if 'smileys' in x])
                 for i in r]
            r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r]
            r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r]
            r = sorted(r, key=lambda x: x[3])[::-1]
            r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r]
            r = [(i[0], i[1], i[2]) for i in r if not i[3]]
            r = [i for i in r if i[2] in y]

            r = [(proxy.parse(i[0]), i[1], i[2]) for i in r]

            match = [
                i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]
            ]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if match:
                        url = match[0]
                        break
                    r = proxy.request(urlparse.urljoin(self.base_link, i),
                                      'tablemoviesindex2')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            url = urlparse.urljoin(self.base_link, url)

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
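
This movie() resolves the final URL with the same two-stage pattern as the movie25_mv and __search examples below: take an exact title/year match if one exists, otherwise probe up to five de-duplicated candidates and accept the first page whose HTML contains the IMDb id. The skeleton of that pattern (fetch_page stands in for proxy.request/client.request):

import re

def resolve_url(candidates, exact_matches, imdb, fetch_page):
    if exact_matches:
        return exact_matches[0]            # an exact title/year match wins
    for candidate in candidates[:5]:       # otherwise probe a few candidates
        try:
            html = fetch_page(candidate)   # stand-in for proxy/client.request
            if imdb in re.findall(r'(tt\d+)', html):
                return candidate           # the IMDb id confirms the page
        except Exception:
            pass
    return None
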
Code example #52
0
def to_items(url):
    data = client.request(url, headers=headers)
    posts = zip(
        client.parseDOM(data, 'div', attrs={'class': 'content item-content'}),
        client.parseDOM(data, 'h2'))

    for post, name in posts:
        try:
            plot = re.findall(r'''Plot</div>(.+?)<div class="plot''', post,
                              re.DOTALL)[0]
        except IndexError:
            plot = 'N/A'
        desc = client.replaceHTMLCodes(plot)
        desc = tools.clear_Title(desc)
        desc = desc.encode('utf-8')
        try:
            title = client.parseDOM(name, 'a')[0]
        except BaseException:
            title = client.parseDOM(name, 'img', ret='alt')[0]
        # try:
        #     year = client.parseDOM(data, 'div', {'class': 'metadata'})[0]
        #     year = client.parseDOM(year, 'span')[0]
        #     year = '[COLOR lime]({0})[/COLOR]'.format(year)
        # except IndexError:
        #     year = '(N/A)'
        title = tools.clear_Title(title)
        title = '[B][COLOR white]{}[/COLOR][/B]'.format(title).encode('utf-8')
        link = client.parseDOM(name, 'a', ret='href')[0]
        link = client.replaceHTMLCodes(link).encode('utf-8', 'ignore')
        try:
            poster = client.parseDOM(post, 'img', ret='src')[0]
        except IndexError:
            poster = client.parseDOM(post, 'img', ret='data-src')[0]
        poster = client.replaceHTMLCodes(poster).encode('utf-8', 'ignore')
        # if '/tvshows/' in link:
        #     addon.add_directory({'mode': 'to_seasons', 'url': link}, {'title': title, 'plot': str(desc)},
        #                         allfun, img=poster, fanart=FANART)
        # else:
        addon.add_directory({
            'mode': 'ddl_links',
            'url': link
        }, {
            'title': title,
            'plot': str(desc)
        },
                            allfun,
                            img=poster,
                            fanart=FANART)
    try:
        np = client.parseDOM(data, 'a', ret='href', attrs={'rel': 'next'})[0]
        # np = dom_parser.parse_dom(np, 'a', req='href')
        # np = [i.attrs['href'] for i in np if 'icon-chevron-right' in i.content][0]
        page = re.findall(r'page=(\d+)$', np)[0]
        title = control.lang(32010).encode('utf-8') + \
                ' [COLOR white]([COLOR lime]{}[/COLOR])[/COLOR]'.format(page)
        addon.add_directory({
            'mode': 'ddl_items',
            'url': np
        }, {'title': title},
                            img=ART + 'next_page.png',
                            fanart=FANART)
    except BaseException:
        pass
    control.content(int(sys.argv[1]), 'movies')
    control.directory(int(sys.argv[1]))
    view.setView('movies', {'skin.estuary': 55, 'skin.confluence': 500})
Code example #53
0
File: hevcfilm.py Project: uguer30/Project
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'link')[0]
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', t)
                    s = s[0] if s else '0'

                    items += [(t, u, s) ]

                except:
                    pass

            urls = []
            for item in items:

                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, item[1])
                    if any(x in quality for x in ['CAM', 'SD']): continue

                    try:
                        size = re.sub('i', '', item[2])
                        div = 1 if size.endswith('GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    links = self.links(url)
                    urls += [(i, quality, info) for i in links]

                except:
                    pass

            for item in urls:

                if 'earn-money' in item[0]: continue
                if any(x in item[0] for x in ['.rar', '.zip', '.iso']): continue
                url = client.replaceHTMLCodes(item[0])
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': item[1], 'language': 'en', 'url': url, 'info': item[2], 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
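
The hdlr token built near the top of this and most other sources() methods is what ties a release name back to the requested item: an SxxEyy tag for episodes, the plain year for movies. A minimal sketch of just that step:

def build_hdlr(data):
    # Episodes match on 'S01E04'-style tags, movies on the release year.
    if 'tvshowtitle' in data:
        return 'S%02dE%02d' % (int(data['season']), int(data['episode']))
    return data['year']

# build_hdlr({'tvshowtitle': 'Fargo', 'season': '2', 'episode': '4'}) -> 'S02E04'
# build_hdlr({'title': 'Heat', 'year': '1995'}) -> '1995'
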
Code example #54
0
File: pubfilm_mv_tv.py Project: PandoraClub/WizaTV
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try:
                url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(
                    url)[0]
            except:
                pass

            result = client.request(url)

            url = zip(
                client.parseDOM(result,
                                'a',
                                ret='href',
                                attrs={'target': 'EZWebPlayer'}),
                client.parseDOM(result, 'a', attrs={'target': 'EZWebPlayer'}))
            url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
            url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]

            if content == 'episode':
                url = [i for i in url if i[1] == '%01d' % int(episode)]

            links = [client.replaceHTMLCodes(i[0]) for i in url]

            for u in links:

                try:
                    result = client.request(u)
                    result = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    result = re.findall(
                        '"file"\s*:\s*"(.+?)".+?"label"\s*:\s*"(.+?)"', result)

                    url = [{
                        'url': i[0],
                        'quality': '1080p'
                    } for i in result if '1080' in i[1]]
                    url += [{
                        'url': i[0],
                        'quality': 'HD'
                    } for i in result if '720' in i[1]]

                    for i in url:
                        sources.append({
                            'source': 'gvideo',
                            'quality': i['quality'],
                            'provider': 'Pubfilm',
                            'url': i['url'],
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Code example #55
0
File: Galaxy (18).py Project: krazware/therealufo
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            imdb = data['imdb']

            try:
                link = []
                # Probe the three search indexes in turn until one yields a
                # row whose link contains the IMDb id.
                for search in (self.search_link, self.search_link2,
                               self.search_link3):
                    query = urlparse.urljoin(self.base_link, search)
                    result = client.request(query)
                    m = re.findall(
                        'Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse',
                        result, re.DOTALL)
                    m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
                    if m:
                        link = m
                        break
            except:
                return sources

            for item in link:
                try:

                    quality, info = source_utils.get_release_quality(
                        item[2], None)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[0])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[2]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': 'DL',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Code example #56
0
File: movie25_mv.py Project: codixor/maximumTv
    def movie(self, imdb, title, year):
        try:
            #query = self.search_link % (urllib.quote_plus(cleantitle.query(title)), str(int(year)-1), str(int(year)+1))
            #query = urlparse.urljoin(self.base_link, query)

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(title))
            query = urlparse.urljoin(self.base_link, query)

            result = str(self.request(query, 'movie_table'))
            if 'page=2' in result or 'page%3D2' in result:
                result += str(self.request(query + '&page=2', 'movie_table'))

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'movie_table'})

            title = cleantitle.get(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'img', ret='alt')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            try:
                result = [
                    (urlparse.parse_qs(urlparse.urlparse(i[0]).query)['q'][0],
                     i[1]) for i in result
                ]
            except:
                pass
            try:
                result = [
                    (urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0],
                     i[1]) for i in result
                ]
            except:
                pass
            try:
                result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
            except:
                pass

            match = [
                i[0] for i in result
                if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]
            ]

            match2 = [i[0] for i in result]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = self.request(urlparse.urljoin(self.base_link, i),
                                          'link_name')
                    if imdb in str(result):
                        url = i
                        break
                except:
                    pass

            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #57
0
    def __search(self, imdb, titles, year):
        try:
            q = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            q = urlparse.urljoin(self.base_link, q)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(q)

            r = dom_parser.parse_dom(
                r, 'tr', attrs={'id': re.compile('coverPreview.+?')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i,
                                       'div',
                                       attrs={'style': re.compile('.+?')}),
                  dom_parser.parse_dom(i, 'img', req='src')) for i in r]
            r = [(i[0][0].attrs['href'].strip(), i[0][0].content.strip(), i[1],
                  i[2]) for i in r if i[0] and i[2]]
            r = [(i[0], i[1], [
                x.content for x in i[2]
                if x.content.isdigit() and len(x.content) == 4
            ], i[3]) for i in r]
            r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r]
            r = [
                i for i in r if any('us_flag' in x.attrs['src'] for x in i[3])
            ]
            r = [(i[0], i[1], i[2], [
                re.findall('(\d+)', x.attrs['src']) for x in i[3]
                if 'smileys' in x.attrs['src']
            ]) for i in r]
            r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r]
            r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r]
            r = sorted(r, key=lambda x: x[3])[::-1]
            r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r]
            r = [(i[0], i[1], i[2]) for i in r if not i[3]]
            r = [i for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year

            r = [(client.replaceHTMLCodes(i[0]), i[1], i[2]) for i in r]

            match = [
                i[0] for i in r if cleantitle.get(i[1]) in t and year == i[2]
            ]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if match:
                        url = match[0]
                        break
                    r = client.request(urlparse.urljoin(self.base_link, i))
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            return source_utils.strip_domain(url)
        except:
            return
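
The match2 = [x for y, x in enumerate(match2) if x not in match2[:y]] line used here and in the movie() examples above is an order-preserving de-duplication; a seen-set does the same thing in O(n) rather than O(n^2):

def dedupe_keep_order(items):
    seen = set()
    out = []
    for x in items:
        if x not in seen:      # first occurrence wins, order preserved
            seen.add(x)
            out.append(x)
    return out
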
Code example #58
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            posts = client.parseDOM(r, 'h2')
            hostDict = hostprDict + hostDict
            urls = []
            for item in posts:
                try:
                    item = re.compile('a href="(.+?)"').findall(item)
                    name = item[0]
                    query = query.replace(" ", "-").lower()
                    if query not in name:
                        continue
                    name = client.replaceHTMLCodes(name)
                    quality, info = source_utils.get_release_quality(
                        name, item[0])
                    if any(x in quality for x in ['CAM', 'SD']):
                        continue
                    # item is the list of captured hrefs; hand the first
                    # one to self.links() rather than the whole list.
                    url = item[0]
                    links = self.links(url)
                    urls += [(i, quality, info) for i in links]
                except:
                    pass
            for item in urls:
                if 'earn-money' in item[0]:
                    continue
                if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                    continue
                url = client.replaceHTMLCodes(item[0])
                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({
                    'source': host,
                    'quality': item[1],
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': True
                })
            return sources
        except:
            return sources
Code example #59
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not debrid.status(): raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = self.scraper.get(url).content
            r = client.parseDOM(r, 'h2')
            r = [
                re.findall('''<a.+?href=["']([^"']+)["']>(.+?)</a>''', i,
                           re.DOTALL) for i in r
            ]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[0][1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    t1 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = self.scraper.get(
                        urlparse.urljoin(self.base_link, item[0][0])).content
                    data = dom_parser2.parse_dom(data,
                                                 'a',
                                                 attrs={'target': '_blank'})
                    u = [(t, i.content) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            name)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if not url.startswith('http'): continue
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('TVRelease - Exception: \n' + str(failure))
            return sources
Code example #60
0
File: imdb.py Project: noizex/plugin.video.venom
	def imdb_list(self, url, isRatinglink=False):
		list = []
		try:
			for i in re.findall(r'date\[(\d+)\]', url):
				url = url.replace('date[%s]' % i, (self.date_time - timedelta(days=int(i))).strftime('%Y-%m-%d'))
			def imdb_watchlist_id(url):
				return client.parseDOM(client.request(url), 'meta', ret='content', attrs = {'property': 'pageId'})[0]
				# return client.parseDOM(client.request(url).decode('iso-8859-1').encode('utf-8'), 'meta', ret='content', attrs = {'property': 'pageId'})[0]
			if url == self.imdbwatchlist_link:
				url = cache.get(imdb_watchlist_id, 8640, url)
				url = self.imdblist_link % url
			result = client.request(url)
			result = result.replace('\n', ' ')
			# result = result.decode('iso-8859-1').encode('utf-8')
			items = client.parseDOM(result, 'div', attrs = {'class': '.+? lister-item'}) + client.parseDOM(result, 'div', attrs = {'class': 'lister-item .+?'})
			items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
		except:
			log_utils.error()
			return

		next = ''
		try:
			# IMDb's HTML contains a syntax error: a closing quote directly followed by the next attribute name. Insert a space so parseDOM can handle it.
			result = result.replace('"class="lister-page-next', '" class="lister-page-next')
			# next = client.parseDOM(result, 'a', ret='href', attrs = {'class': '.+?ister-page-nex.+?'})
			next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})
			if len(next) == 0:
				next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
				next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
				next = [i[0] for i in next if 'Next' in i[1]]
			next = url.replace(urlparse(url).query, urlparse(next[0]).query)
			next = client.replaceHTMLCodes(next)
		except:
			next = ''

		for item in items:
			try:
				title = client.replaceHTMLCodes(client.parseDOM(item, 'a')[1])
				title = py_tools.ensure_str(title)

				year_raw = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
				try: year = re.findall(r'(\d{4})', year_raw[0])[0]
				except: continue
				if int(year) > int((self.date_time).strftime('%Y')): continue

				# A year range such as "(2011–2015)" marks a TV show; test the raw span,
				# since the extracted 4-digit year can never contain a dash. (str.decode
				# does not exist in Python 3.)
				show = '–' in year_raw[0] or '-' in year_raw[0]
				if show or 'Episode:' in item: raise Exception() # Some lists contain TV shows.

				try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
				except: genre = ''
				genre = ' / '.join([i.strip() for i in genre.split(',')])
				genre = client.replaceHTMLCodes(genre)

				try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
				except: mpaa = ''
				if isRatinglink and 'Short' not in genre:
					if mpaa in ['TV-Y', 'TV-Y7', 'TV-G', 'TV-PG', 'TV-13', 'TV-14', 'TV-MA']:
						raise Exception()
				if mpaa == '' or mpaa == 'NOT_RATED': mpaa = ''
				mpaa = mpaa.replace('_', '-')
				mpaa = client.replaceHTMLCodes(mpaa)

				imdb = client.parseDOM(item, 'a', ret='href')[0]
				imdb = re.findall(r'(tt\d*)', imdb)[0]

				try: # parseDOM cannot handle elements without a closing tag.
					from bs4 import BeautifulSoup
					html = BeautifulSoup(item, "html.parser")
					poster = html.find_all('img')[0]['loadlate']
				except: poster = ''

				if '/nopicture/' in poster: poster = ''
				if poster:
					poster = re.sub(r'(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
					poster = client.replaceHTMLCodes(poster)

				try: duration = re.findall(r'(\d+?) min(?:s|)', item)[-1]
				except: duration = ''

				rating = ''
				try:
					rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
					# The rating-rating span wraps the actual number in a nested "value" span.
					try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
					except: pass
				except:
					try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
					except: pass
				if rating == '-': rating = ''
				if not rating:
					try:
						rating = client.parseDOM(item, 'span', attrs = {'class': 'ipl-rating-star__rating'})[0]
						if rating == '-': rating = ''
					except: pass
				rating = client.replaceHTMLCodes(rating)

				votes = ''
				try: votes = client.parseDOM(item, 'span', attrs = {'name': 'nv'})[0]
				except:
					try:
						# The rating-list title reads like "... (12,345 votes)"; pull out the count.
						votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
						votes = re.findall(r'\((.+?) vote(?:s|)\)', votes)[0]
					except: pass
				votes = client.replaceHTMLCodes(votes)

				try: director = re.findall(r'Director(?:s|):(.+?)(?:\||</div>)', item)[0]
				except: director = ''
				director = client.parseDOM(director, 'a')
				director = ' / '.join(director)
				director = client.replaceHTMLCodes(director) # check if this needs ensure_str()

				plot = ''
				try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
				except:
					try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
					except: pass
				plot = plot.rsplit('<span>', 1)[0].strip()
				plot = re.sub(r'<.+?>|</.+?>', '', plot)
				if not plot:
					try:
						plot = client.parseDOM(item, 'div', attrs = {'class': 'lister-item-content'})[0] # not sure on this, check html
						plot = re.sub(r'<p\s*class="">', '<p class="plot_">', plot)
						plot = client.parseDOM(plot, 'p', attrs = {'class': 'plot_'})[0]
						plot = re.sub(r'<.+?>|</.+?>', '', plot)
					except: pass
				plot = client.cleanHTML(plot)

				# premiered, writer, cast and tmdb cannot be read off the list page; the
				# full source presumably fills them in elsewhere, so default them here.
				premiered = writer = cast = ''
				tmdb = ''
				values = {'content': 'movie', 'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered,
						'studio': '', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa,
						'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'code': tmdb,
						'imdb': imdb, 'tmdb': tmdb, 'tvdb': '', 'poster': poster, 'poster2': '', 'poster3': '', 'banner': '',
						'fanart': '', 'fanart2': '', 'fanart3': '', 'clearlogo': '', 'clearart': '', 'landscape': '',
						'metacache': False, 'next': next}
				meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '', 'lang': self.lang, 'user': self.tmdb_key, 'item': values}
				if not self.disable_fanarttv:
					extended_art = cache.get(fanarttv.get_movie_art, 168, imdb, tmdb)
					if extended_art:
						values.update(extended_art)
						meta.update(values)

				values = dict((k, v) for k, v in control.iteritems(values) if v is not None and v != '')
				self.list.append(values)

				if 'next' in meta.get('item'): del meta['item']['next'] # "next" must never be written to the metacache
				self.meta.append(meta)
				self.meta = [i for i in self.meta if i.get('tmdb')] # drop entries without a TMDb id before they reach the metacache
				metacache.insert(self.meta) # note: this runs once per item; moving it below the loop would batch the writes
			except:
				log_utils.error()
		return self.list
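
A note on the poster handling above: IMDb's image CDN encodes resize directives in the file name, so rewriting the size token requests a larger rendition. Below is a minimal standalone sketch of the same substitution; the helper name upscale_poster and the sample URL are illustrative only.

import re

def upscale_poster(url):
    # Swap IMDb image-CDN size tokens (e.g. _UX67_CR0,0,67,98_AL_) for a
    # single _SX500 token so the CDN serves a roughly 500px-wide image.
    if not url or '/nopicture/' in url:
        return ''
    return re.sub(r'(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', url)

# upscale_poster('https://m.media-amazon.com/images/M/MV5Bexample._V1_UX67_CR0,0,67,98_AL_.jpg')
# -> 'https://m.media-amazon.com/images/M/MV5Bexample._V1_SX500.jpg'
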