Example #1
    def movie(self, imdb, title, year):
        # Scrapes the site's movie search results for this title/year and
        # keeps HD/1080p links in self.zen_url, stopping once self.count
        # exceeds six.
        self.zen_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            self.zen_url = []
            cleanmovie = cleantitle_get(title)
            title = cleantitle_query(title)
            titlecheck = cleanmovie + year
            query = self.movie_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            link = OPEN_URL(query).text
            match = re.compile(
                '<a class="title" href="(.+?)">(.+?)</a>').findall(link)
            for h, t in match:
                print("RAPIDMOVIES", h, t)
                h = h.encode('utf-8')
                t = t.encode('utf-8')
                check = cleantitle_get_2(t)
                print("RAPIDMOVIES check", check)
                if h.startswith("/"): h = self.base_link + h
                if year in t:
                    if titlecheck in check:
                        info = get_size(t)
                        quality = quality_tag(t)
                        if "1080" in quality or "HD" in quality:
                            self.count += 1
                            if not self.count > 6:
                                print("RAPIDMOVIES PASSED", t, quality, info)
                                self.zen_url.append([h, quality, info])
            return self.zen_url
        except:
            return
Example #2
    def movie(self, imdb, title, year):
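        # Builds the search URL from the cleaned title and year, scrapes the
        # result links, and stores HD/1080p matches in self.elysium_url until
        # self.count exceeds six.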
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            self.elysium_url = []
            cleanmovie = cleantitle_get(title)
            title = cleantitle_query(title)
            titlecheck = cleanmovie + year
            query = self.movie_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            link = OPEN_URL(query).text
            match = re.compile(
                '<a class="title" href="(.+?)">(.+?)</a>').findall(link)
            for h, t in match:
                print("RAPIDMOVIES", h, t)
                h = h.encode('utf-8')
                t = t.encode('utf-8')
                check = cleantitle_get_2(t)
                print("RAPIDMOVIES check", check)
                if h.startswith("/"): h = self.base_link + h
                if year in t:
                    if titlecheck in check:
                        info = get_size(t)
                        quality = quality_tag(t)
                        if "1080" in quality or "HD" in quality:
                            self.count += 1
                            if not self.count > 6:
                                print("RAPIDMOVIES PASSED", t, quality, info)
                                self.elysium_url.append([h, quality, info])
            return self.elysium_url
        except:
            return
Example #3
    def sources(self, url, hostDict, hostprDict):
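        # Fetches each stored result page, parses its 'post_content' blocks,
        # and collects premium-host (hostprDict) links as debrid-only sources,
        # with quality read from each heading and size from the block below it.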
        try:
            sources = []
            for url, title, type in self.elysium_url:

                req = OPEN_URL(url).content
                r = BeautifulSoup(req)
                r = r.findAll('div', attrs={'class': 'post_content'})

                pattern = '<h.+?>(.*?)</h(.*?)</ul>'
                for container in r:
                    html = re.compile(pattern, re.DOTALL).findall(str(container))
                    for titles, block in html:

                        quality = "SD"
                        quality = quality_tag(titles)
                        info = ''
                        if "hevc" in titles.lower(): info = "HEVC"
                        info = get_size(block)

                        links = re.compile('href="([^"]+)').findall(block)
                        for href in links:

                            if any(value in href for value in hostprDict):
                                try:
                                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                                except:
                                    host = 'Videomega'
                                url = client.replaceHTMLCodes(href)
                                url = url.encode('utf-8')
                                sources.append({'source': host, 'quality': quality, 'provider': 'Myvideolink', 'url': url, 'info': info, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
Example #4
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
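        # Builds an SxxExx episode query from the url data, parses the JSON
        # search response, and stores [size, quality, link-list] entries for
        # posts whose cleaned title matches the show and episode.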
        try:
            self.zen_url = []
            if not debridstatus == 'true': raise Exception()
            if url == None: return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle_query(title)
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            ep_query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            
            titlecheck = cleanmovie + ep_query.lower()

            query = "%s+%s" % (urllib.quote_plus(title), ep_query)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query, headers=self.search_header_link, referer=query)
            posts = []
            dupes = []
            print("RELEASEBB QUERY", r)

            try:
                posts += json.loads(re.findall('({.+?})$', r)[0])['results']
            except:
                pass
            for post in posts:
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    print("RELEASEBB 2", name, url)
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    print("RELEASEBB 3 TV", t)
                    if not titlecheck in cleantitle_get(name): raise Exception()
                    print("RELEASEBB 3 PASSED", t)
                    content = post['post_content']
                    url = [i for i in client.parseDOM(content, 'a', ret='href')]

                    size = get_size(content)
                    quality = 'getbyurl'
                    self.zen_url.append([size, quality, url])

                except:
                    pass
            print("RELEASEBB PASSED", self.zen_url)
            return self.zen_url

        except:
            return
Example #5
    def movie(self, imdb, title, year):
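        # Queries the JSON search endpoint with the cleaned title and year,
        # keeps posts whose title contains both, and stores
        # [size, quality, link-list] entries in self.elysium_url.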
        try:
            if not debridstatus == 'true': raise Exception()
            self.elysium_url = []
            query = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = "%s+%s" % (urllib.quote_plus(query), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query,
                               headers=self.search_header_link,
                               referer=query)
            posts = []
            dupes = []
            print("RELEASEBB QUERY", r)

            try:
                posts += json.loads(re.findall('({.+?})$', r)[0])['results']
            except:
                pass
            for post in posts:
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    print("RELEASEBB 2", name, url)
                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleanmovie in cleantitle_get(
                            name) or not year in name:
                        raise Exception()
                    print("RELEASEBB 3 PASSED", t)
                    content = post['post_content']
                    url = [
                        i for i in client.parseDOM(content, 'a', ret='href')
                    ]

                    size = get_size(content)
                    quality = quality_tag(name)
                    self.elysium_url.append([size, quality, url])

                except:
                    pass
            print("RELEASEBB PASSED", self.elysium_url)
            return self.elysium_url

        except:
            return
Example #6
    def sources(self, url, hostDict, hostprDict):
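        # Walks the 'post_content' blocks of each stored result page and
        # collects premium-host (hostprDict) links as debrid-only sources,
        # taking quality from the heading and size from the block beneath it.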
        try:
            sources = []
            for url, title, type in self.elysium_url:

                req = OPEN_URL(url).content
                r = BeautifulSoup(req)
                r = r.findAll('div', attrs={'class': 'post_content'})

                pattern = '<h.+?>(.*?)</h(.*?)</ul>'
                for container in r:
                    html = re.compile(pattern,
                                      re.DOTALL).findall(str(container))
                    for titles, block in html:

                        quality = "SD"
                        quality = quality_tag(titles)
                        info = ''
                        if "hevc" in titles.lower(): info = "HEVC"
                        info = get_size(block)

                        links = re.compile('href="([^"]+)').findall(block)
                        for href in links:

                            if any(value in href for value in hostprDict):
                                try:
                                    host = re.findall(
                                        '([\w]+[.][\w]+)$',
                                        urlparse.urlparse(
                                            href.strip().lower()).netloc)[0]
                                except:
                                    host = 'Videomega'
                                url = client.replaceHTMLCodes(href)
                                url = url.encode('utf-8')
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'provider': 'Myvideolink',
                                    'url': url,
                                    'info': info,
                                    'direct': False,
                                    'debridonly': True
                                })

            return sources
        except:
            return sources
Example #7
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
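        # Builds an SxxExx episode search against self.shows_link, scrapes the
        # result titles, and keeps HD/1080p links in self.zen_url, stopping
        # once self.count exceeds six.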
        self.zen_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            data['season'], data['episode'] = season, episode
            self.zen_url = []
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            episodecheck = 'S%02dE%02d' % (int(
                data['season']), int(data['episode']))
            episodecheck = str(episodecheck)
            episodecheck = episodecheck.lower()
            titlecheck = cleanmovie + episodecheck

            query = self.shows_link % (urllib.quote_plus(title), episodecheck)
            query = urlparse.urljoin(self.base_link, query)
            link = OPEN_URL(query).text
            match = re.compile(
                '<a class="title" href="(.+?)">(.+?)</a>').findall(link)
            for h, t in match:
                print("RAPIDMOVIES", h, t)
                h = h.encode('utf-8')
                t = t.encode('utf-8')
                check = cleantitle_get_2(t)
                print("RAPIDMOVIES check", check)
                if h.startswith("/"): h = self.base_link + h
                if titlecheck in check:
                    info = get_size(t)
                    quality = quality_tag(t)
                    if "1080" in quality or "HD" in quality:
                        self.count += 1
                        if not self.count > 6:
                            print("RAPIDMOVIES PASSED", t, quality, info)
                            self.zen_url.append([h, quality, info])
            return self.zen_url
        except:
            return
Example #8
    def movie(self, imdb, title, year):
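        # Searches the JSON endpoint for the title and year, skips duplicate
        # post slugs, and records [size, quality, link-list] entries in
        # self.zen_url for posts matching the cleaned title and year.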
        try:
            if not debridstatus == 'true': raise Exception()
            self.zen_url = []
            query = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = "%s+%s" % (urllib.quote_plus(query), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query, headers=self.search_header_link, referer=query)
            posts = []
            dupes = []
            print("RELEASEBB QUERY", r)

            try:
                posts += json.loads(re.findall('({.+?})$', r)[0])['results']
            except:
                pass
            for post in posts:
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    print("RELEASEBB 2", name, url)
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleanmovie in cleantitle_get(name) or not year in name: raise Exception()
                    print("RELEASEBB 3 PASSED", t)
                    content = post['post_content']
                    url = [i for i in client.parseDOM(content, 'a', ret='href')]

                    size = get_size(content)
                    quality = quality_tag(name)
                    self.zen_url.append([size, quality, url])

                except:
                    pass
            print("RELEASEBB PASSED", self.zen_url)
            return self.zen_url

        except:
            return
Example #9
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
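        # Episode variant: builds an SxxExx query against self.shows_link and
        # stores matching HD/1080p result links in self.elysium_url until
        # self.count exceeds six.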
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            self.elysium_url = []
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = str(episodecheck)
            episodecheck = episodecheck.lower()
            titlecheck = cleanmovie + episodecheck

            query = self.shows_link % (urllib.quote_plus(title), episodecheck)
            query = urlparse.urljoin(self.base_link, query)
            link = OPEN_URL(query).text
            match = re.compile('<a class="title" href="(.+?)">(.+?)</a>').findall(link)
            for h, t in match:
                print("RAPIDMOVIES", h, t)
                h = h.encode('utf-8')
                t = t.encode('utf-8')
                check = cleantitle_get_2(t)
                print("RAPIDMOVIES check", check)
                if h.startswith("/"): h = self.base_link + h
                if titlecheck in check:
                    info = get_size(t)
                    quality = quality_tag(t)
                    if "1080" in quality or "HD" in quality:
                        self.count += 1
                        if not self.count > 6:
                            print("RAPIDMOVIES PASSED", t, quality, info)
                            self.elysium_url.append([h, quality, info])
            return self.elysium_url
        except:
            return
Example #10
    def sources(self, url, hostDict, hostprDict):
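        # Fetches each stored movie page, derives quality and size from the
        # stored title, and collects premium-host URLs from the page as
        # debrid-only sources.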
        try:
            sources = []
            for movielink, title in self.elysium_url:
                mylink = client.request(movielink)
                info = get_size(title)

                quality = quality_tag(title)

                match2 = re.compile(
                    "<a target='_blank' href='(.+?)'>").findall(mylink)
                for url in match2:
                    if any(value in url for value in hostprDict):
                        if "http" in url:
                            try:
                                host = re.findall(
                                    '([\w]+[.][\w]+)$',
                                    urlparse.urlparse(
                                        url.strip().lower()).netloc)[0]
                            except:
                                host = 'Videomega'
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'provider': 'Tvrls',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })

            return sources
        except:
            return sources
Example #11
    def sources(self, url, hostDict, hostprDict):
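        # Scans each stored result page for target='_blank' links, keeps those
        # pointing at premium hosts (hostprDict), and appends debrid-only
        # source dicts with quality and size parsed from the stored title.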
        try:
            sources = []
            for movielink, title in self.elysium_url:
                mylink = client.request(movielink)
                info = get_size(title)

                quality = quality_tag(title)

                match2 = re.compile("<a target='_blank' href='(.+?)'>").findall(mylink)
                for url in match2:
                    if any(value in url for value in hostprDict):
                        if "http" in url:
                            try:
                                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            except:
                                host = 'Videomega'
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Tvrls', 'url': url, 'info': info, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources