Example #1
    def movie(self, imdb, title, year):
        self.elysium_url = []	
        try:
            if not alluc_status == 'true': raise Exception()
            print ("ALLUC STARTED" , alluc_user, alluc_pw, max_items)
            headers = {'User-Agent': random_agent()}
            search_title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title) + year
            query = "%s+%s" % (urllib.quote_plus(search_title),year)
            print ("ALLUC r1", query)
            query =  self.api_link % (alluc_user, alluc_pw, query)
            if alluc_debrid == 'true': query =	query + max_result_string
            else: query = query + '+%23newlinks' + max_result_string
            print ("ALLUC r2", query)
            html = requests.get(query, headers=headers, timeout=15).json()
            for result in html['result']:
                if len(result['hosterurls']) > 1: continue
                if result['extension'] == 'rar': continue
                stream_url = result['hosterurls'][0]['url'].encode('utf-8')
                stream_title = result['title'].encode('utf-8')
                stream_title = cleantitle.getsearch(stream_title)
                if cleanmovie in cleantitle.get(stream_title):
                    self.elysium_url.append([stream_url, stream_title])
                    print ("ALLUC r3", self.elysium_url)
            return self.elysium_url
        except:
            return	
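
The matching idiom above recurs in nearly every example below: one aggressively normalized form of the wanted title (plus the year) is tested as a substring of the normalized candidate. A minimal self-contained sketch of that pattern, with a stub normalize() standing in for cleantitle.get (assumed here to lowercase and strip everything but letters and digits):

    import re

    def normalize(s):
        # stand-in for cleantitle.get: lowercase, keep only letters and digits
        return re.sub(r'[^a-z0-9]', '', s.lower())

    def matches(wanted_title, year, candidate):
        return normalize(wanted_title) + year in normalize(candidate)

    print(matches('Blade Runner', '1982', 'Blade.Runner.1982.1080p.BluRay'))  # True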
Example #2
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = (urllib.quote_plus(title)) + "+season+" + season
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)

            print ("ONEMOVIES EPISODES", r)
            checkseason = cleantitle.get(title) + "season" + season
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(requests.get(r, headers=headers, timeout=20).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
               
                links = result.findAll('a')

                for link in links:
                    link_title = str(link['title'])
                    href = str(link['href'])
                    href = client.replaceHTMLCodes(href)
                    if cleantitle.get(link_title) == checkseason:
                        ep_id = '?episode=%01d' % int(episode)
                        url = href + ep_id
                        # print("ONEMOVIES Passed", href)
                        return url

        except:
            return
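
The url argument these episode() methods receive is just a urlencoded query string produced earlier by the matching tvshow() method; urlparse.parse_qs turns it back into a dict of one-element lists, which the dict comprehension then flattens. A small round-trip sketch of that convention (Python 2 stdlib names, matching the imports the examples use):

    import urllib
    import urlparse

    # what a tvshow() method would typically return
    url = urllib.urlencode({'tvshowtitle': 'Some Show', 'year': '2016'})

    # what episode() does with it
    data = urlparse.parse_qs(url)  # values arrive as one-element lists
    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
    print(data['tvshowtitle'])  # Some Show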
Example #3
    def __search(self, titles, year):
        try:

            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0]+' '+year)))

            query = urlparse.urljoin(self.base_link, query)

            t =  cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})

            r = client.parseDOM(r, 'h3')

            for i in r:
                data = re.findall('<span.*?>(.+?)</span>.+?date">\s*\((\d{4}).*?</span>', i, re.DOTALL)
                for found_title, found_year in data:
                    found_title = cleantitle.get(found_title)
                    # compare the candidate against the requested title and year
                    if found_title in t and found_year == year:
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        return source_utils.strip_domain(url)

            return
        except:
            return
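
To see what the regular expression in this example pulls out, here is a quick sketch run against a fabricated fragment of search-result markup (the HTML below is illustrative only, not the site's actual output):

    import re

    html = '<span class="title">Arrival</span> <span class="date"> (2016) </span>'
    print(re.findall('<span.*?>(.+?)</span>.+?date">\s*\((\d{4}).*?</span>', html, re.DOTALL))
    # [('Arrival', '2016')]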
Example #4
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = (urllib.quote_plus(title))
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)
            checkseason = cleantitle.get(title) + "season" + season
            html = BeautifulSoup(OPEN_URL(r).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for link in containers:
                link_title = link('a')[0]['title'].encode('utf-8')
                href = link('a')[0]['href'].encode('utf-8')
                if cleantitle.get(link_title) == checkseason:
                    url = {'url': href, 'type': 'tv_shows', 'episode': episode}
                    url = urllib.urlencode(url)
                    print ("SOLARMOVIE PASSED", url)
                    return url

        except:
            return
Example #5
    def movie(self, imdb, title, year):
        self.elysium_url = []	
        try:
			if not debridstatus == 'true': raise Exception()
			self.tvurl = []
			self.elysium_url = []
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			titlecheck = cleanmovie+year
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			link = client.request(query)
			for item in parse_dom(link, 'div', {'class': 'entry clearfix'}):
				match = re.compile('<h2 class="title"><a href="(.+?)">(.+?)</a></h2>').findall(item)
				for movielink,title2 in match:
					
					title = cleantitle_get_2(title2)
					if year in title2:
						if titlecheck in title:
							for item2 in parse_dom(item, 'div', {'class': 'entry-content clearfix'}):
								match2 = re.compile('href="([^"]+)').findall(item2)
								for movielink in match2:
									quality = "SD"
									if "1080" in title: quality = "1080p"
									elif "720" in title: quality = "HD"				
									if "1080" in movielink: quality = "1080p"						
									elif "720" in movielink: quality = "HD"								
									
							
									self.elysium_url.append([movielink,quality])
			
			return self.elysium_url

        except:
            return
Example #6
    def movie(self, imdb, title, year):
        try:
            self.elysium = []

            cleaned_title = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            q = self.search_link % (urllib.quote_plus(title))
            r = urlparse.urljoin(self.base_link, q)
            html = BeautifulSoup(OPEN_URL(r).content)
            print ("ONEMOVIES EPISODES", html)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for link in containers:
                link_title = link('a')[0]['title'].encode('utf-8')
                href = link('a')[0]['href'].encode('utf-8')
                info = link('a')[0]['data-url'].encode('utf-8')
                if cleantitle.get(link_title) == cleaned_title:
                    info = urlparse.urljoin(self.base_link, info)
                    html = OPEN_URL(info).content
                    pattern = '<div class="jt-info">%s</div>' % year
                    match = re.findall(pattern, html)
                    if match:
                        url = client.replaceHTMLCodes(href)
                        url = {'url': url, 'type': 'movie'}
                        url = urllib.urlencode(url)
                        print ("SOLARMOVIE PASSED", url)
                        return url
        except:
            return
Example #7
    def movie(self, imdb, title, year):
        self.elysium_url = []	
        try:
			if not debridstatus == 'true': raise Exception()
			self.elysium_url = []
			headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}

			cleanmovie = cleantitle.get(title)
			title = cleantitle.getsearch(title)
			titlecheck = cleanmovie+year
			query = self.search_link % (urllib.quote_plus(title), year)
			query = urlparse.urljoin(self.base_link, query)
			html = BeautifulSoup(requests.get(query, headers=headers, timeout=10).content)
			containers = html.findAll('h2', attrs={'class': 'title'})
			for result in containers:
				
				r_title = result.findAll('a')[0]
				r_title = r_title.string
				r_href = result.findAll('a')[0]["href"]
				r_href = r_href.encode('utf-8')
				r_title = r_title.encode('utf-8')
				c_title = cleantitle_get_2(r_title)
				if titlecheck in c_title:
					self.elysium_url.append([r_href,r_title])

			
			return self.elysium_url

        except:
            return
Example #8
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
			if not debridstatus == 'true': raise Exception()
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			cleanmovie = cleantitle.get(title)
			title = cleantitle.getsearch(title)
			data['season'], data['episode'] = season, episode
			episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			episodecheck = str(episodecheck).lower()
			query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			query = self.search_link % (urllib.quote_plus(title),query)
			mylink = urlparse.urljoin(self.base_link, query)
			link = client.request(mylink)
			r = client.parseDOM(link, 'h2', attrs = {'class': 'postTitle'})
			# print ("RLSBBONLINE TV r", r)
			for item in r:
				href = client.parseDOM(item, 'a', ret = 'href')[0]
				item_title = client.parseDOM(item, 'a', ret = 'title')[0]
				href = href.encode('utf-8')
				item_title = item_title.encode('utf-8')
				if cleanmovie in cleantitle.get(item_title):
					if episodecheck in cleantitle.get(item_title):
						self.zen_url.append([href,item_title])
						# print ("RLSBBONLINE TV PASSED", self.zen_url)
							
			return self.zen_url
        except:
            return			
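
The episode scrapers in this collection all build a release-style episode tag with the same format string and then look for it inside the normalized post title:

    season, episode = '3', '7'
    episodecheck = ('S%02dE%02d' % (int(season), int(episode))).lower()
    print(episodecheck)  # s03e07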
Example #9
    def movie(self, imdb, title, year):
		self.genesisreborn_url = []
		try:
			if not debridstatus == 'true': raise Exception()			
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			titlecheck = cleanmovie+year
			query = self.search_link % (urllib.quote_plus(title),year)
			query = self.base_link + query
			r = client.request(query)
			posts = client.parseDOM(r, 'item')	
			items = []
			for post in posts:
				try:
						
					t = client.parseDOM(post, 'title')[0]
					t = t.encode('utf-8')
					if not (cleanmovie in cleantitle.get(t) and year in t.lower()): continue
					
					c = client.parseDOM(post, 'content.+?')[0]
					u = client.parseDOM(post, 'a', ret='href')
					
					if not u: raise Exception()
					u = [(t, i) for i in u]
					self.genesisreborn_url += u
					
				except:
					pass
			print ("RLSLOG PASSED", self.genesisreborn_url)		
			return self.genesisreborn_url

		except:
			return	
Example #10
    def movie(self, imdb, title, year):
        self.zen_url = []
        try:
			if not debridstatus == 'true': raise Exception()			
			self.zen_url = []
			
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			# print ("RLSBBONLINE query", query)
			link = client.request(query)
			r = client.parseDOM(link, 'h2', attrs = {'class': 'postTitle'})
			# print ("RLSBBONLINE r", r)
			for item in r:
				href = client.parseDOM(item, 'a', ret = 'href')[0]
				item_title = client.parseDOM(item, 'a', ret = 'title')[0]
				href = href.encode('utf-8')
				item_title = item_title.encode('utf-8')
				if year in item_title:
					if cleanmovie in cleantitle.get(item_title):
						self.zen_url.append([href,item_title])
						# print "RLSBBONLINE MOVIES %s %s" % (item_title , href)
			return self.zen_url
        except:
            return
Example #11
    def movie(self, imdb, title, year):
		self.genesisreborn_url = []
		try:
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			
			headers={'Host':'webapp.bobbyhd.com',
					'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
					'User-Agent':'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69',
					'Accept-Language':'en-gb',
					'Accept-Encoding':'gzip, deflate',
					'Connection':'keep-alive'}
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			r = session.get(query,headers=headers).content
			# print ("BOBBYAPP", r)
			match=re.compile('alias=(.+?)\'">(.+?)</a>').findall(r)
			for id,name in match:
				name = cleantitle.get(name)
				# print ("BOBBYAPP id name", id, name)
				if cleanmovie == name:
					type = 'type_movies'
					ep = '0'
					self.genesisreborn_url.append([id,type,ep])

			# print ("BOBBY PASSED", self.genesisreborn_url)		
			return self.genesisreborn_url

		except:
			return	
Example #12
    def movie(self, imdb, title, year):
		try:
			self.elysium_url = []
			if not debridstatus == 'true': raise Exception() 
			headers = {'User-Agent': random_agent()}

			cleanmovie = cleantitle.get(title)
			title = cleantitle.getsearch(title)
			titlecheck = cleanmovie+year
			query = self.search_link % (urllib.quote_plus(title), year)
			query = urlparse.urljoin(self.base_link, query)
			print("HEVC query", query)
			html = BeautifulSoup(rq.get(query, headers=headers, timeout=10).content)
			
			containers = html.findAll('div', attrs={'class': 'postcontent'})
			
			for result in containers:
				print("HEVC containers", result)
				r_title = result.findAll('a')[0]["title"]
				r_href = result.findAll('a')[0]["href"]
				r_href = r_href.encode('utf-8')
				r_title = r_title.encode('utf-8')
				c_title = cleantitle.get(r_title)
				if year in r_title and cleanmovie in c_title:
					self.elysium_url.append([r_href,r_title])
					print("HEVC PASSED MOVIE ", r_title, r_href)
			return self.elysium_url
		except:
			return
Example #13
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
			if not debridstatus == 'true': raise Exception()
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			data['season'], data['episode'] = season, episode
			
			episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			episodecheck = str(episodecheck).lower()
			query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			query = self.search_link % (urllib.quote_plus(title),query)
			query = urlparse.urljoin(self.base_link, query)
			query = query + "&x=0&y=0"	
			
			link = client.request(query)
			r = client.parseDOM(link, 'div', attrs = {'class': 'post'})
			for item in r:
				match = re.compile('<a href="(.*?)" rel="bookmark" title="(.*?)">').findall(item)
				for url,title in match:
					title = cleantitle.get(title)
					if cleanmovie in title:
						if episodecheck in title:
							self.zen_url.append([url,title])
							# print "SCNSRC MOVIES %s %s" % (title , url)
							
			return self.zen_url
        except:
            return			
Example #14
    def movie(self, imdb, title, year):
        self.zen_url = []
        try:
				
			self.zen_url = []
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			checktitle = cleanmovie+year
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			
			link = client.request(query)
			result = json.loads(link)
			items = result['suggestions']
			for item in items:
				href = item['data']['href']
				value = item['value']
				url = href.encode('utf-8')
				value = value.encode('utf-8')
				if checktitle == cleantitle.get(value):
					if not self.base_link in url: url = urlparse.urljoin(self.base_link, url)
					print ("MVGEE PASSED", url)
					return url
					
        except:
            return
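
This scraper expects the search endpoint to return an autocomplete-style JSON payload; a sketch of the shape the loop above reads (the payload here is fabricated for illustration):

    import json

    link = json.dumps({'suggestions': [
        {'value': 'Some Movie (2015)', 'data': {'href': '/movie/some-movie-2015'}}
    ]})
    result = json.loads(link)
    for item in result['suggestions']:
        print(item['data']['href'] + ' ' + item['value'])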
Example #15
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []	
        try:
			if not debridstatus == 'true': raise Exception() 
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			data['season'], data['episode'] = season, episode
			self.zen_url = []
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			episodecheck = episodecheck.lower()
			titlecheck = cleanmovie+episodecheck
			query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			query =  self.search_link % (urllib.quote_plus(title),query)
			query = urlparse.urljoin(self.base_link, query)
			link = client.request(query)
			match = re.compile('<h2><a href="(.+?)" rel=".+?" title=".+?" data-wpel-link="internal">(.+?)</a></h2>').findall(link)
			for movielink,title2 in match:
				r_title = cleantitle.get(title2)
				if titlecheck in r_title:
					self.zen_url.append([movielink,r_title])
			return self.zen_url
        except:
            return
Example #16
    def movie(self, imdb, title, year):

        self.zen_url = []
        try:
			if not debridstatus == 'true': raise Exception() 
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			query =  self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			link = client.request(query)
			titlecheck = cleanmovie+year
			match = re.compile('<h2><a href="(.+?)" rel=".+?" title=".+?" data-wpel-link="internal">(.+?)</a></h2>').findall(link)

			
			for movielink,r_title in match:
				if year in r_title:
					r_title = cleantitle_get_2(r_title)
					if titlecheck in r_title:
						self.zen_url.append([movielink,r_title])

			return self.zen_url
        except:
            return
Example #17
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.genesisreborn_url = []
        try:
			if not debridstatus == 'true': raise Exception()
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			data['season'], data['episode'] = season, episode
			episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			episodecheck = str(episodecheck).lower()
			titlecheck = cleanmovie+episodecheck
			query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			query = self.search_link % (urllib.quote_plus(title),query)
			mylink = urlparse.urljoin(self.base_link, query)
			link = client.request(mylink)
			r = client.parseDOM(link, 'div', attrs = {'id': 'post-.+?'})
			for item in r:
				href = client.parseDOM(item, 'a', ret = 'href')[0]
				item_title = client.parseDOM(item, 'a', ret = 'title')[0]
				href = href.encode('utf-8')
				item_title = item_title.encode('utf-8')
				item_title = cleantitle.get(item_title)
				if titlecheck in item_title:
					item_title = item_title + "=episode"
					self.genesisreborn_url.append([href,item_title])
						# print "DAILYRLS TV SHOWS %s %s" % (item_title , href)
							
			return self.genesisreborn_url
        except:
            return			
Example #18
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])
            scraper = cfscrape.create_scraper()
            data = scraper.get(query).content
            #data = client.request(query, referer=self.base_link)
            data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
            r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
            r = zip(dom_parser.parse_dom(r, 'a'), dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))

            url = []
            for i in range(len(r)):
                title = cleantitle.get(r[i][0][1])
                title = re.sub('(\d+p|4k|3d|hd|season\d+)','',title)
                y = r[i][1][1]
                link = r[i][0][0]['href']
                if 'season' in title: continue
                if t == title and y == year:
                    if 'season' in link:
                        url.append(source_utils.strip_domain(link))
                        print url[0]
                        return url[0]
                    else: url.append(source_utils.strip_domain(link))

            return url
        except:
            return
Example #19
    def movie(self, imdb, title, localtitle, aliases, year):
        try:

            query = urlparse.urljoin(self.base_link, self.search_link)
            if ':' in title:
                title2 = title.split(':')[0] + ':'
                post = 'search=%s&what=title' % title2

            else: post = 'search=%s&what=title' % cleantitle.getsearch(title)


            t = cleantitle.get(title)

            r = client.request(query, post=post)
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a',)) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = urlparse.urljoin(self.base_link, re.findall('(?://.+?|)(/.+)', r)[0])
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            return
Example #20
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []	
        try:
			if not debridstatus == 'true': raise Exception()
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			data['season'], data['episode'] = season, episode
			self.zen_url = []
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)			
			episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			episodecheck = str(episodecheck)
			episodecheck = episodecheck.lower()
			titlecheck = cleanmovie+episodecheck
			query = '%s+S%02dE%02d' % (urllib.quote_plus(title), int(data['season']), int(data['episode']))
			movielink = self.search_link + query
			link = client.request(movielink, timeout="10")
			match = re.compile('<h2 class="entry-title"><a href="(.+?)" rel="bookmark">(.+?)</a></h2>').findall(link)
			for movielink,title2 in match:
				c_title = cleantitle.get(title2)
				if titlecheck in c_title:
					self.zen_url.append([movielink,title])
			return self.zen_url
        except:
            return
Example #21
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0]+' '+year)))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'v_pict'})

            for i in r:
                title = re.findall('alt="(.+?)"',i[1], re.DOTALL)[0]
                y = re.findall('(\d{4})', title, re.DOTALL)[0]
                title = re.sub('<\w+>|</\w+>','',title)
                title = cleantitle.get(title)
                title = re.findall('(\w+)', cleantitle.get(title))[0]

                if title in t and year == y:
                    url = re.findall('href="(.+?)"',i[1], re.DOTALL)[0]
                    return source_utils.strip_domain(url)
            return
        except:
            return
Example #22
    def __search(self, titles, year):
        try:
            query = self.search_link % (cleantitle.getsearch(titles[0].replace(' ','%20')))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = client.parseDOM(r, 'li', attrs={'class': 'item everyone-item over_online haveTooltip'})

            for i in r:
                title = client.parseDOM(i, 'a', ret='title')[0]
                url = client.parseDOM(i, 'a', ret='href')[0]
                data = client.request(url)
                y = re.findall('<p><span>Año:</span>(\d{4})',data)[0]
                original_t = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>',data, re.DOTALL)[0]
                original_t, title = cleantitle.get(original_t), cleantitle.get(title)

                if (t in title or t in original_t) and y == year :
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
Example #23
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []	
        try:
			
			if not debridstatus == 'true': raise Exception()
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			year = data['year'] 
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			data['season'], data['episode'] = season, episode
			ep_query = "S%02dE%02d" % (int(data['season']),int(data['episode']))
			query = self.search_link % (urllib.quote_plus(title),ep_query )
			query = urlparse.urljoin(self.base_link, query)
			# print("CRAZY4AD query", query)
			link = client.request(query)
			r = client.parseDOM(link, 'h1', attrs = {'class': 'entry-title'})
			for items in r:
				href = client.parseDOM(items, 'a', ret = 'href')[0]
				item_title = client.parseDOM(items, 'a', ret = 'title')[0]
				href = href.encode('utf-8')
				item_title = item_title.encode('utf-8')
				# print("CRAZY4AD LINKS", href,item_title)
				if ep_query.lower() in cleantitle.get(item_title):
					if cleanmovie in cleantitle.get(item_title):
						# print("CRAZY4AD LINKS PASSED", href,item_title)
						self.zen_url.append(href)
			return self.zen_url
        except:
            return
Example #24
    def __search(self, titles, year, content):
        try:

            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0] #cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})

            if content == 'movies':
                r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
            else:
                r = client.parseDOM(r, 'div', attrs={'id': 'series'})

            data = dom_parser.parse_dom(r, 'figcaption')

            for i in data:
                title = i[0]['title']
                title = cleantitle.get(title)
                if title in t:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])
                else:
                    url = dom_parser.parse_dom(i, 'a', req='href')
                    data = client.request(url[0][0]['href'])
                    data = re.findall('<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                    if titles[0] in data[0] and year == data[1]: return source_utils.strip_domain(url[0][0]['href'])

            return
        except:
            return
Example #25
    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
			if not debridstatus == 'true': raise Exception()
			count = 0
			self.elysium_url = []
			cleanmovie = cleantitle.get(title)
			title = cleantitle.getsearch(title)
			titlecheck = cleanmovie+year
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			
			link = client.request(query)
			r = client.parseDOM(link, 'div', attrs = {'id': 'content'})
			for containers in r:
				print ("TVRLS containers", containers)
				match = re.compile("<a href='(.+?)'>(.+?)</a>").findall(containers)
				for movielink,title2 in match:
					title3 = cleantitle_get_2(title2)
					if titlecheck in title3:
						if "1080" in title2 or "720" in title2:
							count += 1
							if not count > 6:				
								self.elysium_url.append([movielink,title2])
			return self.elysium_url
        except:
            return
Example #26
    def movie(self, imdb, title, year):
        self.zen_url = []	
        try:
			if not debridstatus == 'true': raise Exception()
			self.zen_url = []
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			# print("CRAZY4AD query", query)
			link = client.request(query)
			r = client.parseDOM(link, 'h1', attrs = {'class': 'entry-title'})
			for items in r:
				href = client.parseDOM(items, 'a', ret = 'href')[0]
				item_title = client.parseDOM(items, 'a', ret = 'title')[0]
				href = href.encode('utf-8')
				item_title = item_title.encode('utf-8')
				# print("CRAZY4AD LINKS", href,item_title)
				if year in item_title:
					if cleanmovie in cleantitle.get(item_title):
						# print("CRAZY4AD LINKS PASSED", href,item_title)
						self.zen_url.append(href)
			return self.zen_url
        except:
            return
Example #27
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.elysium_url = []		
        try:
			if not debridstatus == 'true': raise Exception()
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			data['season'], data['episode'] = season, episode
			self.elysium_url = []
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
			episodecheck = str(episodecheck)
			episodecheck = episodecheck.lower()
			titlecheck = cleanmovie+episodecheck
			query = '%s+S%02dE%02d' % (urllib.quote_plus(title), int(data['season']), int(data['episode']))
			movielink = "http://scnlog.eu/tv-shows/?s=" + str(query)
			link = client.request(movielink)
			match = re.compile('<a href="(.+?)" rel="bookmark" title="(.+?)">').findall(link)
			for movielink,title2 in match:
				title = cleantitle.get(title2)
				if titlecheck in title:
					self.elysium_url.append([movielink,title])
			return self.elysium_url
        except:
            return
Example #28
    def movie(self, imdb, title, year):
		self.zen_url = []
		try:
			if not debridstatus == 'true': raise Exception()			
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			print "%s QUERY %s" % (self.base_link, query)
			r = client.request(query)
			r = BeautifulSoup(r)
			r = r.findAll('h2', attrs = {'class': 'entry-title'})
			
			for item in r:
				try:
					t = item.findAll('a')[0].string
					t = t.encode('utf-8')
					h = item.findAll('a')[0]['href'].encode('utf-8')
					
					if cleanmovie in cleantitle_get(t) and year in t:

						self.zen_url.append([t,h])
					# self.zen_url.append([links,t])
					
				except:
					pass
				
			return self.zen_url

		except:
			return	
Example #29
    def movie(self, imdb, title, year):
		self.zen_url = []
		try:
			if not debridstatus == 'true': raise Exception()			
			title = cleantitle.getsearch(title)
			cleanmovie = cleantitle.get(title)
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			print "%s QUERY %s" % (self.base_link, query)
			r = client.request(query)
			posts = client.parseDOM(r, 'item')	
			items = []
			for post in posts:
				try:
					t = client.parseDOM(post, 'title')[0]
					t = t.encode('utf-8')
					if not (cleanmovie in cleantitle.get(t) and year in t): continue
					c = client.parseDOM(post, 'content.+?')[0]
					u = client.parseDOM(c, 'p')
					u = [client.parseDOM(i, 'a', ret='href') for i in u]
					u = [i[0] for i in u if len(i) == 1]
					if not u: raise Exception()

					u = [(t, i) for i in u]

					self.zen_url += u
					# self.zen_url.append([links,t])
					
				except:
					pass
			# print ("SCENEDOWN PASSED", self.zen_url)		
			return self.zen_url

		except:
			return	
Example #30
    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()			
            self.elysium_url = []
           
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie+year
			
            query = self.search_link % (urllib.quote_plus(title),year)
            query = urlparse.urljoin(self.base_link, query)
            query = query + "&x=0&y=0"
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=30).content)
           
            result = html.findAll('div', attrs={'class': 'post'})

            for r in result:
                r_href = r.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                # print ("MOVIEXK r2", r_href)
                r_title = r.findAll('a')[0]["title"]
                # print ("MOVIEXK r3", r_title)
                r_title = r_title.encode('utf-8')
                c_title = cleantitle_get_2(r_title)
                if year in r_title:
                    if titlecheck in c_title:
                        self.elysium_url.append([r_href, r_title])
                        # print "SCNSRC MOVIES %s %s" % (r_title, r_href)
            return self.elysium_url
        except:
            return
Example #31
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user != '' and self.password != ''): #raise Exception()

               login = urlparse.urljoin(self.base_link, '/login.html')

               post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

               cookie = client.request(login, post=post, output='cookie', close=False)

               r = client.request(login, post=post, cookie=cookie, output='extended')

               headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
               headers = {}


            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                query = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
                query2 = urlparse.urljoin(self.base_link, self.search_link % re.sub('\s','+',title))
                r = client.request(query)
                r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
                if len(r)==0:
                    r = client.request(query2)
                    r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'), client.parseDOM(r, 'a', ret='data-url'))
                
                if 'tvshowtitle' in data:                   
                    cltitle = cleantitle.get(title+'season'+season)
                    cltitle2 = cleantitle.get(title+'season%02d'%int(season))
                else:
                    cltitle = cleantitle.get(title)
                    cltitle2 = cltitle  # no season variant for movies

                r = [i for i in r if cltitle == cleantitle.get(i[1]) or cltitle2 == cleantitle.get(i[1])]
                id = [re.findall('/(\d+)$',i[2])[0] for i in r][0]

                ajx = urlparse.urljoin(self.base_link, '/ajax/movie_episodes/'+id)

                r = client.request(ajx)
                if 'episode' in data:
                    eids = re.findall(r'title=\\"Episode\s+%02d.*?data-id=\\"(\d+)'%int(episode),r)
                else:
                    eids = re.findall(r'title=.*?data-id=\\"(\d+)',r)

                for eid in eids:
                    try:
                        ajx = 'ajax/movie_token?eid=%s&mid=%s&_=%d' % (eid, id, int(time.time() * 1000))
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        [x,y] = re.findall(r"_x='([^']+)',\s*_y='([^']+)'",r)[0]
                        ajx = 'ajax/movie_sources/%s?x=%s&y=%s'%(eid,x,y)
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        r = json.loads(r)
                        r = r['playlist'][0]['sources']
                        for i in r:
                            try: label = source_utils.label_to_quality(i['label']) 
                            except: label = 'SD'
                            sources.append({'source': 'cdn', 'quality': label, 'language': 'en', 'url': i['file'], 'direct': True, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Example #32
    def clean_serach(self, serach_str):
        result = cleantitle.getsearch(serach_str)
        result = re.sub(' +', ' ', result)
        return result.strip()
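
A short usage sketch for this helper: assuming cleantitle.getsearch replaces punctuation with spaces (which is what leaves the doubled blanks), the re.sub collapses them and strip() trims the ends:

    import re

    raw = 'mad max  fury road '  # hypothetical output of cleantitle.getsearch
    print(re.sub(' +', ' ', raw).strip())  # mad max fury road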
Example #33
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %s' % (title, season)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))

            r = client.request(url)

            url = re.findall('<a href=\"(.+?\/movie\/%s-season-%s-.+?\.html)\"' % (cleantitle.geturl(title), season), r)[0]

            return url

        except:
            return
Example #34
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                r = client.request(self.base_link,
                                   output='extended',
                                   timeout='10')
                cookie = r[4]
                headers = r[3]
                result = r[0]
                headers['Cookie'] = cookie

                query = urlparse.urljoin(
                    self.base_link, self.search_link %
                    urllib.quote_plus(cleantitle.getsearch(title)))
                #query2 = urlparse.urljoin(self.base_link, self.search_link % re.sub('\s','+',title))
                r = client.request(query, headers=headers, XHR=True)
                r = json.loads(r)['content']
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a'))

                if 'tvshowtitle' in data:
                    cltitle = cleantitle.get(title + 'season' + season)
                    cltitle2 = cleantitle.get(title +
                                              'season%02d' % int(season))
                    r = [
                        i for i in r if cltitle == cleantitle.get(i[1])
                        or cltitle2 == cleantitle.get(i[1])
                    ]
                    vurl = '%s%s-episode-%s' % (self.base_link, str(
                        r[0][0]).replace('/info', ''), episode)
                    vurl2 = None
                else:
                    cltitle = cleantitle.getsearch(title)
                    cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                    r = [
                        i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                        or cltitle == cleantitle.getsearch(i[1])
                    ]
                    vurl = '%s%s-episode-0' % (self.base_link, str(
                        r[0][0]).replace('/info', ''))
                    vurl2 = '%s%s-episode-1' % (self.base_link, str(
                        r[0][0]).replace('/info', ''))

                r = client.request(vurl, headers=headers)
                headers['Referer'] = vurl

                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
                if len(slinks) == 0 and not vurl2 == None:
                    r = client.request(vurl2, headers=headers)
                    headers['Referer'] = vurl2
                    slinks = client.parseDOM(
                        r, 'div', attrs={'class': 'anime_muti_link'})
                    slinks = client.parseDOM(slinks, 'li', ret='data-video')

                for slink in slinks:
                    try:
                        if 'vidnode.net/streaming.php' in slink:
                            r = client.request('https:%s' % slink,
                                               headers=headers)
                            clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                            clinks = re.findall(
                                r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)',
                                clinks)
                            for clink in clinks:
                                q = source_utils.label_to_quality(clink[1])
                                sources.append({
                                    'source': 'cdn',
                                    'quality': q,
                                    'language': 'en',
                                    'url': clink[0],
                                    'direct': True,
                                    'debridonly': False
                                })
                        else:
                            valid, hoster = source_utils.is_host_valid(
                                slink, hostDict)
                            if valid:
                                sources.append({
                                    'source': hoster,
                                    'quality': 'SD',
                                    'language': 'en',
                                    'url': slink,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except:
                        pass

            return sources
        except:
            return sources
Example #35
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        url = cleantitle.getsearch(localtvshowtitle)
        return url
Example #36
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None
                pass

            if (url == None):
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:

            return
Example #37
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            t = data['tvshowtitle']
            season = '%01d' % int(data['season'])
            episode = '%02d' % int(data['episode'])

            query = cleantitle.getsearch(t)
            r = urlparse.urljoin(self.base_link, self.search_link2)
            post = {'query': query}
            r = client.request(r, post=post)
            r = dom_parser2.parse_dom(r, 'a')
            r = [(i.attrs['href'],
                  dom_parser2.parse_dom(i.content,
                                        'span',
                                        attrs={'class': 'searchheading'}))
                 for i in r]
            try:
                url = []
                for i in r:
                    t1 = i[1][0].content
                    t2 = re.sub('[Ss]eason\s*\d+', '', t1)
                    if not str(int(season)) in t1: continue
                    if cleantitle.get(t) == cleantitle.get(
                            t2) and not 'pack' in i[0]:
                        url.append(i[0])
                    if len(url) > 1:
                        url = [(i) for i in url if 'hd' in i][0]
                    else:
                        url = url[0]

            except:
                pass
            if len(url) == 0:
                try:
                    r = urlparse.urljoin(self.base_link, self.search_link)
                    t = '%s season %s' % (t, season)
                    post = 'do=search&subaction=search&story=%s' % urllib.quote_plus(
                        cleantitle.getsearch(t))
                    r = client.request(r, post=post)
                    r = dom_parser2.parse_dom(r, 'h4')
                    r = [
                        dom_parser2.parse_dom(i.content, 'a', req=['href'])
                        for i in r if i
                    ]
                    r = [(i[0].attrs['href'], i[0].content) for i in r if i]
                    r = [(i[0], i[1]) for i in r
                         if t.lower() == i[1].replace(' -', '').lower()]
                    r = [(i[0]) for i in r if not 'pack' in i[0]]
                    url = r[0][0]

                except:
                    pass

            links = []

            r = client.request(url)
            name = re.findall('<b>Release Name :.+?">(.+?)</span>', r,
                              re.DOTALL)[0]
            link = client.parseDOM(r,
                                   'span',
                                   attrs={'class': 'downloads nobr'})
            link = [(re.findall(
                '<a href="(.+?)"\s*target="_blank">[Ee]pisode\s*(\d+)</a>', i,
                re.DOTALL)) for i in link]
            for item in link:
                link = [(i[0], i[1]) for i in item if i[1] == str(episode)]
                links.append(link[0][0])

            quality, info = source_utils.get_release_quality(name, None)

            for url in links:
                try:
                    if "protect" in url:
                        redirect = client.request(url)
                        url = re.findall('<a href="(.*?)" target="_blank">',
                                         redirect)
                        url = url[0]

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
    def _search(self, title, year, aliases, headers):
        try:
            q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(q)
            r = client.parseDOM(r, 'div', attrs={'class':'ml-img'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'img', ret='alt'))
            url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and year in i[1]][0][0]

            return url
        except:
            pass