def movie(self, imdb, title, year):
    """Scrape HD debrid links for a movie from the RAPIDMOVIES listing.

    Returns a list of [link, quality, info] rows in ``self.elysium_url``,
    or None when debrid is disabled or any failure occurs (best-effort).
    """
    self.elysium_url = []
    try:
        # Provider only yields usable links with a debrid account enabled.
        if not debridstatus == 'true':
            raise Exception()
        cleanmovie = cleantitle_get(title)
        title = cleantitle_query(title)
        titlecheck = cleanmovie + year
        query = self.movie_link % (urllib.quote_plus(title), year)
        query = urlparse.urljoin(self.base_link, query)
        link = OPEN_URL(query).text
        match = re.compile('<a class="title" href="(.+?)">(.+?)</a>').findall(link)
        for h, t in match:
            print("RAPIDMOVIES", h, t)
            h = h.encode('utf-8')
            t = t.encode('utf-8')
            check = cleantitle_get_2(t)
            print("RAPIDMOVIES check", check)
            # Relative hrefs need the site base prepended.
            if h.startswith("/"):
                h = self.base_link + h
            # Require both the release year and the cleaned title to match.
            if year in t and titlecheck in check:
                info = get_size(t)
                quality = quality_tag(t)
                # Keep only HD results, capped at 6 links overall.
                if "1080" in quality or "HD" in quality:
                    self.count += 1
                    if not self.count > 6:
                        print("RAPIDMOVIES PASSED", t, quality, info)
                        self.elysium_url.append([h, quality, info])
        return self.elysium_url
    except Exception:
        # Best-effort scraper: any failure yields no sources.
        return
Beispiel #2
0
	def movie(self, imdb, title, year):
		"""Search the site and return the URL of the matching movie page.

		Returns None when nothing matches or on any error.
		"""
		self.zen_url = []
		try:
			title = cleantitle_query(title)
			cleanmovie = cleantitle_get(title)
			query = self.search_link % (urllib.quote_plus(title), year)
			query = urlparse.urljoin(self.base_link, query)
			r = OPEN_URL(query, timeout='15')
			html = BeautifulSoup(r.content)
			print("WONLINE BeautifulSoup", html)
			r = html.findAll('div', attrs={'class': 'resultado'})
			print("WONLINE s1", r)
			for u in r:
				r_title = u.findAll('img')[0]['alt'].encode('utf-8')
				# BUG FIX: encode the href — the original encoded
				# r_title twice and left r_href untouched.
				r_href = u.findAll('a')[0]['href'].encode('utf-8')
				print("WONLINE MATCHES", r_title, r_href)
				if year in r_title and cleanmovie == cleantitle_get(r_title):
					# BUG FIX: only build/return the URL for absolute
					# links; previously `url` could be referenced
					# before assignment, aborting the whole loop.
					if "http:" in r_href:
						url = replaceHTMLCodes(r_href)
						print("WONLINE PASSED", url)
						return url
		except Exception:
			return
    def movie(self, imdb, title, year):
        """Find the site's page for a movie and record its quality.

        Returns the page href and sets ``self.quality`` as a side
        effect; returns None when no exact title/year match is found.
        """
        try:
            checktitle = cleantitle_get(title)
            print("SOLARMOVIE", checktitle)
            q = self.search_link % (urllib.quote_plus(cleantitle_query(title)))
            q = urlparse.urljoin(self.base_link, q)
            print("SOLARMOVIE 2", q)
            r = OPEN_URL(q).content
            r = BeautifulSoup(r)
            r = r.findAll('div', attrs={'class': 'ml-item'})
            for items in r:
                # Indentation fix: the original mixed tabs (inner try
                # body) with spaces, which is a syntax error on Py3.
                try:
                    h = items.findAll('a')[0]['href'].encode('utf-8')
                    t = items.findAll('a')[0]['title'].encode('utf-8')
                    if cleantitle_get(t) == checktitle:
                        info = items.findAll('a')[0]['data-url'].encode('utf-8')
                        info = urlparse.urljoin(self.base_link, info)
                        y, q = self.movies_info(info, year)
                        # Wrong year -> treat as no match for this item.
                        if not y == year: raise Exception()
                        self.quality = q
                        return h
                except Exception:
                    pass
        except Exception:
            return
 def movie(self, imdb, title, year):
     """Scrape HD debrid links for the given movie and year.

     Populates and returns ``self.zen_url`` as a list of
     [link, quality, info] rows; returns None on any failure.
     """
     self.zen_url = []
     try:
         if not debridstatus == 'true': raise Exception()
         self.zen_url = []
         cleanmovie = cleantitle_get(title)
         title = cleantitle_query(title)
         titlecheck = cleanmovie + year
         query = urlparse.urljoin(
             self.base_link,
             self.movie_link % (urllib.quote_plus(title), year))
         link = OPEN_URL(query).text
         pattern = re.compile('<a class="title" href="(.+?)">(.+?)</a>')
         for h, t in pattern.findall(link):
             print("RAPIDMOVIES", h, t)
             h, t = h.encode('utf-8'), t.encode('utf-8')
             check = cleantitle_get_2(t)
             print("RAPIDMOVIES check", check)
             if h.startswith("/"): h = self.base_link + h
             # Skip entries that lack the year or cleaned-title match.
             if year not in t or titlecheck not in check:
                 continue
             info = get_size(t)
             quality = quality_tag(t)
             # Only HD releases count, capped at six links overall.
             if "1080" not in quality and "HD" not in quality:
                 continue
             self.count += 1
             if not self.count > 6:
                 print("RAPIDMOVIES PASSED", t, quality, info)
                 self.zen_url.append([h, quality, info])
         return self.zen_url
     except:
         return
Beispiel #5
0
    def movie(self, imdb, title, year):
        """Return the href of the site entry matching title and year.

        Stores the detected quality on ``self.quality``; returns None
        when nothing matches or on error.
        """
        try:
            checktitle = cleantitle_get(title)
            print("SOLARMOVIE", checktitle)
            q = self.search_link % (urllib.quote_plus(cleantitle_query(title)))
            q = urlparse.urljoin(self.base_link, q)
            print("SOLARMOVIE 2", q)
            soup = BeautifulSoup(OPEN_URL(q).content)
            for items in soup.findAll('div', attrs={'class': 'ml-item'}):
                try:
                    anchor = items.findAll('a')[0]
                    h = anchor['href'].encode('utf-8')
                    t = anchor['title'].encode('utf-8')
                    if cleantitle_get(t) != checktitle:
                        continue
                    info = anchor['data-url'].encode('utf-8')
                    info = urlparse.urljoin(self.base_link, info)
                    y, q = self.movies_info(info, year)
                    # Wrong year -> bail out of this item via exception.
                    if not y == year: raise Exception()
                    self.quality = q
                    return h
                except:
                    pass
        except:
            return
Beispiel #6
0
    def movie(self, imdb, title, year):
        """Search the site and return the URL of the matching movie page.

        Returns None when no match is found or on any error.
        """
        self.zen_url = []
        try:
            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)

            r = OPEN_URL(query, timeout='15')

            html = BeautifulSoup(r.content)
            print("WONLINE BeautifulSoup", html)
            r = html.findAll('div', attrs={'class': 'resultado'})
            print("WONLINE s1", r)
            for u in r:
                r_title = u.findAll('img')[0]['alt'].encode('utf-8')
                # BUG FIX: encode the href — the original re-encoded
                # r_title and left r_href untouched.
                r_href = u.findAll('a')[0]['href'].encode('utf-8')

                print("WONLINE MATCHES", r_title, r_href)
                if year in r_title and cleanmovie == cleantitle_get(r_title):
                    # BUG FIX: guard the return so `url` can never be
                    # referenced before assignment (previously a
                    # NameError silently aborted the whole loop).
                    if "http:" in r_href:
                        url = replaceHTMLCodes(r_href)
                        print("WONLINE PASSED", url)
                        return url

        except Exception:
            return
Beispiel #7
0
    def movie(self, imdb, title, year):
        """Build and return the provider's movie page URL from the title."""
        self.elysium_url = []
        try:
            slug = cleantitle_query(title).replace(' ', '-')
            return urlparse.urljoin(self.base_link, self.movie_link % (slug))
        except:
            return
Beispiel #8
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Collect debrid release links for a TV episode.

        Returns ``self.zen_url`` as a list of [size, quality, url-list]
        rows, or None when debrid is off, `url` is missing, or on error.
        """
        try:
            self.zen_url = []
            if not debridstatus == 'true': raise Exception()
            if url == None: return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle_query(title)
            # Consistency fix: every other scraper in this file uses the
            # cleantitle_get helper (was: cleantitle.get).
            cleanmovie = cleantitle_get(title)
            data['season'], data['episode'] = season, episode
            ep_query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            titlecheck = cleanmovie + ep_query.lower()

            query = "%s+%s" % (urllib.quote_plus(title), ep_query)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query, headers=self.search_header_link, referer=query)
            posts = []
            dupes = []
            print("RELEASEBB QUERY", r)

            # The response ends with a JSON object holding the results.
            try:
                posts += json.loads(re.findall('({.+?})$', r)[0])['results']
            except Exception:
                pass
            for post in posts:
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    # raise -> handled by inner except: skip duplicates.
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    print("RELEASEBB 2", name, url)
                    # Strip year/SxxEyy/3D markers and trailing text.
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    print("RELEASEBB 3 TV", t)
                    if not titlecheck in cleantitle_get(name): raise Exception()
                    print("RELEASEBB 3 PASSED", t)
                    content = post['post_content']
                    # Every href in the post body is a candidate link.
                    url = [i for i in client.parseDOM(content, 'a', ret='href')]

                    size = get_size(content)
                    quality = 'getbyurl'
                    self.zen_url.append([size, quality, url])

                except Exception:
                    pass
            print("RELEASEBB PASSED", self.zen_url)
            return self.zen_url

        except Exception:
            return
Beispiel #9
0
    def movie(self, imdb, title, year):
        """Scrape debrid release links for a movie from the search API.

        Returns ``self.elysium_url`` as a list of
        [size, quality, url-list] rows, or None when debrid is disabled
        or any error occurs (best-effort scraper).
        """
        try:
            # Provider only yields usable links with a debrid account.
            if not debridstatus == 'true': raise Exception()
            self.elysium_url = []
            query = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = "%s+%s" % (urllib.quote_plus(query), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query,
                               headers=self.search_header_link,
                               referer=query)
            posts = []
            dupes = []
            print("RELEASEBB QUERY", r)

            # The endpoint's response ends with a JSON object; pull the
            # trailing {...} and read its 'results' list.
            try:
                posts += json.loads(re.findall('({.+?})$', r)[0])['results']
            except:
                pass
            for post in posts:
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    # Exceptions are the "skip this post" signal here;
                    # they are swallowed by the inner except below.
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    print("RELEASEBB 2", name, url)
                    # Strip year/SxxEyy/3D markers and everything after
                    # them to recover the bare release title.
                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleanmovie in cleantitle_get(
                            name) or not year in name:
                        raise Exception()
                    print("RELEASEBB 3 PASSED", t)
                    content = post['post_content']
                    # Every href in the post body is a candidate link;
                    # `url` is reused here to hold that list.
                    url = [
                        i for i in client.parseDOM(content, 'a', ret='href')
                    ]

                    size = get_size(content)
                    quality = quality_tag(name)
                    self.elysium_url.append([size, quality, url])

                except:
                    pass
            print("RELEASEBB PASSED", self.elysium_url)
            return self.elysium_url

        except:
            return
Beispiel #10
0
    def movie(self, imdb, title, year):
        """Construct and return the site URL for the given movie/year."""
        self.zentester_url = []
        try:
            slug = cleantitle_geturl(cleantitle_query(title))
            path = self.movie_link % (slug, year)
            return urlparse.urljoin(self.base_link, path)
        except:
            return
Beispiel #11
0
	def movie(self, imdb, title, year):
		"""Build and return this provider's movie page URL."""
		self.zen_url = []
		try:
			slug = cleantitle_query(title).replace(' ', '-')
			return urlparse.urljoin(self.base_link, self.movie_link % (slug))
		except:
			return
Beispiel #12
0
	def movie(self, imdb, title, year):
		"""Return the provider URL for the given movie and year."""
		self.zentester_url = []
		try:
			slug = cleantitle_geturl(cleantitle_query(title))
			return urlparse.urljoin(self.base_link, self.movie_link % (slug, year))
		except:
			return
Beispiel #13
0
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		"""Build and return the episode page URL (sXXeXX form)."""
		self.zentester_url = []
		try:
			params = urlparse.parse_qs(url)
			params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
			show = params['tvshowtitle'] if 'tvshowtitle' in params else params['title']
			params['season'], params['episode'] = season, episode
			tag = "s%02de%02d" % (int(params['season']), int(params['episode']))
			show = cleantitle_geturl(cleantitle_query(show))
			return urlparse.urljoin(self.base_link, self.ep_link % (show, tag))
		except:
			return
Beispiel #14
0
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		"""Return the episode page URL in the "SxE" (e.g. 1x2) form."""
		self.genesisreborn_url = []
		try:
			params = urlparse.parse_qs(url)
			params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
			show = params['tvshowtitle'] if 'tvshowtitle' in params else params['title']
			params['season'], params['episode'] = season, episode
			tag = "%01dx%01d" % (int(params['season']), int(params['episode']))
			show = cleantitle_query(show).replace(' ', '-')
			result = urlparse.urljoin(self.base_link, self.ep_link % (show + "-" + tag))
			print("Watchfilm TV SHOW", result)
			return result
		except:
			return
Beispiel #15
0
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		"""Return the episode page URL in the slug-sXXeXX form."""
		self.zen_url = []
		try:
			params = urlparse.parse_qs(url)
			params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
			show = params['tvshowtitle'] if 'tvshowtitle' in params else params['title']
			params['season'], params['episode'] = season, episode
			tag = "s%02de%02d" % (int(params['season']), int(params['episode']))
			show = cleantitle_query(show).replace(' ', '-')
			result = urlparse.urljoin(self.base_link, self.ep_link % (show + "-" + tag))
			print("WONLINE TV SHOW", result)
			return result
		except:
			return
Beispiel #16
0
    def movie(self, imdb, title, year):
        """Collect debrid release links for a movie from the search API.

        Returns ``self.zen_url`` as a list of [size, quality, url-list]
        rows, or None when debrid is disabled or on any error.
        """
        try:
            if not debridstatus == 'true': raise Exception()
            self.zen_url = []
            query = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = "%s+%s" % (urllib.quote_plus(query), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query, headers=self.search_header_link, referer=query)
            posts = []
            dupes = []
            print("RELEASEBB QUERY", r)

            # The response ends with a JSON object holding the results.
            try:
                posts += json.loads(re.findall('({.+?})$', r)[0])['results']
            except Exception:
                pass
            for post in posts:
                # Indentation fix: the loop body was tab-indented inside
                # a space-indented method (syntax error on Py3).
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    # raise -> handled by inner except: skip duplicates.
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    print("RELEASEBB 2", name, url)
                    # Strip year/SxxEyy/3D markers and trailing text.
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleanmovie in cleantitle_get(name) or not year in name: raise Exception()
                    print("RELEASEBB 3 PASSED", t)
                    content = post['post_content']
                    # Every href in the post body is a candidate link.
                    url = [i for i in client.parseDOM(content, 'a', ret='href')]

                    size = get_size(content)
                    quality = quality_tag(name)
                    self.zen_url.append([size, quality, url])

                except Exception:
                    pass
            print("RELEASEBB PASSED", self.zen_url)
            return self.zen_url

        except Exception:
            return
Beispiel #17
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Build and return the episode page URL (sXXeXX form)."""
        self.zentester_url = []
        try:
            params = urlparse.parse_qs(url)
            params = dict([(k, params[k][0]) if params[k] else (k, '')
                           for k in params])
            show = (params['tvshowtitle']
                    if 'tvshowtitle' in params else params['title'])
            params['season'], params['episode'] = season, episode
            tag = "s%02de%02d" % (int(params['season']),
                                  int(params['episode']))
            show = cleantitle_geturl(cleantitle_query(show))
            return urlparse.urljoin(self.base_link,
                                    self.ep_link % (show, tag))
        except:
            return