Example #1
0
    def movie(self, imdb, title, year):
        """Search the site's movie index and return the page URL of the
        result matching *title* and *year*, or None on any failure.

        The (imdb, title, year) signature is the scraper interface used by
        the framework and must not change.
        """
        self.zen_url = []
        try:
            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)

            r = OPEN_URL(query, timeout='15')

            html = BeautifulSoup(r.content)
            results = html.findAll('div', attrs={'class': 'resultado'})
            for u in results:
                r_title = u.findAll('img')[0]['alt'].encode('utf-8')
                # Bug fix: the original re-encoded r_title here instead of
                # encoding the href.
                r_href = u.findAll('a')[0]['href'].encode('utf-8')

                if year in r_title and cleanmovie == cleantitle_get(r_title):
                    # Only return when the href is an absolute http link;
                    # previously `url` could be referenced while unbound
                    # (NameError silently eaten by the bare except).
                    if "http:" in r_href:
                        return replaceHTMLCodes(r_href)

        except:
            return
Example #2
0
    def movie(self, imdb, title, year):
        """Search the SOLARMOVIE listing for *title* and return the href of
        the entry whose cleaned title matches and whose info page reports
        *year*; stores the detected quality in self.quality. Returns None
        on any failure.
        """
        try:
            checktitle = cleantitle_get(title)
            q = self.search_link % (urllib.quote_plus(cleantitle_query(title)))
            q = urlparse.urljoin(self.base_link, q)
            r = OPEN_URL(q).content

            r = BeautifulSoup(r)
            r = r.findAll('div', attrs={'class': 'ml-item'})
            # Re-indented with spaces only: the original loop body was
            # tab-indented inside a space-indented method (TabError on Py3).
            for items in r:
                try:
                    # Query the anchor once instead of four findAll calls.
                    anchor = items.findAll('a')[0]
                    h = anchor['href'].encode('utf-8')
                    t = anchor['title'].encode('utf-8')
                    if cleantitle_get(t) == checktitle:
                        info = anchor['data-url'].encode('utf-8')
                        info = urlparse.urljoin(self.base_link, info)
                        y, q = self.movies_info(info, year)
                        # Wrong year -> skip this result via the inner except.
                        if not y == year: raise Exception()
                        self.quality = q
                        return h
                except:
                    pass
        except:
            return
Example #3
0
	def movie(self, imdb, title, year):
		"""Search the site's movie index and return the page URL of the
		result matching *title* and *year*, or None on any failure.

		The (imdb, title, year) signature is the scraper interface used by
		the framework and must not change.
		"""
		self.zen_url = []
		try:
			title = cleantitle_query(title)
			cleanmovie = cleantitle_get(title)
			query = self.search_link % (urllib.quote_plus(title), year)
			query = urlparse.urljoin(self.base_link, query)

			r = OPEN_URL(query, timeout='15')

			html = BeautifulSoup(r.content)
			results = html.findAll('div', attrs={'class': 'resultado'})
			for u in results:
				r_title = u.findAll('img')[0]['alt'].encode('utf-8')
				# Bug fix: the original re-encoded r_title here instead of
				# encoding the href.
				r_href = u.findAll('a')[0]['href'].encode('utf-8')

				if year in r_title and cleanmovie == cleantitle_get(r_title):
					# Only return when the href is an absolute http link;
					# previously `url` could be referenced while unbound
					# (NameError silently eaten by the bare except).
					if "http:" in r_href:
						return replaceHTMLCodes(r_href)

		except:
			return
Example #4
0
    def movie(self, imdb, title, year):
        """Search the SOLARMOVIE listing for *title* and return the href of
        the entry whose cleaned title matches and whose info page reports
        *year*; stores the detected quality in self.quality. Returns None
        on any failure.
        """
        try:
            checktitle = cleantitle_get(title)
            q = self.search_link % (urllib.quote_plus(cleantitle_query(title)))
            q = urlparse.urljoin(self.base_link, q)
            r = OPEN_URL(q).content

            r = BeautifulSoup(r)
            r = r.findAll('div', attrs={'class': 'ml-item'})
            for items in r:
                try:
                    # Query the anchor once instead of four findAll calls.
                    anchor = items.findAll('a')[0]
                    h = anchor['href'].encode('utf-8')
                    t = anchor['title'].encode('utf-8')
                    if cleantitle_get(t) == checktitle:
                        info = anchor['data-url'].encode('utf-8')
                        info = urlparse.urljoin(self.base_link, info)
                        y, q = self.movies_info(info, year)
                        # Wrong year -> skip this result via the inner except.
                        if not y == year: raise Exception()
                        self.quality = q
                        return h
                except:
                    pass
        except:
            return
Example #5
0
    def movie(self, imdb, title, year):
        """Query the ReleaseBB JSON search endpoint for *title*/*year*,
        collect [size, quality, links] entries in self.elysium_url and
        return it; returns None on any failure.
        """
        try:
            # Scraper is only useful when a debrid account is configured.
            if not debridstatus == 'true': raise Exception()
            self.elysium_url = []
            query = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = "%s+%s" % (urllib.quote_plus(query), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query,
                               headers=self.search_header_link,
                               referer=query)
            posts = []
            dupes = []

            try:
                # The response ends with a JSON object; extract and parse it.
                # Raw string fixes the invalid "{"/escape sequences warning.
                posts += json.loads(re.findall(r'({.+?})$', r)[0])['results']
            except:
                pass
            for post in posts:
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    # Skip posts we already collected.
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    # Title with year / SxxExx / 3D markers stripped.
                    # Raw string fixes the invalid "\d", "\(", ... escapes.
                    t = re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleanmovie in cleantitle_get(
                            name) or not year in name:
                        raise Exception()
                    content = post['post_content']
                    url = [
                        i for i in client.parseDOM(content, 'a', ret='href')
                    ]

                    size = get_size(content)
                    quality = quality_tag(name)
                    self.elysium_url.append([size, quality, url])

                except:
                    pass
            return self.elysium_url

        except:
            return
Example #6
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Collect HDTV release entries for the episode that aired on its
        premiere date (or the following day) and return them as a list of
        [release_name, links_html] pairs in self.elysium_url; returns None
        via the bare except on any failure.
        """
        self.elysium_url = []
        try:
            # Scraper is only useful when a debrid account is configured.
            if not debridstatus == 'true': raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            data['season'], data['episode'] = season, episode
            today = datetime.datetime.today().date()
            today = today.strftime('%Y.%m.%d')

            title = cleantitle_get(title)
            # e.g. "showname" + "s01e02" -> "shownames01e02", matched against
            # cleaned release names below.
            titlecheck = "s%02de%02d" % (int(
                data['season']), int(data['episode']))
            titlecheck = title + titlecheck
            # *premiered* arrives as "YYYY-MM-DD"; split into components.
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                premiered)[0]
            year = premiered[0]
            days = premiered[-1]
            month = premiered[1]
            # NOTE(review): naive day+1 — rolls past the month end (e.g. 32)
            # for month-boundary premieres; such dates simply find no page.
            next_day = int(days) + 1

            ep_date = "%s.%02d.%02d" % (year, int(month), int(days))
            # Skip episodes that have not aired yet (compare as YYYYMMDD ints).
            if int(re.sub('[^0-9]', '', str(ep_date))) > int(
                    re.sub('[^0-9]', '', str(today))):
                raise Exception()
            ep_next_date = "%s.%02d.%02d" % (year, int(month), int(next_day))
            # Releases may be posted on the air date or the day after.
            for day in [ep_date, ep_next_date]:
                html = self.search_link % day
                html = urlparse.urljoin(self.base_link, html)
                r = OPEN_URL(html).content
                # Each post: a bold release title followed by a row of links.
                for match in re.finditer(
                        '<center>\s*<b>\s*(.*?)\s*</b>.*?<tr>(.*?)</tr>', r,
                        re.DOTALL):
                    release, links = match.groups()
                    release = re.sub('</?[^>]*>', '', release)  # strip tags
                    release = cleantitle_get(release)
                    if titlecheck in release:
                        self.elysium_url.append([release, links])

            return self.elysium_url
        except:
            pass
Example #7
0
 def movie(self, imdb, title, year):
     """Scrape the RAPIDMOVIES search page for *title*/*year* and collect
     up to six HD/1080 results as [href, quality, info] in self.zen_url;
     returns that list, or None on any failure.
     """
     self.zen_url = []
     try:
         if not debridstatus == 'true': raise Exception()
         self.zen_url = []
         cleanmovie = cleantitle_get(title)
         title = cleantitle_query(title)
         titlecheck = cleanmovie + year
         query = self.movie_link % (urllib.quote_plus(title), year)
         query = urlparse.urljoin(self.base_link, query)
         page = OPEN_URL(query).text
         link_re = re.compile('<a class="title" href="(.+?)">(.+?)</a>')
         for href, name in link_re.findall(page):
             print("RAPIDMOVIES", href, name)
             href = href.encode('utf-8')
             name = name.encode('utf-8')
             check = cleantitle_get_2(name)
             print("RAPIDMOVIES check", check)
             if href.startswith("/"):
                 href = self.base_link + href
             # Guard clauses replace the original nested ifs.
             if year not in name or titlecheck not in check:
                 continue
             info = get_size(name)
             quality = quality_tag(name)
             if "1080" in quality or "HD" in quality:
                 # Cap the number of accepted links at six.
                 self.count += 1
                 if not self.count > 6:
                     print("RAPIDMOVIES PASSED", name, quality, info)
                     self.zen_url.append([href, quality, info])
         return self.zen_url
     except:
         return
Example #8
0
    def movie(self, imdb, title, year):
        """Scrape the RAPIDMOVIES search page for *title*/*year* and collect
        up to six HD/1080 results as [href, quality, info] in
        self.elysium_url; returns that list, or None on any failure.

        Re-indented with spaces only: the original body mixed tabs inside a
        space-indented method (TabError under Python 3).
        """
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            self.elysium_url = []
            cleanmovie = cleantitle_get(title)
            title = cleantitle_query(title)
            titlecheck = cleanmovie + year
            query = self.movie_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            link = OPEN_URL(query).text
            match = re.compile(
                '<a class="title" href="(.+?)">(.+?)</a>').findall(link)
            for h, t in match:
                print("RAPIDMOVIES", h, t)
                h = h.encode('utf-8')
                t = t.encode('utf-8')
                check = cleantitle_get_2(t)
                print("RAPIDMOVIES check", check)
                if h.startswith("/"): h = self.base_link + h
                if year in t:
                    if titlecheck in check:
                        info = get_size(t)
                        quality = quality_tag(t)
                        if "1080" in quality or "HD" in quality:
                            # Cap the number of accepted links at six.
                            self.count += 1
                            if not self.count > 6:
                                print("RAPIDMOVIES PASSED", t, quality, info)
                                self.elysium_url.append([h, quality, info])
            return self.elysium_url
        except:
            return
Example #9
0
    def movie(self, imdb, title, year):
        """Search the site for *title* (*year*) and collect matching
        [title, href] pairs from the h2.entry-title headings in
        self.zen_url; returns that list, or None on any failure.

        Re-indented with spaces only: the original body mixed tabs inside a
        space-indented method (TabError under Python 3).
        """
        self.zen_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            # Parenthesized form prints the same text but is also valid
            # Python 3 (the original used the Python-2-only print statement).
            print("%s QUERY %s" % (self.base_link, query))
            r = client.request(query)
            r = BeautifulSoup(r)
            r = r.findAll('h2', attrs={'class': 'entry-title'})

            for item in r:
                try:
                    # Query the anchor once instead of twice.
                    anchor = item.findAll('a')[0]
                    t = anchor.string.encode('utf-8')
                    h = anchor['href'].encode('utf-8')

                    if cleanmovie in cleantitle_get(t) and year in t:
                        self.zen_url.append([t, h])

                except:
                    pass

            return self.zen_url

        except:
            return
Example #10
0
    def movie(self, imdb, title, year):
        """Search the site for *title* (*year*) and collect matching
        [title, href] pairs from the h2.entry-title headings in
        self.zen_url; returns that list, or None on any failure.
        """
        self.zen_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            # Parenthesized form prints the same text but is also valid
            # Python 3 (the original used the Python-2-only print statement).
            print("%s QUERY %s" % (self.base_link, query))
            r = client.request(query)
            r = BeautifulSoup(r)
            r = r.findAll('h2', attrs={'class': 'entry-title'})

            for item in r:
                try:
                    # Query the anchor once instead of twice.
                    anchor = item.findAll('a')[0]
                    t = anchor.string.encode('utf-8')
                    h = anchor['href'].encode('utf-8')

                    if cleanmovie in cleantitle_get(t) and year in t:
                        self.zen_url.append([t, h])

                except:
                    pass

            return self.zen_url

        except:
            return
Example #11
0
    def movie(self, imdb, title, year):
        """Query the ReleaseBB JSON search endpoint for *title*/*year*,
        collect [size, quality, links] entries in self.zen_url and return
        it; returns None on any failure.
        """
        try:
            # Scraper is only useful when a debrid account is configured.
            if not debridstatus == 'true': raise Exception()
            self.zen_url = []
            query = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = "%s+%s" % (urllib.quote_plus(query), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query, headers=self.search_header_link,
                               referer=query)
            posts = []
            dupes = []

            try:
                # The response ends with a JSON object; extract and parse it.
                posts += json.loads(re.findall(r'({.+?})$', r)[0])['results']
            except:
                pass
            # Re-indented with spaces only: the original loop body was
            # tab-indented inside a space-indented method (TabError on Py3).
            for post in posts:
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    # Skip posts we already collected.
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    # Title with year / SxxExx / 3D markers stripped.
                    # Raw string fixes the invalid "\d", "\(", ... escapes.
                    t = re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleanmovie in cleantitle_get(name) \
                            or not year in name:
                        raise Exception()
                    content = post['post_content']
                    url = [i for i in client.parseDOM(content, 'a', ret='href')]

                    size = get_size(content)
                    quality = quality_tag(name)
                    self.zen_url.append([size, quality, url])

                except:
                    pass
            return self.zen_url

        except:
            return
Example #12
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Query the ReleaseBB JSON search endpoint for SxxExx releases of
        the show, collect [size, 'getbyurl', links] entries in self.zen_url
        and return it; returns None on any failure.
        """
        try:
            self.zen_url = []
            # Scraper is only useful when a debrid account is configured.
            if not debridstatus == 'true': raise Exception()
            if url == None: return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle_query(title)
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            ep_query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))

            # e.g. "shownames01e02" — matched against cleaned result titles.
            titlecheck = cleanmovie + ep_query.lower()

            query = "%s+%s" % (urllib.quote_plus(title), ep_query)
            query = self.search_link % query
            query = urlparse.urljoin(self.search_base_link, query)
            r = client.request(query, headers=self.search_header_link,
                               referer=query)
            posts = []
            dupes = []

            try:
                # The response ends with a JSON object; extract and parse it.
                posts += json.loads(re.findall(r'({.+?})$', r)[0])['results']
            except:
                pass
            # Re-indented with spaces only: the original loop body was
            # tab-indented inside a space-indented method (TabError on Py3).
            for post in posts:
                try:
                    name = post['post_title'].encode('utf-8')
                    url = post['post_name'].encode('utf-8')
                    # Skip posts we already collected.
                    if url in dupes: raise Exception()
                    dupes.append(url)
                    # Title with year / SxxExx / 3D markers stripped.
                    # Raw string fixes the invalid "\d", "\(", ... escapes.
                    t = re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not titlecheck in cleantitle_get(name): raise Exception()
                    content = post['post_content']
                    url = [i for i in client.parseDOM(content, 'a', ret='href')]

                    size = get_size(content)
                    quality = 'getbyurl'
                    self.zen_url.append([size, quality, url])

                except:
                    pass
            return self.zen_url

        except:
            return
Example #13
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Search the site for SxxExx postings of the show and collect
        matching [title, href] pairs from the h2.entry-title headings in
        self.zen_url; returns that list, or None on any failure.
        """
        self.zen_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            # e.g. "shownames01e02" — matched against cleaned result titles.
            episodecheck = 'S%02dE%02d' % (int(
                data['season']), int(data['episode']))
            episodecheck = cleanmovie + episodecheck.lower()
            query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = self.search_link % (urllib.quote_plus(title), query)
            query = urlparse.urljoin(self.base_link, query)

            r = client.request(query)
            r = BeautifulSoup(r)
            r = r.findAll('h2', attrs={'class': 'entry-title'})

            for item in r:
                try:
                    # Query the anchor once instead of twice.
                    anchor = item.findAll('a')[0]
                    t = anchor.string.encode('utf-8')
                    h = anchor['href'].encode('utf-8')

                    if episodecheck in cleantitle_get(t):
                        self.zen_url.append([t, h])

                except:
                    pass

            return self.zen_url
        except:
            return
Example #14
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Search the site for SxxExx postings of the show and collect
        matching [title, href] pairs from the h2.entry-title headings in
        self.zen_url; returns that list, or None on any failure.

        Re-indented with spaces only: the original body mixed tabs inside a
        space-indented method (TabError under Python 3).
        """
        self.zen_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            # e.g. "shownames01e02" — matched against cleaned result titles.
            episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = cleanmovie + episodecheck.lower()
            query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = self.search_link % (urllib.quote_plus(title), query)
            query = urlparse.urljoin(self.base_link, query)

            r = client.request(query)
            r = BeautifulSoup(r)
            r = r.findAll('h2', attrs={'class': 'entry-title'})

            for item in r:
                try:
                    # Query the anchor once instead of twice.
                    anchor = item.findAll('a')[0]
                    t = anchor.string.encode('utf-8')
                    h = anchor['href'].encode('utf-8')

                    if episodecheck in cleantitle_get(t):
                        self.zen_url.append([t, h])

                except:
                    pass

            return self.zen_url
        except:
            return