示例#1
0
    def movie(self, imdb, title, year):
        """Search the site for a movie and return its page URL.

        Fix: removed the duplicated ``self.elysium_url = []`` initializer.
        Returns the matching result's href, or None on no match / failure.
        """
        self.elysium_url = []
        try:
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)

            query = "%s+%s" % (urllib.quote_plus(title), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.base_link, query)
            print ("PUTMOVIE query", query)
            html = BeautifulSoup(OPEN_URL(query).content)
            results = html.findAll('div', attrs={'class': 'movie_pic'})

            for s in results:
                print ("PUTMOVIE RESULTS", s)
                t = s.findAll('img')[0]['alt'].encode('utf-8')
                h = s.findAll('a')[0]['href'].encode('utf-8')
                # Require both the year and a normalized exact-title match.
                if year in t and cleanmovie == cleantitle.get(t):
                    return h
        except:
            return
示例#2
0
	def movie(self, imdb, title, year):
		"""Locate a movie on the site and record its link in self.zen_url."""
		self.zen_url = []
		try:
			title = cleantitle_query(title)
			titlecheck = cleantitle_get(title)
			search_path = self.search_link % (urllib.quote_plus(title))
			search_url = urlparse.urljoin(self.base_link, search_path)
			soup = BeautifulSoup(OPEN_URL(search_url).content)
			for article in soup.findAll('article', attrs={'class': 'movie_item'}):
				anchor = article.findAll('a')[0]
				link_href = anchor['href'].encode('utf-8')
				link_title = anchor['data-title'].encode('utf-8')
				if titlecheck != cleantitle_get(link_title):
					continue
				# Movies carry no season/episode information.
				self.zen_url.append([link_href, imdb, 'movies', '', ''])
				return self.zen_url
		except:
			return
示例#3
0
    def movie(self, imdb, title, year):
        """Search the site for a movie and return its page URL.

        Fix: removed the duplicated ``self.genesisreborn_url = []``
        initializer. Returns the matching href, or None on failure.
        """
        self.genesisreborn_url = []
        try:
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)

            query = "%s+%s" % (urllib.quote_plus(title), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.base_link, query)
            print ("PUTMOVIE query", query)
            html = BeautifulSoup(OPEN_URL(query).content)
            results = html.findAll('div', attrs={'class': 'movie_pic'})

            for s in results:
                print ("PUTMOVIE RESULTS", s)
                t = s.findAll('img')[0]['alt'].encode('utf-8')
                h = s.findAll('a')[0]['href'].encode('utf-8')
                # Require both the year and a normalized exact-title match.
                if year in t and cleanmovie == cleantitle.get(t):
                    return h
        except:
            return
示例#4
0
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		"""Locate a TV episode on the site and record its link in self.zen_url."""
		self.zen_url = []
		try:
			params = urlparse.parse_qs(url)
			params = dict([(i, params[i][0]) if params[i] else (i, '') for i in params])
			title = params['tvshowtitle'] if 'tvshowtitle' in params else params['title']
			params['season'], params['episode'] = season, episode
			title = cleantitle_query(title)
			titlecheck = cleantitle_get(title)
			search_path = self.search_link % (urllib.quote_plus(title))
			search_url = urlparse.urljoin(self.base_link, search_path)
			soup = BeautifulSoup(OPEN_URL(search_url).content)
			for article in soup.findAll('article', attrs={'class': 'movie_item'}):
				anchor = article.findAll('a')[0]
				link_href = anchor['href'].encode('utf-8')
				link_title = anchor['data-title'].encode('utf-8')
				if titlecheck == cleantitle_get(link_title):
					# Shows keep season/episode so sources() can match them later.
					self.zen_url.append([link_href, imdb, 'shows', season, episode])
					return self.zen_url
		except:
			return
示例#5
0
    def movie(self, imdb, title, year):
        """Search the site and return the href of the movie matching title and year."""
        try:
            checktitle = cleantitle_get(title)
            print("SOLARMOVIE", checktitle)
            search_path = self.search_link % (urllib.quote_plus(cleantitle_query(title)))
            search_url = urlparse.urljoin(self.base_link, search_path)
            print("SOLARMOVIE 2", search_url)
            soup = BeautifulSoup(OPEN_URL(search_url).content)
            for item in soup.findAll('div', attrs={'class': 'ml-item'}):
                try:
                    anchor = item.findAll('a')[0]
                    href = anchor['href'].encode('utf-8')
                    name = anchor['title'].encode('utf-8')
                    if cleantitle_get(name) != checktitle:
                        continue
                    info = anchor['data-url'].encode('utf-8')
                    info = urlparse.urljoin(self.base_link, info)
                    y, q = self.movies_info(info, year)
                    # Reject results whose release year disagrees.
                    if not y == year: raise Exception()
                    self.quality = q
                    return href
                except:
                    pass
        except:
            return
示例#6
0
    def movie(self, imdb, title, year):
        """Search the site and return the href of the movie matching title and year.

        Fix: the original body mixed tab and space indentation (the inner
        try body was tab-indented), which is a TabError under Python 3;
        indentation is normalized to spaces. Logic is unchanged.
        """
        try:
            checktitle = cleantitle_get(title)
            print ("SOLARMOVIE", checktitle)
            q = self.search_link % (urllib.quote_plus(cleantitle_query(title)))
            q = urlparse.urljoin(self.base_link, q)
            print ("SOLARMOVIE 2", q)
            r = OPEN_URL(q).content
            r = BeautifulSoup(r)
            r = r.findAll('div', attrs={'class': 'ml-item'})
            for items in r:
                try:
                    h = items.findAll('a')[0]['href'].encode('utf-8')
                    t = items.findAll('a')[0]['title'].encode('utf-8')
                    if cleantitle_get(t) == checktitle:
                        info = items.findAll('a')[0]['data-url'].encode('utf-8')
                        info = urlparse.urljoin(self.base_link, info)
                        y, q = self.movies_info(info, year)
                        # Only accept an exact year match.
                        if not y == year: raise Exception()
                        self.quality = q
                        return h
                except:
                    pass
        except:
            return
示例#7
0
    def movie(self, imdb, title, year):
        """Return an urlencoded movie url for a title/year match, else None."""
        try:
            self.zen = []
            cleaned_title = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            search_path = self.search_link % (urllib.quote_plus(title))
            search_url = urlparse.urljoin(self.base_link, search_path)
            html = BeautifulSoup(OPEN_URL(search_url).content)
            print("ONEMOVIES EPISODES", html)
            for link in html.findAll('div', attrs={'class': 'ml-item'}):
                anchor = link('a')[0]
                link_title = anchor['title'].encode('utf-8')
                href = anchor['href'].encode('utf-8')
                info = anchor['data-url'].encode('utf-8')
                if cleantitle.get(link_title) != cleaned_title:
                    continue
                info = urlparse.urljoin(self.base_link, info)
                page = OPEN_URL(info).content
                # The year appears in a dedicated info div on the detail page.
                if re.findall('<div class="jt-info">%s</div>' % year, page):
                    out = urllib.urlencode({'url': client.replaceHTMLCodes(href), 'type': 'movie'})
                    print("SOLARMOVIE PASSED", out)
                    return out
        except:
            return
示例#8
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Collect stream API links for a given episode into self.zen_url.

        Fix: the chapter id is now tracked per film. Previously ``id`` was
        only bound inside the chapter loop when a title matched, yet the
        stream URL was built unconditionally afterwards — a film with no
        matching chapter raised a NameError (or silently reused the
        previous film's id), aborting the whole search.
        """
        self.zen_url = []
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)

            data['season'], data['episode'] = season, episode
            # Build the "SxxE0yy" tag the site uses for chapter titles.
            season_tag = "S%02d" % int(data['season'])
            episode_tag = "E0" + "%02d" % int(data['episode'])
            episodecheck = season_tag + episode_tag
            print("CINEMABOX episodecheck", episodecheck)

            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            print("CINEMABOX query", query)
            r = OPEN_URL(query, mobile=True, timeout=30).json()

            for item in r['data']['films']:
                t = item['title'].encode('utf-8')
                h = re.findall('''['"]id['"]\s*:\s*(\d+)''', str(item))[0]

                if title_match(cleanmovie, cleantitle_get(t),
                               amount=1.0) == True:
                    s = self.sources_link % h
                    s = urlparse.urljoin(self.base_link, s)
                    print("CINEMABOX PASSED 4", t, h, s)
                    chapters = OPEN_URL(s, mobile=True).json()['data']['chapters']

                    chapter_id = None
                    for src in chapters:
                        name = src['title'].encode('utf8')
                        if episodecheck.lower() == name.lower():
                            chapter_id = re.findall(
                                '''['"]id['"]\s*:\s*(\d+)''', str(src))[0]
                            print("CINEMABOX PASSED 6", name.lower())

                    if chapter_id is None:
                        # This film entry has no chapter for the episode.
                        continue

                    stream = self.stream_link % chapter_id
                    stream = urlparse.urljoin(self.base_link, stream)
                    self.zen_url.append(stream)
                    print(">>>>>>>>> Cinemabox FOUND LINK", stream)

            return self.zen_url
        except:
            return
示例#9
0
	def sources(self, url, hostDict, hostprDict):
		"""Resolve collected zen_url entries into playable gvideo sources.

		Fix: an entry with url == None now returns the sources gathered
		so far instead of a bare ``return`` (None), matching the except
		path and every sibling sources() implementation.
		"""
		sources = []
		try:
			for url, imdb, type, season, episode in self.zen_url:
				player_items = []
				if url == None: return sources

				url = urlparse.urljoin(self.base_link, url)
				print ("ZEN URL SOURCES", url)
				r = OPEN_URL(url).content
				# Sanity check: the fetched page must reference the imdb id.
				if not imdb in r: raise Exception()
				if type == "movies":
					link_id = re.findall("p_link_id='(.+?)'", r)[0]
					link_id = link_id.encode('utf-8')
					player_items.append("/api/plink?id=%s&res=" % link_id)
				elif type == "shows":
					pattern = 'season%s-%s-' % (season, episode)
					soup = BeautifulSoup(r)
					for items in soup.findAll('li'):
						try:
							ids = items['id'].encode('utf-8')
							href = items['data-click'].encode('utf-8')
							print ("ZEN URL TV SOURCES", ids, href)
							if pattern in ids and "/api/plink" in href:
								player_items.append(href)
						except:
							pass

				for items in player_items:
					api = items.split('res=')[0]
					print ("ZEN API ITEMS", api)
					# Probe each resolution the player endpoint supports.
					for res in ['1080', '720', '360']:
						player = api + "res=%s" % res
						player = urlparse.urljoin(self.base_link, player)
						try:
							url = OPEN_URL(player, output='geturl')
							quality = google_tag(url)
							sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Vumoo', 'url': url, 'direct': True, 'debridonly': False})
						except:
							pass

			return sources
		except:
			return sources
示例#10
0
    def resolve(self, url):
        """Fetch *url* and return the src of the first iframe on the page."""
        page = OPEN_URL(url, timeout='3').content
        soup = BeautifulSoup(page)
        iframe_src = soup.findAll('iframe')[0]['src'].encode('utf-8')
        return iframe_src
示例#11
0
    def resolve(self, url):
        """Fetch *url* and return the src of the first iframe on the page."""
        markup = OPEN_URL(url, timeout='3').content
        parsed = BeautifulSoup(markup)
        first_iframe = parsed.findAll('iframe')[0]
        return first_iframe['src'].encode('utf-8')
示例#12
0
    def sources(self, url, hostDict, hostprDict):
        """Build gvideo sources for a Moviexk url.

        Fix: the episode-filtering loop (and the except fallback) were
        tab-indented inside a space-indented function — a TabError on
        Python 3. Indentation is normalized; logic is unchanged.
        """
        try:
            sources = []
            if url == None: return sources

            f = urlparse.urljoin(self.base_link, url)
            print("MOVIEXK SOURCES", f)
            url = f.rsplit('?', 1)[0]
            direct = url
            print("MOVIEXK SOURCES 2", url)
            r = OPEN_URL(url, mobile=True).content
            p = client.parseDOM(r, 'div', attrs={'id': 'servers'})

            if not p:
                # No server list inline: follow the play-button redirect.
                p = client.parseDOM(r, 'div', attrs={'class': 'btn-groups.+?'})
                p = client.parseDOM(p, 'a', ret='href')[0]
                p = OPEN_URL(p, mobile=True).content
                p = client.parseDOM(p, 'div', attrs={'id': 'servers'})

            servers = client.parseDOM(p, 'li')

            links = []
            try:
                s = urlparse.parse_qs(urlparse.urlparse(f).query)['season'][0]
                e = urlparse.parse_qs(urlparse.urlparse(f).query)['episode'][0]
                check_ep = ["e%02d" % (int(e)), "s%02d%02d" % (int(s), int(e)), "ep%02d" % (int(e))]
                check_s = ["-season-%02d-" % (int(s)), "-season-%01d-" % (int(s))]
                for items in servers:
                    h = client.parseDOM(items, 'a', ret='href')[0]
                    h = h.encode('utf-8')
                    t = client.parseDOM(items, 'a', ret='title')[0]
                    clean_ep_title = cleantitle.get(t.encode('utf-8'))
                    if any(value in clean_ep_title for value in check_ep) and any(value in h for value in check_s):
                        links.append(h)
            except:
                # Movies (no season/episode query) play from the direct url.
                links.append(direct)

            for u in links:
                try:
                    url = OPEN_URL(u, mobile=True).content
                    url = client.parseDOM(url, 'source', ret='src')
                    url = [i.strip().split()[0] for i in url]
                    for i in url:
                        try: sources.append({'source': 'gvideo', 'quality': google_tag(i), 'provider': 'Moviexk', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            return sources
示例#13
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve an episode to a play-page url with season/episode params.

        Fixes: normalized the tab-indented tail of the body to spaces
        (TabError on Python 3) and dropped dead locals — ``ep_check``
        *added* the season and episode numbers instead of combining them
        and was never used; ``headers``, ``season_check`` and the
        duplicate ``cleanmovie`` were also unused.
        """
        self.zen_url = []
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.getsearch(title)
            data['season'], data['episode'] = season, episode
            year = data['year']
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            html = BeautifulSoup(OPEN_URL(query, mobile=True).content)
            for container in html.findAll('div', attrs={'class': 'name'}):
                r_href = container.findAll('a')[0]["href"].encode('utf-8')
                r_title = re.findall('</span>(.*?)</a>', str(container))[0]
                r_title = r_title.encode('utf-8')
                # Strip the "watch ..." prefix before comparing titles.
                r_title = re.sub('^(watch movies)|(watch movie)|(watch)', '', r_title.lower())
                if cleaned_title in cleantitle.get(r_title):
                    redirect = OPEN_URL(r_href, mobile=True).text
                    try:
                        # Skip trailer-only pages.
                        if re.search('<dd>[Tt]railer</dd>', redirect): continue
                    except:
                        pass
                    try:
                        p = client.parseDOM(redirect, 'div', attrs={'class': 'btn-groups.+?'})
                        r_url = client.parseDOM(p, 'a', ret='href')[0]
                        print("MOVIEXK PLAY BUTTON 1", r_url)
                        return '%s?season=%01d&episode=%01d' % (r_url.encode('utf-8'), int(season), int(episode))
                    except:
                        # Fall back to the first entry in the server list.
                        p = client.parseDOM(redirect, 'div', attrs={'id': 'servers'})
                        r = client.parseDOM(p, 'li')
                        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                        r_url = [i[0] for i in r][0].encode('utf-8')
                        print("MOVIEXK PLAY BUTTON 2", r_url)
                        return '%s?season=%01d&episode=%01d' % (r_url, int(season), int(episode))
        except:
            return
示例#14
0
    def movie(self, imdb, title, year):
        """Resolve a movie title/year to its play-page url."""
        self.zen_url = []
        try:
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            html = BeautifulSoup(OPEN_URL(query, mobile=True).content)

            for container in html.findAll('div', attrs={'class': 'name'}):
                r_href = container.findAll('a')[0]["href"].encode('utf-8')
                r_title = re.findall('</span>(.*?)</a>', str(container))[0]
                r_title = r_title.encode('utf-8')
                if year not in r_title:
                    continue
                # Strip the "watch ..." prefix before comparing titles.
                r_title = re.sub('^(watch movies)|(watch movie)|(watch)',
                                 '', r_title.lower())
                if cleaned_title not in cleantitle.get(r_title):
                    continue
                redirect = OPEN_URL(r_href, mobile=True).content
                try:
                    # Skip trailer-only pages.
                    if re.search('<dd>[Tt]railer</dd>', redirect):
                        continue
                except:
                    pass
                try:
                    p = client.parseDOM(redirect, 'div',
                                        attrs={'class': 'btn-groups.+?'})
                    r_url = client.parseDOM(p, 'a', ret='href')[0].encode('utf-8')
                    print("MOVIEXK PLAY BUTTON 1", r_url)
                    return r_url
                except:
                    # Fall back to the first entry in the server list.
                    p = client.parseDOM(redirect, 'div',
                                        attrs={'id': 'servers'})
                    rows = client.parseDOM(p, 'li')
                    pairs = zip(client.parseDOM(rows, 'a', ret='href'),
                                client.parseDOM(rows, 'a', ret='title'))
                    r_url = [i[0] for i in pairs][0].encode('utf-8')
                    print("MOVIEXK PLAY BUTTON 2", r_url)
                    return r_url
        except:
            return
示例#15
0
    def sources(self, url, hostDict, hostprDict):
        """Expand collected self.zen_url entries into playable sources.

        Fix: the loop body was tab-indented inside a space-indented
        function — a TabError on Python 3. Indentation is normalized;
        logic is unchanged.
        """
        sources = []
        try:
            for url in self.zen_url:
                print("ONEMOVIES SOURCES", url)
                if "embed.123movieshd" in url:
                    try:
                        r = OPEN_URL(url).content
                        r = get_sources(r)
                        for u in r:
                            h = get_files(u)
                            for s in h:
                                href = s.encode('utf-8')
                                quality = meta_gvideo_quality(href)
                                print("ONEMOVIES SOURCES 2", href)
                                sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'onemovies', 'url': href, 'direct': True, 'debridonly': False})
                    except:
                        pass
                elif "embed2.seriesonline.io" in url:
                    try:
                        r = OPEN_URL(url).content
                        r = BeautifulSoup(r)
                        r = r.findAll('source')
                        for u in r:
                            href = u['src'].encode('utf-8')
                            quality = meta_gvideo_quality(href)
                            print("ONEMOVIES SOURCES 2", href)
                            sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'onemovies', 'url': href, 'direct': True, 'debridonly': False})
                    except:
                        pass
                else:
                    try:
                        href = url.encode('utf-8')
                        host = meta_host(url)
                        quality = "SD"
                        # Only keep hosts the resolver set supports.
                        if not host in hostDict: raise Exception()
                        sources.append({'source': host, 'quality': quality, 'provider': 'onemovies', 'url': href, 'direct': False, 'debridonly': False})
                    except:
                        pass
        except:
            pass
        return sources
示例#16
0
    def movie(self, imdb, title, year):
        """Collect debrid movie post links into self.zen_url."""
        self.zen_url = []
        try:
            # Debrid-only scraper: bail out unless debrid is enabled.
            if not debridstatus == 'true': raise Exception()

            print("MYVIDEOLINK 2")
            self.real_link = self.base_link

            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            type = 'zen_movies'
            query = urlparse.urljoin(self.real_link,
                                     self.search_link % (urllib.quote_plus(title), year))
            req = OPEN_URL(query).content

            posts = client.parseDOM(req, 'h2', attrs={'class': 'post-titl.+?'})
            matches = []
            for post in posts:
                href = client.parseDOM(post, 'a', ret='href')[0]
                name = client.parseDOM(post, 'a', ret='title')[0]
                if cleanmovie in cleantitle.get(name) and year in name:
                    matches.append((href.encode('utf-8'), name.encode('utf-8'), type))
            self.zen_url += matches
            print("MOVIES PASSED MYVIDEOLINK", self.zen_url)

            return self.zen_url
        except:
            return
示例#17
0
    def sources(self, url, hostDict, hostprDict):
        """List hoster links found in the table rows of the page at *url*."""
        try:
            sources = []

            headers = {'User-Agent': random_agent()}
            soup = BeautifulSoup(OPEN_URL(url).content)
            for row in soup.findAll('tr'):
                href = row.findAll('a')[0]['href'].encode('utf-8')
                print("AFMOVIE R2", href)
                try:
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(href.strip().lower()).netloc)[0]
                except:
                    host = 'Afmovies'
                # Skip hosts the resolver set cannot handle.
                if not host in hostDict: continue
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'provider': 'Afmovies',
                    'url': href,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
示例#18
0
    def movie(self, imdb, title, year):
        """Collect (href, title) result pairs for a debrid movie search.

        Fix: removed the duplicated ``self.elysium_url = []`` initializer.
        """
        self.elysium_url = []
        try:
            # Debrid-only scraper: bail out unless debrid is enabled.
            if not debridstatus == 'true': raise Exception()

            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)

            html = BeautifulSoup(OPEN_URL(query).content)
            containers = html.findAll('h1', attrs={'class': 'entry-title'})

            for result in containers:
                print("BMOVIES SOURCES movielink", result)
                r_title = result.findAll('a')[0].string
                r_href = result.findAll('a')[0]["href"].encode('utf-8')
                r_title = r_title.encode('utf-8')
                # Match on normalized title + year.
                if titlecheck in cleantitle_get_2(r_title):
                    self.elysium_url.append([r_href, r_title])
            return self.elysium_url
        except:
            return
示例#19
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Collect (href, title) pairs for a debrid episode search.

        Fixes: returns ``self.zen_url`` — the list actually populated —
        where the original returned ``self.url`` and discarded every
        match; normalized the tab-indented body (TabError on Python 3);
        dropped the unused ``headers`` local.
        """
        try:
            # Debrid-only scraper: bail out unless debrid is enabled.
            if not debridstatus == 'true': raise Exception()
            self.zen_url = []
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            ep_search = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = str(ep_search).lower()
            query = self.search_link % (urllib.quote_plus(title), ep_search)
            query = urlparse.urljoin(self.base_link, query)
            titlecheck = cleanmovie + episodecheck
            html = BeautifulSoup(OPEN_URL(query).content)

            for result in html.findAll('h1', attrs={'class': 'entry-title'}):
                r_title = result.findAll('a')[0].string
                r_href = result.findAll('a')[0]["href"].encode('utf-8')
                r_title = cleantitle.get(r_title.encode('utf-8'))
                # Match on normalized title + sXXeYY tag.
                if titlecheck in r_title:
                    self.zen_url.append([r_href, r_title])

            return self.zen_url
        except:
            return
示例#20
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape debrid hoster links from collected post pages.

        Fix: the body was tab-indented inside a space-indented function
        (TabError on Python 3); indentation is normalized to spaces.
        Logic is unchanged.
        """
        try:
            sources = []
            for movielink, title in self.zen_url:
                mylink = OPEN_URL(movielink).content
                # Infer quality from the post title.
                if "1080" in title: quality = "1080p"
                elif "720" in title: quality = "HD"
                else: quality = "SD"
                print("BMOVIES SOURCES movielink", movielink)
                for item in parse_dom(mylink, 'div', {'class': 'entry-content'}):
                    match = re.compile('<a href="(.+?)">(.+?)</a>').findall(item)
                    for url, title in match:
                        myurl = str(url)
                        # Keep only premium hosts, excluding blacklisted archives.
                        if any(value in myurl.lower() for value in hostprDict):
                            if not any(value in myurl.lower() for value in self.blacklist_zips):
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                except: host = 'Videomega'
                                host = client.replaceHTMLCodes(host)
                                host = host.encode('utf-8')
                                sources.append({'source': host, 'quality': quality, 'provider': 'Bmoviez', 'url': url, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
示例#21
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Collect debrid episode post links into self.zen_url."""
        self.zen_url = []
        try:
            # Debrid-only scraper: bail out unless debrid is enabled.
            if not debridstatus == 'true': raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            self.real_link = self.base_link
            type = 'zen_shows'
            data['season'], data['episode'] = season, episode
            ep_tag = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = str(ep_tag).lower()
            query = self.search_link % (urllib.quote_plus(title), ep_tag)
            query = urlparse.urljoin(self.real_link, query)

            req = OPEN_URL(query).content
            posts = client.parseDOM(req, 'h2', attrs={'class': 'post-titl.+?'})
            matches = []
            for post in posts:
                href = client.parseDOM(post, 'a', ret='href')[0]
                name = client.parseDOM(post, 'a', ret='title')[0]
                cleaned = cleantitle.get(name)
                if cleanmovie in cleaned and episodecheck in cleaned:
                    matches.append((href.encode('utf-8'), name.encode('utf-8'), type))
            self.zen_url += matches
            print("MYVIDEOLINK SHOWS", self.zen_url)

            return self.zen_url
        except:
            return
示例#22
0
    def sources(self, url, hostDict, hostprDict):
        """Extract gvideo file links from jwplayer "sources:[...]" arrays."""
        sources = []
        try:
            for url in self.zen_url:
                if url == None: return
                page = OPEN_URL(url).content
                for src in re.compile('sources:\[(.+?)\]').findall(page):
                    print("123MOVIES SOURCES", src)
                    # Pull every "file": "..." entry out of the array body.
                    for href in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]*)''', src):
                        print("123MOVIES SOURCES 2", href)
                        quality = google_tag(href)
                        print("123MOVIES SOURCES 3", href)
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'provider': 'Onemx',
                            'url': href,
                            'direct': True,
                            'debridonly': False
                        })

            return sources
        except:
            return sources
示例#23
0
File: wso.py  Project: ItsMYZTIK/tdbaddon
    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links behind play buttons on the page at *url*.

        Fix: the HTML-entity-cleaned ``url`` is now the value appended —
        the original computed ``url = replaceHTMLCodes(href)`` and then
        appended the raw ``href``, discarding the cleanup.
        """
        sources = []
        try:
            if url == None: return

            link = OPEN_URL(url, timeout='10').content
            html = BeautifulSoup(link)
            r = html.findAll('div', attrs={'class': 'play-btn'})

            for result in r:
                href = result.findAll('a')[0]['href'].encode('utf-8')
                try:
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(href.strip().lower()).netloc)[0]
                except:
                    host = 'none'
                quality = 'SD'

                url = replaceHTMLCodes(href)
                url = url.encode('utf-8')
                if host in hostDict:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Wso',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except:
            return sources
示例#24
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Match a show's season page and return an urlencoded episode url."""
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            search_path = self.search_link % (urllib.quote_plus(title))
            search_url = urlparse.urljoin(self.base_link, search_path)
            # Season pages are titled "<show> season <n>".
            checkseason = cleantitle.get(title) + "season" + season
            html = BeautifulSoup(OPEN_URL(search_url).content)
            for link in html.findAll('div', attrs={'class': 'ml-item'}):
                anchor = link('a')[0]
                link_title = anchor['title'].encode('utf-8')
                href = anchor['href'].encode('utf-8')
                if cleantitle.get(link_title) != checkseason:
                    continue
                out = urllib.urlencode({'url': href, 'type': 'tv_shows', 'episode': episode})
                print("SOLARMOVIE PASSED", out)
                return out

        except:
            return
示例#25
0
    def movie(self, imdb, title, year):
        """Search WONLINE for a movie and return the matching page URL.

        Matches on the cleaned title plus release year. Returns None when
        nothing matches or a request fails (errors swallowed by design).
        """
        self.zen_url = []
        try:

            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)

            r = OPEN_URL(query, timeout='15')

            html = BeautifulSoup(r.content)
            print("WONLINE BeautifulSoup", html)
            results = html.findAll('div', attrs={'class': 'resultado'})
            print("WONLINE s1", results)
            for u in results:
                r_title = u.findAll('img')[0]['alt'].encode('utf-8')
                # BUGFIX: this previously re-encoded r_title a second time and
                # never encoded the href itself.
                r_href = u.findAll('a')[0]['href'].encode('utf-8')

                print("WONLINE MATCHES", r_title, r_href)
                if year in r_title and cleanmovie == cleantitle_get(r_title):
                    # BUGFIX: only print/return when the link is absolute;
                    # previously `url` could be unbound here (NameError was
                    # silently swallowed by the bare except).
                    if "http:" in r_href:
                        url = replaceHTMLCodes(r_href)
                        print("WONLINE PASSED", url)
                        return url

        except:
            return
示例#26
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape <pre class="links"> blocks from the pages collected in
        self.zen_url and emit debrid-only source dicts for every link that
        points at a known premium host."""
        try:
            sources = []
            for page_link, quality, info in self.zen_url:
                page = OPEN_URL(page_link)
                blocks = BeautifulSoup(page.content).findAll(
                    'pre', attrs={'class': 'links'})
                for block in blocks:
                    url = block.string
                    # Keep only links pointing at a recognised premium host.
                    if not any(value in url for value in hostprDict):
                        continue
                    try:
                        netloc = urlparse.urlparse(url.strip().lower()).netloc
                        host = re.findall('([\w]+[.][\w]+)$', netloc)[0]
                    except:
                        host = 'noe'
                    host = client.replaceHTMLCodes(host).encode('utf-8')
                    url = client.replaceHTMLCodes(url).encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Rapidmovies',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

            return sources
        except:
            return sources
示例#27
0
 def movie(self, imdb, title, year):
     """Collect up to six HD/1080p Rapidmovies result links for a movie.

     Debrid-only scraper: bails out immediately when debrid is disabled.
     Fills self.zen_url with [href, quality, info] triples and returns it,
     or returns None on failure.
     """
     self.zen_url = []
     try:
         if not debridstatus == 'true': raise Exception()
         self.zen_url = []
         cleanmovie = cleantitle_get(title)
         title = cleantitle_query(title)
         titlecheck = cleanmovie + year
         search_url = urlparse.urljoin(
             self.base_link, self.movie_link % (urllib.quote_plus(title), year))
         page = OPEN_URL(search_url).text
         anchors = re.compile(
             '<a class="title" href="(.+?)">(.+?)</a>').findall(page)
         for h, t in anchors:
             print("RAPIDMOVIES", h, t)
             h = h.encode('utf-8')
             t = t.encode('utf-8')
             check = cleantitle_get_2(t)
             print("RAPIDMOVIES check", check)
             if h.startswith("/"):
                 h = self.base_link + h
             # Require both the release year and the cleaned-title match.
             if year not in t or titlecheck not in check:
                 continue
             info = get_size(t)
             quality = quality_tag(t)
             if "1080" not in quality and "HD" not in quality:
                 continue
             # NOTE(review): self.count is presumably initialised elsewhere
             # (e.g. __init__) — it caps the result list at six entries.
             self.count += 1
             if not self.count > 6:
                 print("RAPIDMOVIES PASSED", t, quality, info)
                 self.zen_url.append([h, quality, info])
         return self.zen_url
     except:
         return
示例#28
0
	def sources(self, url, hostDict, hostprDict):
		"""Walk the pages stored in self.zen_url, extract go.php redirect
		targets and emit an SD source dict for every recognised host."""
		sources = []
		try:

			for page_url in self.zen_url:
				# A None entry aborts the whole scrape (returns None, not []).
				if page_url == None:
					return

				page = OPEN_URL(page_url).content
				redirects = re.compile(
					'<a href="[^"]+go.php\?url=([^"]+)" target="_blank">').findall(page)
				for target in redirects:
					try:
						netloc = urlparse.urlparse(target.strip().lower()).netloc
						host = re.findall('([\w]+[.][\w]+)$', netloc)[0]
						host = host.encode('utf-8')
						# Skip hosts the resolver does not know about.
						if not host in hostDict:
							raise Exception()
						sources.append({
							'source': host,
							'quality': "SD",
							'provider': 'Solar',
							'url': target,
							'direct': False,
							'debridonly': False
						})
					except:
						pass

			return sources
		except:
			return sources
示例#29
0
    def movie(self, imdb, title, year):
        """Search the Cinemabox mobile API for a movie and collect its stream
        endpoint URLs.

        Appends one stream URL per matching film/chapter to self.zen_url and
        returns the list, or None on failure (errors swallowed by design).
        """
        self.zen_url = []
        try:

            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            print("CINEMABOX query", query)
            r = OPEN_URL(query, mobile=True, timeout=30).json()
            print("CINEMABOX ITEMS", r)
            films = r['data']['films']
            for item in films:
                print("CINEMABOX ITEMS 3", item)
                t = item['title'].encode('utf-8')
                # Film id is scraped out of the raw dict repr.
                h = re.findall('''['"]id['"]\s*:\s*(\d+)''', str(item))[0]
                print("CINEMABOX ITEMS 4", t, h)
                if title_match(cleanmovie, cleantitle_get(t),
                               amount=1.0) == True:
                    if year in item['publishDate']:
                        s = self.sources_link % h
                        s = urlparse.urljoin(self.base_link, s)
                        print("CINEMABOX ITEMS PASSED 5", t, h, s)
                        s = OPEN_URL(s, mobile=True).json()
                        chapters = s['data']['chapters']
                        # BUGFIX: `id` (which also shadowed the builtin) was
                        # previously used even when no chapter matched, raising
                        # NameError that aborted the whole scrape and discarded
                        # every link already found. Skip such films instead.
                        chapter_id = None
                        for src in chapters:
                            name = src['title'].encode('utf8')
                            if title_match(cleanmovie,
                                           cleantitle_get(name),
                                           amount=1.0) == True:
                                chapter_id = re.findall(
                                    '''['"]id['"]\s*:\s*(\d+)''',
                                    str(src))[0]
                        if chapter_id is None:
                            continue

                        stream = self.stream_link % chapter_id
                        stream = urlparse.urljoin(self.base_link, stream)
                        self.zen_url.append(stream)

                        print(">>>>>>>>> Cinemabox FOUND LINK", stream)

            return self.zen_url

        except:
            return
示例#30
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find the Onemovies season page for a show and collect the
        player-data URLs whose episode-data matches the wanted episode.

        Returns self.zen_url (list of player URLs) or None on failure.
        """
        try:
            self.zen_url = []
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])
            print ("ONEMOVIES EPISODES STARTED")
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = cleantitle_geturl(title) + "-season-" + season
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)
            cleaned_title = cleantitle_get(title) + "season" + season
            print ("ONEMOVIES EPISODES", q)
            html = BeautifulSoup(OPEN_URL(r).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
                links = result.findAll('a')
                for link in links:
                    link_title = link['title'].encode('utf-8')
                    href = link['href'].encode('utf-8')
                    href = urlparse.urljoin(self.base_link, href)
                    # Force the canonical ".../watching.html" form.
                    href = re.sub('/watching.html','', href)
                    href = href + '/watching.html'

                    if title_match(cleantitle_get(link_title), cleaned_title) == True:
                        # BUGFIX: this branch previously mixed tabs into a
                        # space-indented suite (IndentationError); re-indented.
                        print("ONEMOVIES FOUND MATCH", link_title, href)
                        page = OPEN_URL(href).content
                        soup = BeautifulSoup(page)

                        for u in soup.findAll('div', attrs={'class': 'les-content'}):
                            print("ONEMOVIES PASSED u", u)
                            player = u.findAll('a')[0]['player-data'].encode('utf-8')
                            ep_id = u.findAll('a')[0]['episode-data'].encode('utf-8')
                            # BUGFIX: previously raised Exception on the first
                            # non-matching entry, aborting the whole scrape;
                            # skip non-matching episodes instead.
                            if not ep_id == episode:
                                continue
                            if not player in self.zen_url:
                                self.zen_url.append(player)

                        return self.zen_url

        except:
            return
示例#31
0
    def movie(self, imdb, title, year):
        """Find the Onemovies page for a movie, verify its release year, and
        collect the page's player-data URLs into self.zen_url.

        Returns self.zen_url on a match, or None on failure / no match.
        """
        try:
            self.zen_url = []

            cleaned_title = cleantitle_get(title)
            title = cleantitle_query(title)

            q = self.search_link % (cleantitle_geturl(title))
            r = urlparse.urljoin(self.base_link, q)
            print ("ONEMOVIES EPISODES", r)
            html = BeautifulSoup(OPEN_URL(r).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
                links = result.findAll('a')
                for link in links:
                    link_title = link['title'].encode('utf-8')
                    href = link['href'].encode('utf-8')
                    href = urlparse.urljoin(self.base_link, href)
                    # Force the canonical ".../watching.html" form.
                    href = re.sub('/watching.html','', href)
                    href = href + '/watching.html'

                    print("ONEMOVIES", link_title, href)
                    if title_match(cleantitle_get(link_title), cleaned_title) == True:

                        page = OPEN_URL(href).content

                        # Confirm the year against the page's Release field.
                        release = re.findall('<strong>Release:</strong>(.+?)</p>', page)[0]
                        if year in release:
                            # BUGFIX: this suite previously mixed tabs into a
                            # space-indented block (TabError); re-indented.
                            soup = BeautifulSoup(page)

                            for u in soup.findAll('div', attrs={'class': 'les-content'}):
                                print("ONEMOVIES PASSED u", u)
                                player = u.findAll('a')[0]['player-data'].encode('utf-8')

                                if not player in self.zen_url:
                                    self.zen_url.append(player)

                            return self.zen_url
        except:
            return
示例#32
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Scan the site's daily release pages for an aired episode.

        Debrid-only. Checks the premiere date and the following day (releases
        sometimes land a day late), collecting [release, links] pairs into
        self.elysium_url. Returns None implicitly on failure.
        """
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            params = urlparse.parse_qs(url)
            data = dict([(k, params[k][0]) if params[k] else (k, '')
                         for k in params])
            show = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            data['season'], data['episode'] = season, episode
            today = datetime.datetime.today().date().strftime('%Y.%m.%d')

            show = cleantitle_get(show)
            # Matching key, e.g. "showname" + "s01e05".
            titlecheck = show + "s%02de%02d" % (int(data['season']),
                                                int(data['episode']))

            year, month, day = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                premiered)[0]
            ep_date = "%s.%02d.%02d" % (year, int(month), int(day))
            # A premiere in the future means there is nothing to scrape yet.
            if int(re.sub('[^0-9]', '', str(ep_date))) > int(
                    re.sub('[^0-9]', '', str(today))):
                raise Exception()
            ep_next_date = "%s.%02d.%02d" % (year, int(month), int(day) + 1)

            for day_page in [ep_date, ep_next_date]:
                page_url = urlparse.urljoin(self.base_link,
                                            self.search_link % day_page)
                content = OPEN_URL(page_url).content
                for match in re.finditer(
                        '<center>\s*<b>\s*(.*?)\s*</b>.*?<tr>(.*?)</tr>',
                        content, re.DOTALL):
                    release, links = match.groups()
                    # Strip any markup before cleaning the release name.
                    release = cleantitle_get(re.sub('</?[^>]*>', '', release))
                    if titlecheck in release:
                        self.elysium_url.append([release, links])

            return self.elysium_url
        except:
            pass
示例#33
0
    def movies_info(self, url, year):
        """Fetch a result page and return (year, quality) metadata.

        The first element is the given year when any jt-info block mentions
        it, otherwise '0'; the second is the parsed quality tag. Returns
        None when scraping fails.
        """
        try:
            content = OPEN_URL(url).content

            quality = client.parseDOM(content, 'div',
                                      attrs={'class': 'jtip-quality'})[0]
            quality = quality_tag(quality)

            # y collapses to the year when any info chunk mentions it.
            y = client.parseDOM(content, 'div', attrs={'class': 'jt-info'})
            for chunk in y:
                if year in chunk:
                    y = year

            if not y == year:
                y = '0'
            return (y, quality)
        except:
            return
示例#34
0
 def movie(self, imdb, title, year):
     """Build the 123movies page URL straight from the title slug and
     confirm the release year against the page's date span.

     Returns self.zen_url (single-element list) on success, else None.
     """
     self.zen_url = []
     try:
         slug = cleantitle_geturl(title)
         page_url = urlparse.urljoin(self.base_link, self.movie_link % slug)
         content = OPEN_URL(page_url).content
         date_span = re.findall('span class="date">(.+?)</span>', content)[0]
         if year in date_span:
             print("123MOVIES FOUND MOVIE", page_url)
             self.zen_url.append(page_url)
             return self.zen_url
     except:
         return
示例#35
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve direct gvideo streams for a Solarmovie movie/episode URL.

        Performs the site's token handshake (/ajax/movie_token, decoded by
        uncensored1/uncensored2) per server id, then queries
        /ajax/movie_sources for the playlist. Returns a list of 'gvideo'
        source dicts (possibly empty).

        BUGFIX: the body previously mixed tabs into space-indented suites
        (TabError under python -tt / Python 3); re-indented consistently and
        dropped the unused `original_url` local.
        """
        sources = []
        results = []
        try:

            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data['url'].encode('utf-8')
            type = data['type'].encode('utf-8')

            # Normalise to the canonical ".../watching.html" form.
            url = url.replace('/watching.html', '')
            url = url.replace('.html', '')
            if not url.endswith('/'): url = url + "/watching.html"
            else: url = url + "watching.html"

            referer = url

            request = OPEN_URL(url)
            html = request.content

            # Movie id appears either as a hidden form field or inline JS.
            try: mid = re.compile('name="movie_id" value="(.+?)"').findall(html)[0]
            except: mid = re.compile('id: "(.+?)"').findall(html)[0]

            # Cache-buster timestamp for the token request.
            time_now = int(time.time() * 10000)
            EPISODES = '/ajax/v4_movie_episodes/%s' % (mid)
            EPISODES = urlparse.urljoin(self.base_link, EPISODES)
            r = OPEN_URL(EPISODES).content
            r = clean_html(r)
            print("SOLARMOVIE SOURCES", EPISODES)
            match = re.compile('data-id="(.+?)" id=".+?">.+?a href=".+?"\s*title="(.+?)">').findall(r)
            for data_id, t in match:
                if type == 'tv_shows':
                    # Keep only entries whose title names the wanted episode,
                    # e.g. "05: ..." or "Episode 05: ...".
                    episode = data['episode'].encode('utf-8')
                    episode = "%02d" % int(episode)
                    ep_check1 = episode + ":"
                    ep_check2 = "Episode %s:" % episode
                    if ep_check1 in t or ep_check2 in t:
                        results.append(data_id)
                else:
                    results.append(data_id)

            for data_id in results:
                try:
                    src = urlparse.urljoin(self.base_link, '/ajax/movie_token')
                    payload = {'eid': data_id, 'mid': mid, '_': time_now}
                    token = OPEN_URL(src, params=payload, XHR=True).content

                    # Two obfuscation schemes are in the wild; pick a decoder.
                    if '$_$' in token:
                        p = self.uncensored1(token)
                    elif token.startswith('[]') and token.endswith('()'):
                        p = self.uncensored2(token)
                    else:
                        continue

                    xx, xy = p
                    print ("UNCENSORED DATA", xx, xy, data_id)
                    if xx:
                        servers = '/ajax/movie_sources/%s' % (data_id)
                        srv = urlparse.urljoin(self.base_link, servers)
                        srv = OPEN_URL(srv, params={'x': xx, 'y': xy}, XHR=True).json()

                        playlist = srv['playlist'][0]['sources']
                        for u in playlist:
                            stream = u['file'].encode('utf-8')
                            quality = meta_gvideo_quality(stream)
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'provider': 'Solarmovie',
                                'url': stream,
                                'direct': True,
                                'debridonly': False
                            })

                except:
                    continue

        except:
            pass
        return sources