Example #1
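All of the snippets below are Python 2 methods lifted from Kodi scraper add-on classes, shown without their class bodies. A minimal sketch of the module-level setup they assume (the helper modules cleantitle, client, directstream and the random_agent() function come from the surrounding add-on and are not shown; the import path in the comment is hypothetical):

import re
import json
import base64
import hashlib
import urllib
import urlparse

import requests
from BeautifulSoup import BeautifulSoup  # or, with bs4: from bs4 import BeautifulSoup

# Assumed add-on helpers, e.g.: from resources.lib.modules import cleantitle, client, directstream
# random_agent() is assumed to return a random User-Agent string.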
    def movie(self, imdb, title, year):
        self.zen_url = []
        try:
            headers = {'User-Agent': random_agent()}
            url = None
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)

            query = "%s+%s" % (urllib.quote_plus(title), year)
            query = self.search_link % query
            query = urlparse.urljoin(self.base_link, query)

            html = requests.get(query, headers=headers, timeout=15).content

            containers = re.compile(
                '<span class="year">(.+?)</span><a class="play" href="(.+?)" title="(.+?)">'
            ).findall(html)
            for r_year, r_href, r_title in containers:
                if cleanmovie == cleantitle.get(r_title):
                    if r_year == year:

                        url = r_href
            return url
        except:
            return
Example #2
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):

        try:
            headers = {'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            data['season'], data['episode'] = season, episode
            year = data['year']
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            seasoncheck = "s%02d" % int(season)
            html = requests.get(query, headers=headers, timeout=15).content
            containers = re.compile(
                '<span class="year">(.+?)</span><a class="play" href="(.+?)" title="(.+?)">'
            ).findall(html)
            for r_year, r_href, r_title in containers:
                if cleanmovie in cleantitle.get(r_title):
                    if seasoncheck in cleantitle.get(r_title):
                        if year == r_year:
                            url = r_href.encode(
                                'utf-8') + "?p=" + episode + "&s=11"
                            print("SOCKSHARE PASSED EPISODE", url)
                            return url
        except:
            pass
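For reference, the url argument that these episode() methods parse is itself a urlencoded querystring; a minimal round-trip sketch with made-up values, mirroring the parse_qs idiom above:

import urllib
import urlparse

url = urllib.urlencode({'tvshowtitle': 'Some Show', 'year': '2016'})  # hypothetical input
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
# data == {'tvshowtitle': 'Some Show', 'year': '2016'}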
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources
            referer = url
            headers = {'User-Agent': random_agent(), 'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
            url_plugin = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')
            html = BeautifulSoup(requests.get(referer, headers=headers, timeout=15).content)
            # print ("SOCKSHARE NEW SOURCES", html)
            r = html.findAll('div', attrs={'class': 'new_player'})
            for container in r:
                block = container.findAll('a')
                for items in block:
                    p1 = items['data-film'].encode('utf-8')
                    p2 = items['data-name'].encode('utf-8')
                    p3 = items['data-server'].encode('utf-8')
                    # first POST registers the film/server pair and returns a token ('s') and server id ('v')
                    post = {'ipplugins': '1', 'ip_film': p1, 'ip_name': p2, 'ip_server': p3}
                    req = requests.post(url_plugin, data=post, headers=headers).json()
                    token = req['s'].encode('utf-8')
                    server = req['v'].encode('utf-8')
                    # second POST exchanges the token for the player payload carrying the file list
                    url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')
                    post = {'u': token, 'w': '100%', 'h': '360', 's': server, 'n': '0'}
                    req_player = requests.post(url, data=post, headers=headers).json()
                    # print ("SOCKSHARE SOURCES", req_player)
                    result = req_player['data']
                    result = [i['files'] for i in result]
                    for i in result:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Sockshare', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass


            return sources
        except:
            return sources
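Each sources() method in these examples builds a list of dicts with the same keys; a representative entry with illustrative values:

entry = {
    'source': 'gvideo',                  # host tag or CDN name
    'quality': 'HD',                     # typically '1080p', 'HD', or 'SD' in these scrapers
    'provider': 'Sockshare',             # name of the scraper source
    'url': 'http://example.com/stream',  # hypothetical playable or resolvable link
    'direct': True,                      # True when the url plays without a resolver
    'debridonly': False                  # True when the link requires a debrid account
}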
Example #4
    def movie(self, imdb, title, year):
        self.elysium_url = []	
        try:
            if not alluc_status == 'true': raise Exception()
            print ("ALLUC STARTED" , alluc_user, alluc_pw, max_items)
            headers = {'User-Agent': random_agent()}
            search_title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title) + year
            query = "%s+%s" % (urllib.quote_plus(search_title),year)
            print ("ALLUC r1", query)
            query =  self.api_link % (alluc_user, alluc_pw, query)
            if alluc_debrid == 'true': query =	query + max_result_string
            else: query = query + '+%23newlinks' + max_result_string
            print ("ALLUC r2", query)
            html = requests.get(query, headers=headers, timeout=15).json()
            for result in html['result']:
				if len(result['hosterurls']) > 1: continue
				if result['extension'] == 'rar': continue
				stream_url = result['hosterurls'][0]['url'].encode('utf-8')
				stream_title = result['title'].encode('utf-8')
				stream_title = cleantitle.getsearch(stream_title)
				if cleanmovie in cleantitle.get(stream_title): 
					self.elysium_url.append([stream_url,stream_title])		
					print ("ALLUC r3", self.elysium_url)
            return self.elysium_url
        except:
            return	
Example #5
    def movie(self, imdb, title, year):
        self.zen_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            self.zen_url = []
            headers = {
                'Accept-Language': 'en-US,en;q=0.5',
                'User-Agent': random_agent()
            }

            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            html = BeautifulSoup(
                requests.get(query, headers=headers, timeout=10).content)
            containers = html.findAll('h2', attrs={'class': 'title'})
            for result in containers:

                r_title = result.findAll('a')[0]
                r_title = r_title.string
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                c_title = cleantitle_get_2(r_title)
                if titlecheck in c_title:
                    self.zen_url.append([r_href, r_title])

            return self.zen_url

        except:
            return
Example #6
    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            self.elysium_url = []

            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year

            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            query = query + "&x=0&y=0"
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(
                requests.get(query, headers=headers, timeout=30).content)

            result = html.findAll('div', attrs={'class': 'post'})

            for r in result:
                r_href = r.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                # print ("MOVIEXK r2", r_href)
                r_title = r.findAll('a')[0]["title"]
                # print ("MOVIEXK r3", r_title)
                r_title = r_title.encode('utf-8')
                c_title = cleantitle_get_2(r_title)
                if year in r_title:
                    if titlecheck in c_title:
                        self.elysium_url.append([r_href, r_title])
                        # print "SCNSRC MOVIES %s %s" % (r_title , r_href)
            return self.elysium_url
        except:
            return
Example #7
    def movie(self, imdb, title, year):
        try:
            self.zen_url = []
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            headers = {
                'User-Agent': random_agent(),
                'X-Requested-With': 'XMLHttpRequest'
            }
            search_url = urlparse.urljoin(
                self.base_link, '/wp-content/themes/afdah/ajax-search.php')
            data = {'test1': title, 'test2': 'title'}

            moviesearch = requests.post(search_url, headers=headers, data=data)
            moviesearch = moviesearch.content
            match = re.compile('<li><a href="(.+?)">(.+?)</a></li>').findall(
                moviesearch)
            for href, movietitle in match:
                if year in movietitle and cleanmovie == cleantitle.get(
                        movietitle):

                    url = href.encode('utf-8')
                    if not "http" in url:
                        url = urlparse.urljoin(self.base_link, url)
                    return url
        except:
            return
Example #8
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'User-Agent': random_agent()}
            if url == None: return sources
            # print("ANIMETOON SOURCES", url)
            r = BeautifulSoup(requests.get(url, headers=headers).content)
            r = r.findAll('iframe')
            # print ("ANIMETOON s1",  r)
            for u in r:
                try:
                    u = u['src'].encode('utf-8')
                    # print ("ANIMETOON s2",  u)
                    
                    html = requests.get(u, headers=headers).text
                    r_src = re.compile("url:\s*'(.+?)'").findall(html)
                    for src in r_src:
                        # print ("ANIMETOON s3",  src)
                        vid_url = src.encode('utf-8')
                        sources.append({'source': 'cdn', 'quality': 'SD', 'provider': 'Animetoon', 'url': vid_url, 'direct': True, 'debridonly': False})
                       
                except:
                    pass

            return sources
        except:
            return sources
Example #9
    def movie(self, imdb, title, year):
        self.zen_url = []	
        try:
            headers = {'User-Agent': random_agent()}
            query = self.search_link % (urllib.quote_plus(title),year)
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=30).content)
           
            containers = html.findAll('div', attrs={'class': 'name'})
            for container in containers:
                # print ("MOVIEXK r1", container)
                r_href = container.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                # print ("MOVIEXK r2", r_href)
                r_title = re.findall('</span>(.*?)</a>', str(container))[0]
                # print ("MOVIEXK r3", r_title)
                r_title = r_title.encode('utf-8')
                # print ("MOVIEXK RESULTS", r_title, r_href)
                if year in r_title:
                    if cleaned_title == cleantitle.get(r_title):
                        redirect = requests.get(r_href, headers=headers, timeout=30).text
                        try:
                            r_url_trailer = re.search('<dd>[Tt]railer</dd>', redirect)
                            if r_url_trailer: continue
                        except:
                            pass
                        r_url = re.findall('<a href="(.*?)" class="btn-watch"', redirect)[0]
                        r_url = r_url.encode('utf-8')
                        print("MOVIEXK PLAY URL", r_url)
                        self.zen_url.append(r_url)
            return self.zen_url
        except:
            return	
Example #10
    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
            if not alluc_status == 'true': raise Exception()
            print("ALLUC STARTED", alluc_user, alluc_pw, max_items)
            headers = {'User-Agent': random_agent()}
            search_title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title) + year
            query = "%s+%s" % (urllib.quote_plus(search_title), year)
            print("ALLUC r1", query)
            query = self.api_link % (alluc_user, alluc_pw, query)
            if alluc_debrid == 'true': query = query + max_result_string
            else: query = query + '+%23newlinks' + max_result_string
            print("ALLUC r2", query)
            html = requests.get(query, headers=headers, timeout=15).json()
            for result in html['result']:
                if len(result['hosterurls']) > 1: continue
                if result['extension'] == 'rar': continue
                stream_url = result['hosterurls'][0]['url'].encode('utf-8')
                stream_title = result['title'].encode('utf-8')
                stream_title = cleantitle.getsearch(stream_title)
                if cleanmovie in cleantitle.get(stream_title):
                    self.elysium_url.append([stream_url, stream_title])
                    print("ALLUC r3", self.elysium_url)
            return self.elysium_url
        except:
            return
Example #11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            count = 0
            for url in self.zen_url:
                headers = {'User-Agent': random_agent()}
                html = BeautifulSoup(requests.get(url, headers=headers, timeout=30).content)
                print("WATCHEPISODES SOURCES", url)
                r = html.findAll('div', attrs={'class': 'site'})
                for container in r:
                    if count > 100: break
                    try:
                        count += 1
                        r_url = container.findAll('a')[0]['data-actuallink'].encode('utf-8')
                        print("WATCHEPISODES r_url", r_url)
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(r_url.strip().lower()).netloc)[0]
                        host = host.encode('utf-8')
                        if not host in hostDict: raise Exception()
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'provider': 'Watchepisodes',
                            'url': r_url,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass
            return sources
        except:
            return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            for url in self.zen_url:

                headers = {'User-Agent': random_agent()}
                html = BeautifulSoup(
                    requests.get(url, headers=headers, timeout=30).content)
                r = html.findAll('source')
                for r_source in r:
                    url = r_source['src'].encode('utf-8')
                    quality = r_source['data-res'].encode('utf-8')
                    if "1080" in quality: quality = "1080p"
                    elif "720" in quality: quality = "HD"
                    else: quality = "SD"
                    # print ("MOVIEXK SOURCES", url,quality)
                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'provider': 'Moviexk',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })
            return sources
        except:
            return sources
Example #13
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = (urllib.quote_plus(title)) + "+season+" + season
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)

            print("ONEMOVIES EPISODES", r)
            checkseason = cleantitle.get(title) + "season" + season
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(
                requests.get(r, headers=headers, timeout=20).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:

                links = result.findAll('a')

                for link in links:
                    link_title = str(link['title'])
                    href = str(link['href'])
                    href = client.replaceHTMLCodes(href)
                    if cleantitle.get(link_title) == checkseason:
                        ep_id = '?episode=%01d' % int(episode)
                        url = href + ep_id
                        # print("ONEMOVIES Passed", href)
                        return url

        except:
            return
Example #14
    def movie(self, imdb, title, year):
        try:
            self.elysium_url = []
            if not debridstatus == 'true': raise Exception()
            headers = {'User-Agent': random_agent()}

            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            print("HEVC query", query)
            html = BeautifulSoup(rq.get(query, headers=headers, timeout=10).content)

            containers = html.findAll('div', attrs={'class': 'postcontent'})

            for result in containers:
                print("HEVC containers", result)
                r_title = result.findAll('a')[0]["title"]
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                c_title = cleantitle.get(r_title)
                if year in r_title and cleanmovie in c_title:
                    self.elysium_url.append([r_href, r_title])
                    print("HEVC PASSED MOVIE ", r_title, r_href)
            return self.elysium_url
        except:
            return
Example #15
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			headers = {'User-Agent': random_agent()}
			for url in self.genesisreborn_url:
				if url == None: return
				
				html = requests.get(url, headers=headers, timeout=10).text
				
				match = re.compile('<a href="[^"]+go.php\?url=([^"]+)" target="_blank">').findall(html)
				for url in match:
					try:
						# print("SOLAR SOURCE", url)
						host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
						host = host.encode('utf-8')			
						if not host in hostDict: raise Exception()
						quality = "SD"
							# print("OpenMovies SOURCE", stream_url, label)
						sources.append({'source': host, 'quality':quality, 'provider': 'Solar', 'url': url, 'direct': False, 'debridonly': False})
					except:
						pass


			return sources
		except:
			return sources
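The host filter above relies on a recurring idiom: collapse the link's netloc to its last two dot-separated labels, then check membership in hostDict. A standalone illustration with a made-up URL:

import re
import urlparse

netloc = urlparse.urlparse('http://www.example.co/watch/123'.strip().lower()).netloc
host = re.findall('([\w]+[.][\w]+)$', netloc)[0]
# host == 'example.co'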
Example #16
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'User-Agent': random_agent()}
            if url == None: return sources
            # print("ANIMETOON SOURCES", url)
            r = BeautifulSoup(requests.get(url, headers=headers).content)
            r = r.findAll('iframe')
            # print ("ANIMETOON s1",  r)
            for u in r:
                try:
                    u = u['src'].encode('utf-8')
                    # print ("ANIMETOON s2",  u)

                    html = requests.get(u, headers=headers).text
                    r_src = re.compile("url:\s*'(.+?)'").findall(html)
                    for src in r_src:
                        # print ("ANIMETOON s3",  src)
                        vid_url = src.encode('utf-8')
                        sources.append({
                            'source': 'cdn',
                            'quality': 'SD',
                            'provider': 'Animetoon',
                            'url': vid_url,
                            'direct': True,
                            'debridonly': False
                        })

                except:
                    pass

            return sources
        except:
            return sources
Example #17
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            for movielink, title in self.zen_url:

                headers = {'User-Agent': random_agent()}
                html = BeautifulSoup(requests.get(movielink, headers=headers, timeout=15).content)
                result = html.findAll('div', attrs={'class': 'comm_content'})[:3]
                for r in result:
                    r_href = r.findAll('a')
                    for items in r_href:
                        url = items['href'].encode('utf-8')
                        if "1080" in url: quality = "1080p"
                        elif "720" in url: quality = "HD"
                        else: quality = "SD"
                        info = ''
                        if "hevc" in url.lower(): info = "HEVC"
                        if not any(value in url for value in ['sample', 'uploadkadeh', 'wordpress', 'crazy4tv', 'imdb.com', 'youtube', 'userboard', 'kumpulbagi', 'mexashare', 'myvideolink.xyz', 'myvideolinks.xyz', 'costaction', 'crazydl', '.rar', '.RAR', 'ul.to', 'safelinking', 'linx.2ddl.ag', 'upload.so', '.zip', 'go4up', 'adf.ly', '.jpg', '.jpeg']):
                            if any(value in url for value in hostprDict):
                                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                except: host = 'Videomega'
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                sources.append({'source': host, 'quality': quality, 'provider': 'Scnsrc', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            return sources
        except:
            return sources
Example #18
    def sources(self, url, hostDict, hostprDict):
	
        try:
            sources = []
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}

            for movielink, title in self.genesisreborn_url:
                quality = quality_tag(title)
                html = BeautifulSoup(requests.get(movielink, headers=headers, timeout=10).content)
                containers = html.findAll('div', attrs={'class': 'txt-block'})
                for result in containers:
                    print("THREEMOVIES LINKS ", result)
                    links = result.findAll('a')

                    for r_href in links:
                        url = r_href['href']
                        myurl = str(url)
                        if any(value in myurl for value in hostprDict):
                            try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            except: host = 'Threemovies'

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Threemovies', 'url': url, 'direct': False, 'debridonly': True})

            return sources
        except:
            return sources
Example #19
    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}

            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=10).content)
            containers = html.findAll('h2', attrs={'class': 'title'})
            for result in containers:

                r_title = result.findAll('a')[0]
                r_title = r_title.string
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                c_title = cleantitle_get_2(r_title)
                if titlecheck in c_title:
                    self.elysium_url.append([r_href, r_title])

            return self.elysium_url

        except:
            return
Example #20
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'User-Agent': random_agent()}
            if url == None: return sources
            url = url.replace('https', 'http')
            # print("ANIMETOON SOURCES", url)
            mobile_url = url.replace('www', 'm')

            html = requests.get(mobile_url, verify=False).text

            match_playlink = re.compile('<source src="(.+?)"').findall(html)
            for playlink in match_playlink:
                url = playlink.encode('utf-8')
                sources.append({
                    'source': 'cdn',
                    'quality': 'SD',
                    'provider': 'Watchcartoon',
                    'url': url,
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #21
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = (urllib.quote_plus(title)) + "+season+" + season
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)

            print ("ONEMOVIES EPISODES", r)
            checkseason = cleantitle.get(title) + "season" + season
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(requests.get(r, headers=headers, timeout=20).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
               
                links = result.findAll('a')

                for link in links:
                    link_title = str(link['title'])
                    href = str(link['href'])
                    href = client.replaceHTMLCodes(href)
                    if cleantitle.get(link_title) == checkseason:
                        ep_id = '?episode=%01d' % int(episode)
                        url = href + ep_id
                        # print("ONEMOVIES Passed", href)
                        return url

        except:
            return
Example #22
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            for movielink, title in self.elysium_url:

                headers = {'User-Agent': random_agent()}
                html = BeautifulSoup(requests.get(movielink, headers=headers, timeout=15).content)
                result = html.findAll('div', attrs={'class': 'comm_content'})[:3]
                for r in result:
                    r_href = r.findAll('a')
                    for items in r_href:
                        url = items['href'].encode('utf-8')
                        if "1080" in url: quality = "1080p"
                        elif "720" in url: quality = "HD"
                        else: quality = "SD"
                        info = ''
                        if "hevc" in url.lower(): info = "HEVC"
                        if not any(value in url for value in ['sample', 'uploadkadeh', 'wordpress', 'crazy4tv', 'imdb.com', 'youtube', 'userboard', 'kumpulbagi', 'mexashare', 'myvideolink.xyz', 'myvideolinks.xyz', 'costaction', 'crazydl', '.rar', '.RAR', 'ul.to', 'safelinking', 'linx.2ddl.ag', 'upload.so', '.zip', 'go4up', 'adf.ly', '.jpg', '.jpeg']):
                            if any(value in url for value in hostprDict):
                                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                except: host = 'Videomega'
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                sources.append({'source': host, 'quality': quality, 'provider': 'Scnsrc', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            return sources
        except:
            return sources
Example #23
    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()			
            self.elysium_url = []
           
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie+year
			
            query = self.search_link % (urllib.quote_plus(title),year)
            query = urlparse.urljoin(self.base_link, query)
            query = query + "&x=0&y=0"
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=30).content)
           
            result = html.findAll('div', attrs={'class': 'post'})

            for r in result:
                r_href = r.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                # print ("MOVIEXK r2", r_href)
                r_title = r.findAll('a')[0]["title"]
                # print ("MOVIEXK r3", r_title)
                r_title = r_title.encode('utf-8')
                c_title = cleantitle_get_2(r_title)
                if year in r_title:
                    if titlecheck in c_title:
                        self.elysium_url.append([r_href, r_title])
                        # print "SCNSRC MOVIES %s %s" % (r_title , r_href)
            return self.elysium_url
        except:
            return
Example #24
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		self.zen_url = []
		try:
			headers = {'User-Agent': random_agent()}
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			data['season'], data['episode'] = season, episode
			self.zen_url = []
			title = cleantitle.getsearch(title)
			title = title.replace(' ','-')
			query = title + "-" + season + "x" + episode
			query= self.ep_link % query
			query = urlparse.urljoin(self.base_link, query)
			r = BeautifulSoup(requests.get(query, headers=headers, timeout=10).content)
			r = r.findAll('iframe')
            # print ("ANIMETOON s1",  r)
			for u in r:
				u = u['src'].encode('utf-8')
				if u.startswith("//"): u = "http:" + u
				
				self.zen_url.append(u)
			return self.zen_url
		except:
			return
Example #25
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'User-Agent': random_agent()}
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            r = BeautifulSoup(requests.get(url, headers=headers).content)
            r = r.findAll('iframe')
            # print ("GOGOANIME s1",  r)
            for u in r:
                try:
                    u = u['src'].encode('utf-8')
                    # print ("GOGOANIME s2",  u)
                    if 'vidstreaming' not in u: raise Exception()
                    html = BeautifulSoup(requests.get(u, headers=headers).content)
                    r_src = html.findAll('source')
                    for src in r_src:
                        vid_url = src['src'].encode('utf-8')
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(vid_url)[0]['quality'], 'provider': 'Gogoanime', 'url': vid_url, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            return sources
Example #26
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not debridstatus == 'true': raise Exception()
            self.zen_url = []
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            ep_search = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = str(ep_search).lower()
            query = self.search_link % (urllib.quote_plus(title), ep_search)
            query = urlparse.urljoin(self.base_link, query)
            titlecheck = cleanmovie + episodecheck
            html = BeautifulSoup(OPEN_URL(query).content)

            containers = html.findAll('h1', attrs={'class': 'entry-title'})

            for result in containers:

                r_title = result.findAll('a')[0]
                r_title = r_title.string
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                r_title = cleantitle.get(r_title)
                if titlecheck in r_title:
                    self.zen_url.append([r_href, r_title])

            return self.zen_url
        except:
            return
Example #27
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
            headers = {'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            data['season'], data['episode'] = season, episode
            self.zen_url = []
            title = cleantitle.getsearch(title)
            title = title.replace(' ', '-')
            query = title + "-" + season + "x" + episode
            query = self.ep_link % query
            query = urlparse.urljoin(self.base_link, query)
            r = BeautifulSoup(
                requests.get(query, headers=headers, timeout=10).content)
            r = r.findAll('iframe')
            # print ("ANIMETOON s1",  r)
            for u in r:
                u = u['src'].encode('utf-8')
                if u.startswith("//"): u = "http:" + u

                self.zen_url.append(u)
            return self.zen_url
        except:
            return
Example #28
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        
        try:
            headers = {'User-Agent': random_agent()}	
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            year = data['year']
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            seasoncheck = "s%02d" % int(season)
            html = requests.get(query, headers=headers, timeout=15).content
            containers = re.compile('<span class="year">(.+?)</span><a class="play" href="(.+?)" title="(.+?)">').findall(html)
            for r_year, r_href, r_title in containers:
                if cleanmovie in cleantitle.get(r_title):
                    if seasoncheck in cleantitle.get(r_title):
                        if year == r_year:
                            url = r_href.encode('utf-8') + "?p=" + episode + "&s=11"
                            print("SOCKSHARE PASSED EPISODE", url)
                            return url
        except:
            pass
Example #29
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            self.elysium_url = []
            if not debridstatus == 'true': raise Exception()
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = data['year']
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            ep_search = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = str(ep_search).lower()
            titlecheck = cleanmovie + episodecheck
            query = self.search_link % (urllib.quote_plus(title), ep_search)
            query = urlparse.urljoin(self.base_link, query)
            print("HEVC query", query)
            html = BeautifulSoup(rq.get(query, headers=headers, timeout=10).content)

            containers = html.findAll('div', attrs={'class': 'postcontent'})

            for result in containers:
                print("HEVC containers", result)
                r_title = result.findAll('a')[0]["title"]
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                check = cleantitle.get(r_title)
                if titlecheck in check:
                    self.elysium_url.append([r_href, r_title])
                    print("HEVC PASSED MOVIE ", r_title, r_href)
            return self.elysium_url
        except:
            return
Example #30
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources
            referer = url
            headers = {'User-Agent': random_agent(), 'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
            url_plugin = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')
            html = BeautifulSoup(requests.get(referer, headers=headers, timeout=15).content)
            # print ("SOCKSHARE NEW SOURCES", html)
            r = html.findAll('div', attrs={'class': 'new_player'})
            for container in r:
                block = container.findAll('a')
                for items in block:
                    p1 = items['data-film'].encode('utf-8')
                    p2 = items['data-name'].encode('utf-8')
                    p3 = items['data-server'].encode('utf-8')
                    post = {'ipplugins': '1', 'ip_film': p1, 'ip_name': p2, 'ip_server': p3}
                    req = requests.post(url_plugin, data=post, headers=headers).json()
                    token = req['s'].encode('utf-8')
                    server = req['v'].encode('utf-8')
                    url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')
                    post = {'u': token, 'w': '100%', 'h': '360', 's': server, 'n': '0'}
                    req_player = requests.post(url, data=post, headers=headers).json()
                    # print ("SOCKSHARE SOURCES", req_player)
                    result = req_player['data']
                    result = [i['files'] for i in result]
                    for i in result:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Sockshare', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass


            return sources
        except:
            return sources
Example #31
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            headers = {'User-Agent': random_agent()}
            html = OPEN_URL(url)
            r = BeautifulSoup(html.content)
            r = r.findAll('tr')
            for items in r:
                href = items.findAll('a')[0]['href'].encode('utf-8')
                print("AFMOVIE R2", href)
                try:
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(href.strip().lower()).netloc)[0]
                except:
                    host = 'Afmovies'
                if not host in hostDict: continue
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'provider': 'Afmovies',
                    'url': href,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #32
    def movie(self, imdb, title, year):
        try:
            self.genesisreborn_url = []
            if not debridstatus == 'true': raise Exception()
            headers = {'User-Agent': random_agent()}

            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            print("HEVC query", query)
            html = BeautifulSoup(
                rq.get(query, headers=headers, timeout=10).content)

            containers = html.findAll('div', attrs={'class': 'postcontent'})

            for result in containers:
                print("HEVC containers", result)
                r_title = result.findAll('a')[0]["title"]
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                c_title = cleantitle.get(r_title)
                if year in r_title and cleanmovie in c_title:
                    self.genesisreborn_url.append([r_href, r_title])
                    print("HEVC PASSED MOVIE ", r_title, r_href)
            return self.genesisreborn_url
        except:
            return
Example #33
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            headers = {'User-Agent': random_agent()}
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            r = BeautifulSoup(requests.get(url, headers=headers).content)
            r = r.findAll('iframe')
            # print ("GOGOANIME s1",  r)
            for u in r:
                try:
                    u = u['src'].encode('utf-8')
                    # print ("GOGOANIME s2",  u)
                    if 'vidstreaming' not in u: raise Exception()
                    html = BeautifulSoup(requests.get(u, headers=headers).content)
                    r_src = html.findAll('source')
                    for src in r_src:
                        vid_url = src['src'].encode('utf-8')
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(vid_url)[0]['quality'], 'provider': 'Gogoanime', 'url': vid_url, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            return sources
Example #34
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []	
        try:
            # print ("MOVIEXK")
            headers = {'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            year = data['year']
            query = self.search_link % (urllib.quote_plus(title),year)
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            ep_id = int(episode)
            season_id = int(season)
            season_check = "%02d" % (int(data['season']))
            ep_check = season_id + ep_id
            # print("MOVIEXK EPISODE CHECK", ep_check)
            html = BeautifulSoup(OPEN_URL(query, mobile=True).content)
            containers = html.findAll('div', attrs={'class': 'name'})
            for container in containers:
                # print ("MOVIEXK r1", container)
                r_href = container.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                # print ("MOVIEXK r2", r_href)
                r_title = re.findall('</span>(.*?)</a>', str(container))[0]
                # print ("MOVIEXK r3", r_title)
                r_title = r_title.encode('utf-8')

                r_title = re.sub('^(watch movies)|(watch movie)|(watch)', '', r_title.lower())
                # print ("MOVIEXK RESULTS", r_title, r_href)
                if cleaned_title in cleantitle.get(r_title):
                    redirect = OPEN_URL(r_href, mobile=True).text
                    try:
                        r_url_trailer = re.search('<dd>[Tt]railer</dd>', redirect)
                        if r_url_trailer: continue
                    except:
                        pass
                    try:
                        p = client.parseDOM(redirect, 'div', attrs = {'class': 'btn-groups.+?'})
                        r_url = client.parseDOM(p, 'a', ret='href')[0]
                        print("MOVIEXK PLAY BUTTON 1", r_url)
                        url = '%s?season=%01d&episode=%01d' % (r_url.encode('utf-8'), int(season), int(episode))
                        return url
                    except:
                        p = client.parseDOM(redirect, 'div', attrs = {'id': 'servers'})
                        r = client.parseDOM(p, 'li')
                        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                        r = [i[0] for i in r]
                        r = r[0]
                        r_url = r.encode('utf-8')
                        print("MOVIEXK PLAY BUTTON 2", r)
                        url = '%s?season=%01d&episode=%01d' % (r_url, int(season), int(episode))
                        return url
        except:
            return		
Example #35
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            referer = url

            for i in range(3):
                u = requests.get(referer).text
                if not u == None: break


            links = []

            try:
                headers = {'User-Agent': random_agent(), 'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}

                url = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')

                iframe = re.compile('<a data-film="(.+?)" data-name="(.+?)" data-server="(.+?)"').findall(u)
                for p1, p2, p3 in iframe:
                    try:
                        post = {'ipplugins': '1', 'ip_film': p1, 'ip_name': p2, 'ip_server': p3}
                        # post = urllib.urlencode(post)
                        # print ("PUTMV URL", post)

                        for i in range(3):
                            req = requests.post(url, data=post, headers=headers).content
                        # print ("PUTMV req1", req)

                        result = json.loads(req)
                        token = result['s'].encode('utf-8')
                        server = result['v'].encode('utf-8')

                        # print ("PUTMV server", token, server)

                        url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')

                        post = {'u': token, 'w': '100%', 'h': '500', 's': server, 'n': '0'}
                        req_player = requests.post(url, data=post, headers=headers).content
                        # print ("PUTMV req_player", req_player)
                        result = json.loads(req_player)['data']
                        result = [i['files'] for i in result]

                        for i in result:
                            try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Putmovie', 'url': i, 'direct': True, 'debridonly': False})
                            except: pass
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example #36
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            data['season'], data['episode'] = season, episode
            headers = {'User-Agent': random_agent()}
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            ep_id = int(episode)
            season_id = int(season)
            html = requests.get(query, headers=headers, timeout=30).json()
            results = html['series']
            for item in results:
                r_title = item['label'].encode('utf-8')
                r_link = item['seo'].encode('utf-8')
                if cleaned_title == cleantitle.get(r_title):
                    r_page = self.base_link + "/" + r_link
                    print("WATCHEPISODES r1", r_title, r_page)
                    r_html = BeautifulSoup(
                        requests.get(r_page, headers=headers,
                                     timeout=30).content)
                    r = r_html.findAll(
                        'div', attrs={'class': re.compile('\s*el-item\s*')})
                    for container in r:
                        try:
                            r_href = container.findAll('a')[0]['href'].encode(
                                'utf-8')
                            r_title = container.findAll(
                                'a')[0]['title'].encode('utf-8')
                            print("WATCHEPISODES r3", r_href, r_title)
                            episode_check = "[sS]%02d[eE]%02d" % (int(season),
                                                                  int(episode))
                            match = re.search(episode_check, r_title)
                            if match:
                                print("WATCHEPISODES PASSED EPISODE", r_href)
                                self.zen_url.append(r_href)

                            else:
                                match2 = re.search(episode_check, r_href)
                                if match2:
                                    self.zen_url.append(r_href)
                        except:
                            pass
            print("WATCHEPISODES LIST", self.zen_url)
            return self.zen_url
        except:
            pass
Example #37
	def movie(self, imdb, title, year):
		self.zen_url = []
		try:
			headers = {'User-Agent': random_agent()}
			
			title = cleantitle_geturl(title)
			title = title + "-" + year
			query = self.movie_link % title
			u = urlparse.urljoin(self.base_link, query)
			self.zen_url.append(u)
			return self.zen_url
		except:
			return
Example #38
    def movie(self, imdb, title, year):
        self.genesisreborn_url = []
        try:
            headers = {'User-Agent': random_agent()}

            title = cleantitle.getsearch(title)
            title = title.replace(' ', '-')
            title = title + "-" + year
            query = self.movie_link % title
            u = urlparse.urljoin(self.base_link, query)
            self.genesisreborn_url.append(u)
            return self.genesisreborn_url
        except:
            return
Example #39
	def movie(self, imdb, title, year):
		self.genesisreborn_url = []
		try:
			headers = {'User-Agent': random_agent()}
			
			title = cleantitle.getsearch(title)
			title = title.replace(' ','-')
			title = title + "-" + year
			query = self.movie_link % title
			u = urlparse.urljoin(self.base_link, query)
			self.genesisreborn_url.append(u)
			return self.genesisreborn_url
		except:
			return
Example #40
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            headers = {'User-Agent': random_agent()}
            # print("WATCHCARTOON")
            title = cleantitle.get(tvshowtitle)
            for url in self.cartoon_link:
                r = requests.get(url, headers=headers).text
                match = re.compile('<a href="(.+?)" title=".+?">(.+?)</a>').findall(r)
                for url, name in match:
                    if title == cleantitle.get(name):
                        print("WATCHCARTOON PASSED", url)
                        return url
        except:
            return
Example #41
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            headers = {'User-Agent': random_agent()}
            # print("WATCHCARTOON")
            title = cleantitle.get(tvshowtitle)
            for url in self.cartoon_link:
                r = requests.get(url, headers=headers).text
                match = re.compile('<a href="(.+?)" title=".+?">(.+?)</a>').findall(r)
                for url, name in match:
                    if title == cleantitle.get(name):
                        print("WATCHCARTOON PASSED", url)
                        return url
        except:
            return
Example #42
	def movie(self, imdb, title, year):
		self.elysium_url = []
		try:
			headers = {'User-Agent': random_agent()}
			
			title = cleantitle_geturl(title)
			
			query = self.movie_link % title
			u = urlparse.urljoin(self.base_link, query)
			url = {'url': u, 'year': year, 'type': 'movie'}
			url = urllib.urlencode(url)

			return url
		except:
			return
Example #43
    def movie(self, imdb, title, year):
        self.zen_url = []
        try:
            headers = {'User-Agent': random_agent()}

            title = cleantitle_geturl(title)

            query = self.movie_link % title
            u = urlparse.urljoin(self.base_link, query)
            url = {'url': u, 'year': year, 'type': 'movie'}
            url = urllib.urlencode(url)

            return url
        except:
            return
Example #44
    def sources(self, url, hostDict, hostprDict):

        try:
            sources = []
            headers = {
                'Accept-Language': 'en-US,en;q=0.5',
                'User-Agent': random_agent()
            }

            for movielink, title in self.zen_url:
                quality = quality_tag(title)
                html = BeautifulSoup(
                    requests.get(movielink, headers=headers,
                                 timeout=10).content)
                containers = html.findAll('div', attrs={'class': 'txt-block'})
                for result in containers:
                    print("THREEMOVIES LINKS ", result)
                    links = result.findAll('a')

                    for r_href in links:
                        url = r_href['href']
                        myurl = str(url)
                        if any(value in myurl for value in hostprDict):

                            try:
                                host = re.findall(
                                    '([\w]+[.][\w]+)$',
                                    urlparse.urlparse(
                                        url.strip().lower()).netloc)[0]
                            except:
                                host = 'Threemovies'

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'provider': 'Threemovies',
                                'url': url,
                                'direct': False,
                                'debridonly': True
                            })

            return sources
        except:
            return sources
Example #45
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			headers = {'User-Agent': random_agent()}
			for url in self.zen_url:
				if url == None: return
				
				r = requests.get(url, headers=headers, timeout=10).text
				
				match = re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(r)
				for href, quality in match:
					quality = quality_tag(quality)
					sources.append({'source': 'gvideo', 'quality':quality, 'provider': 'Bcinema', 'url': href, 'direct': True, 'debridonly': False})

			return sources
		except:
			return sources
Example #46
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            for movielink, referer in self.url:
                try:
                    # print("CMOVIES SOURCE LINKS", movielink)
                    referer = referer
                    pages = client.request(movielink, timeout='10')
                    scripts = re.findall('hash\s*:\s*"([^"]+)', pages)[0]
                    # print("CMOVIES SERVER SCRIPT", scripts)
                    if scripts:
                        token = self.__get_token()
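                        # the page's hash fragment and a session token are md5-salted into a cookie plus the stream URL the site accepts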
                        key = hashlib.md5('(*&^%$#@!' +
                                          scripts[46:58]).hexdigest()
                        cookie = '%s=%s' % (key, token)
                        stream_url = self.stream_link % (
                            scripts,
                            hashlib.md5('!@#$%^&*(' + token).hexdigest())
                        # print("CMOVIES PLAYABLE LINKS", stream_url)
                        headers = {
                            'Referer': referer,
                            'User-Agent': random_agent(),
                            'Cookie': cookie
                        }
                        req = s.get(stream_url, headers=headers,
                                    timeout=5).json()
                        playlist = req['playlist'][0]['sources']
                        for item in playlist:
                            url = item['file'].encode('utf-8')
                            r_quality = item['label'].encode('utf-8')
                            quality = quality_tag(r_quality)
                            # print("CMOVIES playlist", quality ,url)
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'provider': 'Watch5s',
                                'url': url,
                                'direct': True,
                                'debridonly': False
                            })

                except:
                    pass
            return sources
        except:
            return sources
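The token handling above is worth spelling out: the cookie name is an md5 of a salt plus characters 46-58 of the page's hash value, and the playlist request carries an md5 of a second salt plus the token. A worked sketch with dummy inputs (only the two salt strings come from the scraper; page_hash and token are placeholders):

import hashlib

page_hash = 'x' * 64        # the value scraped from hash : "..."
token = 'dummytoken'        # the value returned by self.__get_token()

cookie_key = hashlib.md5('(*&^%$#@!' + page_hash[46:58]).hexdigest()
cookie = '%s=%s' % (cookie_key, token)   # sent in the Cookie header

stream_key = hashlib.md5('!@#$%^&*(' + token).hexdigest()
# stream_key and page_hash are interpolated into self.stream_link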
Example No. 47
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return
            # the base64 string decodes to a TheTVDB API URL template:
            # http://thetvdb.com/api/<apikey>/series/%s/default/%01d/%01d
            num = base64.b64decode(
                'aHR0cDovL3RoZXR2ZGIuY29tL2FwaS8xRDYyRjJGOTAwMzBDNDQ0L3Nlcmllcy8lcy9kZWZhdWx0LyUwMWQvJTAxZA=='
            )
            num = num % (tvdb, int(season), int(episode))
            num = client.request(num)
            num = client.parseDOM(num, 'absolute_number')[0]
            absolute_id = num.encode('utf-8')
            # print("ANIMETOON EPISODES", season, episode, num)
            headers = {'User-Agent': random_agent()}
            r = BeautifulSoup(requests.get(url, headers=headers).content)
            r = r.findAll('div', attrs={'id': 'videos'})
            for containers in r:
                r_block = containers.findAll('a')
                for links in r_block:
                    ep_href = links['href'].encode('utf-8')
                    # print("ANIMETOON", ep_href)
                    if "-season-" in ep_href:
                        # print("ANIMETOON SEASON MATCHING")
                        checkseason = re.search("-season-(\d+)-", ep_href)
                        if checkseason:
                            checkseason = checkseason.group(1)
                            if checkseason == season:
                                checkepisode = re.search(
                                    "-episode-(\d+)", ep_href)
                                if checkepisode:
                                    checkepisode = checkepisode.group(1)
                                    if checkepisode == episode:
                                        # print("ANIMETOON PASSED", ep_href)
                                        url = ep_href
                                        return url

                    else:
                        # print("ANIMETOON ABSOLUTEID MATCHING")
                        checkepisode = re.search("-episode-(\d+)", ep_href)
                        if checkepisode:
                            checkepisode = checkepisode.group(1)
                            if checkepisode == absolute_id:
                                # print("ANIMETOON PASSED", ep_href)
                                url = ep_href
                                return url

        except:
            return
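The base64 string above decodes to TheTVDB's legacy XML API template, which the anime scrapers use to convert a (season, episode) pair into the show's absolute episode number. A standalone sketch of that lookup using requests and re in place of the addon's client helpers (the API key is the one embedded in the scraper; the legacy XML endpoint may no longer respond):

import re
import requests

def absolute_number(tvdb_id, season, episode):
    template = 'http://thetvdb.com/api/1D62F2F90030C444/series/%s/default/%01d/%01d'
    xml = requests.get(template % (tvdb_id, int(season), int(episode)),
                       timeout=15).content
    # roughly what client.parseDOM(num, 'absolute_number')[0] does
    return re.findall('<absolute_number>(.+?)</absolute_number>', xml)[0]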
Example No. 48
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
            headers = {'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            title = cleantitle_geturl(title)
            query = title + "-season-" + season + "-episode-" + episode
            query = self.ep_link % query
            # print("SOLAR query", query)
            u = urlparse.urljoin(self.base_link, query)
            self.zen_url.append(u)
            return self.zen_url
        except:
            return
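This scraper never searches at all: it builds the episode URL straight from a slug. Assuming cleantitle_geturl() lowercases the title and hyphenates spaces (a guess; the helper is not shown in these excerpts), the query comes out as:

# illustrative values only; self.ep_link lives on the scraper class
title = 'breaking-bad'   # cleantitle_geturl('Breaking Bad'), assumed
query = title + "-season-" + "1" + "-episode-" + "2"
print(query)             # breaking-bad-season-1-episode-2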
Example No. 49
0
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            headers = {'User-Agent': random_agent()}
            query = self.search_link % (urllib.quote_plus(tvshowtitle))
            q = urlparse.urljoin(self.base_link, query)
            r = BeautifulSoup(requests.get(q, headers=headers).content)
            r = r.findAll('div', attrs={'class': 'right_col'})
            for containers in r:
                r_block = containers.findAll('a')[0]
                r_title = r_block.text
                r_title = r_title.encode('utf-8')
                r_href = r_block['href'].encode('utf-8')
                if cleantitle.get(tvshowtitle) == cleantitle.get(r_title):
                    # print("ANIMETOON PASSED", r_title)
                    url = r_href
                    return url
        except:
            return
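All of these scrapers match titles through cleantitle.get(), which the excerpts never define. A hypothetical stand-in that captures the comparison idiom (the module that ships with the addon is more elaborate) would lowercase and strip everything non-alphanumeric, so differently formatted titles compare equal:

import re

def get(title):
    # hypothetical simplification of cleantitle.get()
    return re.sub(r'[^a-z0-9]', '', str(title).lower())

print(get('The Flash (2014)') == get('the-flash-2014'))  # True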
Example No. 50
0
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            headers = {'User-Agent': random_agent()}
            query = self.search_link % (urllib.quote_plus(tvshowtitle))
            q = urlparse.urljoin(self.base_link, query)
            r = BeautifulSoup(requests.get(q, headers=headers).content)
            r = r.findAll('div', attrs={'class': re.compile('last_episodes.+?')})
            for containers in r:
                # print ("GOGOANIME r1", containers)
                r_url = containers.findAll('a')[0]['href'].encode('utf-8')
                r_title = containers.findAll('a')[0]['title'].encode('utf-8')
                if cleantitle.get(r_title) == cleantitle.get(tvshowtitle):
                    url = re.findall('(?://.+?|)(/.+)', r_url)[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    return url
        except:
            return
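The re.findall('(?://.+?|)(/.+)', r_url) step normalizes links by dropping an optional scheme-and-host prefix and keeping only the path, so absolute and relative hrefs end up identical. A quick demonstration (the URLs are illustrative):

import re

pattern = r'(?://.+?|)(/.+)'
print(re.findall(pattern, 'http://gogoanime.io/category/naruto')[0])
print(re.findall(pattern, '/category/naruto')[0])
# both print: /category/naruto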
Example No. 52
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            headers = {'User-Agent': random_agent()}
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            cleaned_title = cleantitle.get(title)
            ep_id = int(episode)
            season_id = int(season)
            result = requests.get(query, headers=headers, timeout=30).json()
            results = result['series']
            for item in results:
                r_title = item['label'].encode('utf-8')
                r_link = item['seo'].encode('utf-8')
                if cleaned_title == cleantitle.get(r_title):
                    r_page = self.base_link + "/" + r_link
                    print("WATCHEPISODES r1", r_title, r_page)
                    r_html = BeautifulSoup(requests.get(r_page, headers=headers, timeout=30).content)
                    r = r_html.findAll('div', attrs={'class': re.compile(r'\s*el-item\s*')})
                    for container in r:
                        try:
                            r_href = container.findAll('a')[0]['href'].encode('utf-8')
                            r_title = container.findAll('a')[0]['title'].encode('utf-8')
                            print("WATCHEPISODES r3", r_href, r_title)
                            episode_check = "[sS]%02d[eE]%02d" % (int(season), int(episode))
                            match = re.search(episode_check, r_title)
                            if match:
                                print("WATCHEPISODES PASSED EPISODE", r_href)
                                self.zen_url.append(r_href)
                            else:
                                match2 = re.search(episode_check, r_href)
                                if match2:
                                    self.zen_url.append(r_href)
                        except:
                            pass
            print("WATCHEPISODES LIST", self.zen_url)
            return self.zen_url
        except:
            pass
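The episode filter above builds a case-insensitive SxxExx pattern and tries it against both the link title and the href. Shown standalone:

import re

episode_check = "[sS]%02d[eE]%02d" % (int("2"), int("5"))
print(episode_check)                                        # [sS]02[eE]05
print(bool(re.search(episode_check, 'Show-S02E05-watch')))  # True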
Example No. 53
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return
            num = base64.b64decode('aHR0cDovL3RoZXR2ZGIuY29tL2FwaS8xRDYyRjJGOTAwMzBDNDQ0L3Nlcmllcy8lcy9kZWZhdWx0LyUwMWQvJTAxZA==')
            num = num % (tvdb, int(season), int(episode))
            num = client.request(num)
            num = client.parseDOM(num, 'absolute_number')[0]
            absolute_id = num.encode('utf-8')
            # print("ANIMETOON EPISODES", season, episode, num)
            headers = {'User-Agent': random_agent()}
            r = BeautifulSoup(requests.get(url, headers=headers).content)
            r = r.findAll('div', attrs={'id': 'videos'})
            for containers in r:
                r_block = containers.findAll('a')
                for links in r_block:
                    ep_href = links['href'].encode('utf-8')
                    # print("ANIMETOON", ep_href)
                    if "-season-" in ep_href:
                        # print("ANIMETOON SEASON MATCHING")
                        checkseason = re.search("-season-(\d+)-", ep_href)
                        if checkseason:
                            checkseason = checkseason.group(1)
                            if checkseason == season:
                                checkepisode = re.search("-episode-(\d+)", ep_href)
                                if checkepisode:
                                    checkepisode = checkepisode.group(1)
                                    if checkepisode == episode:
                                        # print("ANIMETOON PASSED", ep_href)
                                        url = ep_href
                                        return url

                    else:
                        # print("ANIMETOON ABSOLUTEID MATCHING")
                        checkepisode = re.search("-episode-(\d+)", ep_href)
                        if checkepisode:
                            checkepisode = checkepisode.group(1)
                            if checkepisode == absolute_id:
                                # print("ANIMETOON PASSED", ep_href)
                                url = ep_href
                                return url

        except:
            return
Example No. 54
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            headers = {'User-Agent': random_agent()}
            html = OPEN_URL(url)
            r = BeautifulSoup(html.content)
            r = r.findAll('tr')
            for items in r:
                href = items.findAll('a')[0]['href'].encode('utf-8')
                print("AFMOVIE R2", href)
                try:
                    host = re.findall(r'([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                except:
                    host = 'Afmovies'
                if host not in hostDict: continue
                sources.append({'source': host, 'quality': 'SD', 'provider': 'Afmovies', 'url': href, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example No. 55
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            headers = {'User-Agent': random_agent()}
            for url in self.zen_url:
                html = BeautifulSoup(requests.get(url, headers=headers, timeout=30).content)
                r = html.findAll('source')
                for r_source in r:
                    url = r_source['src'].encode('utf-8')
                    quality = r_source['data-res'].encode('utf-8')
                    if "1080" in quality: quality = "1080p"
                    elif "720" in quality: quality = "HD"
                    else: quality = "SD"
                    # print ("MOVIEXK SOURCES", url, quality)
                    sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Moviexk', 'url': url, 'direct': True, 'debridonly': False})
            return sources
        except:
            return sources
Example No. 56
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return
            url = url.replace('https', 'http')
            num = base64.b64decode('aHR0cDovL3RoZXR2ZGIuY29tL2FwaS8xRDYyRjJGOTAwMzBDNDQ0L3Nlcmllcy8lcy9kZWZhdWx0LyUwMWQvJTAxZA==')
            num = num % (tvdb, int(season), int(episode))
            num = client.request(num)
            num = client.parseDOM(num, 'absolute_number')[0]
            absolute_id = num.encode('utf-8')
            print("WATCHCARTOON EPISODES", season, episode, absolute_id)
            headers = {'User-Agent': random_agent()}
            r = requests.get(url, headers=headers).text
            r = re.compile('<a href="(.+?)" rel="bookmark" title=".+?"').findall(r)
            for ep_href in r:
                # print("WATCHCARTOON EPISODES", ep_href)
                if "-season-" in ep_href:
                    # print("WATCHCARTOON SEASON MATCHING")
                    checkseason = re.search("-season-(\d+)-", ep_href)
                    if checkseason:
                        checkseason = checkseason.group(1)
                        if checkseason == season:
                            checkepisode = re.search("-episode-(\d+)", ep_href)
                            if checkepisode:
                                checkepisode = checkepisode.group(1)
                                if checkepisode == episode:
                                    print("WATCHCARTOON PASSED", ep_href)
                                    url = ep_href
                                    return url

                else:
                    # print("WATCHCARTOON ABSOLUTEID MATCHING")
                    checkepisode = re.search("-episode-(\d+)", ep_href)
                    if checkepisode:
                        checkepisode = checkepisode.group(1)
                        if checkepisode == absolute_id:
                            # print("WATCHCARTOON PASSED", ep_href)
                            url = ep_href
                            return url

        except:
            return