Example 1
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            # title = geturl(tvshowtitle)
            check = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle_query(tvshowtitle))

            q = self.search_link_2 % q
            print("WATCHSERIES SEARCH", q, year)

            r = client.request(q, timeout='10')

            r = re.compile(
                '<a href="(.+?)" title="(.+?)" target="_blank">(.+?)</a>'
            ).findall(r)
            for u, t, t2 in r:

                u = u.encode('utf-8')
                t = t.encode('utf-8')
                t2 = t2.encode('utf-8')
                print("WATCHSERIES SEARCH 2", u, t, t2)
                # if not year in t2:
                # if not year in t: raise Exception()
                if not title_match(cleantitle_get(t), check,
                                   amount=0.9) == True:
                    raise Exception()
                print("WATCHSERIES PASSED", u)

                url = u.encode('utf-8')
            return url
        except:
            return
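
The helper functions used above (cleantitle_get, cleantitle_query, title_match) are not defined in these snippets. A minimal sketch of what the fuzzy title matcher could look like, assuming amount is a 0-1 similarity threshold (the names and behaviour here are assumptions, not the project's actual implementation):

    import difflib

    def title_match(cleaned, check, amount=0.9):
        # Accept an exact match outright; otherwise compare a similarity
        # ratio against the requested threshold.
        if cleaned == check:
            return True
        return difflib.SequenceMatcher(None, cleaned, check).ratio() >= amount
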
Example 2
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            # title = geturl(tvshowtitle)
            check = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle_query(tvshowtitle))
            
            q = self.search_link_2 % q
            print("WATCHSERIES SEARCH", q, year)

            r = client.request(q, timeout='10')

            r = re.compile('<a href="(.+?)" title="(.+?)" target="_blank">(.+?)</a>').findall(r)
            for u, t, t2 in r:

                u = u.encode('utf-8')
                t = t.encode('utf-8')
                t2 = t2.encode('utf-8')
                print("WATCHSERIES SEARCH 2", u, t, t2)
                # if not year in t2:
                #     if not year in t: raise Exception()
                if not title_match(cleantitle_get(t), check, amount=0.9) == True: raise Exception()
                print("WATCHSERIES PASSED", u)

                url = u.encode('utf-8')
            return url
        except:
            return
Example 3
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.zen_url = []
        try:

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)

            data['season'], data['episode'] = season, episode
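            # Build the chapter label to search for below; e.g. season 1,
            # episode 5 becomes "S01" + "E005" -> "S01E005".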
            season = "S%02d" % int(data['season'])
            episode = "%02d" % int(data['episode'])
            episode = "E0" + episode
            episodecheck = season + episode
            print("CINEMABOX episodecheck", episodecheck)

            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            print("CINEMABOX query", query)
            r = OPEN_URL(query, mobile=True, timeout=30).json()

            html = r['data']['films']
            for item in html:
                # print ("CINEMABOX ITEMS 3", item)
                t = item['title'].encode('utf-8')
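                # Pull the numeric id out of the item's dict representation
                # with a regex rather than reading item['id'] directly.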
                h = re.findall('''['"]id['"]\s*:\s*(\d+)''', str(item))[0]

                if title_match(cleanmovie, cleantitle_get(t),
                               amount=1.0) == True:

                    s = self.sources_link % h
                    s = urlparse.urljoin(self.base_link, s)
                    print("CINEMABOX PASSED 4", t, h, s)
                    s = OPEN_URL(s, mobile=True).json()
                    s = s['data']['chapters']
                    if len(s) > 0:
                        for src in s:
                            name = src['title'].encode('utf8')

                            if episodecheck.lower() == name.lower():

                                id = re.findall('''['"]id['"]\s*:\s*(\d+)''',
                                                str(src))[0]
                                print("CINEMABOX PASSED 6", name.lower())

                    stream = self.stream_link % id
                    stream = urlparse.urljoin(self.base_link, stream)
                    self.zen_url.append(stream)

                    print(">>>>>>>>> Cinemabox FOUND LINK", stream)

            return self.zen_url

        except:
            return
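
These scraper methods also assume URL-template attributes on the class (base_link, search_link, sources_link, stream_link), each formatted with a single value via %. A rough sketch of the shape they would need to have, with purely hypothetical paths:

    base_link = 'http://example.invalid'            # hypothetical host
    search_link = '/api/search?q=%s'                # hypothetical search path
    sources_link = '/api/film/%s/chapters'          # hypothetical sources path
    stream_link = '/api/chapter/%s/stream'          # hypothetical stream path
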
Example 4
    def movie(self, imdb, title, year):
        self.zen_url = []
        try:

            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            print("CINEMABOX query", query)
            r = OPEN_URL(query, mobile=True, timeout=30).json()
            print("CINEMABOX ITEMS", r)
            html = r['data']['films']
            for item in html:
                print("CINEMABOX ITEMS 3", item)
                t = item['title'].encode('utf-8')
                h = re.findall('''['"]id['"]\s*:\s*(\d+)''', str(item))[0]
                print("CINEMABOX ITEMS 4", t, h)
                if title_match(cleanmovie, cleantitle_get(t),
                               amount=1.0) == True:
                    if year in item['publishDate']:
                        s = self.sources_link % h
                        s = urlparse.urljoin(self.base_link, s)
                        print("CINEMABOX ITEMS PASSED 5", t, h, s)
                        s = OPEN_URL(s, mobile=True).json()
                        s = s['data']['chapters']
                        if len(s) > 0:
                            for src in s:
                                name = src['title'].encode('utf8')
                                if title_match(cleanmovie,
                                               cleantitle_get(name),
                                               amount=1.0) == True:
                                    id = re.findall(
                                        '''['"]id['"]\s*:\s*(\d+)''',
                                        str(src))[0]

                        stream = self.stream_link % id
                        stream = urlparse.urljoin(self.base_link, stream)
                        self.zen_url.append(stream)

                        print(">>>>>>>>> Cinemabox FOUND LINK", stream)

            return self.zen_url

        except:
            return
Example 5
    def movie(self, imdb, title, year):
        self.elysium_url = []
        try:

            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            print("CINEMABOX query", query)
            r = OPEN_URL(query, mobile=True, timeout=30).json()
            print("CINEMABOX ITEMS", r)
            html = r['data']['films']
            for item in html:
                print("CINEMABOX ITEMS 3", item)
                t = item['title'].encode('utf-8')
                h = re.findall('''['"]id['"]\s*:\s*(\d+)''', str(item))[0]
                print("CINEMABOX ITEMS 4", t, h)
                if title_match(cleanmovie, cleantitle_get(t), amount=1.0) == True:
                    if year in item['publishDate']:
                        s = self.sources_link % h
                        s = urlparse.urljoin(self.base_link, s)
                        print("CINEMABOX ITEMS PASSED 5", t, h, s)
                        s = OPEN_URL(s, mobile=True).json()
                        s = s['data']['chapters']
                        if len(s) > 0:
                            for src in s:
                                name = src['title'].encode('utf8')
                                if title_match(cleanmovie, cleantitle_get(name), amount=1.0) == True:
                                    id = re.findall('''['"]id['"]\s*:\s*(\d+)''', str(src))[0]

                        stream = self.stream_link % id
                        stream = urlparse.urljoin(self.base_link, stream)
                        self.elysium_url.append(stream)

                        print(">>>>>>>>> Cinemabox FOUND LINK", stream)

            return self.elysium_url

        except:
            return
Example 6
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            self.zen_url = []
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])
            print("SERIESONLINE EPISODES STARTED")
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = cleantitle_geturl(title) + "-season-" + season
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)
            cleaned_title = cleantitle_get(title) + "season" + season
            print("SERIESONLINE EPISODES", q)
            html = BeautifulSoup(client.request(r))
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
                links = result.findAll('a')
                for link in links:
                    link_title = link['title'].encode('utf-8')
                    href = link['href'].encode('utf-8')
                    href = urlparse.urljoin(self.base_link, href)
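                    # Normalise the link so it always ends in /watching.html.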
                    href = re.sub('/watching.html', '', href)
                    href = href + '/watching.html'

                    # print("SERIESONLINE", link_title, href)
                    if title_match(cleantitle_get(link_title),
                                   cleaned_title) == True:
                        print("SERIESONLINE FOUND MATCH", link_title, href)
                        referer = href
                        html = client.request(href)
                        s = BeautifulSoup(html)

                        s = s.findAll('div', attrs={'class': 'les-content'})
                        for x in s:
                            try:
                                items = x.findAll('a')
                                for u in items:

                                    player = u['player-data'].encode('utf-8')
                                    ep_id = u['episode-data'].encode('utf-8')
                                    if ep_id == episode:

                                        if not player in self.zen_url:
                                            self.zen_url.append(
                                                [player, referer])
                            except:
                                pass

            print("SERIESONLINE PASSED", self.zen_url)
            return self.zen_url

        except:
            return
Example 7
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            self.elysium_url = []
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])
            print ("SERIESONLINE EPISODES STARTED")
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = cleantitle_geturl(title) + "-season-" + season
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)
            cleaned_title = cleantitle_get(title) + "season" + season
            print ("SERIESONLINE EPISODES", q)
            html = BeautifulSoup(client.request(r))
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
                links = result.findAll('a')
                for link in links:
                    link_title = link['title'].encode('utf-8')
                    href = link['href'].encode('utf-8')
                    href = urlparse.urljoin(self.base_link, href)
                    href = re.sub('/watching.html','', href)
                    href = href + '/watching.html'

                    # print("SERIESONLINE", link_title, href)
                    if title_match(cleantitle_get(link_title), cleaned_title) == True:
						print("SERIESONLINE FOUND MATCH", link_title, href)
						referer = href
						html = client.request(href)
   						s = BeautifulSoup(html)
							
						s = s.findAll('div', attrs={'class': 'les-content'})
						for x in s:
							try:
								items = x.findAll('a')
								for u in items:
									
									player = u['player-data'].encode('utf-8')
									ep_id = u['episode-data'].encode('utf-8')
									if ep_id == episode: 
									
										if not player in self.elysium_url:	self.elysium_url.append([player, referer])
							except:
								pass
							
            print("SERIESONLINE PASSED", self.elysium_url)
            return self.elysium_url

        except:
            return
Example 8
    def movie(self, imdb, title, year):
        try:
            self.zen_url = []

            cleaned_title = cleantitle_get(title)
            title = cleantitle_query(title)

            q = self.search_link % (cleantitle_geturl(title))
            r = urlparse.urljoin(self.base_link, q)
            print("SERIESONLINE EPISODES", r)
            html = BeautifulSoup(client.request(r))
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
                links = result.findAll('a')
                for link in links:
                    link_title = link['title'].encode('utf-8')
                    href = link['href'].encode('utf-8')
                    href = urlparse.urljoin(self.base_link, href)
                    href = re.sub('/watching.html', '', href)
                    href = href + '/watching.html'

                    print("SERIESONLINE PASSED", link_title, href)
                    if title_match(cleantitle_get(link_title),
                                   cleaned_title) == True:
                        referer = href
                        html = client.request(href)
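                        # Confirm the year against the "Release:" field of the
                        # watch page before collecting player links.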

                        match = re.findall(
                            '<strong>Release:</strong>(.+?)</p>', html)[0]
                        if year in match:

                            s = BeautifulSoup(html)

                            s = s.findAll('div',
                                          attrs={'class': 'les-content'})
                            for u in s:
                                print("SERIESONLINE PASSED u", u)
                                player = u.findAll(
                                    'a')[0]['player-data'].encode('utf-8')

                                if not player in self.zen_url:
                                    self.zen_url.append([player, referer])

                            return self.zen_url
        except:
            return
Example 9
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            self.zen_url = []
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = cleantitle.getsearch(data['tvshowtitle'])
            print ("ONEMOVIES EPISODES STARTED")
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            query = cleantitle_geturl(title) + "-season-" + season
            q = self.search_link % (query)
            r = urlparse.urljoin(self.base_link, q)
            cleaned_title = cleantitle_get(title) + "season" + season
            print ("ONEMOVIES EPISODES", q)
            html = BeautifulSoup(OPEN_URL(r).content)
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
                links = result.findAll('a')
                for link in links:
                    link_title = link['title'].encode('utf-8')
                    href = link['href'].encode('utf-8')
                    href = urlparse.urljoin(self.base_link, href)
                    href = re.sub('/watching.html','', href)
                    href = href + '/watching.html'

                    # print("ONEMOVIES", link_title, href)
                    if title_match(cleantitle_get(link_title), cleaned_title) == True:
						print("ONEMOVIES FOUND MATCH", link_title, href)
						html = OPEN_URL(href).content
   						s = BeautifulSoup(html)
							
						s = s.findAll('div', attrs={'class': 'les-content'})
						for u in s:
							print("ONEMOVIES PASSED u", u)
							player = u.findAll('a')[0]['player-data'].encode('utf-8')
							ep_id = u.findAll('a')[0]['episode-data'].encode('utf-8')
							if not ep_id == episode: raise Exception()
								
							if not player in self.zen_url:	self.zen_url.append(player)
							

						return self.zen_url

        except:
            return
Example 10
    def movie(self, imdb, title, year):
        try:
            self.elysium_url = []


            cleaned_title = cleantitle_get(title)
            title = cleantitle_query(title)
                    
            q = self.search_link % (cleantitle_geturl(title))
            r = urlparse.urljoin(self.base_link, q)
            print ("SERIESONLINE EPISODES", r)
            html = BeautifulSoup(client.request(r))
            containers = html.findAll('div', attrs={'class': 'ml-item'})
            for result in containers:
                links = result.findAll('a')
                for link in links:
                    link_title = link['title'].encode('utf-8')
                    href = link['href'].encode('utf-8')
                    href = urlparse.urljoin(self.base_link, href)
                    href = re.sub('/watching.html','', href)
                    href = href + '/watching.html'

                    print("SERIESONLINE PASSED", link_title, href)
                    if title_match(cleantitle_get(link_title), cleaned_title) == True:
                        referer = href
                        html = client.request(href)
                        
                        match = re.findall('<strong>Release:</strong>(.+?)</p>', html)[0]
                        if year in match:
							
                            s = BeautifulSoup(html)

                            s = s.findAll('div', attrs={'class': 'les-content'})
                            for u in s:
                                print("SERIESONLINE PASSED u", u)
                                player = u.findAll('a')[0]['player-data'].encode('utf-8')

                                if not player in self.elysium_url:
                                    self.elysium_url.append([player, referer])

                            return self.elysium_url
        except:
            return
Example 11
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        self.elysium_url = []
        try:
			
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)

            data['season'], data['episode'] = season, episode
            season = "S%02d" % int(data['season'])
            episode = "%02d" % int(data['episode'])
            episode = "E0" + episode
            episodecheck = season + episode
            print("CINEMABOX episodecheck", episodecheck)

            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            print("CINEMABOX query", query)
            r = OPEN_URL(query, mobile=True, timeout=30).json()

            html = r['data']['films']
            for item in html:
                # print ("CINEMABOX ITEMS 3", item)
                t = item['title'].encode('utf-8')
                h = re.findall('''['"]id['"]\s*:\s*(\d+)''', str(item))[0]

                if title_match(cleanmovie, cleantitle_get(t), amount=1.0) == True:

                    s = self.sources_link % h
                    s = urlparse.urljoin(self.base_link, s)
                    print("CINEMABOX PASSED 4", t, h, s)
                    s = OPEN_URL(s, mobile=True).json()
                    s = s['data']['chapters']
                    if len(s) > 0:
                        for src in s:
                            name = src['title'].encode('utf8')

                            if episodecheck.lower() == name.lower():

                                id = re.findall('''['"]id['"]\s*:\s*(\d+)''', str(src))[0]
                                print("CINEMABOX PASSED 6", name.lower())

                    stream = self.stream_link % id
                    stream = urlparse.urljoin(self.base_link, stream)
                    self.elysium_url.append(stream)

                    print(">>>>>>>>> Cinemabox FOUND LINK", stream)

            return self.elysium_url

        except:
            return