def tvrageEpisode(self, tvrage, title, date, season, episode):
    monthMap = {'01':'Jan', '02':'Feb', '03':'Mar', '04':'Apr', '05':'May', '06':'Jun', '07':'Jul', '08':'Aug', '09':'Sep', '10':'Oct', '11':'Nov', '12':'Dec'}

    title = cleantitle.tv(title)

    try:
        url = self.tvrage_link % tvrage
        result = client.request(url, timeout='5')

        search = re.compile('<td.+?><a.+?title=.+?season.+?episode.+?>(\d+?)x(\d+?)<.+?<td.+?>(\d+?/.+?/\d+?)<.+?<td.+?>.+?href=.+?>(.+?)<').findall(result.replace('\n',''))

        d = '%02d/%s/%s' % (int(date.split('-')[2]), monthMap[date.split('-')[1]], date.split('-')[0])

        match = [i for i in search if d == i[2]]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))

        match = [i for i in search if title == cleantitle.tv(i[3])]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
    except:
        pass

    try:
        url = self.epguides_link % tvrage
        result = client.request(url, timeout='5')

        search = re.compile('\d+?,(\d+?),(\d+?),.+?,(\d+?/.+?/\d+?),"(.+?)",.+?,".+?"').findall(result)

        d = '%02d/%s/%s' % (int(date.split('-')[2]), monthMap[date.split('-')[1]], date.split('-')[0][-2:])

        match = [i for i in search if d == i[2]]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))

        match = [i for i in search if title == cleantitle.tv(i[3])]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
    except:
        pass
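# A hedged usage sketch (not from the original sources): tvrageEpisode expects the
# air date as 'YYYY-MM-DD' and rebuilds it as 'DD/Mon/YYYY' (tvrage) or 'DD/Mon/YY'
# (epguides) before matching. The tvrage id below is a placeholder, not a real value.
#
#     season, episode = self.tvrageEpisode('12345', 'some show', '2015-01-28', '4', '11')
#     # returns a tuple of zero-stripped strings, e.g. ('4', '11'), or None if nothing matches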
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
        query = self.search_link % urllib.quote(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        tvshowtitle = cleantitle.tv(tvshowtitle)
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        result += '?S%02dE%02d' % (int(season), int(episode))
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
        season, episode = '%01d' % int(season), '%01d' % int(episode)
        query = '%s season %s' % (tvshowtitle, season)
        query = self.search_link % (urllib.quote_plus(query))
        result = client.request(query)
        result = json.loads(result)
        result = result['results']
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [(i['url'], i['titleNoFormatting']) for i in result]
        result = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+?[(]\d{4}[)])').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][-1].lower()) for i in result if len(i[1]) > 0]
        result = [(i[0], re.compile('(.+) season (\d+)\s*[(](\d{4})[)]').findall(i[1])) for i in result]
        result = [(i[0], cleantitle.tv(i[1][0][0]), i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        result += '?S%02dE%02d' % (int(season), int(episode))
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = result.decode("iso-8859-1").encode("utf-8")
        result = client.parseDOM(result, "ol", attrs={"id": "searchresult"})[0]
        result = client.parseDOM(result, "h2")
        tvshowtitle = cleantitle.tv(tvshowtitle)
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
        result = [(i[0], re.sub("<.+?>|</.+?>", "", i[1])) for i in result]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = result[-1][0]
        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
        season, episode = '%01d' % int(season), '%01d' % int(episode)
        query = '%s season %s' % (tvshowtitle, season)
        query = self.search_link % (urllib.quote_plus(query))
        result = client.source(query)
        result = json.loads(result)
        result = result['results']
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [(i['url'], i['titleNoFormatting']) for i in result]
        result = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+?[(]\d{4}[)])').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][-1].lower()) for i in result if len(i[1]) > 0]
        result = [(i[0], re.compile('(.+) season (\d+)\s*[(](\d{4})[)]').findall(i[1])) for i in result]
        result = [(i[0], cleantitle.tv(i[1][0][0]), i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        result += '?S%02dE%02d' % (int(season), int(episode))
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        title = url
        hdlr = 'S%02dE%02d' % (int(season), int(episode))
        query = self.search_link % (urllib.quote_plus('%s "%s"' % (title, hdlr)))
        query = urlparse.urljoin(self.tvbase_link, query)
        result = client.source(query)
        result = client.parseDOM(result, 'header', attrs={'class': 'post-title'})
        title = cleantitle.tv(title)
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]
        url = result.replace(self.tvbase_link, '')
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = cache.get(self.moviefarsi_shows, 168, table='chronic')
        if result == None: return
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]
        url = urlparse.urljoin(self.base_link, result)
        result = client.source(url, cookie=self.cookie_link)
        if result == None: result = cloudflare.source(url)
        result = client.parseDOM(result, 'article', attrs={'id': 'post-\d*'})[0]
        y = client.parseDOM(result, 'strong')[0]
        y = re.compile('(\d{4})').findall(y)[0]
        if not y in years: return
        result = client.parseDOM(result, 'a', ret='href')[0]
        url = re.compile('//.+?/(\d*)').findall(result)[0]
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = urlparse.urljoin(self.base_link, self.tvsearch_link)
        result = client.source(query)
        result = client.parseDOM(result, 'div', attrs={'class': 'movies_content'})[0]
        tvshowtitle = cleantitle.tv(tvshowtitle)
        result = re.compile('(<li>.+?</li>)').findall(result)
        result = [re.compile('href="(.+?)">(.+?)<').findall(i) for i in result]
        result = [i[0] for i in result if len(i) > 0]
        result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]
        check = urlparse.urljoin(self.base_link, result)
        check = client.source(check)
        if not str(imdb) in check: raise Exception()
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = client.source(self.base_link, headers=self.headers)
        if not "'index show'" in result:
            cookie = client.source(self.sign_link, headers=self.headers, post=self.key_link, output="cookie")
            result = client.source(self.base_link, headers=self.headers, cookie=cookie)
        result = client.parseDOM(result, "div", attrs={"class": "index show"})
        result = [(client.parseDOM(i, "a", attrs={"class": "name"})[0], client.parseDOM(i, "span", attrs={"class": "value"})[0], client.parseDOM(i, "a", ret="href")[0]) for i in result]
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [i[2] for i in result if tvshowtitle == cleantitle.tv(i[0])][0]
        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'ol', attrs={'id': 'searchresult'})[0]
        result = client.parseDOM(result, 'h2')
        tvshowtitle = cleantitle.tv(tvshowtitle)
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in result]
        result = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in result]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = result[-1][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.moviesearch_link % (urllib.unquote(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        control.log('ALLTUBE query %s' % query)
        result = client.source(query)
        result = json.loads(result)
        control.log('ALLTUBE result %s' % result)
        control.log('ALLTUBE tvshowtitle %s' % tvshowtitle)
        tvshowtitle = cleantitle.tv(tvshowtitle)
        control.log('ALLTUBE cleaned tvshowtitle %s' % tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'h2', ret='title')[0], client.parseDOM(i, 'span', attrs={'itemprop': 'copyrightYear'})) for i in result]
        result = [i for i in result if len(i[2]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[2][0] for x in years)][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query)
        result = client.parseDOM(result, 'div', attrs={'class': 'tv-series-single'})
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', {'class': 'title'}), re.compile('<span>\s*(\d{4})\s*</span>').findall(i)) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        result = [(i[0], re.compile('([^>]+)$').findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(tvshowtitle)))
        result = self.__request(query)
        result = json.loads(result)
        result = result['categories']
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]
        result = [(i['catalog_id'], i['catalog_name'].encode('utf-8')) for i in result]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % urllib.quote_plus(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = client.parseDOM(result, 'article', attrs={'id': 'post-\d*'})
        match = [i for i in result if imdb in i]
        if len(match) == 0:
            tvshowtitle = cleantitle.tv(tvshowtitle)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
            result = [i for i in result if any(x in i for x in years)]
            result = [(client.parseDOM(i, 'a', ret='title'), i) for i in result]
            result = [(i[0][0], i[1]) for i in result if len(i[0]) > 0]
            result = [(re.sub(r'[^\x00-\x7F]+', ' ', i[0]).strip(), i[1]) for i in result]
            match = [i[1] for i in result if tvshowtitle == cleantitle.tv(i[0])]
        result = match[0]
        result = client.parseDOM(result, 'a', ret='href', attrs={'class': 'more-link'})[0]
        url = re.compile('//.+?/(\d*)').findall(result)[0]
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
        query = self.search_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query)
        result = client.parseDOM(result, 'div', attrs={'id': 'post-.+?'})
        tvshowtitle = cleantitle.tv(tvshowtitle)
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a', ret='title')[0], client.parseDOM(i, 'div', attrs={'class': 'status status-year'})) for i in result]
        result = [x for y, x in enumerate(result) if x not in result[:y]]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]
        result = [(i[0], re.compile('(.+?) Season (\d*)$').findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        url = urlparse.urljoin(self.base_link, result)
        result = cloudflare.source(url)
        result = client.parseDOM(result, 'div', attrs={'id': 'episode_show'})[0]
        result = re.compile('(<a.+?</a>)').findall(result)
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in result]
        result = [i[0] for i in result if episode == i[1]][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        title = url
        hdlr = 'S%02dE%02d' % (int(season), int(episode))
        query = self.search_link % (urllib.quote_plus('%s "%s"' % (title, hdlr)))
        query = urlparse.urljoin(self.tvbase_link, query)
        result = client.source(query)
        result = client.parseDOM(result, 'header', attrs={'class': 'post-title'})
        title = cleantitle.tv(title)
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]
        url = result.replace(self.tvbase_link, '')
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(tvshowtitle)))
        query += self.__extra()
        result = client.source(query, headers=self.headers)
        result = json.loads(result)
        result = self.__decrypt(self.data_key, result['data'])
        result = json.loads(result)
        result = result['categories']
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]
        result = [(i['catalog_id'], i['catalog_name'].encode('utf-8'), str(i['type_film'])) for i in result]
        result = [i for i in result if i[2] == '1']
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        query = '%s "Season %s" "Episode %s"' % (url, season, episode)
        query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))
        result = cloudflare.source(query)
        if result == None: result = client.source(self.proxy_link + urllib.quote_plus(query))
        r = client.parseDOM(result, 'li', attrs={'class': 'first element.+?'})
        r += client.parseDOM(result, 'li', attrs={'class': 'element.+?'})
        r += client.parseDOM(result, 'header', attrs={'class': 'entry-header'})
        tvshowtitle = cleantitle.tv(url)
        result = [(client.parseDOM(i, 'a', ret='href'), re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall(i)) for i in r]
        result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], i[1][0].split('>')[-1], i[1][1], i[1][2]) for i in result]
        result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
        result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]
        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.tvsearch_link % urllib.quote_plus(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs={'class': 'item'})
        tvshowtitle = 'watch' + cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a', ret='title')[0]) for i in result]
        result = [i for i in result if '-tv-show-online-' in i[0]]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        result = result.split('-tv-show-online-', 1)[0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = client.source(self.base_link)
        if not "'index show'" in str(result) and not (self.user == '' or self.password == ''):
            cookie = client.source(self.sign_link, post=self.post, output='cookie')
            result = client.source(self.base_link, cookie=cookie)
        result = client.parseDOM(result, 'div', attrs={'class': 'index show'})
        result = [(client.parseDOM(i, 'a', attrs={'class': 'name'})[0], client.parseDOM(i, 'span', attrs={'class': 'value'})[0], client.parseDOM(i, 'a', ret='href')[0]) for i in result]
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [i[2] for i in result if tvshowtitle == cleantitle.tv(i[0])][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = urlparse.urljoin(self.base_link, self.ajax_link)
        post = self.search_link % (urllib.quote_plus(tvshowtitle))
        result = client.source(query, post=post, headers=self.headers)
        result = json.loads(result)
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), re.compile('<h5>.+?(\d{4}).+?</h5>').findall(i)) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        url = urlparse.urljoin(self.base_link, result)
        result = client.source(url)
        url = client.parseDOM(result, 'div', ret='value', attrs={'id': 'icerikid'})[0]
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = urlparse.urljoin(self.base_link, self.tvsearch_link)
        result = client.source(query)
        tvshowtitle = cleantitle.tv(tvshowtitle)
        result = zip(client.parseDOM(result, 'a', {'class': 'underilne'}, 'href'), client.parseDOM(result, 'a', {'class': 'underilne'}))
        result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]
        check = urlparse.urljoin(self.base_link, result)
        check = client.source(check)
        if not str(imdb) in check: raise Exception()
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = json.loads(result)
        result = result['data']['films']
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
        result = [(i['id'], i['title'].encode('utf-8')) for i in result]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])][:2]
        result = [(i[0], self.base_link + self.detail_link % i[0]) for i in result]
        result = [(i[0], client.source(i[1])) for i in result]
        result = [(i[0], json.loads(i[1])['data']['state']) for i in result]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link
        post = {'searchquery': tvshowtitle, 'searchin': '2'}
        result = ''
        links = [self.link_1, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
            if 'widget search-page' in str(result): break
        result = client.parseDOM(result, 'div', attrs={'class': 'widget search-page'})[0]
        result = client.parseDOM(result, 'td')
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href')[-1], client.parseDOM(i, 'a')[-1]) for i in result]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        query = '%s "Season %s" "Episode %s"' % (url, season, episode)
        query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))
        result = cloudflare.source(query)
        if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))
        r = client.parseDOM(result, 'li', attrs={'class': 'first element.+?'})
        r += client.parseDOM(result, 'li', attrs={'class': 'element.+?'})
        r += client.parseDOM(result, 'header', attrs={'class': 'entry-header'})
        tvshowtitle = cleantitle.tv(url)
        result = [(client.parseDOM(i, 'a', ret='href'), re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall(i)) for i in r]
        result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], i[1][0].split('>')[-1], i[1][1], i[1][2]) for i in result]
        result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
        result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]
        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.tvsearch_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = client.parseDOM(result, 'div', attrs={'class': 'searchResult'})
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'h2', ret='title')[0], client.parseDOM(i, 'span', attrs={'itemprop': 'copyrightYear'})) for i in result]
        result = [i for i in result if len(i[2]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[2][0] for x in years)][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        query = '%s S%02dE%02d' % (url, int(season), int(episode))
        query = urlparse.urljoin(self.tvbase_link, self.search_link + urllib.quote_plus(query))
        result = client.source(query)
        #if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))
        r = client.parseDOM(result, 'li', attrs={'class': 'first element.+?'})
        r += client.parseDOM(result, 'li', attrs={'class': 'element.+?'})
        r += client.parseDOM(result, 'header', attrs={'class': 'entry-heade.+?'})
        tvshowtitle = cleantitle.tv(url)
        hdlr = 'S%02dE%02d' % (int(season), int(episode))
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        result = [(i[0][0], i[1][0].upper()) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?) (S\d+E\d+)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][-1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]
        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
        query = self.search_link % urllib.quote(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        tvshowtitle = cleantitle.tv(tvshowtitle)
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        result += '?S%02dE%02d' % (int(season), int(episode))
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        tvshowtitle = cleantitle.tv(tvshowtitle)
        query = urlparse.urljoin(self.base_link, self.search_link)
        result = client.request(query)
        result = re.compile('(<li>.+?</li>)').findall(result)
        result = [re.compile('href="(.+?)">(.+?)<').findall(i) for i in result]
        result = [i[0] for i in result if len(i[0]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        title = url
        hdlr = 'S%02dE%02d' % (int(season), int(episode))
        query = self.tvsearch_link % (urllib.quote_plus('"%s %s"' % (title, hdlr)))
        result = client.source(query)
        tvshowtitle = cleantitle.tv(title)
        result = client.parseDOM(result, 'h3', attrs={'class': '.+?'})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
        result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+?) %s' % hdlr).findall(i[1])) for i in result]
        result = [(i[0], i[1][0][-1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result][-1]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link
        post = {'searchquery': tvshowtitle, 'searchin': '2'}
        post = urllib.urlencode(post)
        result = ''
        links = [self.link_3]
        for base_link in links:
            headers = {'Referer': base_link + "/advance-search"}
            result = client.request(urlparse.urljoin(base_link, query), post=post, headers=headers)
            if 'widget search-page' in str(result): break
        result = client.parseDOM(result, 'div', attrs={'class': 'widget search-page'})[0]
        result = client.parseDOM(result, 'td')
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href')[-1], client.parseDOM(i, 'a')[-1]) for i in result]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % (str(int(year) - 1), str(int(year) + 1), urllib.quote_plus(tvshowtitle))
        result = ''
        result = client.request(urlparse.urljoin(self.base_link, query))
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs={'class': 'episode-summary'})[0]
        result = client.parseDOM(result, 'tr')
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]
        result = [(re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0], client.parseDOM(i, 'a')[-1]) for i in result]
        result = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
        match = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])]
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                result = client.request(self.base_link + i, headers=self.headers)
                if str(imdb) in str(result):
                    url = i
                    break
            except:
                pass
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % urllib.quote_plus(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query, safe=True)
        result = client.parseDOM(result, 'div', {'class': 'movie-grid grid-.+?'})
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'div', {'class': 'movie-grid-title'})) for i in result]
        result = [(i[0][0], i[1][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], i[1].split('<')[0].strip(), client.parseDOM(i[2], 'span', {'class': '[^"]*year'})) for i in result]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]
        result = [i for i in result if '/series/' in i[0]]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % (str(int(year) - 1), str(int(year) + 1), urllib.quote_plus(tvshowtitle))
        result = ""
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
            if "episode-summary" in str(result): break
        result = result.decode("iso-8859-1").encode("utf-8")
        result = client.parseDOM(result, "div", attrs={"class": "episode-summary"})[0]
        result = client.parseDOM(result, "tr")
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]
        result = [(re.compile("href=['|\"|\s|\<]*(.+?)['|\"|\s|\>]").findall(i)[0], client.parseDOM(i, "a")[-1]) for i in result]
        result = [(i[0], re.sub("<.+?>|</.+?>", "", i[1])) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)["u"][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
        match = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])]
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                result = client.source(base_link + i, headers=self.headers)
                if str(imdb) in str(result):
                    url = i
                    break
            except:
                pass
        url = url.encode("utf-8")
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        data = os.path.join(control.dataPath, 'movietv.db')
        try: control.deleteFile(data)
        except: pass
        data = os.path.join(control.dataPath, 'movietv2.db')
        download = True
        try: download = abs(datetime.datetime.fromtimestamp(os.path.getmtime(data)) - (datetime.datetime.now())) > datetime.timedelta(days=7)
        except: pass
        if download == True:
            result = client.source(base64.b64decode(self.data_link))
            zip = zipfile.ZipFile(StringIO.StringIO(result))
            zip.extractall(control.dataPath)
            zip.close()
        dbcon = database.connect(data)
        dbcur = dbcon.cursor()
        dbcur.execute("SELECT * FROM tvshows WHERE year = '%s'" % year)
        result = dbcur.fetchone()
        result = eval(result[1].encode('utf-8'))
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[2])]
        result = [i[0] for i in result if any(x in i[3] for x in years)][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        query = '%s S%02dE%02d' % (url, int(season), int(episode))
        query = urlparse.urljoin(self.tvbase_link, self.search_link + urllib.quote_plus(query))
        result = client.source(query)
        #if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))
        r = client.parseDOM(result, 'li', attrs={'class': 'first element.+?'})
        r += client.parseDOM(result, 'li', attrs={'class': 'element.+?'})
        r += client.parseDOM(result, 'header', attrs={'class': 'entry-heade.+?'})
        tvshowtitle = cleantitle.tv(url)
        hdlr = 'S%02dE%02d' % (int(season), int(episode))
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        result = [(i[0][0], i[1][0].upper()) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?) (S\d+E\d+)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][-1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]
        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        tvshowtitle, year = re.compile("(.+?) [(](\d{4})[)]$").findall(url)[0]
        query = self.search_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"id": "post-.+?"})
        tvshowtitle = cleantitle.tv(tvshowtitle)
        season = "%01d" % int(season)
        episode = "%01d" % int(episode)
        years = ["%s" % str(year), "%s" % str(int(year) + 1), "%s" % str(int(year) - 1)]
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0], client.parseDOM(i, "div", attrs={"class": "status status-year"})) for i in result]
        result = [x for y, x in enumerate(result) if x not in result[:y]]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]
        result = [(i[0], re.compile("(.+?) Season (\d*)$").findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        url = urlparse.urljoin(self.base_link, result)
        result = client.source(url)
        result = client.parseDOM(result, "div", attrs={"id": "episode_show"})[0]
        result = re.compile("(<a.+?</a>)").findall(result)
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
        result = [i[0] for i in result if episode == i[1]][0]
        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.search_link % (str(int(year)-1), str(int(year)+1), urllib.quote_plus(tvshowtitle))
        result = ''
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            control.log('### Watch %s' % urlparse.urljoin(base_link, query))
            result = client.request(urlparse.urljoin(base_link, query))
            if 'episode-summary' in str(result): break
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs={'class': 'episode-summary'})[0]
        result = client.parseDOM(result, 'tr')
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0], client.parseDOM(i, 'a')[-1]) for i in result]
        result = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
        match = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])]
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                result = client.request(base_link + i, headers=self.headers)
                if str(imdb) in str(result):
                    url = i
                    break
            except:
                pass
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        myses = 's%02de%02d' % (int(season), int(episode))
        result = client.request(urlparse.urljoin(self.base_link, url))
        result = re.compile("<li class='listEpisode'>.+?</>", re.DOTALL).findall(result)
        result = [re.compile("<a href='(.*?)'.*</span>(.*?)</a>").findall(i) for i in result]
        result = [i[0] for i in result if len(i[0]) > 0]
        result = [i for i in result if myses in cleantitle.tv(i[1])]
        result = [i[0] for i in result][0]
        # strip the scheme and host from the matched episode link, keeping only the path
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return
        myses = 's%02de%02d' % (int(season), int(episode))
        result = client.request(urlparse.urljoin(self.base_link, url))
        result = re.compile("<li class='listEpisode'>.+?</>", re.DOTALL).findall(result)
        result = [re.compile("<a href='(.*?)'.*</span>(.*?)</a>").findall(i) for i in result]
        result = [i[0] for i in result if len(i[0]) > 0]
        result = [i for i in result if myses in cleantitle.tv(i[1])]
        result = [i[0] for i in result][0]
        # strip the scheme and host from the matched episode link, keeping only the path
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = ''
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, self.key_link), headers=self.headers)
            if 'searchform' in str(result): break
        key = client.parseDOM(result, 'input', ret='value', attrs={'name': 'key'})[0]
        query = self.tvsearch_link % (urllib.quote_plus(re.sub('\'', '', tvshowtitle)), key)
        result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs={'class': 'index_item.+?'})
        tvshowtitle = 'watch' + cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a', ret='title')[0]) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
        match = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])]
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                result = client.source(base_link + i, headers=self.headers)
                if str(imdb) in str(result):
                    url = i
                    break
            except:
                pass
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = client.source(self.base_link)
        result = client.parseDOM(result, 'div', attrs={'id': 'fil'})[0]
        result = zip(client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a'))
        tvshowtitle = cleantitle.tv(tvshowtitle)
        result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(tvshowtitle)))
        result = self.__request(query)
        result = json.loads(result)
        result = result['categories']
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(i['catalog_id'], i['catalog_name'].encode('utf-8')) for i in result]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        tvshowtitle, year = re.compile("(.+?) [(](\d{4})[)]$").findall(url)[0]
        query = self.search_link % urllib.quote(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query)
        tvshowtitle = cleantitle.tv(tvshowtitle)
        season = "%01d" % int(season)
        episode = "%01d" % int(episode)
        years = ["%s" % str(year), "%s" % str(int(year) + 1), "%s" % str(int(year) - 1)]
        result = client.parseDOM(result, "div", attrs={"class": "ml-item"})
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "h2"), re.compile("class *= *['|\"]jt-info['|\"]>(\d{4})<").findall(i)) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        result = [(i[0], re.compile("(.+?) - Season (\d*)$").findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        result += "?S%02dE%02d" % (int(season), int(episode))
        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def dizigold_shows(self):
    try:
        result = client.source(self.base_link)
        result = client.parseDOM(result, 'div', attrs={'class': 'dizis'})[0]
        result = re.compile('href="(.+?)">(.+?)<').findall(result)
        result = [(re.sub('http.+?//.+?/', '/', i[0]), cleantitle.tv(i[1])) for i in result]
        return result
    except:
        return
def dizimag_shows(self):
    try:
        result = cloudflare.source(self.base_link)
        result = client.parseDOM(result, 'div', attrs={'id': 'fil'})[0]
        result = zip(client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a'))
        result = [(re.sub('http.+?//.+?/', '/', i[0]), cleantitle.tv(i[1])) for i in result]
        return result
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        tvshowtitle = cleantitle.tv(tvshowtitle)
        query = urlparse.urljoin(self.base_link, self.search_link)
        result = client.request(query)
        result = re.compile('(<li>.+?</li>)').findall(result)
        result = [re.compile('href="(.+?)">(.+?)<').findall(i) for i in result]
        result = [i[0] for i in result if len(i[0]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def dizibox_shows(self):
    try:
        result = client.request(self.base_link)
        result = client.parseDOM(result, 'input', {'id': 'filterAllCategories'})[0]
        result = client.parseDOM(result, 'li')
        result = zip(client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a'))
        result = [(re.sub('http.+?//.+?/', '/', i[0]), cleantitle.tv(i[1])) for i in result]
        return result
    except:
        return
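# A hedged note (assumption, not part of the original sources): the *_shows()
# helpers above return (site-relative path, cleaned title) pairs, and the
# moviefarsi get_show above consumes a similar helper through the cache module.
# A minimal matching sketch under that assumption, with the 72-hour lifetime
# chosen purely for illustration:
#
#     result = cache.get(self.dizibox_shows, 72, table='chronic')
#     tvshowtitle = cleantitle.tv(tvshowtitle)
#     url = [i[0] for i in result if tvshowtitle == i[1]][0]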
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = self.tvsearch_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = client.parseDOM(result, 'div', attrs={'class': 'searchResult'})
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'h2', ret='title')[0], client.parseDOM(i, 'span', attrs={'itemprop': 'copyrightYear'})) for i in result]
        result = [i for i in result if len(i[2]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if any(x in i[2][0] for x in years)][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        query = ' '.join([i for i in tvshowtitle.split() if i not in ['The', 'the', 'A', 'a']])
        query = self.search_link % urllib.quote_plus(query)
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = client.parseDOM(result, 'div', attrs={'id': 'video_list'})[0]
        result = result.split('</a>')
        result = [(client.parseDOM(i, 'span', attrs={'class': 'article-title'}), client.parseDOM(i, 'a', ret='href')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if not (len(i[0]) == 0 or len(i[1]) == 0)]
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[0])]
        result = [i[1] for i in result if any(x in i[0] for x in years)][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return