def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode identifier for this provider.

    `url` is the show keyword saved by get_show. POSTs a JSON keyword
    search to the site's index endpoint, matches the show title and
    season, and returns '<videoid>|<episode>' (utf-8) or None on any
    failure (bare except is this file's best-effort convention).
    """
    try:
        if url == None: return

        query = self.tvbase_link + self.index_link
        # Site expects the search terms wrapped in a JSON string under 'p'.
        post = urllib.urlencode({'a': 'retrieve', 'c': 'result', 'p': '{"KeyWord":"%s","Page":"1","NextToken":""}' % url})

        result = client.source(query, post=post)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "tr")

        # Normalized title for comparison; season is unpadded, episode
        # is zero-padded to two digits (the site's own format, presumably).
        show = cleantitle.tv(url)
        season = '%01d' % int(season)
        episode = '%02d' % int(episode)

        # Each row -> (href, bare title, season number pulled from title).
        result = [client.parseDOM(i, "h1")[0] for i in result]
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
        result = [(i[0], re.sub('\sSeason(|\s)\d*.+', '', i[1]), re.compile('\sSeason *(\d*) *').findall(i[1])[0]) for i in result]
        result = [i for i in result if show == cleantitle.tv(i[1])]
        result = [i[0] for i in result if season == i[2]][0]

        # Keep only the video id after 'v=' and append the episode marker.
        url = result.split('v=', 1)[-1]
        url = '%s|%s' % (url, episode)
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode page path for this provider.

    Searches the site (behind Cloudflare) for '<show> "Season N"
    "Episode M"', matches show/season/episode from the result headers,
    and returns the url path (utf-8) or None on any failure.
    """
    try:
        if url == None: return

        # Unpadded season/episode to match the site's listing format.
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)

        query = '%s "Season %s" "Episode %s"' % (url, season, episode)
        query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

        result = cloudflare.source(query)
        result = client.parseDOM(result, "header", attrs = { "class": "entry-header" })

        show = cleantitle.tv(url)

        # Each header -> (href, link text); then parse
        # "<title>: Season N ... Episode M" out of the text.
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
        result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a 'u' parameter.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode url for this provider.

    Searches for '<show> "SxxEyy"', matches the show title and the
    SxxEyy marker in the post titles, and returns the url relative to
    tvbase_link (utf-8) or None on any failure.
    """
    try:
        if url == None: return

        title = url
        hdlr = 'S%02dE%02d' % (int(season), int(episode))

        query = self.search_link % (urllib.quote_plus('%s "%s"' % (title, hdlr)))
        query = urlparse.urljoin(self.tvbase_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "header", attrs = { "class": "post-title" })

        title = cleantitle.tv(title)

        # Each header -> (href, link text); then split the text into
        # (show name, SxxEyy marker).
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]

        # Strip the site prefix so only the relative path is stored.
        url = result.replace(self.tvbase_link, '')
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode url for this provider.

    Searches for '<show> "SxxEyy"', matches the show title and the
    SxxEyy marker in the post titles, and returns the url relative to
    tvbase_link (utf-8) or None on any failure.
    """
    try:
        if url == None: return

        title = url
        hdlr = 'S%02dE%02d' % (int(season), int(episode))

        query = self.search_link % (urllib.quote_plus('%s "%s"' % (title, hdlr)))
        query = urlparse.urljoin(self.tvbase_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "header", attrs={"class": "post-title"})

        title = cleantitle.tv(title)

        # Each header -> (href, link text); then split the text into
        # (show name, SxxEyy marker).
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]

        # Strip the site prefix so only the relative path is stored.
        url = result.replace(self.tvbase_link, '')
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url in the site's index listing.

    Fetches the index page (signing in via a cookie if the listing is
    missing), matches title against `show`/`show_alt` and the year
    (+/- 1), and returns the href path (utf-8) or None on any failure.
    """
    try:
        result = client.source(self.base_link)
        # The index markup is only present when signed in; retry with a
        # session cookie otherwise.
        if not "'index show'" in result:
            cookie = client.source(self.sign_link, post=self.key_link, output='cookie')
            result = client.source(self.base_link, cookie=cookie)

        result = client.parseDOM(result, "div", attrs={"class": "index show"})
        # Each entry -> (name, year text, href).
        result = [(client.parseDOM(i, "a", attrs={"class": "name"})[0], client.parseDOM(i, "span", attrs={"class": "value"})[0], client.parseDOM(i, "a", ret="href")[0]) for i in result]

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        # Allow off-by-one years to absorb metadata disagreements.
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]

        result = [i for i in result if any(x in i[1] for x in years)]
        result = [ i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows) ][0]

        # Reduce scheme-relative urls ('//host/path') to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show id via the site's JSON search API.

    Matches title against `show`/`show_alt`, then confirms the year
    (+/- 1) via each candidate's detail endpoint ('state' field —
    presumably contains the year; verify against the API). Returns the
    id as a utf-8 string, or None on any failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(show))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = json.loads(result)
        result = result['data']['films']

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [ '%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1) ]

        result = [(i['id'], i['title'].encode('utf-8')) for i in result]
        # Cap at two candidates to limit the per-candidate detail requests.
        result = [ i for i in result if any(x == cleantitle.tv(i[1]) for x in shows) ][:2]
        result = [(i[0], self.base_link + self.detail_link % i[0]) for i in result]
        result = [(i[0], client.source(i[1])) for i in result]
        result = [(i[0], json.loads(i[1])['data']['state']) for i in result]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode page path for this provider.

    Searches (behind Cloudflare) for '<show> "Season N" "Episode M"',
    scans both 'first element' and 'element' list items, and returns
    the matched href path (utf-8) or None on any failure.
    """
    try:
        if url == None: return

        season = '%01d' % int(season)
        episode = '%01d' % int(episode)

        query = '%s "Season %s" "Episode %s"' % (url, season, episode)
        query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

        result = cloudflare.source(query)

        # The first result row uses a different class; collect both.
        r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
        r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })

        show = cleantitle.tv(url)

        # Each item -> (href, (title, season, episode)) parsed from
        # the "<title>: Season N, Episode M" label.
        result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?): Season (\d*), Episode (\d*)<').findall(i)) for i in r]
        result = [(i[0][0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
        result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]

        # Reduce scheme-relative urls to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's catalog id via the site's JSON search API.

    Catalog names look like '<title> (<year text>)'; matches the title
    against `show`/`show_alt` and the year (+/- 1), returning the
    catalog_id as a utf-8 string or None on any failure.
    """
    try:
        query = urlparse.urljoin( self.base_link, self.search_link % (urllib.quote_plus(show)))
        result = client.source(query)
        result = json.loads(result)
        result = result['categories']

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [ '%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1) ]

        result = [(i['catalog_id'], i['catalog_name'].encode('utf-8')) for i in result]
        # Split '<title> (<year text>)' and pull a 4-digit year out of
        # the parenthesised part.
        result = [(i[0], re.compile('(.+?) [(](.+?)[)]$').findall(i[1])[0]) for i in result]
        result = [(i[0], i[1][0], re.compile('(\d{4})').findall(i[1][1])[0]) for i in result]
        result = [ i for i in result if any(x == cleantitle.tv(i[1]) for x in shows) ]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url in the site's index listing.

    Fetches the index page (signing in via a cookie if the listing is
    missing), matches title against `show`/`show_alt` and the year
    (+/- 1), and returns the href path (utf-8) or None on any failure.
    """
    try:
        result = client.source(self.base_link)
        # The index markup is only present when signed in; retry with a
        # session cookie otherwise.
        if not "'index show'" in result:
            cookie = client.source(self.sign_link, post=self.key_link, output='cookie')
            result = client.source(self.base_link, cookie=cookie)

        result = client.parseDOM(result, "div", attrs = { "class": "index show" })
        # Each entry -> (name, year text, href).
        result = [(client.parseDOM(i, "a", attrs = { "class": "name" })[0], client.parseDOM(i, "span", attrs = { "class": "value" })[0], client.parseDOM(i, "a", ret="href")[0]) for i in result]

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        # Allow off-by-one years to absorb metadata disagreements.
        years = [str(year), str(int(year)+1), str(int(year)-1)]

        result = [i for i in result if any(x in i[1] for x in years)]
        result = [i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows)][0]

        # Reduce scheme-relative urls ('//host/path') to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a TV show's catalog id via the site's encrypted search API.

    The API response payload is decrypted with the provider's data key;
    entries with type_film == '1' are TV shows (inferred from the
    filter — confirm against the API). Matches title and '(year)'
    (+/- 1) inside the catalog name; returns the catalog_id as a
    utf-8 string or None on any failure.
    """
    try:
        query = urlparse.urljoin( self.base_link, self.search_link % (urllib.quote_plus(show)))
        # __extra() appends provider-specific query parameters.
        query += self.__extra()

        result = client.source(query)
        result = json.loads(result)
        result = self.__decrypt(self.data_key, result['data'])
        result = json.loads(result)
        result = result['categories']

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [ '(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1) ]

        result = [(i['catalog_id'], i['catalog_name'].encode('utf-8'), str(i['type_film'])) for i in result]
        result = [i for i in result if i[2] == '1']
        result = [ i for i in result if any(x == cleantitle.tv(i[1]) for x in shows) ]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page path via the site's POST search.

    Tries the mirror domains in turn until a search-results widget
    appears, matches title against `show`/`show_alt` and '(year)'
    (+/- 1), unwraps redirector links, and returns the url path
    (utf-8) or None on any failure.
    """
    try:
        query = self.search_link
        # searchin=2 presumably restricts results to TV shows — verify.
        post = urllib.urlencode({'searchquery': show, 'searchin': '2'})

        result = ''
        # Fall through the mirrors until one responds with results.
        links = [self.link_1, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
            if 'widget search-page' in str(result): break

        result = client.parseDOM(result, "div", attrs = { "class": "widget search-page" })[0]
        result = client.parseDOM(result, "td")

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        # Last anchor per cell carries the show link/text.
        result = [(client.parseDOM(i, "a", ret="href")[-1], client.parseDOM(i, "a")[-1]) for i in result]
        result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a 'u' parameter.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url in the site's index listing.

    Fetches the index page (signing in via a cookie if the listing is
    missing), matches title against `show`/`show_alt` and the year
    (+/- 1), and returns the href path (utf-8) or None on any failure.
    """
    try:
        result = client.source(self.base_link)
        # The index markup is only present when signed in; retry with a
        # session cookie otherwise.
        if not "'index show'" in result:
            cookie = client.source(self.sign_link, post=self.key_link, output="cookie")
            result = client.source(self.base_link, cookie=cookie)

        result = client.parseDOM(result, "div", attrs={"class": "index show"})
        # Each entry -> (name, year text, href).
        result = [ ( client.parseDOM(i, "a", attrs={"class": "name"})[0], client.parseDOM(i, "span", attrs={"class": "value"})[0], client.parseDOM(i, "a", ret="href")[0], ) for i in result ]

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        # Allow off-by-one years to absorb metadata disagreements.
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]

        result = [i for i in result if any(x in i[1] for x in years)]
        result = [i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows)][0]

        # Reduce scheme-relative urls ('//host/path') to just the path.
        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page path via a key-token protected search.

    Fetches the search form from a working mirror to obtain its hidden
    'key' token, searches with it, filters candidates by '(year)'
    (+/- 1), then picks either an exact title match ('watch' prefix is
    part of the site's cleaned titles) or the first of up to five
    candidates whose page contains the show's imdb id. Returns the url
    path (utf-8) or None on any failure.
    """
    try:
        result = ''
        # Fall through the mirrors until the search form is served.
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, self.key_link), headers=self.headers)
            if 'searchform' in str(result): break

        # Hidden input 'key' is required by the search endpoint.
        key = client.parseDOM(result, "input", ret="value", attrs = { "name": "key" })[0]
        query = self.tvsearch_link % (urllib.quote_plus(re.sub('\'', '', show)), key)

        result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs = { "class": "index_item.+?" })

        shows = ['watch' + cleantitle.tv(show), 'watch' + cleantitle.tv(show_alt)]
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Unwrap redirector links that carry the target in a 'u' parameter.
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

        # Exact-title matches, and a de-duplicated fallback list.
        match = [i[0] for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        match2 = [i[0] for i in result]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        # Prefer the exact match; otherwise probe up to 5 candidate
        # pages for the imdb id.
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                result = client.source(base_link + i, headers=self.headers)
                if str('tt' + imdb) in str(result):
                    url = i
                    break
            except:
                pass

        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode page path for this provider.

    Searches the site (behind Cloudflare) for '<show> "Season N"
    "Episode M"', matches show/season/episode from the result headers,
    and returns the url path (utf-8) or None on any failure.
    """
    try:
        if url == None: return

        # Unpadded season/episode to match the site's listing format.
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)

        query = '%s "Season %s" "Episode %s"' % (url, season, episode)
        query = urlparse.urljoin( self.base_link, self.tvsearch_link + urllib.quote_plus(query))

        result = cloudflare.source(query)
        result = client.parseDOM(result, "header", attrs={"class": "entry-header"})

        show = cleantitle.tv(url)

        # Each header -> (href, link text); then parse
        # "<title>: Season N ... Episode M" out of the text.
        result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [ (i[0], re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall( i[1])) for i in result ]
        result = [(i[0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
        result = [ i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3]) ]
        result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a 'u' parameter.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page path via the site's year-bounded search.

    Tries the mirror domains until an 'episode-summary' block appears,
    filters rows by '(year)' (+/- 1), then picks either an exact title
    match or the first of up to five candidates whose page contains
    the show's imdb id. Returns the url path (utf-8) or None on any
    failure.
    """
    try:
        # search_link takes a year range (year-1 .. year+1) plus the query.
        query = self.search_link % (str(int(year)-1), str(int(year)+1), urllib.quote_plus(show))

        result = ''
        # Fall through the mirrors until one responds with results.
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
            if 'episode-summary' in str(result): break

        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs = { "class": "episode-summary" })[0]
        result = client.parseDOM(result, "tr")

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        # Each row -> (first href, last anchor text stripped of tags).
        result = [(re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0], client.parseDOM(i, "a")[-1]) for i in result]
        result = [(i[0], re.sub('<.+?>|</.+?>','', i[1])) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Unwrap redirector links that carry the target in a 'u' parameter.
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

        # Exact-title matches, and a de-duplicated fallback list.
        match = [i[0] for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        match2 = [i[0] for i in result]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        # Prefer the exact match; otherwise probe up to 5 candidate
        # pages for the imdb id.
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                result = client.source(base_link + i, headers=self.headers)
                if str('tt' + imdb) in str(result):
                    url = i
                    break
            except:
                pass

        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page path via the site's POST search.

    Tries the mirror domains in turn until a search-results widget
    appears, matches title against `show`/`show_alt` and '(year)'
    (+/- 1), unwraps redirector links, and returns the url path
    (utf-8) or None on any failure.
    """
    try:
        query = self.search_link
        # searchin=2 presumably restricts results to TV shows — verify.
        post = urllib.urlencode({'searchquery': show, 'searchin': '2'})

        result = ''
        # Fall through the mirrors until one responds with results.
        links = [self.link_1, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
            if 'widget search-page' in str(result): break

        result = client.parseDOM(result, "div", attrs={"class": "widget search-page"})[0]
        result = client.parseDOM(result, "td")

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [ '(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1) ]

        # Last anchor per cell carries the show link/text.
        result = [(client.parseDOM(i, "a", ret="href")[-1], client.parseDOM(i, "a")[-1]) for i in result]
        result = [ i for i in result if any(x == cleantitle.tv(i[1]) for x in shows) ]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        # Unwrap redirector links that carry the target in a 'u' parameter.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's catalog id via the site's JSON search API.

    Catalog names look like '<title> (<year text>)'; matches the title
    against `show`/`show_alt` and the year (+/- 1), returning the
    catalog_id as a utf-8 string or None on any failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(show)))
        result = client.source(query)
        result = json.loads(result)
        result = result['categories']

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        result = [(i['catalog_id'], i['catalog_name'].encode('utf-8')) for i in result]
        # Split '<title> (<year text>)' and pull a 4-digit year out of
        # the parenthesised part.
        result = [(i[0], re.compile('(.+?) [(](.+?)[)]$').findall(i[1])[0]) for i in result]
        result = [(i[0], i[1][0], re.compile('(\d{4})').findall(i[1][1])[0]) for i in result]
        result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url via the site's TV search.

    Matches the result title (h2 title attribute) against
    `show`/`show_alt` and the copyrightYear span against the year
    (+/- 1). Returns the href path (utf-8) or None on any failure.
    """
    try:
        query = self.tvsearch_link % (urllib.quote_plus(show))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"class": "searchResult"})

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [ '%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1) ]

        # Each result -> (href, title, [copyrightYear spans]).
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "h2", ret="title")[0], client.parseDOM(i, "span", attrs={"itemprop": "copyrightYear"})) for i in result]
        # Drop entries with no year span at all.
        result = [i for i in result if len(i[2]) > 0]
        result = [ i for i in result if any(x == cleantitle.tv(i[1]) for x in shows) ]
        result = [ i[0] for i in result if any(x in i[2][0] for x in years) ][0]

        # Reduce scheme-relative urls to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode url on a site organised by season pages.

    `url` is '<show> (<year>)'. Finds the matching '<show> Season N'
    post (validating the show's start year inferred from the season
    page's year minus the season number), opens the season page, then
    picks the anchor whose text equals the episode number. Returns the
    href path (utf-8) or None on any failure.
    """
    try:
        show, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]

        query = self.search_link % (urllib.quote_plus(show))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs = { "id": "post-.+?" })

        show = cleantitle.tv(show)
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        # Each post -> (href, title, [year status]); de-duplicate posts.
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0], client.parseDOM(i, "div", attrs = { "class": "status status-year" })) for i in result]
        result = [x for y,x in enumerate(result) if x not in result[:y]]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]
        result = [(i[0], re.compile('(.+?) Season (\d*)$').findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if show == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        # Season page year minus (season - 1) approximates the show's
        # first-air year; compare that against the expected year.
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        url = urlparse.urljoin(self.base_link, result)

        # On the season page, episode links are plain anchors whose
        # text is the episode number.
        result = client.source(url)
        result = client.parseDOM(result, "div", attrs = { "id": "episode_show" })[0]
        result = re.compile('(<a.+?</a>)').findall(result)
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
        result = [i[0] for i in result if episode == i[1]][0]

        # Reduce scheme-relative urls to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url in the site's index listing.

    Fetches the index page (signing in via a cookie if the listing is
    missing), matches title against `show`/`show_alt` and the year
    (+/- 1), and returns the href path (utf-8) or None on any failure.
    """
    try:
        result = client.source(self.base_link)
        # The index markup is only present when signed in; retry with a
        # session cookie otherwise.
        if not "'index show'" in result:
            cookie = client.source(self.sign_link, post=self.key_link, output='cookie')
            result = client.source(self.base_link, cookie=cookie)

        result = client.parseDOM(result, "div", attrs={"class": "index show"})
        # Each entry -> (name, year text, href).
        result = [(client.parseDOM(i, "a", attrs={"class": "name"})[0], client.parseDOM(i, "span", attrs={"class": "value"})[0], client.parseDOM(i, "a", ret="href")[0]) for i in result]

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        # Allow off-by-one years to absorb metadata disagreements.
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]

        result = [i for i in result if any(x in i[1] for x in years)]
        result = [ i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows) ][0]

        # Reduce scheme-relative urls ('//host/path') to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode identifier for this provider.

    `url` is the show keyword saved by get_show. POSTs a JSON keyword
    search to the site's index endpoint, matches the show title and
    season, and returns '<videoid>|<episode>' (utf-8) or None on any
    failure (bare except is this file's best-effort convention).
    """
    try:
        if url == None: return

        query = self.tvbase_link + self.index_link
        # Site expects the search terms wrapped in a JSON string under 'p'.
        post = urllib.urlencode({ 'a': 'retrieve', 'c': 'result', 'p': '{"KeyWord":"%s","Page":"1","NextToken":""}' % url })

        result = client.source(query, post=post)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "tr")

        # Normalized title for comparison; season is unpadded, episode
        # is zero-padded to two digits (the site's own format, presumably).
        show = cleantitle.tv(url)
        season = '%01d' % int(season)
        episode = '%02d' % int(episode)

        # Each row -> (href, bare title, season number pulled from title).
        result = [client.parseDOM(i, "h1")[0] for i in result]
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
        result = [(i[0], re.sub('\sSeason(|\s)\d*.+', '', i[1]), re.compile('\sSeason *(\d*) *').findall(i[1])[0]) for i in result]
        result = [i for i in result if show == cleantitle.tv(i[1])]
        result = [i[0] for i in result if season == i[2]][0]

        # Keep only the video id after 'v=' and append the episode marker.
        url = result.split('v=', 1)[-1]
        url = '%s|%s' % (url, episode)
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url via the site's search, articles list.

    Strips leading articles ('The'/'A') from the query, matches the
    article-title span against `show`/`show_alt` and '(year)' (+/- 1),
    and returns the href path (utf-8) or None on any failure.
    """
    try:
        # Articles are dropped from the query string only; full titles
        # are still used for the comparison below.
        query = ' '.join( [i for i in show.split() if i not in ['The', 'the', 'A', 'a']])
        query = self.search_link % urllib.quote_plus(query)
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs={"id": "video_list"})[0]
        # Anchors are not well-nested here; split on closing tags instead.
        result = result.split('</a>')

        result = [(client.parseDOM(i, "span", attrs={"class": "article-title"}), client.parseDOM(i, "a", ret="href")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if not (len(i[0]) == 0 or len(i[1]) == 0)]

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = [ '(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1) ]

        result = [ i for i in result if any(x == cleantitle.tv(i[0]) for x in shows) ]
        result = [i[1] for i in result if any(x in i[0] for x in years)][0]

        # Reduce scheme-relative urls to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url via the site's TV search.

    Matches the result title (h2 title attribute) against
    `show`/`show_alt` and the copyrightYear span against the year
    (+/- 1). Returns the href path (utf-8) or None on any failure.
    """
    try:
        query = self.tvsearch_link % (urllib.quote_plus(show))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs = { "class": "searchResult" })

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        # Each result -> (href, title, [copyrightYear spans]).
        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "h2", ret="title")[0], client.parseDOM(i, "span", attrs = { "itemprop": "copyrightYear" })) for i in result]
        # Drop entries with no year span at all.
        result = [i for i in result if len(i[2]) > 0]
        result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        result = [i[0] for i in result if any(x in i[2][0] for x in years)][0]

        # Reduce scheme-relative urls to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show id via the site's JSON search API.

    Matches title against `show`/`show_alt`, then confirms the year
    (+/- 1) via each candidate's detail endpoint ('state' field —
    presumably contains the year; verify against the API). Returns the
    id as a utf-8 string, or None on any failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(show))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = json.loads(result)
        result = result['data']['films']

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

        result = [(i['id'], i['title'].encode('utf-8')) for i in result]
        # Cap at two candidates to limit the per-candidate detail requests.
        result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)][:2]
        result = [(i[0], self.base_link + self.detail_link % i[0]) for i in result]
        result = [(i[0], client.source(i[1])) for i in result]
        result = [(i[0], json.loads(i[1])['data']['state']) for i in result]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = str(result)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url via the site's search results list.

    Searches by `show_alt`, matches the anchor text (tags stripped)
    against `show`/`show_alt`, and takes the LAST match. No year
    filter on this provider. Returns the href path (utf-8) or None on
    any failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(show_alt))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "ol", attrs = { "id": "searchresult" })[0]
        result = client.parseDOM(result, "h2")

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
        # Strip stray markup from the anchor text before comparison.
        result = [(i[0], re.sub('<.+?>|</.+?>','', i[1])) for i in result]
        result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        # Last match wins — presumably the newest entry; verify.
        result = result[-1][0]

        # Reduce scheme-relative urls to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a TV show's catalog id via the site's encrypted search API.

    The API response payload is decrypted with the provider's data key;
    entries with type_film == '1' are TV shows (inferred from the
    filter — confirm against the API). Matches title and '(year)'
    (+/- 1) inside the catalog name; returns the catalog_id as a
    utf-8 string or None on any failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(show)))
        # __extra() appends provider-specific query parameters.
        query += self.__extra()

        result = client.source(query)
        result = json.loads(result)
        result = self.__decrypt(self.data_key, result["data"])
        result = json.loads(result)
        result = result["categories"]

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]

        result = [(i["catalog_id"], i["catalog_name"].encode("utf-8"), str(i["type_film"])) for i in result]
        result = [i for i in result if i[2] == "1"]
        result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        url = str(result)
        url = url.encode("utf-8")
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's base url on a '-tv-show-online-' style site.

    Result link titles start with 'watch', hence the prefixed
    comparison strings. Matches title and '(year)' (+/- 1), then strips
    everything from '-tv-show-online-' onward so the stored url is the
    show's base slug. Returns the path (utf-8) or None on any failure.
    """
    try:
        query = self.tvsearch_link % urllib.quote_plus(show)
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs = { "class": "item" })

        shows = ['watch' + cleantitle.tv(show), 'watch' + cleantitle.tv(show_alt)]
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in result]
        # Only TV-show links carry this marker.
        result = [i for i in result if '-tv-show-online-' in i[0]]
        result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        result = result.split('-tv-show-online-', 1)[0]

        # Reduce scheme-relative urls to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, show, show_alt, year):
    """Find a show's page url via the site's search, articles list.

    Strips leading articles ('The'/'A') from the query, matches the
    article-title span against `show`/`show_alt` and '(year)' (+/- 1),
    and returns the href path (utf-8) or None on any failure.
    """
    try:
        # Articles are dropped from the query string only; full titles
        # are still used for the comparison below.
        query = ' '.join([i for i in show.split() if i not in ['The','the','A','a']])
        query = self.search_link % urllib.quote_plus(query)
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, "div", attrs = { "id": "video_list" })[0]
        # Anchors are not well-nested here; split on closing tags instead.
        result = result.split('</a>')

        result = [(client.parseDOM(i, "span", attrs = { "class": "article-title" }), client.parseDOM(i, "a", ret="href")) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if not (len(i[0]) == 0 or len(i[1]) == 0)]

        shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [i for i in result if any(x == cleantitle.tv(i[0]) for x in shows)]
        result = [i[1] for i in result if any(x in i[0] for x in years)][0]

        # Reduce scheme-relative urls to just the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect playable source links from the Alluc API.

    `url` is '<title> <year>' for movies or '<title> SxxEyy' for
    episodes. Queries both the download and stream endpoints, then
    filters each link by extension, known host, cleaned title match,
    year/episode marker, and quality (rejecting cam/screener releases).
    Returns a list of source dicts; best-effort — per-link failures are
    skipped via raise/except, and any outer failure returns whatever
    was collected so far.
    """
    try:
        sources = []

        if url == None: return sources

        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20110101 Firefox/34.0"}

        # params = (base64.urlsafe_b64decode(self.key_link), urllib.quote_plus(url), urllib.quote_plus(','.join(locDict)))
        # API key is stored base64-encoded; decode it per request.
        params = (base64.urlsafe_b64decode(self.key_link), urllib.quote_plus(url))

        # Merge results from both API endpoints; either may fail alone.
        links = []
        q = urlparse.urljoin(self.base_link, self.download_link % params)
        try: links += json.loads(client.source(q, headers=headers))["result"]
        except: pass
        q = urlparse.urljoin(self.base_link, self.stream_link % params)
        try: links += json.loads(client.source(q, headers=headers))["result"]
        except: pass

        # Trailing token decides movie (4-digit year) vs episode (SxxEyy);
        # movies accept the year +/- 1.
        title, hdlr = re.compile("(.+?) (\d{4}|S\d*E\d*)$").findall(url)[0]
        if hdlr.isdigit():
            type = "movie"
            title = cleantitle.movie(title)
            hdlr = [str(hdlr), str(int(hdlr) + 1), str(int(hdlr) - 1)]
        else:
            type = "episode"
            title = cleantitle.tv(title)
            hdlr = [hdlr]

        for i in links:
            # raise Exception() skips this link via the per-link except.
            try:
                if len(i["hosterurls"]) > 1: raise Exception()
                if not i["extension"] in ["mkv", "mp4"]: raise Exception()

                # Host must be one of the supported hosters.
                host = i["hostername"]
                host = host.rsplit(".", 1)[0]
                host = host.strip().lower()
                if not (host in hosthdDict or host in hostDict): raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode("utf-8")

                T = client.replaceHTMLCodes(i["title"])
                N = client.replaceHTMLCodes(i["sourcetitle"])

                # Compare cleaned release titles (text before the
                # year/SxxEyy/3D marker) against the requested title.
                t = re.sub("(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\_|\)|\]|\s)(.+)", "", T)
                if type == "movie": t = cleantitle.movie(t)
                else: t = cleantitle.tv(t)
                n = re.sub("(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\_|\)|\]|\s)(.+)", "", N)
                if type == "movie": n = cleantitle.movie(n)
                else: n = cleantitle.tv(n)
                if not (t == title or n == title): raise Exception()

                # First year/SxxEyy marker found must match the request.
                y = re.compile("[\.|\_|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\_|\)|\]|\s]").findall(T)
                y += re.compile("[\.|\_|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\_|\)|\]|\s]").findall(N)
                y = y[0]
                if not any(x == y for x in hdlr): raise Exception()

                # Tokens after the marker describe the release format.
                fmt = re.sub("(.+)(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\_|\)|\]|\s)", "", T)
                fmt += " " + re.sub("(.+)(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\_|\)|\]|\s)", "", N)
                fmt = re.split("\.|\_|\(|\)|\[|\]|\s|\-", fmt)
                fmt = [x.lower() for x in fmt]

                if "1080p" in fmt: quality = "1080p"
                elif "720p" in fmt: quality = "HD"
                else: quality = "SD"
                # Reject cam/screener releases outright.
                if any( x in ["dvdscr", "r5", "r6", "camrip", "tsrip", "hdcam", "hdts", "dvdcam", "dvdts", "cam", "ts"] for x in fmt ): raise Exception()
                # The host must be approved for the detected quality tier.
                if quality in ["1080p", "HD"] and not host in hosthdDict: raise Exception()
                if quality == "SD" and not host in hostDict: raise Exception()

                url = i["hosterurls"][0]["url"]
                url = client.replaceHTMLCodes(url)
                url = url.encode("utf-8")

                info = []
                # sizeinternal is in bytes; movies under ~100MB are
                # presumably fakes/samples and get rejected.
                size = i["sizeinternal"]
                if type == "movie" and 1 < size < 100000000: raise Exception()
                size = float(size) / 1073741824
                if not size == 0: info.append("%.2f GB" % size)
                if "3d" in fmt: info.append("3D")
                info = " | ".join(info)

                sources.append({"source": host, "quality": quality, "provider": "Alluc", "url": url, "info": info})
            except:
                pass

        return sources
    except:
        return sources
def get_show(self, imdb, tvdb, show, show_alt, year):
    # Resolve a TV show to its site URL path.  Tries mirror base links until
    # one serves the search form, searches the site by title, filters the
    # candidates by year and cleaned title, and if no exact title match is
    # found probes up to five candidate pages for the IMDB id.
    # Returns None on any failure.
    try:
        result = ''

        # Try each mirror until a page containing the search form comes back;
        # base_link keeps the last (working) mirror for subsequent requests.
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(
                base_link, self.key_link), headers=self.headers)
            if 'searchform' in str(result): break

        # Hidden form token required by the search endpoint.
        key = client.parseDOM(result, "input", ret="value", attrs={"name": "key"})[0]

        # Apostrophes are stripped from the title before quoting.
        query = self.tvsearch_link % (urllib.quote_plus(
            re.sub('\'', '', show)), key)

        result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class": "index_item.+?"})

        # Accept either the primary or the alternative title; the site
        # prefixes its link titles with 'Watch', hence the 'watch' prefix
        # after cleaning.
        shows = [
            'watch' + cleantitle.tv(show),
            'watch' + cleantitle.tv(show_alt)
        ]
        # Accept the given year plus or minus one.
        years = [
            '(%s)' % str(year),
            '(%s)' % str(int(year) + 1),
            '(%s)' % str(int(year) - 1)
        ]
        result = [(client.parseDOM(i, "a", ret="href")[0],
                   client.parseDOM(i, "a", ret="title")[0]) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]

        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Unwrap redirect links of the form ...?u=<target>, when present.
        try:
            result = [
                (urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0],
                 i[1]) for i in result
            ]
        except:
            pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

        # Exact cleaned-title matches.
        match = [
            i[0] for i in result
            if any(x == cleantitle.tv(i[1]) for x in shows)
        ]

        # All candidate paths, de-duplicated while preserving order.
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        # Prefer an exact title match; otherwise probe up to five candidate
        # pages looking for the show's IMDB id.
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                result = client.source(base_link + i, headers=self.headers)
                if str('tt' + imdb) in str(result):
                    url = i
                    break
            except:
                pass

        url = url.encode('utf-8')
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Resolve `url` (a 'Title YEAR' or 'Title SxxExx' query string) into a
    # list of playable source dicts via the Alluc download/stream JSON APIs.
    # Returns [] (never raises) on any failure.
    try:
        sources = []

        if url == None: return sources

        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20110101 Firefox/34.0'}

        #params = (base64.urlsafe_b64decode(self.key_link), urllib.quote_plus(url), urllib.quote_plus(','.join(locDict)))
        params = (base64.urlsafe_b64decode(self.key_link), urllib.quote_plus(url))

        links = []

        # Query both the download and the stream endpoints; either may fail
        # silently (best-effort aggregation).
        q = urlparse.urljoin(self.base_link, self.download_link % params)
        try: links += json.loads(client.source(q, headers=headers))['result']
        except: pass

        q = urlparse.urljoin(self.base_link, self.stream_link % params)
        try: links += json.loads(client.source(q, headers=headers))['result']
        except: pass

        # Split the query into title and "handler": a 4-digit year means a
        # movie, an SxxExx tag means an episode.
        title, hdlr = re.compile('(.+?) (\d{4}|S\d*E\d*)$').findall(url)[0]

        if hdlr.isdigit():
            type = 'movie'
            title = cleantitle.movie(title)
            # Accept the requested year plus or minus one.
            hdlr = [str(hdlr), str(int(hdlr)+1), str(int(hdlr)-1)]
        else:
            type = 'episode'
            title = cleantitle.tv(title)
            hdlr = [hdlr]

        for i in links:
            # Per-entry filtering: any failed check raises and the entry is
            # silently skipped.
            try:
                # Only single-hoster entries with mkv/mp4 containers.
                if len(i['hosterurls']) > 1: raise Exception()
                if not i['extension'] in ['mkv', 'mp4']: raise Exception()

                host = i['hostername']
                host = host.rsplit('.', 1)[0]
                host = host.strip().lower()
                # The hoster must be in one of the recognised host lists.
                if not (host in hosthdDict or host in hostDict): raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                T = client.replaceHTMLCodes(i['title'])
                N = client.replaceHTMLCodes(i['sourcetitle'])

                # Strip everything from the year/episode/3D tag onward, then
                # normalise; either the page title or the source title must
                # match the requested title.
                t = re.sub('(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\_|\)|\]|\s)(.+)', '', T)
                if type == 'movie': t = cleantitle.movie(t)
                else: t = cleantitle.tv(t)
                n = re.sub('(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\_|\)|\]|\s)(.+)', '', N)
                if type == 'movie': n = cleantitle.movie(n)
                else: n = cleantitle.tv(n)
                if not (t == title or n == title): raise Exception()

                # The first year/episode tag found in either name must match
                # one of the accepted handlers.
                y = re.compile('[\.|\_|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\_|\)|\]|\s]').findall(T)
                y += re.compile('[\.|\_|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\_|\)|\]|\s]').findall(N)
                y = y[0]
                if not any(x == y for x in hdlr): raise Exception()

                # Tokenise the release-name tail (after the year/episode tag)
                # to look for quality keywords.
                fmt = re.sub('(.+)(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\_|\)|\]|\s)', '', T)
                fmt += ' ' + re.sub('(.+)(\.|\_|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\_|\)|\]|\s)', '', N)
                fmt = re.split('\.|\_|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [x.lower() for x in fmt]

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                # Reject cam/screener releases outright.
                if any(x in ['dvdscr', 'r5', 'r6', 'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts'] for x in fmt): raise Exception()
                # HD links require an HD-capable hoster; SD links an SD one.
                if quality in ['1080p', 'HD'] and not host in hosthdDict: raise Exception()
                if quality == 'SD' and not host in hostDict: raise Exception()

                url = i['hosterurls'][0]['url']
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                info = []

                size = i['sizeinternal']
                # Skip implausibly small movie files (under ~100 MB).
                if type == 'movie' and 1 < size < 100000000: raise Exception()
                # Convert bytes to GB for display.
                size = float(size)/1073741824
                if not size == 0: info.append('%.2f GB' % size)

                if '3d' in fmt: info.append('3D')

                info = ' | '.join(info)

                sources.append({'source': host, 'quality': quality, 'provider': 'Alluc', 'url': url, 'info': info})
            except:
                pass

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Search the (account-gated) Furk API for `url` (a 'Title YEAR' or
    # 'Title SxxExx' query string) and return playable source dicts.
    # Requires self.user / self.password; returns [] on any failure.
    try:
        sources = []

        if url == None: return sources

        # Credentials are mandatory for this provider.
        if (self.user == '' or self.password == ''): raise Exception()

        # Log in to obtain a session cookie.
        query = urlparse.urljoin(self.base_link, self.login_link)
        post = urllib.urlencode({'login': self.user, 'pwd': self.password})
        cookie = client.source(query, post=post, output='cookie')

        # Full-text search, moderated results only, first 100 hits.
        query = urlparse.urljoin(self.base_link, self.search_link)
        post = urllib.urlencode({'sort': 'relevance', 'filter': 'all', 'moderated': 'yes', 'offset': '0', 'limit': '100', 'match': 'all', 'q': url})
        result = client.source(query, post=post, cookie=cookie)
        result = json.loads(result)
        links = result['files']

        # Split the query into title and "handler": a 4-digit year means a
        # movie, an SxxExx tag means an episode.
        title, hdlr = re.compile('(.+?) (\d{4}|S\d*E\d*)$').findall(url)[0]

        if hdlr.isdigit():
            type = 'movie'
            title = cleantitle.movie(title)
            # Accept the requested year plus or minus one.
            hdlr = [str(hdlr), str(int(hdlr)+1), str(int(hdlr)-1)]
        else:
            type = 'episode'
            title = cleantitle.tv(title)
            hdlr = [hdlr]

        for i in links:
            # Per-file filtering: any failed check raises and the file is
            # silently skipped.
            try:
                name = i['name']
                name = client.replaceHTMLCodes(name)

                info = i['video_info']
                # Movies must carry an English audio track marker.
                if type == 'movie' and not '#0:1(eng): Audio:' in info: raise Exception()

                # Strip everything from the year/episode/3D tag onward, then
                # normalise; the name must match the requested title.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)', '', name)
                if type == 'movie': t = cleantitle.movie(t)
                else: t = cleantitle.tv(t)
                if not t == title: raise Exception()

                # The last year/episode tag in the name must match one of the
                # accepted handlers.
                y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(name)[-1]
                if not any(x == y for x in hdlr): raise Exception()

                # Tokenise the release-name tail to look for format keywords.
                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\)|\]|\s)', '', name)
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [x.lower() for x in fmt]

                # Reject subbed/dubbed releases and "extras" material.
                if any(x.endswith(('subs', 'sub', 'dubbed', 'dub')) for x in fmt): raise Exception()
                if any(x in ['extras'] for x in fmt): raise Exception()

                # Derive quality from the horizontal resolution reported in
                # the video_info block.
                res = i['video_info'].replace('\n','')
                res = re.compile(', (\d*)x\d*').findall(res)[0]
                res = int(res)
                if 1900 <= res <= 1920: quality = '1080p'
                elif 1200 <= res <= 1280: quality = 'HD'
                else: quality = 'SD'
                # Screener/cam keywords override the resolution-based rating.
                if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt): quality = 'SCR'
                elif any(x in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts'] for x in fmt): quality = 'CAM'

                # Size sanity checks (bytes -> GB): non-HD files over 2 GB
                # and anything over 5 GB are rejected.
                size = i['size']
                size = float(size)/1073741824
                if int(size) > 2 and not quality in ['1080p', 'HD']: raise Exception()
                if int(size) > 5: raise Exception()

                # Build the human-readable info string: size, optional 3D,
                # video codec, audio codec and channel layout.
                info = i['video_info'].replace('\n','')
                v = re.compile('Video: (.+?),').findall(info)[0]
                a = re.compile('Audio: (.+?), .+?, (.+?),').findall(info)[0]
                if '3d' in fmt: q = ' | 3D'
                else: q = ''
                info = '%.2f GB%s | %s | %s | %s' % (size, q, v, a[0], a[1])
                info = re.sub('\(.+?\)', '', info)
                info = info.replace('stereo', '2.0')
                info = ' '.join(info.split())

                url = i['url_pls']
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                sources.append({'source': 'Furk', 'quality': quality, 'provider': 'Furk', 'url': url, 'info': info})
            except:
                pass

        # Drop CAM/SCR entries unless they are all we found.
        if not all(i['quality'] in ['CAM', 'SCR'] for i in sources):
            sources = [i for i in sources if not i['quality'] in ['CAM', 'SCR']]

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Translate a 'Title SxxExx' query into DirectDL source entries: hit the
    # JSON search endpoint, keep entries whose show name and episode tag
    # match, then keep only single-link results on recognised HD hosters.
    # Returns [] on any failure.
    try:
        sources = []

        if url == None:
            return sources

        # Build and fetch the JSON search endpoint.
        query = urlparse.urljoin(
            self.base_link,
            base64.urlsafe_b64decode(self.search_link) + urllib.quote_plus(url))
        entries = json.loads(client.source(query))

        # Split off the SxxExx tag and normalise the show title.
        show, tag = re.compile('(.+?) (S\d*E\d*)$').findall(url)[0]
        show = cleantitle.tv(show)
        wanted = [tag]

        candidates = []
        for entry in entries:
            # Any failed check or missing key silently skips the entry.
            try:
                name = cleantitle.tv(client.replaceHTMLCodes(entry['showName']))
                if name != show:
                    raise Exception()

                # The last year/episode tag in the release name must match.
                release = entry['release']
                found = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(release)[-1].upper()
                if not any(found == x for x in wanted):
                    raise Exception()

                # Map the API quality label onto the add-on's buckets.
                label = entry['quality']
                if label == 'WEBDL1080P':
                    quality = '1080p'
                elif label in ['720P', 'WEBDL']:
                    quality = 'HD'
                else:
                    quality = 'SD'

                # Size is reported in MB; show it in GB.
                info = '%.2f GB' % (float(entry['size']) / 1024)

                for key in entry['links'].keys():
                    candidates.append({'url': entry['links'][key], 'quality': quality, 'info': info})
            except:
                pass

        for candidate in candidates:
            try:
                target = candidate['url']
                # Only entries that resolve to exactly one hoster link.
                if len(target) > 1:
                    raise Exception()
                target = target[0]

                host = (urlparse.urlparse(target).netloc).replace('www.', '').rsplit('.', 1)[0].lower()
                if host not in hosthdDict:
                    raise Exception()

                sources.append({'source': host, 'quality': candidate['quality'],
                                'provider': 'DirectDL', 'url': target, 'info': candidate['info']})
            except:
                pass

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Search the (account-gated) Furk API for `url` (a 'Title YEAR' or
    # 'Title SxxExx' query string) and return playable source dicts.
    # Requires self.user / self.password; returns [] on any failure.
    try:
        sources = []

        if url == None: return sources

        # Credentials are mandatory for this provider.
        if (self.user == '' or self.password == ''): raise Exception()

        # Log in to obtain a session cookie.
        query = urlparse.urljoin(self.base_link, self.login_link)
        post = urllib.urlencode({'login': self.user, 'pwd': self.password})
        cookie = client.source(query, post=post, output='cookie')

        # Full-text search, moderated results only, first 100 hits.
        query = urlparse.urljoin(self.base_link, self.search_link)
        post = urllib.urlencode({
            'sort': 'relevance',
            'filter': 'all',
            'moderated': 'yes',
            'offset': '0',
            'limit': '100',
            'match': 'all',
            'q': url
        })
        result = client.source(query, post=post, cookie=cookie)
        result = json.loads(result)
        links = result['files']

        # Split the query into title and "handler": a 4-digit year means a
        # movie, an SxxExx tag means an episode.
        title, hdlr = re.compile('(.+?) (\d{4}|S\d*E\d*)$').findall(url)[0]

        if hdlr.isdigit():
            type = 'movie'
            title = cleantitle.movie(title)
            # Accept the requested year plus or minus one.
            hdlr = [str(hdlr), str(int(hdlr) + 1), str(int(hdlr) - 1)]
        else:
            type = 'episode'
            title = cleantitle.tv(title)
            hdlr = [hdlr]

        for i in links:
            # Per-file filtering: any failed check raises and the file is
            # silently skipped.
            try:
                name = i['name']
                name = client.replaceHTMLCodes(name)

                info = i['video_info']
                # Movies must carry an English audio track marker.
                if type == 'movie' and not '#0:1(eng): Audio:' in info: raise Exception()

                # Strip everything from the year/episode/3D tag onward, then
                # normalise; the name must match the requested title.
                t = re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)',
                    '', name)
                if type == 'movie': t = cleantitle.movie(t)
                else: t = cleantitle.tv(t)
                if not t == title: raise Exception()

                # The last year/episode tag in the name must match one of the
                # accepted handlers.
                y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]'
                               ).findall(name)[-1]
                if not any(x == y for x in hdlr): raise Exception()

                # Tokenise the release-name tail to look for format keywords.
                fmt = re.sub(
                    '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*)(\.|\)|\]|\s)', '',
                    name)
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [x.lower() for x in fmt]

                # Reject subbed/dubbed releases and "extras" material.
                if any(
                        x.endswith(('subs', 'sub', 'dubbed', 'dub'))
                        for x in fmt):
                    raise Exception()
                if any(x in ['extras'] for x in fmt): raise Exception()

                # Derive quality from the horizontal resolution reported in
                # the video_info block.
                res = i['video_info'].replace('\n', '')
                res = re.compile(', (\d*)x\d*').findall(res)[0]
                res = int(res)
                if 1900 <= res <= 1920: quality = '1080p'
                elif 1200 <= res <= 1280: quality = 'HD'
                else: quality = 'SD'
                # Screener/cam keywords override the resolution-based rating.
                if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt):
                    quality = 'SCR'
                elif any(x in [
                        'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts',
                        'cam', 'ts'
                ] for x in fmt):
                    quality = 'CAM'

                # Size sanity checks (bytes -> GB): non-HD files over 2 GB
                # and anything over 5 GB are rejected.
                size = i['size']
                size = float(size) / 1073741824
                if int(size) > 2 and not quality in ['1080p', 'HD']:
                    raise Exception()
                if int(size) > 5: raise Exception()

                # Build the human-readable info string: size, optional 3D,
                # video codec, audio codec and channel layout.
                info = i['video_info'].replace('\n', '')
                v = re.compile('Video: (.+?),').findall(info)[0]
                a = re.compile('Audio: (.+?), .+?, (.+?),').findall(
                    info)[0]
                if '3d' in fmt: q = ' | 3D'
                else: q = ''
                info = '%.2f GB%s | %s | %s | %s' % (size, q, v, a[0], a[1])
                info = re.sub('\(.+?\)', '', info)
                info = info.replace('stereo', '2.0')
                info = ' '.join(info.split())

                url = i['url_pls']
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                sources.append({
                    'source': 'Furk',
                    'quality': quality,
                    'provider': 'Furk',
                    'url': url,
                    'info': info
                })
            except:
                pass

        # Drop CAM/SCR entries unless they are all we found.
        if not all(i['quality'] in ['CAM', 'SCR'] for i in sources):
            sources = [
                i for i in sources if not i['quality'] in ['CAM', 'SCR']
            ]

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Query the DirectDL JSON API for a "Title SxxExx" string and collect
    # single-link results hosted on recognised HD hosters.
    # Returns [] on any failure.
    try:
        sources = []

        if url == None:
            return sources

        endpoint = base64.urlsafe_b64decode(self.search_link) + urllib.quote_plus(url)
        endpoint = urlparse.urljoin(self.base_link, endpoint)
        payload = json.loads(client.source(endpoint))

        # Separate the show title from its SxxExx episode tag.
        show_title, ep_tag = re.compile("(.+?) (S\d*E\d*)$").findall(url)[0]
        show_title = cleantitle.tv(show_title)
        accepted = [ep_tag]

        # Map the API quality labels onto the add-on's quality buckets;
        # anything unrecognised counts as SD.
        quality_map = {"WEBDL1080P": "1080p", "720P": "HD", "WEBDL": "HD"}

        pending = []
        for item in payload:
            # Any failed check or missing key silently skips the entry.
            try:
                if cleantitle.tv(client.replaceHTMLCodes(item["showName"])) != show_title:
                    raise Exception()

                # The last year/episode tag in the release name must match.
                tag = re.compile("[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]").findall(item["release"])[-1].upper()
                if not any(tag == x for x in accepted):
                    raise Exception()

                quality = quality_map.get(item["quality"], "SD")

                # Size is reported in MB; show it in GB.
                info = "%.2f GB" % (float(item["size"]) / 1024)

                for key in item["links"].keys():
                    pending.append({"url": item["links"][key], "quality": quality, "info": info})
            except:
                pass

        for item in pending:
            try:
                link = item["url"]
                # Only entries that resolve to exactly one hoster link.
                if len(link) > 1:
                    raise Exception()
                link = link[0]

                host = (urlparse.urlparse(link).netloc).replace("www.", "").rsplit(".", 1)[0].lower()
                if host not in hosthdDict:
                    raise Exception()

                sources.append({"source": host, "quality": item["quality"],
                                "provider": "DirectDL", "url": link, "info": item["info"]})
            except:
                pass

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Translate a 'Title SxxExx' query into DirectDL source entries: hit the
    # JSON search endpoint, keep entries whose show name and episode tag
    # match, then keep only single-link results on recognised HD hosters.
    # Returns [] on any failure.
    try:
        sources = []

        if url == None: return sources

        query = base64.urlsafe_b64decode(
            self.search_link) + urllib.quote_plus(url)
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = json.loads(result)

        # Split off the SxxExx tag and normalise the show title.
        title, hdlr = re.compile('(.+?) (S\d*E\d*)$').findall(url)[0]
        title = cleantitle.tv(title)
        hdlr = [hdlr]

        links = []

        for i in result:
            # Any failed check or missing key silently skips the entry.
            try:
                # The cleaned show name must match the requested title.
                t = i['showName']
                t = client.replaceHTMLCodes(t)
                t = cleantitle.tv(t)
                if not t == title: raise Exception()

                # The last year/episode tag in the release name must match.
                y = i['release']
                y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]'
                               ).findall(y)[-1]
                y = y.upper()
                if not any(x == y for x in hdlr): raise Exception()

                # Map the API quality label onto the add-on's buckets.
                quality = i['quality']
                if quality == 'WEBDL1080P': quality = '1080p'
                elif quality in ['720P', 'WEBDL']: quality = 'HD'
                else: quality = 'SD'

                # Size is reported in MB; show it in GB.
                size = i['size']
                size = float(size) / 1024
                info = '%.2f GB' % size

                url = i['links']
                for x in url.keys():
                    links.append({
                        'url': url[x],
                        'quality': quality,
                        'info': info
                    })
            except:
                pass

        for i in links:
            try:
                url = i['url']
                # Only entries that resolve to exactly one hoster link.
                if len(url) > 1: raise Exception()
                url = url[0]

                host = (urlparse.urlparse(url).netloc).replace(
                    'www.', '').rsplit('.', 1)[0].lower()
                if not host in hosthdDict: raise Exception()

                sources.append({
                    'source': host,
                    'quality': i['quality'],
                    'provider': 'DirectDL',
                    'url': url,
                    'info': i['info']
                })
            except:
                pass

        return sources
    except:
        return sources