def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Scrape season/episode listings from a show page and append them to `list`.

    `url`, `title` and `name` arrive URL-quoted and are decoded first.
    For type 'tv_seasons' every '<br><br><b>NNx' marker on the page yields
    one season entry; for 'tv_episodes' every "<br><b>SxNN - <a ...>" row
    yields one episode entry pointing at the linked page.
    """
    import urllib
    import re
    from entertainment.net import Net

    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name).lower()

    net = Net(cached=False)
    net.set_cookies(self.cookie_file)
    content = net.http_GET(url).content

    if type == 'tv_seasons':
        for seasonnumber in re.compile('<br><br><b>(.+?)x').findall(content):
            item_title = 'Season ' + seasonnumber
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title,
                            item_id, 'tv_episodes', url=url, name=name,
                            season=seasonnumber)
    elif type == 'tv_episodes':
        # The regex must be built with the caller's (unpadded) season number.
        episode_re = re.compile("<br><b>" + season +
                                "x(.+?)\s-\s<a style=.+?color.+?\shref='/(.+?)'>(.+?)</a>")
        # Zero-pad the season once for the item id; the original recomputed
        # this (idempotently) on every loop iteration.
        season = "0%s" % season if len(season) < 2 else season
        for item_v_id_2, ep_path, item_title in episode_re.findall(content):
            item_url = self.base_url + ep_path
            # Normalise the scraped episode number ('05' -> '5'); the
            # original zero-padded it first only to strip the pad via int().
            item_v_id_2 = str(int(item_v_id_2))
            item_id = common.CreateIdFromString(name + '_season_' + season +
                                                '_episode_' + item_v_id_2)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title,
                            item_id, type, url=item_url, name=name,
                            season=season, episode=item_v_id_2)
def SearchContent(self, search_key, search_keywords, type, year):
    """Search the site's index.php for a movie/TV title.

    Returns the `re` match object of the first search result whose
    normalised title equals `search_keywords` (plus " (year)" when a real
    year is supplied), or None when nothing matches.
    """
    from entertainment.net import Net
    # `do_no_cache_keywords_list` is defined elsewhere in this module.
    net = Net(do_not_cache_if_any=do_no_cache_keywords_list)
    if self.Settings().get_setting('proxy') == "true":
        # Route the request through the user's configured SOCKS5 proxy.
        import socks
        (proxy, port) = self.get_proxy()
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, proxy, port)
        net.set_socket_class(socks.socksocket)
    custom_url = self.get_url()
    import re
    import urllib
    # 'search_section': '1' = movies, '2' = TV, as the site expects.
    search_dict = {
        'key': search_key,
        'search_keywords': search_keywords,
        'search_section': '1' if type == 'movies' else '2',
        'year': year
    }
    search_for_url = custom_url + 'index.php?' + urllib.urlencode(search_dict)
    from entertainment import odict
    # Cache key: same query but with the (per-user) API key blanked and the
    # parameters sorted, so identical searches share one cache entry.
    search_dict_for_cache = odict.odict(search_dict)
    search_dict_for_cache.update({'key': ''})
    search_dict_for_cache.sort(key=lambda x: x[0].lower())
    search_for_url_for_cache = custom_url + 'index.php?' + urllib.urlencode(search_dict_for_cache)
    search_results = common.unescape(
        common.str_conv(
            net.http_GET(search_for_url,
                         url_for_cache=search_for_url_for_cache).content))
    search_content = None
    for search_item in re.finditer(
            r"<div class=\"index_item.+?\"><a href=\"(.+?)\" title=\"Watch (.+?)\"",
            search_results):
        searchitem = search_item.group(2)
        if year == '0' or year == '':
            # No year requested: strip any "(NNNN)" suffix before comparing.
            searchitem = re.sub(' \([0-9]+\)', '', searchitem)
        if common.CreateIdFromString(
                searchitem) == common.CreateIdFromString(
                search_keywords + (' (' + year + ')' if year != '0' and year != '' else '')):
            search_content = search_item
            break
    return search_content
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Resolve a show on tvrage.com and append its seasons/episodes to `list`.

    Locates the show page via a Google site-search first, falling back to
    tvrage's own search, then scrapes the show's /episode_list pages.
    Episodes with an air date after today are not listed.
    """
    import urllib
    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    from entertainment.net import Net
    net = Net(cached=False)
    import re
    show_url = self.GoogleSearchByTitleReturnFirstResultOnlyIfValid(
        'tvrage.com', name, 'shows', item_count=2,
        title_extrctr=['(.+?) tv show', '(.+?) \- tvrage'], exact_match=True)
    if show_url == '':
        # Fallback: scrape tvrage's own search results page.
        tv_url = 'http://www.tvrage.com/search.php?search=%s&searchin=2&button=Go' % (name.lower().replace(' ', '+'))
        html = net.http_GET(tv_url).content
        # NOTE(review): r is None when the search yields no hit, so
        # r.group(1) would raise AttributeError here — confirm upstream
        # guarantees a result, or guard.
        r = re.search(r'<h2><a href="(.+?)">(.+?)</a> <img src=\'.+?\' /> </h2>\s*</dt>\s*<dd class="img"> <a href="/(.+?)">', html)
        show_url = 'http://www.tvrage.com' + r.group(1)
    item_url = show_url + '/episode_list'
    import datetime
    todays_date = datetime.date.today()
    content = net.http_GET(item_url).content
    if type == 'tv_seasons':
        # Each '>S-N<' marker on the episode-list page is one season.
        match = re.compile('>S-(.+?)<').findall(content)
        for seasonnumber in match:
            item_url = item_url  # no-op, kept from original
            item_title = 'Season ' + seasonnumber
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title,
                            item_id, 'tv_episodes', url=item_url, name=name,
                            season=seasonnumber)
    elif type == 'tv_episodes':
        # For episodes the caller's `url` is the show page; append /<season>.
        new_url = url + '/' + season
        content2 = net.http_GET(new_url).content
        match = re.compile("<td width='40' align='center'><a href='(.+?)' title='.+?'>.+?x(.+?)</i></a></td>\s*<td width='80' align='center'>(.+?)</td>\s*<td style='padding-left: 6px;'> <a href='.+?/([0-9]*)'>(.+?)</a> </td>", re.DOTALL).findall(content2)
        for item_url, item_v_id_2, item_date, fixscrape, item_title in match:
            # Normalise the episode number ('05' -> '5').
            item_v_id_2 = str(int(item_v_id_2))
            item_fmtd_air_date = self.get_formated_date(item_date)
            if item_fmtd_air_date.date() > todays_date:
                # Episodes are listed chronologically; stop at the future.
                break
            item_id = common.CreateIdFromString(name + '_season_' + season +
                                                '_episode_' + item_v_id_2)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title,
                            item_id, type, url=item_url, name=name,
                            season=season, episode=item_v_id_2)
def GetFileHosts(self, id, other_names, region, language, list, lock, message_queue):
    """Search streamlive.to channels for `id` and add matching live links.

    `id` is an underscore-separated channel id; it is converted to a
    '+'-joined search term.  Results are filtered by the caller's `region`
    and `language` lists (an empty list means no filtering; 'other' in
    `region` disables the corresponding filter).
    """
    search_term = id.replace('_', '+')
    from entertainment.net import Net
    net = Net()
    content = net.http_GET(self.base_url + 'channels/?q=' + search_term).content
    if '+' in search_term:
        # Also try with the first separator removed (e.g. "sky+sports" ->
        # "skysports") and merge both result pages.
        content += net.http_GET(self.base_url + 'channels/?q=' +
                                search_term.replace('+', '', 1)).content
    import re
    for item in re.finditer(
            '(?s)(<a href="https://www.streamlive.to/view.+?</li>)', content):
        item = item.group(1)
        item_url = re.search('<a href="(.+?)"', item).group(1)
        item_img = re.search('src="(.+?)"', item).group(1)
        item_title = re.search('<strong>(.+?)</strong>', item,
                               re.DOTALL).group(1).strip()
        # Language is optional in the listing markup.
        item_language = re.search(
            '<a href="https://www.streamlive.to/channels\?lang=[0-9]+">(.+?)</a>',
            item)
        if item_language:
            item_language = item_language.group(1)
        else:
            item_language = ''
        if language:
            # Map the site's language label to our canonical language key.
            ilive_language = self.ilive_language_to_language.get(
                common.CreateIdFromString(item_language), '')
            if ilive_language not in language and 'other' not in region:
                continue
        if region:
            ilive_region = self.ilive_language_to_region.get(
                common.CreateIdFromString(item_language), '')
            if ilive_region not in region and ilive_region != 'all' and 'other' not in region:
                continue
        self.AddLiveLink(list, item_title, item_url,
                         language=self.ilive_language_to_language.get(
                             common.CreateIdFromString(item_language), '').title(),
                         region=self.ilive_language_to_region.get(
                             common.CreateIdFromString(item_language), '').title(),
                         img=item_img)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search movie4u's RSS feed for this movie/episode and hand every
    exactly-matching result URL to self.GetFileHosts.

    Movies match on normalised name + year; episodes match on normalised
    name + two-digit season and episode numbers.
    """
    import re
    from entertainment.net import Net
    net = Net()
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    # Zero-pad season/episode to two digits for the "sXXeYY" search term
    # and for comparing against the feed's SXX/EYY fields.
    season_pull = "0%s" % season if len(season) < 2 else season
    episode_pull = "0%s" % episode if len(episode) < 2 else episode
    tv_url = 'http://movie4u.org/rss/search/%s s%se%s' % (
        name.replace(' ', '%20'), season_pull, episode_pull)
    tv_url = tv_url.replace(' ', '%20')
    movie_url = 'http://movie4u.org/rss/search/%s %s' % (
        name.replace(' ', '%20'), year)
    movie_url = movie_url.replace(' ', '%20')
    name_lower = common.CreateIdFromString(name)
    if type == 'movies':
        html = net.http_GET(movie_url).content
        for item in re.finditer(
                r"<title>(.+?) \((.+?)\)</title>\s*<link>(.+?)</link>\s*<category>Movies</category>",
                html):
            item_url = item.group(3)
            item_name = common.CreateIdFromString(item.group(1)).replace('-', ' ')
            item_year = item.group(2)
            if item_name == name_lower and item_year == year:
                self.GetFileHosts(item_url, list, lock, message_queue)
    elif type == 'tv_episodes':
        html = net.http_GET(tv_url).content
        for item in re.finditer(
                r"<title>(.+?) S(.+?) E(.+?)</title>\s*<link>(.+?)</link>\s*<category>TV Episodes</category>",
                html):
            item_url = item.group(4)
            item_name = common.CreateIdFromString(item.group(1))
            # season_pull / episode_pull are loop-invariant; the original
            # recomputed them identically on every iteration.
            if item_name == name_lower and item.group(2) == season_pull and item.group(3) == episode_pull:
                self.GetFileHosts(item_url, list, lock, message_queue)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Find this movie/episode on ororo.tv and hand matching page URLs to
    self.GetFileHosts.

    Uses a mobile-Safari user agent and the site login (self.login) plus a
    cookie file to access the listings.
    """
    if not os.path.exists(common.cookies_path):
        os.makedirs(common.cookies_path)
    import re
    from entertainment.net import Net
    net = Net(
        cached=False,
        user_agent=
        'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25'
    )
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    if type == 'movies':
        self.login()
        url = 'http://ororo.tv/nl/movies'
        html = net.http_GET(url).content
        net.save_cookies(self.cookie_file)
        name_lower = common.CreateIdFromString(name)
        r = '<span class=\'value\'>(\d{4}).*?href="([^"]+)[^>]+>([^<]+)'
        match = re.compile(r, re.DOTALL).findall(html)
        for item_year, item_url, item_title in match:
            item_title = item_title.lower()
            # NOTE(review): substring test of a raw lowered title against a
            # CreateIdFromString() id looks inverted/lossy — confirm this
            # ever matches; kept as-is to preserve behavior.
            if item_title in name_lower:
                self.GetFileHosts(item_url, list, lock, message_queue)
    elif type == 'tv_episodes':
        self.login()
        # ororo show slugs are the normalised name with '-' separators.
        name_lower = common.CreateIdFromString(name).replace('_', '-')
        title_url = 'http://ororo.tv/en/shows/' + name_lower
        net.set_cookies(self.cookie_file)
        html2 = net.http_GET(title_url).content
        net.save_cookies(self.cookie_file)
        # Episode anchors carry a "<season>-<episode>" fragment id.
        r = '%s-%s' % (season, episode)
        match = re.compile(
            'data-href="(.+?)".+?class="episode" href="#(.+?)">').findall(
            html2)
        for item_url, passer in match:
            item_url = 'http://ororo.tv/' + item_url
            if r in passer:
                self.GetFileHosts(item_url, list, lock, message_queue)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Parse a plain-text episode listing page into season/episode entries.

    For 'tv_seasons' each distinct leading season number found on the page
    becomes one season entry (re-using the page URL).  For 'tv_episodes'
    each "<season> <episode> <title...>" line becomes one episode entry
    whose URL packs page|||||season|||||episode for the host resolver.
    """
    import re
    from entertainment.net import Net

    page = Net().http_GET(url).content.replace('×', ' ').replace('–', '-')
    page = common.CleanText(page, True, True)

    if type == 'tv_seasons':
        seen = []
        for hit in re.finditer('([0-9]+) [0-9]+ .+?<(?:br|/p)', page):
            number = hit.group(1)
            if number in seen:
                continue
            seen.append(number)
            label = 'Season ' + number
            entry_id = common.CreateIdFromString(title + ' ' + label)
            self.AddContent(list, indexer, common.mode_Content, label,
                            entry_id, 'tv_episodes', url=url, name=name,
                            year=year, season=number)
    elif type == 'tv_episodes':
        for hit in re.finditer(season + ' ([0-9]+) (.*)', page):
            ep_number = hit.group(1)
            # Keep only the text before the first ' - ' separator.
            ep_title = hit.group(2).split(' - ')[0]
            ep_id = common.CreateIdFromString(name + '_' + year + '_season_' +
                                              season + '_episode_' + ep_number)
            self.AddContent(list, indexer, common.mode_File_Hosts, ep_title,
                            ep_id, type,
                            url=url + '|||||' + season + '|||||' + ep_number,
                            name=name, year=year, season=season,
                            episode=ep_number)
def GetFileHosts(self, id, other_names, region, language, list, lock, message_queue):
    """Find a filmon.com channel matching `id` (or any of `other_names`)
    and add one live link per quality (SD/HD).

    Logs in first when user/pwd settings are present.  The channel list is
    parsed by splitting the page on the raw '{"id":' JSON fragments rather
    than with a JSON parser.
    """
    search_term = id
    from entertainment.net import Net
    net = Net(cached=False)
    user = self.Settings().get_setting('user')
    pwd = self.Settings().get_setting('pwd')
    if user and pwd:
        content = net.http_POST('http://www.filmon.com/user/login',
                                {'login': user, 'password': pwd, 'remember': '1'},
                                headers={'Referer': self.base_url}).content
        net.save_cookies(self.cookie_file)
    content = net.http_GET(self.base_url).content.encode("utf-8")
    # Each channel record in the page starts with '{"id":'.
    link = content.split('{"id":')
    import re  # NOTE(review): imported but unused in this method
    for p in link:
        if '"filmon_' in p:
            title = p.split('"title":"')[1]
            ITEM_TITLE = title.split('"')[0]
            p_id = common.CreateIdFromString(
                common.CleanTextForSearch(ITEM_TITLE, strip=True))
            if id == p_id or p_id in other_names:
                # The numeric channel id is everything before the first ','.
                channel_id = p.split(',')[0]
                res = ['SD', 'HD']
                for quality in res:
                    # Encode the quality into the link id for the resolver.
                    channel_id_with_quality = channel_id + '__' + quality
                    self.AddLiveLink(list, ITEM_TITLE,
                                     channel_id_with_quality,
                                     language=language.title(),
                                     host='FILMON', quality=quality)
                # Only the first matching channel is used.
                break
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Scrape an iceplayer-based show page for seasons/episodes.

    The stored `url` is rewritten onto the currently configured mirror
    (self.get_url()) before fetching.  Episode links are rebuilt as direct
    membersonly/com_iceplayer video.php URLs.
    """
    import urllib
    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    custom_url = self.get_url()
    import re
    new_url = url
    if not new_url.startswith(custom_url):
        # Re-point the stored link at the configured mirror's host.
        new_url = re.sub("http\://.*?/", custom_url, url)
    from entertainment.net import Net
    net = Net(cached=False)
    headers = {
        'Referer': self.get_url(),
        'Accept': self.ACCEPT,
        'User-Agent': self.USER_AGENT
    }
    content = net.http_GET(new_url, headers=headers).content
    if type == 'tv_seasons':
        for item in re.finditer('</a>Season ([0-9]+)', content):
            item_url = new_url
            item_v_id = item.group(1)
            item_title = 'Season ' + item_v_id
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title,
                            item_id, 'tv_episodes', url=item_url, name=name,
                            year=year, season=item_v_id)
    elif type == 'tv_episodes':
        # NOTE(review): raises AttributeError if the season heading is
        # missing from the page (re.search returns None).
        season_content = re.search('>Season ' + season + ' (.*)',
                                   content).group(1)
        for item in re.finditer(r"<a href=/ip\.php\?v=(.+?)>" + season + "x([0-9]+) (.+?)</a>", season_content):
            item_v_id = item.group(1)
            # Normalise the episode number ('05' -> '5').
            item_v_id_2 = str(int(item.group(2)))
            item_title = item.group(3)
            # NOTE(review): no separator before 'img=' — looks like a
            # missing '&'; kept as-is, confirm against the site.
            item_url = custom_url + 'membersonly/components/com_iceplayer/video.php?h=331&w=719&vid=' + item_v_id + 'img='
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + season + '_episode_' + item_v_id_2)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title,
                            item_id, type, url=item_url, name=name, year=year,
                            season=season, episode=item_v_id_2)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search movie1k for this movie/episode and hand every result URL to
    self.GetFileHosts.

    NOTE: the movies branch forwards every search hit without filtering by
    name or year — the year check was already commented out in the
    original; behavior is preserved here.
    """
    import re
    from entertainment.net import Net
    net = Net()
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    # Movies:   http://www.movie1k.ws/?s=Ice+Soldiers
    # TV shows: http://www.movie1k.ws/?s=<name>+Season+<s>+Episode+<e>
    tv_url = 'http://www.movie1k.ws/?s=%s+Season+%s+Episode+%s' % (
        name.replace(' ', '+'), season, episode)
    movie_url = 'http://www.movie1k.ws/?s=%s' % (name.replace(' ', '+'))
    if type == 'movies':
        html = net.http_GET(movie_url).content
        for item in re.finditer(
                r'<a href="(.+?)" rel="bookmark">(.+?)</a>', html):
            self.GetFileHosts(item.group(1), list, lock, message_queue)
    elif type == 'tv_episodes':
        html = net.http_GET(tv_url).content
        for item in re.finditer(
                r'<a href="(.+?)" rel="bookmark">.+? Season .+? Episode .+?</a>',
                html):
            self.GetFileHosts(item.group(1), list, lock, message_queue)
def GetFileHosts(self, id, other_names, region, language, list, lock, message_queue):
    """Find livestation channels whose normalised title starts with `id`
    and add them as live links, filtered by the caller's `language` list.

    Tries self.base_url first; on any fetch failure it falls back to
    self.base_url_1, which uses a different page layout (and therefore a
    different regex and link/language extraction path).
    """
    search_term = id.replace('_', ' ')  # NOTE(review): unused below
    from entertainment.net import Net
    net = Net(cached=False)
    using_base_url_1 = False
    # NOTE(review): bare except — any error (not just network) triggers
    # the fallback site.
    try:
        content = net.http_GET(self.base_url).content
        regex = '<a href="/(en/.+?/)([a-z]{2})">(.+?)</a>'
    except:
        content = net.http_GET(self.base_url_1).content
        regex = '<a href="/en/(.+?)" class="(.+?)" data\-action="(.+?)".+?data\-label="(.+?)"'
        using_base_url_1 = True
    import re
    for item in re.finditer(regex, content):
        # Title lives in a different capture group per layout.
        item_title = item.group(4) if using_base_url_1 else item.group(3)
        item_title_id = common.CreateIdFromString(item_title)
        if item_title_id.startswith(id):
            link_pt_1 = item.group(1)
            # Fallback layout: language hint is class+action lowered;
            # primary layout: two-letter language code from the URL.
            link_pt_lang = (item.group(2) + item.group(3)).lower(
            ) if using_base_url_1 else item.group(2)
            if using_base_url_1 and 'on_demand' in link_pt_lang:
                continue
            if using_base_url_1:
                # NOTE(review): built on base_url even though the page came
                # from base_url_1 — confirm this is intentional.
                item_url = self.base_url + link_pt_1
            else:
                item_url = self.base_url + link_pt_1 + link_pt_lang
            item_language = None
            if using_base_url_1:
                for key, value in self.livestation_language_to_language.items(
                ):
                    if key in link_pt_lang:
                        item_language = value
                if not item_language:
                    item_language = 'english'
            else:
                item_language = self.livestation_language_to_language.get(
                    link_pt_lang, '')
            if language and item_language not in language and 'other' not in region:
                continue
            self.AddLiveLink(list, item_title, item_url,
                             language=item_language.title())
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search the site for `title` and hand matching result URLs to
    self.GetFileHosts.

    A search result matches when its "Name (Year)" id equals the title id,
    or its bare name id equals the name id.  Iteration stops at the first
    non-matching result.
    """
    # Fix: this method used `re` without importing it (every sibling method
    # imports it locally); a local import is harmless even if a
    # module-level one also exists.
    import re
    import urllib
    from entertainment.net import Net
    net = Net()
    title = self.CleanTextForSearch(title)
    title_id = common.CreateIdFromString(title)
    name = self.CleanTextForSearch(name)
    name_id = common.CreateIdFromString(name)
    search_page_url = self.base_url + '?s=' + urllib.quote_plus(
        title) + '&submit=Search'
    content = net.http_GET(search_page_url).content
    item_re = r'(?s)<div class=[\'"]{1}inner[\'"]{1}>.+?<a href=[\'"]{1}(.+?)[\'"]{1}.+?<img src=[\'"]{1}(.+?)[\'"]{1} alt=[\'"]{1}(.+?)[\'"]{1}.+?<p>(.+?)<'
    for item in re.finditer(item_re, content):
        item_url = item.group(1)
        item_alt = item.group(3)
        # Name is the alt text with any "(YYYY)..." tail stripped.
        item_name = re.sub('\([0-9]+\).*', '', item_alt)
        item_year = re.search("\(([0-9]+)", item_alt)
        if item_year:
            item_year = item_year.group(1)
            item_title = item_name + ' (' + item_year + ')'
        else:
            item_year = ''
            item_title = item_name
        if common.CreateIdFromString(
                item_title) == title_id or common.CreateIdFromString(
                item_name) == name_id:
            self.GetFileHosts(item_url, list, lock, message_queue)
        else:
            # NOTE(review): stops at the first non-match (assumes exact
            # matches are listed first) — preserved from the original.
            break
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Scrape a show page ('titles-link' layout) for seasons/episodes.

    Episode metadata comes from <meta itemprop=...> tags; episodes whose
    datePublished is after today are skipped (listing assumed
    chronological, so iteration stops at the first future one).
    """
    import urllib
    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    from entertainment.net import Net
    net = Net(cached=False)
    import re
    import datetime
    todays_date = datetime.date.today()
    content = net.http_GET(url).content
    if type == 'tv_seasons':
        match = re.compile('<a class="titles-link" href="(.+?)"><div class="titles"><h3>Season (.+?)</h3>').findall(content)
        for url, seasonnumber in match:
            item_url = self.base_url + url
            item_title = 'Season ' + seasonnumber
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title,
                            item_id, 'tv_episodes', url=item_url, name=name,
                            season=seasonnumber)
    elif type == 'tv_episodes':
        new_url = url
        content2 = net.http_GET(new_url).content
        # One chunk per episode card.
        match = re.compile('div class="titles"><h4(.+?)</h3><ul class="additional-stats">').findall(content2)
        for item in match:
            item_url = self.base_url + re.search('<a href="([^"]+?)">', item).group(1)
            item_v_id_2 = re.search('<meta content="([^"]+?)" itemprop="episodeNumber">', item).group(1)
            item_title = re.search('<meta content="([^"]+?)" itemprop="name">', item).group(1)
            # Only the date part (before 'T') of the ISO timestamp.
            item_date = re.search('<meta content="([0-9\-]+?)T[^"]+?" itemprop="datePublished">', item).group(1)
            item_fmtd_air_date = self.get_formated_date(item_date, "%Y-%m-%d")
            if item_fmtd_air_date.date() > todays_date:
                break
            item_id = common.CreateIdFromString(name + '_season_' + season + '_episode_' + item_v_id_2)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title,
                            item_id, type, url=item_url, name=name,
                            season=season, episode=item_v_id_2)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''): if section == 'popular': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/movies/popular/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' #print html.encode('utf-8') #if total_pages == '': #r= '</a><a href="/movies/.+?" >(.+?)</a> </div>' #total_pages = re.compile(r).findall(html)[0] self.AddInfo(list, indexer, 'popular', '', type, str(page), total_pages) match=re.compile('data-url="(.+?)">.+?class="titles"><h3>(.+?)<span class="year">(.+?)</span>').findall(html) for url, name, year in match: name = self.CleanTextForSearch(name.strip()) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'populartv': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/shows/popular/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' #print html.encode('utf-8') #if total_pages == '': #r= '</a><a href="/movies/.+?" 
>(.+?)</a> </div>' #total_pages = re.compile(r).findall(html)[0] self.AddInfo(list, indexer, 'populartv', '', type, str(page), total_pages) match=re.compile('data-url="(.+?)">.+?class="titles"><h3>(.+?)<').findall(html) for url, name in match: name = self.CleanTextForSearch(name.strip()) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name) elif section == 'trending': from entertainment.net import Net net = Net() import urllib import re import json response = net.http_GET(url).content match = json.loads(response) ''' Pagination Code Start ''' num_items_on_a_page = 25 if page == '': page = '1' total_items = len(match) total_pages = str ( ( total_items / num_items_on_a_page ) + ( 1 if total_items % num_items_on_a_page >= 1 else 0) ) self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order) start_index = ( int(page) - 1 ) * num_items_on_a_page match = match[ start_index : start_index + num_items_on_a_page ] ''' Pagination Code End ''' for movies in match: name = movies['title'] if name: name = name.encode('utf8') year = str(movies['year']) self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'trendingtv': from entertainment.net import Net net = Net() import urllib import re import json response = net.http_GET(url).content match = json.loads(response) ''' Pagination Code Start ''' num_items_on_a_page = 25 if page == '': page = '1' total_items = len(match) total_pages = str ( ( total_items / num_items_on_a_page ) + ( 1 if total_items % num_items_on_a_page >= 1 else 0) ) self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order) start_index = ( int(page) - 1 ) * num_items_on_a_page match = match[ start_index : start_index + num_items_on_a_page ] ''' Pagination Code End ''' for shows in 
match: name = shows['title'] if name: name = name.encode('utf8') year = str(shows['year']) name = self.CleanTextForSearch(name) url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_Content, name+ ' (' + year +')', '', 'tv_seasons', url=url, name=name) elif section == 'watched': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/movies/watched?page='+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' #print html.encode('utf-8') #if total_pages == '': #r= '</a><a href="/movies/.+?" >(.+?)</a> </div>' #total_pages = re.compile(r).findall(html)[0] self.AddInfo(list, indexer, 'watched', '', type, str(page), total_pages) match=re.compile('data-url="(.+?)">.+?<h3>(.+?)<span class="year">(.+?)<').findall(html) print match for url, name, year in match: name = self.CleanTextForSearch(name.strip()) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'watchedtv': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/shows/watchers/daily/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' self.AddInfo(list, indexer, 'watchedtv', '', type, str(page), total_pages) match=re.compile('data-url="(.+?)">.+?class="titles"><h3>(.+?)<').findall(html) for url, name in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, 
name=name) elif section == 'watchedeps': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/shows/watchers/daily/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' self.AddInfo(list, indexer, 'watchedeps', '', type, str(page), total_pages) match=re.compile('<div class="title-overflow"></div>.+?<a href=".+?">(.+?)</a>.+?<div class="title-overflow"></div>.+?<a href="(.+?)">.+?<span>(.+?)x(.+?)</span>',re.DOTALL).findall(html) for name, url, Sea_num, eps_num in match: name = self.CleanTextForSearch(name) url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name season_pull = "0%s"%Sea_num if len(Sea_num)<2 else Sea_num episode_pull = "0%s"%eps_num if len(eps_num)<2 else eps_num sea_eps = 'S'+season_pull+'E'+episode_pull year= '0' item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num) self.AddContent(list, indexer, common.mode_File_Hosts, name +'[COLOR royalblue] ('+sea_eps+')[/COLOR]', item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num) elif section == 'played': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/movies/plays/weekly/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' #print html.encode('utf-8') #if total_pages == '': #r= '</a><a href="/movies/.+?" >(.+?)</a> </div>' #total_pages = re.compile(r).findall(html)[0] self.AddInfo(list, indexer, 'played', '', type, str(page), total_pages) match=re.compile('<a class="title" href="(.+?)">(.+?) 
\((.+?)\)</a>').findall(html) for url, name, year in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'playedtv': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/shows/plays/daily'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' self.AddInfo(list, indexer, 'playedtv', '', type, str(page), total_pages) match=re.compile('data-url="(.+?)">.+?class="titles"><h3>(.+?)<').findall(html) for url, name in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name) elif section == 'playedeps': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/shows/episodes/plays/daily/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' self.AddInfo(list, indexer, 'playedeps', '', type, str(page), total_pages) match=re.compile('<div class="title-overflow"></div>.+?<a href=".+?">(.+?)</a>.+?<div class="title-overflow"></div>.+?<a href="(.+?)">.+?<span>(.+?)x(.+?)</span>',re.DOTALL).findall(html) for name, url, Sea_num, eps_num in match: name = self.CleanTextForSearch(name) url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name season_pull = "0%s"%Sea_num if len(Sea_num)<2 else Sea_num episode_pull = "0%s"%eps_num if len(eps_num)<2 else eps_num sea_eps = 'S'+season_pull+'E'+episode_pull year= '0' item_id = 
common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num) self.AddContent(list, indexer, common.mode_File_Hosts, name +'[COLOR royalblue] ('+sea_eps+')[/COLOR]', item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num) elif section == 'release': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/movies/released/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' self.AddInfo(list, indexer, 'release', '', type, str(page), total_pages) match=re.compile('<a class="title" href="(.+?)">(.+?) \((.+?)\)</a>').findall(html) for url, name, year in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'trakt_title': new_url = url from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content match=re.compile('<img class="poster-art" alt="(.+?) \((.+?)\)"').findall(html) if 'No items in this list yet!' 
in html: self.AddContent(list, indexer, common.mode_File_Hosts, 'No items in this list yet!', '', type, '', '', '') ''' Pagination Code Start ''' num_items_on_a_page = 25 if page == '': page = '1' total_items = len(match) total_pages = str ( ( total_items / num_items_on_a_page ) + ( 1 if total_items % num_items_on_a_page >= 1 else 0) ) self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order) start_index = ( int(page) - 1 ) * num_items_on_a_page match = match[ start_index : start_index + num_items_on_a_page ] ''' Pagination Code End ''' for name, year in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'trakt_official': new_url = url from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content match=re.compile('data-title="(.+?) 
\((.+?)\)"').findall(html) ''' Pagination Code Start ''' num_items_on_a_page = 25 if page == '': page = '1' total_items = len(match) total_pages = str ( ( total_items / num_items_on_a_page ) + ( 1 if total_items % num_items_on_a_page >= 1 else 0) ) self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order) start_index = ( int(page) - 1 ) * num_items_on_a_page match = match[ start_index : start_index + num_items_on_a_page ] ''' Pagination Code End ''' for name, year in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://trakt.tv' self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'trakt_personal': new_url = url from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content match=re.compile('<a class="title" href="(.+?)">(.+?) \((.+?)\)</a>').findall(html) ''' Pagination Code Start ''' num_items_on_a_page = 25 if page == '': page = '1' total_items = len(match) total_pages = str ( ( total_items / num_items_on_a_page ) + ( 1 if total_items % num_items_on_a_page >= 1 else 0) ) self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order) start_index = ( int(page) - 1 ) * num_items_on_a_page match = match[ start_index : start_index + num_items_on_a_page ] ''' Pagination Code End ''' for url2,name, year in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://trakt.tv'+url2 self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'collected': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/movies/collected/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' #print html.encode('utf-8') 
#if total_pages == '': #r= '</a><a href="/movies/.+?" >(.+?)</a> </div>' #total_pages = re.compile(r).findall(html)[0] self.AddInfo(list, indexer, 'collected', '', type, str(page), total_pages) match=re.compile('<a class="title" href="(.+?)">(.+?) \((.+?)\)</a>').findall(html) for url, name, year in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year) elif section == 'populartvgenre': section = url.replace('http://trakt.tv/shows/popular/','') new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/shows/popular/'+section+'/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content total_pages = '446' #print html.encode('utf-8') #if total_pages == '': #r= '</a><a href="/movies/.+?" 
>(.+?)</a> </div>' #total_pages = re.compile(r).findall(html)[0] self.AddInfo(list, indexer, 'populartvgenre', url, type, str(page), total_pages) match=re.compile('data-url="(.+?)">.+?class="titles"><h3>(.+?)<').findall(html) for url, name in match: name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name) elif section == 'trakt_watchlisttv': from entertainment.net import Net net = Net() import urllib import re import json response = net.http_GET(url).content match = json.loads(response) ''' Pagination Code Start ''' num_items_on_a_page = 25 if page == '': page = '1' total_items = len(match) total_pages = str ( ( total_items / num_items_on_a_page ) + ( 1 if total_items % num_items_on_a_page >= 1 else 0) ) self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order) start_index = ( int(page) - 1 ) * num_items_on_a_page match = match[ start_index : start_index + num_items_on_a_page ] ''' Pagination Code End ''' for shows in match: name = shows['title'] if name: name = name.encode('utf8') year = str(shows['year']) name = self.CleanTextForSearch(name) name = name.replace('$','s') url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name, year=year) elif section == 'calendar': new_url = url if page == '': page = '1' else: page = str( int(page) ) new_url = 'http://trakt.tv/calendars/shows/'+ page from entertainment.net import Net net = Net() import urllib import re url = urllib.unquote_plus(url) html = net.http_GET(new_url).content print html.encode('utf-8') total_pages = '446' self.AddInfo(list, indexer, 'calendar', '', type, str(page), total_pages) match=re.compile('data-url=".+?"><a href="(.+?)">.+?<div class="titles"><h4> 
(.+?) am on (.+?)</h4><h3><span class=\'main-title-sxe\'>(.+?)x(.+?)</span>(.+?)</h3>',re.DOTALL).findall(html) for url,time, network, Sea_num, eps_num, title in match: name = url.split('/shows/')[1] name = name.split('/season/')[0] name = name.replace('-',' ') name = name.title() name = self.CleanTextForSearch(name) url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name season_pull = "0%s"%Sea_num if len(Sea_num)<2 else Sea_num episode_pull = "0%s"%eps_num if len(eps_num)<2 else eps_num sea_eps = 'S'+season_pull+'E'+episode_pull year= '0' item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num) self.AddContent(list, indexer, common.mode_File_Hosts, name +'[COLOR royalblue] ('+sea_eps+')[/COLOR] Time: [COLOR red]'+time+'[/COLOR] On [COLOR red]'+network+'[/COLOR]', item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num) elif section == 'calendar2': from entertainment.net import Net net = Net() import urllib import re import json html = net.http_GET(url).content match=re.compile('"show":{"title":"(.+?)","year":(.+?),.+?"network":"(.+?)".+?"episode":{"season":(.+?),"number":(.+?),"title":"(.+?)","overview":"","url":"(.+?)".+?"first_aired_iso":"(.+?)T.+?-(.+?)"',re.DOTALL).findall(html) ''' Pagination Code Start ''' num_items_on_a_page = 25 if page == '': page = '1' total_items = len(match) total_pages = str ( ( total_items / num_items_on_a_page ) + ( 1 if total_items % num_items_on_a_page >= 1 else 0) ) self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order) start_index = ( int(page) - 1 ) * num_items_on_a_page match = match[ start_index : start_index + num_items_on_a_page ] ''' Pagination Code End ''' for name,year,network,Sea_num,eps_num,title,url,date,time in match: name = self.CleanTextForSearch(name) season_pull = "0%s"%Sea_num if len(Sea_num)<2 else Sea_num episode_pull = "0%s"%eps_num if len(eps_num)<2 else eps_num 
sea_eps = 'S'+season_pull+'E'+episode_pull network=network.replace('",','') item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num) self.AddContent(list, indexer, common.mode_File_Hosts, name +'[COLOR royalblue] ('+sea_eps+')[/COLOR] [COLOR red]'+network+'[/COLOR] Date: [COLOR red]'+date+'[/COLOR]', item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num) else: from entertainment.net import Net net = Net() import urllib import re import json response = net.http_GET(url).content match = json.loads(response) ''' Pagination Code Start ''' num_items_on_a_page = 25 if page == '': page = '1' total_items = len(match) total_pages = str ( ( total_items / num_items_on_a_page ) + ( 1 if total_items % num_items_on_a_page >= 1 else 0) ) self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order) start_index = ( int(page) - 1 ) * num_items_on_a_page match = match[ start_index : start_index + num_items_on_a_page ] ''' Pagination Code End ''' for movies in match: name = movies['title'] if name: name = name.encode('utf8') year = str(movies['year']) self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """List seasons or episodes scraped from the show page at `url`.

    tv_seasons mode emits one entry per "season-toggle" anchor found on the
    page; tv_episodes mode emits one entry per "tv_episode_item" row.
    Results are appended to `list` via self.AddContent().
    """
    import urllib
    import re
    from entertainment.net import Net

    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    custom_url = self.get_url()

    # Rewrite a stale stored url onto the currently configured domain.
    new_url = url
    if not new_url.startswith(custom_url):
        new_url = re.sub(r"http\://.*?/", custom_url, url)

    net = Net(do_not_cache_if_any=do_no_cache_keywords_list)
    if self.Settings().get_setting('proxy') == "true":
        # Route the request through the user's SOCKS5 proxy.
        import socks
        (proxy, port) = self.get_proxy()
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, proxy, port)
        net.set_socket_class(socks.socksocket)
    content = net.http_GET(new_url).content

    if type == 'tv_seasons':
        for item in re.finditer(r'<a data-id=".+?" class="season-toggle" href="(.+?)">.+? Season ([0-9]+)<', content):
            item_url = item.group(1)
            # BUGFIX: was `item_url[0] == '/'`, which raised IndexError on an
            # empty href; startswith() is safe and equivalent otherwise.
            if item_url.startswith('/'):
                item_url = item_url[1:]
            item_url = custom_url + item_url
            item_v_id = item.group(2)
            item_title = 'Season ' + item_v_id
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title, item_id,
                            'tv_episodes', url=item_url, name=name, year=year,
                            season=item_v_id)
    elif type == 'tv_episodes':
        for item in re.finditer(r'<div class="tv_episode_item"> <a href="(.+?)">E([0-9]+)\s*<span class="tv_episode_name"> - (.+?)</span>', content):
            item_url = item.group(1)
            if item_url.startswith('/'):
                item_url = item_url[1:]
            item_url = custom_url + item_url
            item_v_id = item.group(2)
            # group(3) participates in every match, so it cannot be None;
            # keep a defensive empty-string fallback anyway.
            item_title = item.group(3) or ''
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + season + '_episode_' + item_v_id)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, item_id,
                            type, url=item_url, name=name, year=year,
                            season=season, episode=item_v_id)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """List seasons or episodes for an IMDB title.

    tv_seasons mode scrapes the highest season number from the title page
    and emits one entry per season, sanity-checking (at most) the two most
    recent seasons against their own episode pages so unaired seasons are
    skipped.  tv_episodes mode lists the aired episodes of `season`.
    """
    import urllib
    import re
    import datetime
    from entertainment.net import Net

    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)

    # Rewrite a stale stored url onto the currently configured domain.
    new_url = url
    if not new_url.startswith(self.get_url()):
        new_url = re.sub(r"http\://.*?/", self.get_url(), url)

    net = Net(cached=False)
    content = net.http_GET(new_url).content

    todays_date = datetime.date.today()
    # get_formated_date() appears to yield year-1 dates for unparsable air
    # dates -- treat those as "not aired".  (TODO confirm against helper.)
    unknown_air_date = datetime.date(1, 12, 1)

    if type == 'tv_seasons':
        check_season = 0
        last_season = 0
        season_url = None
        seasons = re.search(r'<a href="/(title/.+?/episodes\?season=)([0-9]+)', content)
        if seasons:
            last_season = int(seasons.group(2))
            season_url = seasons.group(1)
        for season_num in xrange(last_season, 0, -1):
            item_v_id = str(season_num)
            item_url = self.get_url() + season_url + item_v_id
            # Only verify the two most recent seasons against their own page;
            # older seasons are assumed to have aired.
            if check_season < 2:
                check_season += 1
                item_content = net.http_GET(item_url).content
                season_item = re.search('<div>S' + item_v_id + ', Ep([0-9]+)</div>', item_content)
                if not season_item:
                    check_season -= 1
                    continue
                item_item = re.search('(?s)<div class="list_item.+?href="(.+?)".+?title="(.+?)".+?<div>S' + item_v_id + ', Ep([0-9]+)</div>.+?<div class="airdate">(.+?)</div>', item_content)
                if not item_item:
                    # BUGFIX: the detail regex can miss even when the coarse
                    # one hit; previously this raised AttributeError on None.
                    continue
                if 'unknown' in item_item.group(4).lower():
                    continue
                item_fmtd_air_date = self.get_formated_date(item_item.group(4))
                # BUGFIX: the original compared a datetime.date against the
                # string '0001-12-01', which is always False; compare against
                # a date object so unparsable air dates are really skipped.
                if item_fmtd_air_date.date() > todays_date or item_fmtd_air_date.date() == unknown_air_date:
                    continue
            item_title = 'Season ' + item_v_id
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title.strip(),
                            item_id, 'tv_episodes', url=item_url,
                            name=name.strip(), year=year, season=item_v_id)
    elif type == 'tv_episodes':
        season_item = re.search('<div>S' + season + ', Ep([0-9]+)</div>', content)
        if not season_item:
            return
        for item in re.finditer('(?s)<div class="list_item.+?href="(.+?)".+?title="(.+?)".+?<div>S' + season + ', Ep([0-9]+)</div>.+?<div class="airdate">(.+?)</div>', content):
            item_fmtd_air_date = self.get_formated_date(item.group(4))
            if self.Settings().get_setting('future') == 'false':
                # Episodes are listed in air order; stop at the first unaired one.
                if item_fmtd_air_date.date() > todays_date:
                    break
            item_url = self.get_url() + item.group(1)
            item_v_id = item.group(3)
            # .strip() always returns a string, so no None check is needed.
            item_title = item.group(2).strip()
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + season + '_episode_' + item_v_id)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title,
                            item_id, type, url=item_url, name=name.strip(),
                            year=year, season=season, episode=item_v_id)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """List seasons or episodes for a show hosted under <base>/<show_name>/.

    The site lays shows out as <base>/<show_name>/index.html (season links)
    and <base>/<show_name>/season_<n>.html (episode rows); show_name is the
    lower-cased name with spaces replaced by underscores.
    """
    import urllib
    import re
    from entertainment.net import Net

    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name).lower()
    custom_url = self.get_url()
    show_path = name.replace(' ', '_')  # name is already lower-cased above
    net = Net()

    if type == 'tv_seasons':
        # Only fetch the index page when we actually list seasons; the
        # original fetched it on the episodes path too and threw it away.
        tv_url = custom_url + '%s/index.html' % show_path
        content = net.http_GET(tv_url).content
        match = re.compile(r'<td width="99%" class="mnlcategorylist"><a href="(.+?)"><b>Season (.+?)</b></a>').findall(content)
        for href, seasonnumber in match:
            item_url = custom_url + '%s/' % show_path + href
            item_title = 'Season ' + seasonnumber
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title, item_id,
                            'tv_episodes', url=item_url, name=name,
                            season=seasonnumber)
    elif type == 'tv_episodes':
        tv_url2 = custom_url + '%s/season_%s.html' % (show_path, season)
        content2 = net.http_GET(tv_url2).content
        match = re.compile(r'<td class="episode"><a name=".+?"></a><b>.+?. (.+?)</b></td>\s*<td class="mnllinklist" align="right"><div class="right">S.+?E(.+?)&').findall(content2)
        for item_title, item_v_id_2 in match:
            item_v_id_2 = str(int(item_v_id_2))  # normalise: drop leading zeros
            item_url = tv_url2 + '?episode=' + item_v_id_2
            item_id = common.CreateIdFromString(name + '_season_' + season + '_episode_' + item_v_id_2)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title,
                            item_id, type, url=item_url, name=name,
                            season=season, episode=item_v_id_2)
def GetSection(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    # Build the section navigation for this site by walking its <select> menus.
    #   'main'        -> emit the home section plus one entry per <select>
    #                    menu found on the landing page (the menu's first
    #                    <option> text is used as the menu title).
    #   a menu title  -> (recognised because url still equals the landing
    #                    page) emit one entry per <option> of the <select>
    #                    whose title matches `section`.
    #   anything else -> delegate to ExtractContentAndAddtoList for content.
    from entertainment.net import Net
    import re
    net = Net()
    # TV shows are served from a separate base domain.
    base_url_for_match = self.base_url
    if indexer == common.indxr_TV_Shows:
        base_url_for_match = self.tv_base_url
    if section == 'main':
        if indexer == common.indxr_TV_Shows:
            self.AddSection(list, indexer, 'serie-tv-home', 'Serie-Tv Home', base_url_for_match, indexer)
        else:
            self.AddSection(list, indexer, 'film-home', 'Film Home', base_url_for_match, indexer)
        content = net.http_GET(base_url_for_match).content
        # One <select name='select...'> block per site menu; (?s) lets .+?
        # span newlines inside the block.
        for menu_item in re.finditer('(?s)<select name=[\'"]select(.+?)</select>', content):
            # The first <option> carries the menu's display title.
            menu_item_title = re.search('<option.+?>(.+?)</option>', menu_item.group(1)).group(1)
            self.AddSection(list, indexer, common.CreateIdFromString(menu_item_title), menu_item_title, base_url_for_match, indexer)
    elif url == base_url_for_match and section not in ('serie-tv-home', 'film-home'):
        content = net.http_GET(url).content
        for menu_item in re.finditer('(?s)<select name=[\'"]select(.+?)</select>', content):
            # The first option of each menu is its title, not a link; use it
            # only to decide whether this menu is the requested `section`.
            is_item_menu_title = True
            for menu_sub_item in re.finditer('<option value=[\'"](.*)[\'"]>(.+?)</option>', menu_item.group(1)):
                menu_item_title = menu_sub_item.group(2)
                if is_item_menu_title == True:
                    menu_item_title_id = common.CreateIdFromString(
                        menu_item_title)
                    if menu_item_title_id != section:
                        # Wrong menu: abandon it and try the next <select>.
                        break
                    # Right menu: fall through to listing its remaining options.
                    is_item_menu_title = False
                    continue
                # Each subsequent <option> is a browsable sub-section whose
                # value attribute is a site-relative link.
                self.AddSection(list, indexer, common.CreateIdFromString(menu_item_title), menu_item_title, self.base_url + menu_sub_item.group(1), indexer)
    else:
        # Leaf section: hand off to the actual content scraper.
        self.ExtractContentAndAddtoList(indexer, section, url, type, list, page, total_pages, sort_by, sort_order)
def GetSection(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Build the bookmarks listing.

    'main' emits the two top-level sections (Movies / TV Episodes); any
    other section reads the saved bookmarks for this addon out of the
    playback-engine database and appends one list item per row.
    """
    if section == 'main':
        self.AddSection(list, indexer, 'Movies', 'Movies', type='movies')
        self.AddSection(list, indexer, 'TV Episodes', 'TV Episodes', type='tv_episodes')
        return

    from universal import playbackengine
    player = playbackengine.Player()
    if type == 'movies':
        indexer = common.indxr_Movies
    elif type == 'tv_episodes':
        indexer = common.indxr_TV_Shows
    video_type = common.VideoType_Movies if type == 'movies' else common.VideoType_Episode

    # MySQL uses %s placeholders; the sqlite driver uses ?.
    if playbackengine.DB == 'mysql':
        sql_select = 'SELECT title, season, episode, year FROM bookmarks WHERE addon_id = %s AND video_type = %s'
    else:
        sql_select = 'SELECT title, season, episode, year FROM bookmarks WHERE addon_id = ? AND video_type = ?'

    matchedrows = []
    try:
        player._connect_to_db()
        player.dbcur.execute(sql_select, (common.addon_id, video_type))
        matchedrows = player.dbcur.fetchall()
        player._close_db()
    except Exception:
        # Best effort: an unreadable/missing bookmarks DB just yields an
        # empty listing.  (Was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.)
        pass

    for matchedrow in matchedrows:
        item = dict(matchedrow)
        temp_title = item['title'] + (' (' + item['year'] + ')' if item['year'] else '')
        if indexer == common.indxr_TV_Shows:
            # Episode ids embed season/episode so each episode is unique.
            temp_title += '_season_' + str(
                item['season']) + '_episode_' + str(item['episode'])
        item_id = common.CreateIdFromString(temp_title)  # renamed: `id` shadowed the builtin
        list.append({
            'indexer': indexer,
            'mode': common.mode_File_Hosts,
            'title': temp_title if indexer == common.indxr_Movies else '',
            'id': item_id,
            'website': 'bookmarks',
            'name': item['title'],
            'year': item['year'],
            'season': str(item['season']),
            'episode': str(item['episode']),
            'video_type': video_type,
            'type': type,
            'bookmark': 'true'
        })
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    # Log into superchillin.com with the user's credentials, search for
    # `name`, and hand every matching movie / episode page url to
    # self.GetFileHosts() for host extraction.
    #if type!= 'movies': return
    from entertainment.net import Net
    import re
    net = Net(cached=False)
    tv_user = self.Settings().get_setting('tv_user')
    tv_pwd = self.Settings().get_setting('tv_pwd')
    # Reject the placeholder/default credentials and empty settings; drop any
    # stale session cookie and tell the user to configure the addon.
    if tv_user == 'Enter your Superchillin email' or tv_pwd == 'xunity' or tv_user == '' or tv_pwd == '':
        if os.path.exists(self.cookie_file):
            try:
                os.remove(self.cookie_file)
            except:
                pass
        common.addon.show_small_popup('[B][COLOR blue]I[/B][/COLOR]stream: Superchillin', '[COLOR red]Please Enter Login & Password in Settings[/COLOR]', 7000, self.icon)
        return
    # Always start from a clean cookie jar so a fresh login is performed.
    if os.path.exists(self.cookie_file):
        try:
            os.remove(self.cookie_file)
        except:
            pass
    # Browser-like headers for the login POST (the site appears to check them).
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Connection': 'keep-alive', 'Cache-Control': 'max-age=0', 'Host': 'superchillin.com', 'Origin': 'http://superchillin.com', 'Referer': 'http://superchillin.com/login.php', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'}
    # Prime the session, log in, then persist and reload the session cookies.
    net.http_GET('http://superchillin.com/login.php')
    net.http_POST('http://superchillin.com/login2.php', {'email': str(tv_user), 'password': str(tv_pwd)}, headers, auto_read_response=False).content
    net.save_cookies(self.cookie_file)
    net.set_cookies(self.cookie_file)
    name = self.CleanTextForSearch(name)
    name = name.rstrip()
    # import urllib
    movie_url='http://superchillin.com/search.php?q=%s' %(name.replace(' ','+'))
    html = net.http_GET(movie_url).content
    # A logged-in page contains a Logout link; its absence means bad credentials.
    if not re.search(r'\"logout.php\"\>Logout\<\/a\>', html, re.I):
        common.addon.show_small_popup('[B][COLOR blue]I[/B][/COLOR]stream: Superchillin', '[COLOR red]Please Check Login & Password Are Correct[/COLOR]', 7000, self.icon)
    if type == 'movies':
        name_lower = common.CreateIdFromString(name)
        # Search results: href='/<path>'>Title</a> (Year)
        for item in re.finditer(r"href='/(.+?)'>(.+?)</a> \((.+?)\)", html):
            item_url = self.base_url + item.group(1)
            item_name = common.CreateIdFromString(item.group(2))
            item_year = item.group(3)
            #item_url = item_url+'&hd=1'
            # Exact id + year match before fetching hosts.
            if item_name == name_lower and item_year == year:
                self.GetFileHosts(item_url + '__movies', list, lock, message_queue)
    elif type == 'tv_episodes':
        name_lower = common.CreateIdFromString(name)
        for item in re.finditer(r"<i>TV Series</i></b><br><br>.+? href='/(.+?)'>(.+?)</a>", html):
            item_url = self.base_url + item.group(1)
            item_name = common.CreateIdFromString(item.group(2))
            # NOTE(review): this rebinds `html` (the search page) to the show
            # page; harmless only because the outer finditer already captured
            # the original string.
            html = net.http_GET(item_url).content
            #<b>(.+?)x(.+?) - <a style='text.+? href='/(.+?)'>(.+?)</a></b>
            #<b>(.+?)x(.+?) .+? href='/(.+?)'>(.+?)</a>
            # NOTE(review): season_pull is computed but never used -- the
            # regex below matches the raw (unpadded) `season`; episode_pull
            # IS padded.  Looks intentional for this site's SxE format, but
            # worth confirming.
            season_pull = "0%s"%season if len(season)<2 else season
            episode_pull = "0%s"%episode if len(episode)<2 else episode
            # NOTE(review): the inner loop variable shadows the outer `item`.
            for item in re.finditer(r"<b>"+season+"x"+episode_pull+" - <a style='text.+? href='/(.+?)'>(.+?)</a></b>", html):
                item_url2 = self.base_url + item.group(1)
                item_title = item.group(2)
                if item_name == name_lower:
                    self.GetFileHosts(item_url2, list, lock, message_queue)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Populate `list` with one page of items for `section`.

    The named movie categories and 'tag/box' all share identical markup
    and listing behaviour; 'category/tv-show' has its own episode markup;
    any other section name is treated as a genre tag under /tag/<name>/.
    The five original copy-pasted branches are collapsed into two helpers.
    """
    # Normalise the page number once instead of in every branch.
    if page == '':
        page = '1'
    else:
        page = str(int(page))
    if section == 'category/tv-show':
        self._list_tv_episodes(indexer, section, type, list, page, total_pages)
    elif section in ('category/hollywood-movies', 'category/hindi-movies',
                     'category/hindi-dubbed-movies', 'tag/box'):
        self._list_movies(indexer, section, type, list, page, total_pages,
                          self.base_url + '/' + section + '/')
    else:
        # Unknown section names are genre tags: <base>/tag/<genre>/
        self._list_movies(indexer, section, type, list, page, total_pages,
                          self.base_url + '/tag/' + section + '/')

def _fetch_listing_page(self, listing_url, page, total_pages):
    """Fetch one paginated listing page; scrape the page count if unknown."""
    from entertainment.net import Net
    import re
    html = Net().http_GET(listing_url + 'page/' + str(page) + '/').content
    if total_pages == '':
        r = '<ul><li class="page_info">Page 1 of (.+?)</li><li class="active_page">'
        total_pages = re.compile(r).findall(html)[0]
    return html, total_pages

def _list_movies(self, indexer, section, type, list, page, total_pages, listing_url):
    """Add one page of movie items scraped from `listing_url`."""
    import re
    html, total_pages = self._fetch_listing_page(listing_url, page, total_pages)
    self.AddInfo(list, indexer, section, '', type, str(page), total_pages)
    pattern = 'alt="Watch .+?: (.+?)\(([\d]{4})\) .+?<a href="(.+?)" rel="bookmark">.+?</a></h3>'
    for name, year, url in re.compile(pattern).findall(html):
        name = self.CleanTextForSearch(name)
        self.AddContent(list, indexer, common.mode_File_Hosts,
                        name + ' (' + year + ')', '', type,
                        url=url, name=name, year=year)

def _list_tv_episodes(self, indexer, section, type, list, page, total_pages):
    """Add one page of TV-episode items from the tv-show category."""
    import re
    html, total_pages = self._fetch_listing_page(
        self.base_url + '/' + section + '/', page, total_pages)
    self.AddInfo(list, indexer, section, '', type, str(page), total_pages)
    pattern = '<h3 class="entry-title"><a href="(.+?)" rel="bookmark">(.+?) Season (.+?) Episode (.+?)</a></h3>'
    for url, name, Sea_num, eps_num in re.compile(pattern).findall(html):
        name = self.CleanTextForSearch(name)
        # Zero-pad single-digit numbers for the SxxExx display label only;
        # the item id deliberately keeps the raw numbers.
        season_pull = "0%s" % Sea_num if len(Sea_num) < 2 else Sea_num
        episode_pull = "0%s" % eps_num if len(eps_num) < 2 else eps_num
        sea_eps = 'S' + season_pull + 'E' + episode_pull
        year = '0'  # the listing page carries no year for episodes
        item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num)
        self.AddContent(list, indexer, common.mode_File_Hosts,
                        name + '[COLOR royalblue] (' + sea_eps + ')[/COLOR]',
                        item_id, 'tv_episodes', url=url, name=name, year=year,
                        season=Sea_num, episode=eps_num)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Split a flat episode table into virtual 'seasons' of 25 episodes.

    tv_seasons mode emits one "Episodes NNNN - NNNN" entry per 25-episode
    chunk (plus a shorter final chunk); tv_episodes mode lists only the
    episodes belonging to the requested chunk, always tagged season '1'.
    """
    import urllib
    import re
    from entertainment.net import Net

    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name).lower()

    page_html = Net(cached=False).http_GET(url).content
    episode_pattern = re.compile('<td class="epnum">(.+?)</td><td class="title"><a href="(.+?)">(.+?)</a>')
    chunk = 25  # episodes per virtual season

    if type == 'tv_seasons':
        episodes = episode_pattern.findall(page_html)
        full_chunks, leftover = divmod(len(episodes), chunk)
        for idx in range(full_chunks):
            first = idx * chunk + 1
            label = "Episodes %04d - %04d" % (first, first + chunk - 1)
            entry_id = common.CreateIdFromString(title + ' ' + label)
            self.AddContent(list, indexer, common.mode_Content, label, entry_id,
                            'tv_episodes', url=url, name=name, season=str(idx + 1))
        if leftover:
            first = full_chunks * chunk + 1
            label = "Episodes %04d - %04d" % (first, len(episodes))
            entry_id = common.CreateIdFromString(title + ' ' + label)
            self.AddContent(list, indexer, common.mode_Content, label, entry_id,
                            'tv_episodes', url=url, name=name,
                            season=str(full_chunks + 1))
    elif type == 'tv_episodes':
        episodes = episode_pattern.findall(page_html)
        begin = (int(season) - 1) * chunk
        for ep_raw, ep_href, ep_title in episodes[begin:begin + chunk]:
            # Episode numbers may look like "12-13"; keep the first number
            # and drop any leading zeros.
            ep_number = str(int(ep_raw.split('-')[0]))
            entry_id = common.CreateIdFromString(ep_title + '_season_1_episode_' + ep_number)
            self.AddContent(list, indexer, common.mode_File_Hosts, ep_title,
                            entry_id, type, url=ep_href, name=name,
                            season='1', episode=ep_number)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Populate `list` with anime shows for the requested site section.

    Most listing sections scrape the full list from one page and paginate
    it locally in chunks of 25 (see _paginate); 'genres_title' is paginated
    server-side; 'hdfluffy' and 'abc_title' are unpaginated.
    """
    import re       # BUGFIX: `re` was used throughout but never imported here
    import urllib
    from entertainment.net import Net
    net = Net(cached=False)

    if section == 'watch-anime':
        url = urllib.unquote_plus(url)
        html = net.http_GET(url).content
        match = re.compile('<li><a href="http:/\/\www.+?animeultima..+?/(.+?)" title=".+?">(.+?)</a></li>').findall(html)
        match = self._paginate(list, indexer, section, url, type, page, total_pages, sort_by, sort_order, match)
        for url, name in match:
            name = self.CleanTextForSearch(name)
            url = self.base_url + url  # hrefs here are captured relative to the site root
            self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name)
    elif section in ('watch-live-action-anime', 'ongoing anime'):
        # Both sections share identical markup and emit usable hrefs as-is.
        url = urllib.unquote_plus(url)
        html = net.http_GET(url).content
        match = re.compile('<a title=".+?" href="(.+?)">(.+?)</a>.+?<span class=".+?">.+?<').findall(html)
        match = self._paginate(list, indexer, section, url, type, page, total_pages, sort_by, sort_order, match)
        for url, name in match:
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name)
    elif section == 'topanime':
        url = urllib.unquote_plus(url)
        html = net.http_GET(url).content
        match = re.compile('<li><a href="(.+?)">(.+?)</a> <strong>').findall(html)
        match = self._paginate(list, indexer, section, url, type, page, total_pages, sort_by, sort_order, match)
        for url, name in match:
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name)
    elif section == 'hdfluffy':
        url = urllib.unquote_plus(url)
        html = net.http_GET(url).content
        for name, url in re.compile('<a name="(.+?)" href="(.+?)"').findall(html):
            name = self.CleanTextForSearch(name)
            item_id = common.CreateIdFromString(name)
            self.AddContent(list, indexer, common.mode_File_Hosts, name, item_id, 'tv_seasons', url=url, name=name)
    elif section == 'genres_title':
        # Server-side pagination: .../genres/watch-<genre>-animes/<page>/
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        url = urllib.unquote_plus(url)
        new_url = url.rpartition('/')[0] + '/'
        html = net.http_GET(new_url + str(page) + '/').content
        if total_pages == '':
            r = '<a href="/genres/watch-.+?-animes/(.+?)/">.+?</a></div><div class="genre-anime">\s*<a href=".+?">\s*<span class="genre-anime-thumb"'
            total_pages = re.compile(r).findall(html)[0]
        self.AddInfo(list, indexer, 'genres_title', url, type, str(page), total_pages)
        match = re.compile('<div class="genre-anime">\s*<a href="/(.+?)">\s*<span class="genre-anime-thumb".+?<h2>(.+?)</h2>', re.DOTALL).findall(html)
        for url, name in match:
            name = self.CleanTextForSearch(name)
            url = self.base_url + url
            self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name)
    elif section == 'abc_title':
        # Letter index: the selected letter rides in the url fragment,
        # with 'number' standing for the 0-9 bucket.
        url = urllib.unquote_plus(url)
        html = net.http_GET(url).content
        letter = url.split('#')[1].replace('number', '0-9')
        for show_url, name in re.compile('<li><a href="(.+?)" title=".+?">(.+?)</a></li>').findall(html):
            first_char = name[:1]
            if (letter == '0-9' and first_char.isdigit()) or first_char == letter:
                # BUGFIX: pass each show's own link; previously every item was
                # given the letter-index page url, so all entries opened the
                # same page.  (Hrefs may be site-relative -- TODO confirm
                # whether a self.base_url prefix is needed, as in watch-anime.)
                self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=show_url, name=name)

def _paginate(self, list, indexer, section, url, type, page, total_pages, sort_by, sort_order, match):
    """Emit pagination info for `match` and return the 25-item slice for `page`."""
    num_items_on_a_page = 25
    if page == '':
        page = '1'
    total_items = len(match)
    total_pages = str((total_items // num_items_on_a_page) +
                      (1 if total_items % num_items_on_a_page >= 1 else 0))
    self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
    start_index = (int(page) - 1) * num_items_on_a_page
    return match[start_index:start_index + num_items_on_a_page]
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    # Scrape listing pages from g2g.so and append entries to `list`.
    # Sections: 'latest' (forum index), 'tvshows', 'tvshowslatest' (episodes),
    # and a trailing else that scrapes the movies index.
    #
    # NOTE(review): the last branch is `if section == 'tvshowslatest': ... else: ...`,
    # so the movies (else) scrape ALSO runs after the 'latest' and 'tvshows'
    # branches above it — confirm whether that double-scrape is intentional.
    if section == 'latest':
        new_url = url
        # Normalise page: default to '1', otherwise canonicalise the number.
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + 'index.php?&page=' + page
        print new_url
        from entertainment.net import Net
        import re
        net = Net(cached=False)
        import urllib
        html = net.http_GET(new_url).content
        #total_pages = '7'
        if total_pages == '':
            # Total page count is parsed from the "Last Page" pager link.
            r = 'title="Last Page - Results .+? to .+? of (.+?)">Last'
            total_pages = re.compile(r).findall(html)[0]
        self.AddInfo(list, indexer, 'latest', url, type, str(page), total_pages)
        # Each match: (forum thread path, title, "(YYYY)").
        for item in re.finditer(r'<span class="leftgg"> <a href="(.+?)" id=".+?"><img onerror=.+?href=".+?" id=".+?">(.+?)(\([\d]{4}\)) .+?Online Streaming</a>', html, re.I | re.DOTALL):
            url = 'http://g2g.so/forum/' + item.group(1)
            url = url.split('&')[0]  # drop query-string junk (e.g. session ids)
            print url
            name = item.group(2)
            item_year = item.group(3).replace('(', '').replace(')', '')
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + item_year + ')', '', type, url=url, name=name, year=item_year)
    if section == 'tvshows':
        # Un-paginated TV-series index.
        new_url = url
        from entertainment.net import Net
        import re
        net = Net(cached=False)
        import urllib
        html = net.http_GET(new_url).content
        for item in re.finditer(r'<!--- <a href=".+?" target="_self"> ---><a href="(.+?)" target="_self"><img class="image" src=".+?" alt="(.+?)"', html, re.I):
            url = 'http://g2g.so/tvseries/' + item.group(1)
            print url
            url = url.split('&')[0]
            print url
            name = item.group(2)
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name)
    if section == 'tvshowslatest':
        # Latest episodes; the alt text carries "<name> S<season>E<episode>".
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + 'index.php?&page=' + page
        print new_url
        from entertainment.net import Net
        import re
        net = Net(cached=False)
        import urllib
        html = net.http_GET(new_url).content
        total_pages = '6'  # hard-coded; the regex-based count below is disabled
        '''
        if total_pages == '':
            r= 'title="Last Page - Results .+? to .+? of (.+?)">Last'
            total_pages = re.compile(r).findall(html)[0]
        '''
        self.AddInfo(list, indexer, 'tvshowslatest', url, type, str(page), total_pages)
        for item in re.finditer(r'<!--- <a href=".+?" target="_self"> ---><a href="(.+?)" target="_self"><img class="image" src=".+?" alt="(.+?) S(\d+)E(\d+)"', html, re.I):
            url = 'http://g2g.so/episodes/' + item.group(1)
            url = url.split('&')[0]
            print url
            name = item.group(2)
            season = item.group(3)
            episode = item.group(4)
            name = self.CleanTextForSearch(name)
            item_id = common.CreateIdFromString(name + '_season_' + season + '_episode_' + episode)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' S' + season + 'E' + episode, item_id, 'tv_episodes', url=url, name=name, season=season, episode=episode)
    else:
        # Movies index (runs for every section except 'tvshowslatest' — see NOTE above).
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + 'index.php?&page=' + page
        print new_url
        from entertainment.net import Net
        import re
        net = Net(cached=False)
        import urllib
        html = net.http_GET(new_url).content
        #total_pages='7'
        '''
        if total_pages == '':
            r= 'title="Last Page - Results .+? to .+? of (.+?)">Last'
            total_pages = re.compile(r).findall(html)[0]
        '''
        #self.AddInfo(list, indexer, 'latest2', url, type, str(page), total_pages)
        for item in re.finditer(r'<!--- <a href=".+?" target="_self"> ---><a href="(.+?)" target="_self"><img class="image" src=".+?" alt="(.+?)(\([\d]{4}\))"', html, re.I):
            url = 'http://g2g.so/movies/' + item.group(1)
            print url
            url = url.split('&')[0]
            print url
            name = item.group(2)
            item_year = item.group(3).replace('(', '').replace(')', '')
            name = self.CleanTextForSearch(name)
            #url = net.http_GET(url).get_url()
            #print url
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + item_year + ')', '', type, url=url, name=name, year=item_year)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    # Resolve season / episode listings for one show and append them to `list`.
    #
    # type == 'tv_seasons':  scrape the season toggle links, emit one
    #                        'tv_episodes' content entry per season.
    # type == 'tv_episodes': scrape the episode links of the given season,
    #                        emit one file-hosts entry per episode.
    #
    # BUGFIX: the original tv_episodes branch referenced `item_title` without
    # ever assigning it (the assignment from a non-existent third regex group
    # was commented out), raising NameError on every episode. We now build a
    # title from the captured episode number.
    from md_request import open_url
    import re, urllib

    def _abs_url(item_url):
        # Normalise a scraped href: '//host/x' -> 'http://host/x',
        # '/path' -> base_url + 'path'; anything else passes through.
        if item_url.startswith('//'):
            return 'http:' + item_url
        if item_url.startswith('/'):
            return self.base_url + item_url[1:]
        return item_url

    url = urllib.unquote_plus(url)
    content = open_url(url).content
    if type == 'tv_seasons':
        for item in re.finditer("<a data-id='.+?' class='season-toggle' href='(.+?)'.+?>.+? Season ([0-9]+)<", content):
            item_url = _abs_url(item.group(1))
            item_v_id = item.group(2)  # season number
            item_title = 'Season ' + item_v_id
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title, item_id, 'tv_episodes',
                            url=item_url, name=name, year=year, season=item_v_id)
    elif type == 'tv_episodes':
        for item in re.finditer("<a href='(.+?)' title=.+?><strong>Episode</strong> ([0-9]+) .+?<", content):
            item_url = _abs_url(item.group(1))
            item_v_id = item.group(2)  # episode number
            item_title = 'Episode ' + item_v_id
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + season + '_episode_' + item_v_id)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, item_id, type,
                            url=item_url, name=name, year=year, season=season, episode=item_v_id)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    # Multi-site TV-guide scraper. Each `section` branch fetches one listing
    # page (watchseries.to, tv.com, thefutoncritic.com, primewire, at-my.tv)
    # and appends content/section entries to `list` via AddContent/AddSection.
    if section == 'latest':
        # watchseries.to latest-episodes feed; page numbering starts at '0'.
        new_url = url
        if page == '':
            page = '0'
        else:
            page = str(int(page))
        new_url = 'http://www.watchseries.to/latest/' + page
        from entertainment.net import Net
        net = Net()
        import urllib
        import re
        url = urllib.unquote_plus(url)
        html = net.http_GET(new_url).content
        total_pages = '9'  # hard-coded page count
        self.AddInfo(list, indexer, 'latest', '', type, str(page), total_pages)
        # match:  entries that carry an air date in <span class="epnum">.
        # match2: the same list items matched more loosely (no date group).
        match = re.compile('<li><a href="/episode/(.+?)">(.+?) Seas. (.+?) Ep. (.+?) \(.+?\)<span class="epnum">(.+?)</span></a></li>').findall(html)
        match2 = re.compile('<li><a href="/episode/(.+?)">(.+?) Seas. (.+?) Ep. (.+?) \(.+?\)<.+?</li>').findall(html)
        # NOTE(review): the inner loop re-binds url/name/Sea_num/eps_num, so the
        # outer loop only contributes `date`, and every date is paired with every
        # match2 row (quadratic). Looks unintentional — confirm before relying on it.
        for url, name, Sea_num, eps_num, date in match:
            for url, name, Sea_num, eps_num in match2:
                name = self.CleanTextForSearch(name)
                url = self.tv_calender_url
                # Zero-pad single-digit season/episode for the SxxExx label.
                season_pull = "0%s" % Sea_num if len(Sea_num) < 2 else Sea_num
                episode_pull = "0%s" % eps_num if len(eps_num) < 2 else eps_num
                sea_eps = 'S' + season_pull + 'E' + episode_pull
                year = '0'
                item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num)
                self.AddContent(list, indexer, common.mode_File_Hosts, name + '[COLOR royalblue] (' + sea_eps + ')[/COLOR] ' + 'Date Aired: ' + date, item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num)
    elif section == 'popular':
        # watchseries.to popular-episodes page (single page, no pagination).
        new_url = url
        from entertainment.net import Net
        net = Net()
        import urllib
        import re
        url = urllib.unquote_plus(url)
        html = net.http_GET(new_url).content
        match = re.compile('<li><a href="/episode/(.+?)">(.+?) Seas. (.+?) Ep. (.+?) \(.+?\)</a></li>').findall(html)
        for url, name, Sea_num, eps_num in match:
            name = self.CleanTextForSearch(name)
            url = self.tv_calender_url
            season_pull = "0%s" % Sea_num if len(Sea_num) < 2 else Sea_num
            episode_pull = "0%s" % eps_num if len(eps_num) < 2 else eps_num
            sea_eps = 'S' + season_pull + 'E' + episode_pull
            year = '0'
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + '[COLOR royalblue] (' + sea_eps + ')[/COLOR]', item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num)
    elif section == 'web':
        # tv.com "web" show index, paginated as /web/pageN/.
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = self.base_url_tv_com + section + '/page' + page + '/'
        #http://www.tv.com/web/page2/
        from entertainment.net import Net
        net = Net()
        import urllib
        import re
        url = urllib.unquote_plus(url)
        new_url = self.base_url_tv_com + section + '/'
        html = net.http_GET(new_url + 'page' + str(page) + '/').content
        if total_pages == '':
            #lastlist = url
            # Last page number is the link just before the "next" pager button.
            r = '>([0-9]*)</a>\s*<a href=".+?" class="next">'  #% lastlist
            total_pages = re.compile(r).findall(html)[0]
        self.AddInfo(list, indexer, 'web', url, type, str(page), total_pages)
        match = re.compile('<div class="mask"><a href=".+?"><img src=".+?" alt="(.+?)"/></a></div>.+?<div class="_clear"></div>\s*<div class="airtime">(.+?)</div>', re.DOTALL).findall(html)
        for name, netw in match:
            name = self.CleanTextForSearch(name)
            # Show URL is rewritten to a TVRage search query on the show name.
            url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show=' + name
            netw = netw.split('<')[0]
            netw = '[COLOR red]' + netw + '[/COLOR]'
            self.AddContent(list, indexer, common.mode_Content, name + ' (' + netw.replace('(', '').replace(')', '') + ')', '', 'tv_seasons', url=url, name=name)
    elif section == 'decade_title':
        # tv.com decade listing — same scrape/pager pattern as 'web'.
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + '/page' + page + '/'
        from entertainment.net import Net
        net = Net()
        import urllib
        import re
        url = urllib.unquote_plus(url)
        new_url = url.rpartition('/')[0]
        new_url = new_url + '/'
        html = net.http_GET(new_url + 'page' + str(page) + '/').content
        if total_pages == '':
            #lastlist = url
            r = '>([0-9]*)</a>\s*<a href=".+?" class="next">'  #% lastlist
            total_pages = re.compile(r).findall(html)[0]
        self.AddInfo(list, indexer, 'decade_title', url, type, str(page), total_pages)
        match = re.compile('<div class="mask"><a href=".+?"><img src=".+?" alt="(.+?)"/></a></div>.+?<div class="_clear"></div>\s*<div class="airtime">(.+?)</div>', re.DOTALL).findall(html)
        for name, netw in match:
            name = self.CleanTextForSearch(name)
            url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show=' + name
            netw = netw.split('<')[0]
            netw = '[COLOR red]' + netw + '[/COLOR]'
            self.AddContent(list, indexer, common.mode_Content, name + ' (' + netw.replace('(', '').replace(')', '') + ')', '', 'tv_seasons', url=url, name=name)
    elif section == 'network_title':
        # tv.com per-network listing — same scrape/pager pattern as 'web'.
        import re
        #url = url.replace(' ','+')
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + '/page' + page + '/'
        from entertainment.net import Net
        net = Net()
        import urllib
        url = urllib.unquote_plus(url)
        new_url = url.rpartition('/')[0]
        new_url = new_url + '/'
        html = net.http_GET(new_url + 'page' + str(page) + '/').content
        if total_pages == '':
            #lastlist = url
            r = '>([0-9]*)</a>\s*<a href=".+?" class="next">'  #% lastlist
            total_pages = re.compile(r).findall(html)[0]
        self.AddInfo(list, indexer, 'network_title', url, type, str(page), total_pages)
        match = re.compile('<div class="mask"><a href=".+?"><img src=".+?" alt="(.+?)"/></a></div>.+?<div class="_clear"></div>\s*<div class="airtime">(.+?)</div>', re.DOTALL).findall(html)
        for name, netw in match:
            name = self.CleanTextForSearch(name)
            url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show=' + name
            netw = netw.split('<')[0]
            netw = '[COLOR red]' + netw + '[/COLOR]'
            self.AddContent(list, indexer, common.mode_Content, name + ' (' + netw.replace('(', '').replace(')', '') + ')', '', 'tv_seasons', url=url, name=name)
    elif section == 'genres_title':
        # tv.com per-genre listing — same scrape/pager pattern as 'web'.
        import re
        #url = url.replace(' ','+')
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + '/page' + page + '/'
        from entertainment.net import Net
        net = Net()
        import urllib
        url = urllib.unquote_plus(url)
        new_url = url.rpartition('/')[0]
        new_url = new_url + '/'
        html = net.http_GET(new_url + 'page' + str(page) + '/').content
        if total_pages == '':
            #lastlist = url
            r = '>([0-9]*)</a>\s*<a href=".+?" class="next">'  #% lastlist
            total_pages = re.compile(r).findall(html)[0]
        self.AddInfo(list, indexer, 'genres_title', url, type, str(page), total_pages)
        match = re.compile('<div class="mask"><a href=".+?"><img src=".+?" alt="(.+?)"/></a></div>.+?<div class="_clear"></div>\s*<div class="airtime">(.+?)</div>', re.DOTALL).findall(html)
        for name, netw in match:
            name = self.CleanTextForSearch(name)
            url = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show=' + name
            netw = netw.split('<')[0]
            netw = '[COLOR red]' + netw + '[/COLOR]'
            self.AddContent(list, indexer, common.mode_Content, name + ' (' + netw.replace('(', '').replace(')', '') + ')', '', 'tv_seasons', url=url, name=name)
    elif section == 'news':
        # tv.com news articles; emitted with mode/type 'news'.
        import re
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + '/page' + page + '/'
        #new_url = new_url + '?p=' + page
        from entertainment.net import Net
        net = Net()
        import urllib
        url = urllib.unquote_plus(url)
        new_url = url.rpartition('/')[0]
        new_url = new_url + '/'
        html = net.http_GET(new_url + 'page' + str(page) + '/').content
        if total_pages == '':
            #lastlist = url
            r = '>([0-9]*)</a>\s*<a href=".+?" class="next">'  #<a href='?p=.+?'>([0-9]*)</a> | <a href='?p=.+?'>Next page
            total_pages = re.compile(r).findall(html)[0]
        self.AddInfo(list, indexer, 'news', url, type, str(page), total_pages)
        match = re.compile('<h3 class="title"><a href="(.+?)">(.+?)</a></h3>\s*<p class="body">(.+?)</p>').findall(html)
        for url, title_name, title_name2 in match:
            #<h3><a href="(.+?)">(.+?)</a></h3>
            url = 'http://www.tv.com' + url  #http://www.tvrage.com+url
            #title_name = '[COLOR royalblue]'+title_name+'[/COLOR]'
            title_name = title_name + ' ' + title_name2
            title_name = title_name.replace('<i>', '')
            name = title_name.replace('</i>', '')
            self.AddContent(list, indexer, 'news', name, '', 'news', url=url, name=name)
    elif section == 'listings':
        # thefutoncritic.com daily listings grid, with prev/next-day sections.
        import re
        #url = url.replace(' ','+')
        new_url = url
        from entertainment.net import Net
        net = Net()
        import urllib
        url = urllib.unquote_plus(url)
        html = net.http_GET(new_url).content
        # r: previous-day link; r2: next-day link; r3: today's date label.
        r = '<td colspan="3" class="pop"><div align="center"><a href=".+?"><img src=".+?" border="0"></a> <a href="(.+?)">'
        next_pages = re.compile(r).findall(html)[0]
        url = 'http://www.thefutoncritic.com' + next_pages
        #next_pages = next_pages.replace('/listings/','')
        #next_pages = next_pages[:-1]
        #next_pages = next_pages.replace('/','-')
        #self.AddSection(list, indexer,'listings','[COLOR royalblue]<<--'+next_pages+'[/COLOR]',url,indexer)#(list, indexer, 'listings', url, type, '', total_pages)
        r2 = '<td colspan="3" class="pop"><div align="center"><a href=".+?"><img src=".+?" border="0"></a> <a href=".+?"><img src="/.+?" border="0"></a> .+? <a href="(.+?)"'
        r3 = 'border="0"></a> \[.+?, (.+?)\]&'
        todays_date = re.compile(r3).findall(html)[0]
        next_pages2 = re.compile(r2, re.DOTALL).findall(html)[0]
        url = 'http://www.thefutoncritic.com' + next_pages2
        # Turn '/listings/YYYYMMDD/' into a human-ish 'YYYYMMDD' label.
        next_pages2 = next_pages2.replace('/listings/', '')
        next_pages2 = next_pages2[:-1]
        next_pages2 = next_pages2.replace('/', '-')
        todays_date = todays_date.title()
        self.AddSection(list, indexer, 'listings', '[COLOR white]Today Date: ' + '[COLOR red]' + todays_date + ' [/COLOR]' + '[COLOR white]Click to go to:[/COLOR] ' + '[COLOR royalblue]' + next_pages2 + '-->>[/COLOR]', url, indexer)  #(list, indexer, 'listings', url, type, '', total_pages)
        # Grid rows: (air time, network, show url, show name).
        match = re.compile('<td width="15%">(.+?)</td>\s*<td width="15%">(.+?)</td>\s*<td width="70%"><a href="(.+?)">(.+?)</a').findall(html)
        date_idem = re.search(r'border="0"></a> [.+?, (.+?)]', html)  # february 25, 2014
        for time, netw, url, name in match:
            name = name.split(':')[0]
            name = self.CleanTextForSearch(name)
            url = 'http://www.thefutoncritic.com' + url
            # Move a trailing ", the" to the front, then fix Title Case acronyms.
            name = re.sub('(.+?), the', 'the \g<1>', name)
            name = name.title()
            name = name.replace('Nhl Special', 'NHL').replace('$', '').replace('Abc Special', 'ABC Special').replace('Nba Special', 'NBA').replace('Wwe Main Event', 'WWE Main Event').replace('Pbs Special', 'PBS Special').replace('Wwe Raw', 'WWE Raw').replace('Bet Special', 'BET Special').replace('Hbo Special', 'HBO Special').replace('Nbc Sports Special', 'NBC Sports Special').replace('#', '')
            time = time.replace(' ', '')
            netw = '[COLOR red]' + netw + '[/COLOR]'
            self.AddContent(list, indexer, common.mode_Content, name + ' (' + netw + ')' + ' ' + '[COLOR royalblue]' + time + '[/COLOR]', '', 'tv_seasons', url=url, name=name)
        # Trailing previous-day navigation section.
        r = '<td colspan="3" class="pop"><div align="center"><a href=".+?"><img src=".+?" border="0"></a> <a href="(.+?)">'
        next_pages = re.compile(r).findall(html)[0]
        url = 'http://www.thefutoncritic.com' + next_pages
        next_pages = next_pages.replace('/listings/', '')
        next_pages = next_pages[:-1]
        next_pages = next_pages.replace('/', '-')
        self.AddSection(list, indexer, 'listings', '[COLOR royalblue]<<--[/COLOR]' + '[COLOR white]Click to go to: [/COLOR]' + '[COLOR royalblue]' + next_pages + '[/COLOR]', url, indexer)
        #self.AddSection(list, indexer,'listings','[COLOR royalblue]<<--'+next_pages+'[/COLOR]',url,indexer)
    elif section == 'primewire':
        # Primewire index; supports paging and a 'sort' query parameter.
        import re
        import urllib
        url = urllib.unquote_plus(url)
        import re
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + '&page=' + page
        if sort_by == '' and 'sort' not in new_url:
            sort_by = 'date'
        if 'sort' not in new_url:
            new_url = new_url + '&sort=' + sort_by
        from entertainment.net import Net
        net = Net()
        # Strip the per-user key from the cache key so the cache is shared.
        new_url_for_cache = re.sub('\?key=.+?&', '?', new_url)
        content = net.http_GET(new_url, url_for_cache=new_url_for_cache).content
        if total_pages == '':
            total_pages = re.search('page=([0-9]*)"> >> <', content)
            if total_pages:
                total_pages = total_pages.group(1)
            else:
                if re.search('0 items found', content):
                    page = '0'
                    total_pages = '0'
                else:
                    page = '1'
                    total_pages = '1'
        self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
        mode = common.mode_File_Hosts
        if type == 'tv_shows':
            mode = common.mode_Content
            type = 'tv_seasons'
        for item in re.finditer(r"<div class=\"index_item.+?\"><a href=\"/(.+?)\" title=\"Watch (.+?)\"", content):
            item_v_id = item.group(1)
            item_title = common.addon.unescape(item.group(2))
            # Year is embedded in the title as "(YYYY)"; name is the title minus it.
            item_year = re.search("\(([0-9]+)\)", item_title)
            if item_year:
                item_year = item_year.group(1)
            else:
                item_year = ''
            item_name = re.sub(" \([0-9]+\)", "", item_title)
            item_url = self.primewire_url + item_v_id
            if total_pages == '':
                total_pages = '1'
            self.AddContent(list, indexer, mode, item_title, '', type, url=item_url, name=item_name, year=item_year)
    elif section == 'abc':
        # tv.com A-Z show listing.
        import re
        new_url = url
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        new_url = new_url + '/page' + page + '/'
        from entertainment.net import Net
        net = Net()
        import urllib
        url = urllib.unquote_plus(url)
        new_url = url.rpartition('/')[0]
        new_url = new_url + '/'
        html = net.http_GET(new_url + 'page' + str(page) + '/').content
        if total_pages == '':
            r = '>([0-9]*)</a>\s*<a href=".+?" class="next">'
            total_pages = re.compile(r).findall(html)[0]
        self.AddInfo(list, indexer, 'abc', url, type, str(page), total_pages)
        match = re.compile('<li class="show">\s*<a href="(.+?)">(.+?)</a>').findall(html)
        for url, name in match:
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_Content, name, '', 'tv_seasons', url=url, name=name)
    elif section == 'tvguide_day':
        # at-my.tv single-day guide with prev/next-day navigation sections.
        import re
        new_url = url
        from entertainment.net import Net
        net = Net()
        import urllib
        url = urllib.unquote_plus(url)
        html = net.http_GET(new_url).content
        r2 = '<td colspan="7" class="month_name"><div class="prev-month "><a href=".+?"><< <strong>.+?</strong></a></div><h3>.+?</h3><div class="next-month "><a href="(.+?)"><strong>.+?</strong>'
        r3 = '<td colspan="7" class="month_name"><div class="prev-month "><a href=".+?"><< <strong>.+?</strong></a></div><h3>(.+?)</h3><div class="next-month'
        todays_date = re.compile(r3).findall(html)[0]
        next_pages2 = re.compile(r2, re.DOTALL).findall(html)[0]
        url = next_pages2
        # 'http://DD.MM.YYYY.at-my.tv' -> 'DD-MM-YYYY' display label.
        next_pages2 = next_pages2.replace('http://', '').replace('.at-my.tv', '')
        next_pages2 = next_pages2.replace('.', '-')
        self.AddSection(list, indexer, 'tvguide_day', '[COLOR white]Today Date: ' + '[COLOR red]' + todays_date + ' [/COLOR]' + '[COLOR white]Click to go to:[/COLOR] ' + '[COLOR royalblue]' + next_pages2 + '-->>[/COLOR]', url, indexer)  #(list, indexer, 'listings', url, type, '', total_pages)
        match = re.compile('class="openlink">\+ (.+?)</a> </th>.+?<strong>Season (.+?), Episode (.+?) - "(.+?)"</strong>.+?<strong>Network:</strong> (.+?) .+?<strong>Status:</strong> (.+?) .+?<strong>Rating:</strong> (.+?) ', re.DOTALL).findall(html)
        for name, Sea_num, eps_num, title, netw, info, rate in match:
            name = self.CleanTextForSearch(name)
            url = self.tv_calender_url
            season_pull = "0%s" % Sea_num if len(Sea_num) < 2 else Sea_num
            episode_pull = "0%s" % eps_num if len(eps_num) < 2 else eps_num
            sea_eps = 'S' + season_pull + 'E' + episode_pull
            netw = netw.upper()
            info = info.lower()
            netw = '[COLOR red]' + netw + ': ' + info + '[/COLOR]'
            #self.AddContent(list, indexer, common.mode_File_Hosts, name+ ' (' + netw + ')'+' '+'[COLOR royalblue] '+sea_eps+'[/COLOR] '+'Rating: '+rate, type, url=url, name=name, season=season_pull, episode=episode_pull)
            year = '0'
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + netw + ')' + ' ' + '[COLOR royalblue] ' + sea_eps + '[/COLOR] ' + 'Rating: ' + rate, item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num)
        # Trailing previous-day navigation section.
        r = '<td colspan="7" class="month_name"><div class="prev-month "><a href="(.+?)"><< <strong>.+?</strong></a></div><h3>.+?</h3><div class="next-month "><a href=".+?"><strong>.+?</strong>'
        next_pages = re.compile(r).findall(html)[0]
        url = next_pages
        next_pages = next_pages.replace('http://', '').replace('.at-my.tv', '')
        next_pages = next_pages.replace('.', '-')
        self.AddSection(list, indexer, 'tvguide_day', '[COLOR royalblue]<<--[/COLOR]' + '[COLOR white]Click to go to: [/COLOR]' + '[COLOR royalblue]' + next_pages + '[/COLOR]', url, indexer)
    elif section == 'tvguide_week':
        # at-my.tv weekly guide (like tvguide_day but rows carry an airdate).
        import re
        new_url = url
        from entertainment.net import Net
        net = Net()
        import urllib
        url = urllib.unquote_plus(url)
        html = net.http_GET(new_url).content
        r3 = '</div><h3>(.+?)</h3>'
        todays_date = re.compile(r3).findall(html)[0]
        self.AddSection(list, indexer, 'tvguide_week', '[COLOR red].:' + todays_date + ':. [/COLOR]', '', indexer)  #(list, indexer, 'listings', url, type, '', total_pages)
        match = re.compile('class="openlink">\+ (.+?)</a> </th>.+?<td class="airdate">(.+?)</td>.+?<strong>Season (.+?), Episode (.+?) - "(.+?)"</strong>.+?<strong>Network:</strong> (.+?) .+?<strong>Status:</strong> (.+?) .+?<strong>Rating:</strong> (.+?) ', re.DOTALL).findall(html)
        for name, airdate, Sea_num, eps_num, title, netw, info, rate in match:
            name = self.CleanTextForSearch(name)
            url = self.tv_calender_url
            season_pull = "0%s" % Sea_num if len(Sea_num) < 2 else Sea_num
            episode_pull = "0%s" % eps_num if len(eps_num) < 2 else eps_num
            sea_eps = 'S' + season_pull + 'E' + episode_pull
            netw = netw.upper()
            info = info.lower()
            netw = '[COLOR red]' + netw + ': ' + info + '[/COLOR]'
            #self.AddContent(list, indexer, common.mode_Content, name+ ' (' + netw + ')'+' '+'[COLOR royalblue] '+sea_eps+'[/COLOR] '+'Airdate: '+airdate, '', 'tv_seasons', url=url, name=name)
            year = '0'
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + netw + ')' + ' ' + '[COLOR royalblue] ' + sea_eps + '[/COLOR] ' + 'Airdate: ' + airdate, item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num)
    else:  #section == 'tvguide_month':
        # at-my.tv monthly guide; episode date comes from the link's hostname.
        new_url = url
        import re
        from entertainment.net import Net
        net = Net()
        import urllib
        url = urllib.unquote_plus(url)
        html = net.http_GET(new_url).content
        r3 = '</a></div><h3>.+?; (.+?)</h3>'
        todays_month = re.compile(r3).findall(html)[0]
        self.AddSection(list, indexer, 'tvguide_week', '[COLOR red].:' + todays_month + ':. [/COLOR]', '', indexer)
        match = re.compile('href="http://(.+?).at-my.tv/#r_.+?" class="eplink ">(.+?)</a><span class="seasep"></span><br /><span class="seasep" >S: (.+?) - Ep: (.+?)</span>').findall(html)
        for url, name, Sea_num, eps_num in match:
            name = self.CleanTextForSearch(name)
            airdate = url.replace('.', '-')
            airdate = '[COLOR red]' + airdate + '[/COLOR]'
            season_pull = "0%s" % Sea_num if len(Sea_num) < 2 else Sea_num
            episode_pull = "0%s" % eps_num if len(eps_num) < 2 else eps_num
            sea_eps = 'S' + season_pull + 'E' + episode_pull
            #self.AddContent(list, indexer, common.mode_Content, name+' ('+'[COLOR royalblue]'+sea_eps+'[/COLOR]) '+'Airdate: ('+airdate+')', '', 'tv_seasons', url=url, name=name)
            year = '0'
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + Sea_num + '_episode_' + eps_num)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + '[COLOR royalblue]' + sea_eps + '[/COLOR]) ' + 'Airdate: (' + airdate + ')', item_id, 'tv_episodes', url=url, name=name, year=year, season=Sea_num, episode=eps_num)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    # Trakt website scraper: fetches one grid page and classifies each
    # grid item as show / season / episode / movie from its URL path.
    from entertainment.net import Net
    net = Net(cached=False)
    trakt_user = self.Settings().get_setting('username')
    trakt_password = self.Settings().get_setting('password')
    # Authenticated requests (cookies) except for search.
    if trakt_user != "" and trakt_password != "" and section != 'search':
        net.set_cookies(self.cookie_file)
    import re
    new_url = url
    if section != 'list' or total_pages != '':
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        # Search URLs already carry a query string, hence '&' vs '?'.
        new_url = new_url + ('&' if section == 'search' else '?') + 'page=' + page
        if total_pages == '':
            total_pages = '500'  # default upper bound for endless lists
            if section == 'boxoffice':
                total_pages = '1'
            elif section == 'search':
                total_pages = '5'
        self.AddInfo(list, indexer, section, url, type, str(page), total_pages)
    html = net.http_GET(new_url).content
    if section == 'list' and page == '' and total_pages == '':
        # First visit of a user list: read the real page count from the pager.
        pagination_match = re.search('<ul class="pagination">(.+?)</ul>', html, re.DOTALL)
        if pagination_match:
            if page == '':
                page = '1'
            pagination = pagination_match.group(1)
            page_match = re.compile('<a href=[^>]+?>([^<]+?)<').findall(pagination)
            if page_match:
                total_pages = page_match[-1].strip()
            self.AddInfo(list, indexer, section, url, type, str(page), total_pages)
    match = re.compile('(<div class="grid-item col.+?<div class="titles[^>]+?>.+?</div>.+?</div>)', re.DOTALL).findall(html)
    for item in match:
        url_match = re.search('<a href="([^"]+?)">', item)
        # NOTE(review): reconstructed nesting — assumed the whole item body is
        # guarded by url_match (items without a link are skipped); confirm
        # against the original file's indentation.
        if url_match:
            url = self.base_url + url_match.group(1)
            item_indexer = ''
            mode = ''
            name = ''
            year = ''
            season = ''
            episode = ''
            item_id = ''
            displayname = ''
            if '/shows/' in url:
                # TV content; may be refined to season/episode below.
                if indexer == common.indxr_Movies and section == 'list':
                    continue  # mixed list but caller asked for movies only
                item_indexer = common.indxr_TV_Shows
                mode = common.mode_Content
                type = 'tv_seasons'
                if section == 'list':
                    name_match = re.search('<h3>(.+?)</h3>', item)
                else:
                    name_match = re.search('<meta content="([^"]+?)" itemprop="name">', item)
                year_match = re.search('<span class="year">(.+?)</span>', item)
                if year_match:
                    year = year_match.group(1)
                if name_match:
                    name = name_match.group(1)
                displayname = name
                if year:
                    displayname = displayname + ' (' + year + ')'
                if '/seasons/' in url:
                    type = 'tv_episodes'
                    # Show name lives in the partOfSeries span, not the item title.
                    name_span = re.search('itemprop="partOfSeries"(.+?)</span>', item, re.DOTALL).group(1)
                    name = re.search('<meta content="([^"]+?)" itemprop="name">', name_span).group(1)
                    season = re.search('/seasons/([0-9]+)', url).group(1)
                    displayname = name + ' - Season: ' + season
                if 'episodes/' in url:
                    mode = common.mode_File_Hosts
                    type = 'tv_episode'
                    episode = re.search('/episodes/([0-9]+)', url).group(1)
                    item_id = common.CreateIdFromString(name + '_' + year + '_season_' + season + '_episode_' + episode)
                    episode_name = ''
                    # Last itemprop="name" meta in the item is the episode title.
                    episode_name_match = re.compile('<meta content="([^"]+?)" itemprop="name">').findall(item)
                    if episode_name_match:
                        episode_name = episode_name_match[-1].strip()
                    displayname = name + ' - S' + season + 'E' + episode + ' - ' + episode_name
                    # NOTE(review): this second assignment (without year)
                    # overwrites the item_id computed a few lines above.
                    item_id = common.CreateIdFromString(name + '_season_' + season + '_episode_' + episode)
            else:
                # Movie item.
                if indexer == common.indxr_TV_Shows and section == 'list':
                    continue
                item_indexer = common.indxr_Movies
                mode = common.mode_File_Hosts
                type = common.indxr_Movies
                if section == 'list':
                    name = re.search('<h3>(.+?)</h3>', item).group(1)
                else:
                    name = re.search('<meta content="([^"]+?)" itemprop="name">', item).group(1)
                displayname = name
                # Movie year is the 4-digit suffix of the slug URL.
                year_match = re.search('\-([0-9]{4})$', url)
                if year_match:
                    year = year_match.group(1)
                    displayname += ' (' + year + ')'
            self.AddContent(list, item_indexer, mode, displayname, item_id, type, url=url, name=name, year=year, season=season, episode=episode)
def GetSection(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    # Populate `list` with the user's favorites hierarchy.
    #
    # section == 'main'      -> top-level favorite categories (AddSection each).
    # section == 'A'         -> sub-sections of A if any, else A's favorites.
    # section == 'A|B'       -> favorites of sub-section B under A (these
    #                           additionally carry 'item_title').
    #
    # Fix over original: the two near-identical ~17-key dict literals are
    # collapsed into the _favorite_to_item helper so the schemas cannot drift.
    from universal import favorites
    fav = favorites.Favorites(common.addon_id)
    if section == 'main':
        main_sections = fav.get_my_main_sections()
        for main_section in main_sections:
            self.AddSection(list, indexer, main_section['title'], main_section['title'], type=main_section['title'].lower())
    else:
        section_items = section.split('|')
        section_items_count = len(section_items)
        if section_items_count == 1:
            items = fav.get_my_sub_sections(section_items[0])
            if len(items) > 0:
                # The section has named sub-sections: list them.
                for item in items:
                    self.AddSection(list, indexer, section + '|' + item['title'], item['title'], type=(section + ' ' + item['title']).lower())
            else:
                # No sub-sections: list the favorites stored directly under it.
                items = fav.get_my_favorites(section_items[0])
                for item in items:
                    list.append(self._favorite_to_item(item))
        elif section_items_count == 2:
            items = fav.get_my_favorites(section_items[0], section_items[1])
            for item in items:
                entry = self._favorite_to_item(item)
                # Two-part sections also expose the stored item_title.
                entry['item_title'] = item['infolabels'].get('item_title', '')
                list.append(entry)

def _favorite_to_item(self, item):
    # Convert one stored favorite record into the plain dict shape the
    # directory listing expects. Missing infolabels default to ''.
    infolabels = item['infolabels']
    return {
        'indexer': infolabels.get('indexer', ''),
        'mode': infolabels.get('mode', ''),
        'title': item['title'],
        'url': infolabels.get('url', ''),
        'id': common.CreateIdFromString(item['title']),
        'website': infolabels.get('indexer_id', ''),
        'indexer_id': infolabels.get('indexer_id', ''),
        'name': infolabels.get('name', ''),
        'year': infolabels.get('year', ''),
        'season': infolabels.get('season', ''),
        'episode': infolabels.get('episode', ''),
        'type': infolabels.get('type', ''),
        'img': infolabels.get('img', ''),
        'favorite': 'true',
        'imdb_id': infolabels.get('imdb_id', ''),
        'video_type': infolabels.get('video_type', ''),
        'urls': infolabels.get('urls', ''),
    }