def Handle(self, url, html, params=None):
    """Detect a Google reCAPTCHA embed in *html* and solve it interactively.

    Returns a dict with the challenge key and the user's solution on
    success, an error dict when no text was entered, or None when the
    page contains no recaptcha script at all.
    """
    import re
    recaptcha = re.search('<script type=[\'"]{1}text/javascript[\'"]{1} src=[\'"]{1}(http://www.google.com/recaptcha/api/.+?)[\'"]{1}', html)
    if recaptcha:
        recaptcha = recaptcha.group(1)
        from entertainment.net import Net
        net = Net(cached=False)
        if 'recaptcha_ajax' in recaptcha:
            import random
            # ajax variant: rebuild the challenge URL with the site key and a cache-buster
            recaptcha = 'http://www.google.com/recaptcha/api/challenge?k=%s&ajax=1&cachestop=%s' % ( params['site'], str(random.random()) )
        html = net.http_GET( recaptcha ).content
        import re
        # "challenge : '<key>'" inside the returned JS carries the puzzle id
        hugekey=re.search('challenge \: [\'"]{1}(.+?)[\'"]{1}', html).group(1)
        solution = self.Solve(net.http_GET("http://www.google.com/recaptcha/api/image?c=%s" % hugekey ).content)
        if solution:
            return {'status':'ok', 'captcha_type':self.name, 'challenge':hugekey, 'captcha':solution, 'recaptcha_challenge_field': hugekey,'recaptcha_response_field': solution}
        else:
            return {'status':'error', 'message':'Image-Text not entered', 'captcha_type':self.name}
    return None
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search clickplay.to for the show and hand the matching episode
    page URL to GetFileHosts."""
    from entertainment.net import Net
    net = Net()
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    search_term = name.replace(' ','%20')
    search_term = search_term.lower()
    # NOTE(review): this strips 'the' anywhere in the string (e.g. inside
    # 'theory'), not just as a leading article — confirm intent
    search_term = search_term.replace('the','')
    import re
    url='http://clickplay.to/search/'+search_term+'?'
    link=net.http_GET(url).content
    if 'Unfortunately there are no results right now' in link:
        return None
    match = re.compile('<div id="video_list">\s*<a href="(.+?)" class="article" data="(.+?)">.+?<span class="article-title">(.+?) \((.+?)\)</span>',re.DOTALL).findall(link)
    for tv_url , data, tvshowname, tvshowyear in match:
        if year in tvshowyear:
            # NOTE(review): season_pull/episode_pull are computed but never
            # used; the raw season/episode strings are used in the URLs below
            season_pull = "0%s"%season if len(season)<2 else season
            episode_pull = "0%s"%episode if len(episode)<2 else episode
            url=tv_url+'season-'+season+'/'
            html=net.http_GET(url).content
            link_url = re.compile('<a href="(.+?)" title=".+?Episode '+episode+' / .+?" class=".+?">').findall(html)[0]
            self.GetFileHosts(link_url, list, lock, message_queue)
def Resolve(self, url):
    """Resolve *url* through Real-Debrid's unrestrict service.

    Maintains a cookie-based session (logs in when the cookie jar is
    missing or the session looks stale), then asks ajax/unrestrict.php
    for the 'main_link'. Returns the direct stream URL or None.
    """
    try:
        from entertainment.net import Net
        net = Net(cached=False)
        import os
        cookie_file = os.path.join(common.cookies_path, 'realdebrid.lwp')
        if net.set_cookies(cookie_file) == False:
            # no saved cookies yet: log in and persist the session
            import urllib
            credentials = urllib.urlencode({'user' : self.Settings().get_setting('realdebrid-username'), 'pass' : self.Settings().get_setting('realdebrid-password')})
            content = net.http_GET(self.base_url + 'ajax/login.php?' + credentials ).content
            net.save_cookies(cookie_file)
        elif 'My Account' not in net.http_GET(self.base_url).content:
            # cookies present but session expired: log in again
            import urllib
            credentials = urllib.urlencode({'user' : self.Settings().get_setting('realdebrid-username'), 'pass' : self.Settings().get_setting('realdebrid-password')})
            content = net.http_GET(self.base_url + 'ajax/login.php?' + credentials ).content
            net.save_cookies(cookie_file)
        content = net.http_GET(self.base_url + 'ajax/unrestrict.php?link=' + url).content
        import re
        r = re.search('[\'"]{1}main_link[\'"]{1}\:[\'"]{1}(.+?)[\'"]{1}', content)
        if r:
            stream_url = r.group(1)
            if stream_url:
                # the JSON response escapes slashes; undo that
                return stream_url.replace('\/', '/')
    except Exception, e:
        common.addon.log(self.name.upper() + ' - Exception occured: %s' % e)
        common.addon.show_small_popup('[B][COLOR white]' + self.name.upper() + '[/COLOR][/B]', '[COLOR red]Exception occured, check logs.[/COLOR]')
    return None
def GetFileHosts(self, url, list, lock, message_queue):
    """Scrape beinmovie player pages and add one 1080P file-host entry
    per embedded player, tagging the language from the movie_lang
    query parameter."""
    import re
    from entertainment.net import Net
    net = Net(cached=False)
    print url
    content = net.http_GET(url).content
    #print content
    match=re.compile('movie-player/(.+?)"').findall(content)
    for URL in match:
        getcontent = net.http_GET('https://beinmovie.com/movie-player.php?'+URL).content
        #print getcontent
        # the player page may quote the src with either " or '
        try:
            FINAL_URL=re.compile('src="(.+?)"').findall(getcontent)[0]
        except:
            FINAL_URL=re.compile("src='(.+?)'").findall(getcontent)[0]
        if len(FINAL_URL)< 8:
            # suspiciously short src: follow the second embedded player instead
            grabsecond = re.compile('movie-player/(.+?)"').findall(getcontent)[1]
            getcontent = net.http_GET('https://beinmovie.com/movie-player.php?'+grabsecond).content
            try:
                FINAL_URL=re.compile(' src="(.+?)"').findall(getcontent)[0]
            except:
                FINAL_URL=re.compile(" src='(.+?)'").findall(getcontent)[0]
        if 'movie_lang=fr' in URL:
            language= 'French'
        elif 'movie_lang=en' in URL:
            language= 'English'
        else:language=''
        self.AddFileHost(list, '1080P', FINAL_URL,host='GOOGLEVIDEO - '+language)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search myvideolinks.eu for the movie or episode and hand every
    matching post URL to GetFileHosts.

    Fixes: removed the unused ``urllib2`` import and merged the movies
    and tv_episodes branches, which duplicated an identical scrape loop.
    """
    import re
    from entertainment.net import Net
    net = Net()
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    if type == 'movies':
        search_url = 'http://www.myvideolinks.eu/index.php?s=%s+%s' % (name.replace(' ', '+'), year)
    elif type == 'tv_episodes':
        # zero-pad single-digit season/episode for the SxxExx search tag
        season_pull = "0%s" % season if len(season) < 2 else season
        episode_pull = "0%s" % episode if len(episode) < 2 else episode
        search_url = 'http://www.myvideolinks.eu/index.php?s=%s+S%sE%s' % (name.replace(' ', '+'), season_pull, episode_pull)
    else:
        return
    html = net.http_GET(search_url).content
    # each result post is an <h4><a ... rel="bookmark"> entry
    for item in re.finditer(r'<h4><a href="(.+?)" rel="bookmark"', html, re.I):
        self.GetFileHosts(item.group(1), list, lock, message_queue)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Find an anime episode on animeultima's search and hand each
    matching episode page URL to GetFileHosts (tv_episodes only)."""
    import urllib2
    import re
    from entertainment.net import Net
    net = Net()
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    #TV Shows = http://www.animeultima.io/search.html?searchquery=pokemon+xy+2013
    if type == 'tv_episodes':
        season_pull = "0%s"%season if len(season)<2 else season
        # NOTE(review): this branch is a no-op ("%s" % episode == episode);
        # possibly meant to zero-pad like season_pull — confirm against the
        # site's epnum markup before changing
        episode_pull = "%s"%episode if len(episode)<2 else episode
        tv_url=self.base_url+'search.html?searchquery=%s+%s' %(name.lower().replace(' ','+'),year)
        html = net.http_GET(tv_url).content
        r = '<ol id="searchresult"><li><h2><a href="(.+?)"><b>.+?</b> .+?</a>'
        # take the first search result as the show page
        search_result = re.compile(r).findall(html)[0]
        content2 = net.http_GET(search_result).content
        eps=re.compile('<td class="epnum">'+episode_pull+'</td><td class="title"><a href="(.+?)">.+?</a></td><td class=.+?</td><td class="td-lang-subbed">').findall(content2)
        for url in eps:
            tv_url=url
            self.GetFileHosts(tv_url, list, lock, message_queue)
def Resolve(self, url):
    """Resolve playsominaltv/sominaltvfilms/desionlinetheater links.

    Logs in to playsominaltv when needed (carrying over every hidden
    form field), decodes the page and follows the embedded iframe.
    Relies on module-level `re`, `dekode` and `MovieSource`.
    """
    from entertainment.net import Net
    net = Net()
    if 'playsominaltv.com' in url:
        net._cached = False
        premium_url = 'http://www.playsominaltv.com/login/?redirect_to=' + url
        content = net.http_GET(premium_url, headers={'Referer':url}).content
        params={'log':self.Settings().get_setting('user'), 'pwd':self.Settings().get_setting('pass'), 'wp-submit':'1'}
        # copy every hidden input of the login form into the POST data
        for hidden_param in re.finditer('(<input.+?type=[\'"]{1}hidden[\'"]{1}.+?/>)', content):
            hidden_param_input = hidden_param.group(1)
            param_name = re.search('name=[\'"]{1}(.+?)[\'"]{1}', hidden_param_input).group(1)
            param_value = re.search('value=[\'"]{1}(.+?)[\'"]{1}', hidden_param_input).group(1)
            params.update( { param_name : param_value } )
        content = net.http_POST('http://www.playsominaltv.com/login/',params,headers={'Referer':premium_url}).content
    if 'playsominaltv.com' in url or 'sominaltvfilms.com' in url or 'desionlinetheater.com' in url:
        content = net.http_GET(url).content
        content=dekode(content)
        if content:
            source_url=re.search('<iframe.+?src.+?(http.+?)[\'"\\\\]{1}', content)
            if source_url:
                return MovieSource.Resolve(self, source_url.group(1))
            else:
                return MovieSource.Resolve(self, url)
    return url
def ResolveLive(self, content, url):
    """Build an rtmp command string for castup.me embeds in *content*.

    Returns a 4-tuple (resolved, found, content, url); on success
    content holds the rtmpdump-style string (streamer + playpath +
    swfUrl + pageUrl + token).
    """
    import re
    from entertainment.net import Net
    net = Net()
    new_content = re.search("<script.+?fid=[\"'](.+?)[\"'].+?src=[\"']http://www\.castup\.me/js/embed\.js[\"']", content)
    if not new_content:
        # indirect embed: follow the channel.php page and look again
        new_content = re.search('src=[\'"]{1}(http://www\.castup\.me/channel\.php\?id=.+?)[\'"]{1}', content)
        if new_content:
            new_url = new_content.group(1)
            new_content = net.http_GET( new_content.group(1), headers={'Referer':url} ).content
            url = new_url
            new_content = re.search("<script.+?fid=[\"'](.+?)[\"'].+?src=[\"']http://www\.castup\.me/js/embed\.js[\"']", new_content)
    if new_content:
        page_url = 'http://www.castup.me/embed.php?channel=' + new_content.group(1)
        content = net.http_GET( page_url, headers={'Referer':url} ).content
        swf_url = re.search( "SWFObject\([\"'](.+?)[\"']" ,content).group(1)
        if 'castup.me' not in swf_url:
            # relative swf path: qualify it with the site host
            swf_url = 'http://www.castup.me%s' % swf_url
        playpath = re.search( "so.addVariable\([\"']file[\"'].+?[\"'](.+?)[\"']" ,content).group(1)
        streamer = re.search( "so.addVariable\([\"']streamer[\"'].+?[\"'](.+?)[\"']" ,content).group(1)
        token = re.search( "so.addVariable\([\"']token[\"'].+?[\"'](.+?)[\"']" ,content).group(1)
        content = streamer + ' playpath=' + playpath + ' swfUrl=' + swf_url + ' pageUrl=' + page_url + ' token=' + token + ' swfVfy=1 timeout=15 live=1'
        return (True, True, content, url)
    return (False, False, content, url)
def Resolve(self, url):
    """Follow the 'streamContinueButton' link on *url*, extract the
    player iframe and resolve it via istream.ResolveUrl.

    Pops a small notification while resolving; returns the playable
    URL, or None on failure (error shown to the user).
    """
    from entertainment.net import Net
    import re
    import time
    net = Net()
    try:
        common.addon.show_small_popup('[B][COLOR blue]I[/B][/COLOR]stream: [B][COLOR white]' + self.name.upper() + '[/B][/COLOR]', 'Resolving', 700, self.icon)
        content = net.http_GET(url).content
        new_url = re.search('streamContinueButton.+?href="(.+?)"', content,re.I)
        from entertainment import istream
        html = net.http_GET(new_url.group(1)).content
        r = re.search(r'<iframe\s*src="(.+?)"\s*allowfullscreen',html,re.I|re.DOTALL)
        if r:
            play_url = istream.ResolveUrl(r.group(1))
            return play_url
    except Exception, e:
        common.addon.log(self.name.upper() + ' - Exception occured: %s' % e)
        common.addon.show_small_popup('[B][COLOR blue]I[/B][/COLOR]stream: [B][COLOR white]' + self.name.upper() + '[/B][/COLOR]', '[COLOR red]'+str(e)+'[/COLOR]', 700, self.icon)
    return None
def Resolve(self, url):
    """Log in to hdtvshows.net (iPhone UA so the site serves a plain
    <video> tag) and return the episode's direct stream URL.
    Relies on module-level `os` and `cookie_path`."""
    print url
    import re
    from entertainment.net import Net
    net = Net(cached=False,user_agent='Apple-iPhone/')
    tv_user = self.Settings().get_setting('tv_user')
    tv_pwd = self.Settings().get_setting('tv_pwd')
    loginurl = 'http://hdtvshows.net/reg.php'
    html = net.http_GET(loginurl).content
    # scrape the CakePHP CSRF token fields off the login form
    match=re.compile('name="Token(.+?)" value="(.+?)"').findall(html)
    _Token=re.compile('name="data\[\_Token\]\[fields\]" value="(.+?)"').findall(html)[0]
    data = {'_method':'POST','subscriptionsPass': tv_pwd, 'UserUsername': tv_user, 'Token'+match[0][0]:'login','data[_Token][fields]':_Token}
    headers = {'Host':'hdtvshows.net', 'Origin':'http://hdtvshows.net', 'Referer':'http://hdtvshows.net/login.php', 'X-Requested-With':'XMLHttpRequest'}
    # NOTE(review): the response object (not .content) is assigned here;
    # the POST is only needed for its session cookies, which appears intentional
    html = net.http_POST(loginurl, data, headers)
    cookie_jar = os.path.join(cookie_path, "hdtvshows.lwp")
    if os.path.exists(cookie_path) == False:
        os.makedirs(cookie_path)
    net.save_cookies(cookie_jar)
    net.set_cookies(cookie_jar)
    html = net.http_GET(url).content
    match=re.compile('<video id="ipadvideo" src="(.+?)"').findall(html)
    # escape '|' so the player does not treat it as a header separator
    return match[0].replace('|','%7C')
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Scan the site's menu list for the show, then forward every entry
    on the show page whose link text carries the sXXeXX tag to
    GetFileHosts."""
    import re
    from entertainment.net import Net
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    search_term = name
    # zero-pad single-digit season/episode so the tag matches sXXeXX
    if len(episode) < 2:
        episode = '0' + episode
    if len(season) < 2:
        season = '0' + season
    helper_term = 's%se%s' % (season, episode)
    net = Net()
    menu_html = net.http_GET(self.base_url + '/play/menulist').content
    for show_url, show_title in re.compile("<li><a href='(.+?)'>(.+?)<").findall(menu_html):
        if search_term.lower() not in show_title.lower():
            continue
        if 'http' not in show_url:
            show_url = self.base_url + show_url
        page = net.http_GET(show_url).content
        # crude split on 'href' isolates each anchor's tail for matching
        for piece in page.split('href'):
            if helper_term in piece:
                episode_path = re.compile('="(.+?)">').findall(piece)[0]
                self.GetFileHosts(self.base_url + episode_path, list, lock, message_queue)
def ResolveLive(self, content, url):
    """Resolve serverhd.eu embeds into an rtmp command string.

    Follows channel.php -> embed.php, base64-decodes the hidden
    ssx1/ssx4 inputs into playpath/streamer, and returns
    (resolved, found, content, url).
    """
    import re
    new_content = re.search("<script.+?src=[\"'](http://www\.serverhd\.eu/channel\.php.+?)[\"']", content)
    if new_content:
        page_url = new_content.group(1)
        from entertainment.net import Net
        net = Net()
        content = net.http_GET( page_url, headers={'Referer':url} ).content
        new_content = re.search("<iframe.+?src=[\"'](http://www\.serverhd\.eu/embed\.php.+?)[\"']", content)
        page_url = new_content.group(1)
        content = net.http_GET( page_url, headers={'Referer':url} ).content
        import base64
        swf_url = re.search( "SWFObject\([\"'](.+?)[\"']" ,content).group(1)
        # playpath and streamer are carried base64-encoded in hidden inputs
        playpath = base64.b64decode( re.search( "<input type=[\"']hidden[\"'] id=[\"']ssx1[\"'] value=[\"'](.+?)[\"']" ,content).group(1) )
        streamer = base64.b64decode( re.search( "<input type=[\"']hidden[\"'] id=[\"']ssx4[\"'] value=[\"'](.+?)[\"']" ,content).group(1) )
        content = streamer + ' playpath=' + playpath + ' swfUrl=' + swf_url + ' pageUrl=' + page_url + ' timeout=20 live=true'
        return (True, True, content, url)
    return (False, False, content, url)
def ResolveLive(self, content, url):
    """Resolve kingofplayers CDN embeds (.js or .html) to an rtmp
    command string, following one extra .html hop when the first page
    carries no rtmp URL. Returns (resolved, found, content, url)."""
    import re
    new_content = re.search('src=[\'"]{1}(http://cdn\.kingofplayers\.com/.+?\.(?:js|html))[\'"]{1}', content)
    if new_content:
        page_url = new_content.group(1)
        from entertainment.net import Net
        net = Net()
        new_content = net.http_GET( page_url, headers={'Referer':url} ).content
        streamer = re.search('[,\: \'"=]{1,5}((?:rtmp\://|rtmpe\://).+?[^\'"&=]+?)[\'"&]{1}', new_content)
        if not streamer:
            # no rtmp link yet: descend into the nested .html embed
            new_content = re.search('src=[\'"]{1}(http://cdn\.kingofplayers\.com/.+?\.html)[\'"]{1}', new_content)
            new_url = new_content.group(1)
            new_content = net.http_GET( new_url, headers={'Referer':page_url} ).content
            page_url = new_url
            streamer = re.search('[,\: \'"=]{1,5}((?:rtmp\://|rtmpe\://).+?[^\'"&=]+?)[\'"&]{1}', new_content)
        streamer = streamer.group(1)
        swf_url = re.search('[,\: \'"=]{1,5}(http\://.+?\.swf)[\'"&]{1}', new_content).group(1)
        playpath = re.search('file[,\: \'"=]*([^\'"]+?)[\'"&]{1}', new_content).group(1)
        content = streamer + ' playpath=' + playpath + ' swfUrl=' + swf_url + ' pageUrl=' + page_url + ' timeout=15 live=1'
        return (True, True, content, url)
    return (False, False, content, url)
def Resolve(self, url):
    """Resolve a host/?id= style link by chasing nested iframes until a
    frame belonging to the host itself is found, then defer to
    TVShowSource.Resolve. Returns '' when *url* is not an http link."""
    # NOTE(review): custom_url is never used afterwards — confirm whether
    # self.get_url() has side effects before removing the call
    custom_url = self.get_url()
    if 'http' in url:
        #print url
        from entertainment.net import Net
        import re
        net = Net()
        content = net.http_GET(url).content
        # get host from url
        host = re.search('/([A-Za-z0-9]+?)\.php', url).group(1)
        #print host
        id = re.search('\?id=(.*)', url).group(1)
        #print id
        # iframe source
        iframe_src = re.search('(?i)<iframe.+?src=[\'"](.+?' + id + '.+?)[\'" >]', content).group(1)
        #print iframe_src
        # trim a trailing quote the loose regex may have captured
        if iframe_src.endswith("'") or iframe_src.endswith('"'):
            iframe_src = iframe_src[:-1]
        if host.lower() in iframe_src.lower():
            final = iframe_src
        else:
            # one more hop: the frame points at an intermediate page
            content = net.http_GET(iframe_src).content
            final = re.search('(?i)<iframe.+?src=[\'"](.+?' + id + '.+?)[\'"]', content).group(1)
        #print final
        return TVShowSource.Resolve(self, final)
    return ''
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Log in to einthusan.com with a fresh cookie jar, locate the
    movie's watch.php page via Google search and pass it on to
    GetFileHosts. Relies on module-level `os`."""
    from entertainment.net import Net
    import re
    net = Net(cached=False)
    tv_user = self.Settings().get_setting('tv_user')
    tv_pwd = self.Settings().get_setting('tv_pwd')
    # start from a clean session: drop any stale cookies (best effort)
    if os.path.exists(self.cookie_file):
        try:
            os.remove(self.cookie_file)
        except:
            pass
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Connection': 'keep-alive', 'Cache-Control': 'max-age=0', 'Host': 'www.einthusan.com', 'Origin': 'http://www.einthusan.com', 'Referer': 'http://www.einthusan.com/index.php?lang=hindi', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'}
    # GET first to receive the session cookie, then POST the credentials
    net.http_GET('http://www.einthusan.com/etc/login.php')
    net.http_POST('http://www.einthusan.com/etc/login.php', {'username': str(tv_user), 'password': str(tv_pwd)}, headers, auto_read_response=False).content
    net.save_cookies(self.cookie_file)
    net.set_cookies(self.cookie_file)
    name = self.CleanTextForSearch(name)
    import urllib
    movie_url=self.GoogleSearchByTitleReturnFirstResultOnlyIfValid('einthusan.com', name, 'watch.php', item_count=10, title_extrctr='(.+?) (?:Hindi|hindi|Tamil|tamil|Telugu|telugu|Malayalam|malayalam) movie')
    if movie_url:
        self.GetFileHosts(movie_url, list, lock, message_queue)
def ResolveLive(self, content, url):
    """Turn a coolsport.tv script/iframe embed into the player page
    content for further resolution.

    Returns (False, True, page_content, page_url) when an embed was
    found and fetched, otherwise (False, False, content, url).
    """
    import re
    new_content = re.search("<script.+?id=[\"'](.+?)[\"'].+?src=[\"']http://embeds\.coolsport\.tv/(.+?)\.js[\"']", content)
    if new_content:
        headers = {'Referer':url}
        # the .js embed maps to a .php page carrying the stream data
        page_url = 'http://embeds.coolsport.tv/' + new_content.group(2) + '.php?id=' + new_content.group(1)
        from entertainment.net import Net
        net = Net()
        content = net.http_GET( page_url, headers=headers ).content
        return (False, True, content, page_url)
    # iframe variant: fetch the channel page first, then its script embed
    new_content = re.search("<iframe.+?src=[\"'](http://www.coolsport.tv/.+?)[\"']", content)
    if new_content:
        page_url = new_content.group(1)
        from entertainment.net import Net
        net = Net()
        content = net.http_GET( page_url, headers={'Referer':url} ).content
        new_content = re.search("<script.+?id=[\"'](.+?)[\"'].+?src=[\"']http://embeds\.coolsport\.tv/(.+?)\.js[\"']", content)
        headers = {'Referer':page_url}
        page_url = 'http://embeds.coolsport.tv/' + new_content.group(2) + '.php?id=' + new_content.group(1)
        content = net.http_GET( page_url, headers=headers ).content
        return (False, True, content, page_url)
    return (False, False, content, url)
def stop_local_proxy():
    """Ask the local proxy server on port 12345 to shut down, if the
    global 'running' flag says it is up, then clear the flag."""
    running_flag = GetGlobalProperty(gb_Local_Proxy_Server_Running)
    if running_flag and running_flag == 'True':
        import time
        # give any in-flight request a grace period before stopping
        time.sleep(10)
        from entertainment.net import Net
        net = Net(cached=False)
        try:
            net.http_GET('http://localhost:12345/stop')
        except:
            # best effort: the server may already be gone
            pass
        ClearGlobalProperty(gb_Local_Proxy_Server_Running)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """List seasons or episodes for a show using tvrage.com.

    Finds the show page via Google search, falling back to tvrage's own
    search when that fails. Episodes airing after today are skipped.
    Relies on module-level `common`.
    """
    import urllib
    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    from entertainment.net import Net
    net = Net()
    import re
    show_url = self.GoogleSearchByTitleReturnFirstResultOnlyIfValid('tvrage.com', name, 'shows', item_count=2, title_extrctr=['(.+?) tv show', '(.+?) \- tvrage'], exact_match=True)
    if show_url == '' :
        # Google gave nothing usable: fall back to tvrage's search page
        tv_url= 'http://www.tvrage.com/search.php?search=%s&searchin=2&button=Go' %(name.lower().replace(' ','+'))
        html = net.http_GET(tv_url).content
        r = re.search(r'<h2><a href="(.+?)">(.+?)</a> <img src=\'.+?\' /> </h2>\s*</dt>\s*<dd class="img"> <a href="/(.+?)">', html)
        show_url = 'http://www.tvrage.com' + r.group(1)
    item_url = show_url + '/episode_list'
    #year = year
    #tv_url= 'http://www.tvrage.com/search.php?search=%s+%s&searchin=2&button=Go' %(name.lower().replace(' ','+'),year)
    #<h2><a href="/Breaking_Bad">Breaking Bad</a>
    #http://www.tvrage.com
    #html = net.http_GET(tv_url).content
    #r = re.search(r'<h2><a href="(.+?)">(.+?)</a> <img src=\'.+?\' /> </h2>\s*</dt>\s*<dd class="img"> <a href="/(.+?)">', html)
    #item_url = 'http://www.tvrage.com' + r.group(1) + '/episode_list'
    #item_name = r.group(2)
    #item_id = r.group(3)
    import datetime
    todays_date = datetime.date.today()
    content = net.http_GET(item_url).content
    if type == 'tv_seasons':
        match=re.compile('>S-(.+?)<').findall(content)
        for seasonnumber in match:
            item_url = item_url
            item_title = 'Season ' + seasonnumber
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title, item_id, 'tv_episodes', url=item_url, name=name, season=seasonnumber)
    elif type == 'tv_episodes':
        new_url = url+'/'+season
        content2 = net.http_GET(new_url).content
        match=re.compile("<td width='40' align='center'><a href='(.+?)' title='.+?'>.+?x(.+?)</i></a></td>\s*<td width='80' align='center'>(.+?)</td>\s*<td style='padding-left: 6px;'> <a href='.+?/([0-9]*)'>(.+?)</a> </td>",re.DOTALL).findall(content2)
        for item_url, item_v_id_2, item_date, fixscrape, item_title in match:
            # normalise episode number ('05' -> '5')
            item_v_id_2 = str(int(item_v_id_2))
            item_fmtd_air_date = self.get_formated_date( item_date )
            # don't list episodes that haven't aired yet
            if item_fmtd_air_date.date() > todays_date:
                break
            item_id = common.CreateIdFromString(name + '_season_' + season + '_episode_' + item_v_id_2)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, item_id, type, url=item_url, name=name, season=season, episode=item_v_id_2)
def GetFileHosts(self, id, other_names, region, language, list, lock, message_queue):
    """Add ITV Player live links for the channel named by *id*.

    ITV2/ITV3 go through the Mercury SOAP playlist service; ITV1 and
    ITV4 use their static channels XML instead. Quality is mapped from
    the bitrate embedded in each playpath.
    """
    quality_dict = {'1200':'HD', '800':'SD', '600':'LOW'}
    import re
    from entertainment.net import Net
    net = Net(cached=False)
    if not 'itv' in id:
        return
    if '2' in id or '3' in id:
        # strip the 'itv' prefix so the SOAP ids become sim2/itv2 etc.
        id=id.split('itv')[1]
    name='ITV '+id
    SoapMessage=self.TEMPLATE('sim'+id,'itv'+id)
    headers={'Content-Length':'%d'%len(SoapMessage),'Content-Type':'text/xml; charset=utf-8','Host':'mercury.itv.com','Origin':'http://www.itv.com','Referer':'http://www.itv.com/Mercury/Mercury_VideoPlayer.swf?v=null','SOAPAction':"http://tempuri.org/PlaylistService/GetPlaylist",'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'}
    response = net.http_POST("http://mercury.itv.com/PlaylistService.svc",SoapMessage, headers=headers).content
    response = response.split('VideoEntries')[1]
    if id =='itv1':
        # ITV1 bypasses the SOAP response and uses the static channel XML
        name='ITV 1'
        url='http://www.itv.com/mediaplayer/xml/channels.itv1.xml'
        response = net.http_GET(url).content
    if id =='itv4':
        name='ITV 4'
        url='http://www.itv.com/mediaplayer/xml/channels.itv4.xml'
        response = net.http_GET(url).content
    rtmp=re.compile('<MediaFiles base="(.+?)"').findall(response)[0]
    match=re.compile('CDATA\[(.+?)\]').findall(response)
    for playpath in match:
        print playpath
        # bitrate is embedded in the playpath, e.g. ..._1200@...
        res=playpath.split('_')[1]
        res=res.split('@')[0]
        resolved_media_url=rtmp+' playpath='+playpath+' swfUrl=http://www.itv.com/mediaplayer/ITVMediaPlayer.swf?v=12.18.4 live=true timeout=10 swfvfy=true'
        self.AddLiveLink( list, name, resolved_media_url, language='English',host='ITV PLAYER',quality=quality_dict.get(res, 'NA') )
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Populate *list* with movies parsed from a JSON feed.

    Sections: 'new_releases', 'trakt_watchlist', 'list_name' (a trakt
    custom list); any other section is treated as a paged TMDb-style
    feed with a 'results' array. Relies on module-level `Net`, `json`
    and `common`.
    """
    net = Net()
    type = common.indxr_Movies
    mode = common.mode_File_Hosts
    indexer = common.indxr_Movies
    if section == 'new_releases':
        response = net.http_GET(url).content
        stuff = json.loads(response)
        for movies in stuff['movies']:
            title = movies['title']
            num = movies['year']
            name = title.encode('utf8')
            year = str(num)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year)
    elif section == 'trakt_watchlist':
        response = net.http_GET(url).content
        stuff = json.loads(response)
        for movies in stuff:
            name = movies['title']
            if name:
                name = name.encode('utf8')
                year = str(movies['year'])
                self.AddContent(list, indexer, mode, name + ' (' + year +')', '', type, '', name, year)
    elif section == 'list_name':
        response = net.http_GET(url).content
        stuff = json.loads(response)
        for items in stuff['items']:
            movies = items['movie']
            name = movies['title']
            if name:
                name = name.encode('utf8')
                year = str(movies['year'])
                self.AddContent(list, indexer, mode, name + ' (' + year +')', '', type, '', name, year)
    else:
        # paged feed: default to page 1, append the page query parameter
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        url = url + '&page=' + page
        response = net.http_GET(url).content
        stuff = json.loads(response)
        total_pages = stuff['total_pages']
        self.AddInfo(list, indexer, section, url, type, str(page), str(total_pages))
        for movies in stuff['results']:
            name = movies['title']
            date = movies['release_date']
            # year = first 4 chars of the ISO release date
            year = str(date)[0:4]
            name = name.encode('utf8')
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + year +')', '', type, '', name, year)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search movietv.to's AJAX listing for *name* and resolve the
    matching movie (direct <source> tag) or episode (getLink JSON)
    stream URL, handing it to GetFileHosts."""
    import urllib2
    import re
    from entertainment.net import Net
    net = Net(cached=False,user_agent='Magic Browser')
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    wait=False
    new_url='http://movietv.to/index/loadmovies'
    if type == 'tv_episodes':
        types='tv'
        r='href="/series/(.+?)".+?movie-title">(.+?)</h2>'
        NEW='http://movietv.to/series/'
    else:
        types='movie'
        r='href="/movies/(.+?)".+?movie-title">(.+?)</h2>'
        NEW='http://movietv.to/movies/'
    data={'loadmovies':'showData','page':'1','abc':'All','genres':'','sortby':'Popularity','quality':'All','type':types,'q':name}
    content=net.http_POST(new_url,data,headers={'Referer':'http://movietv.to'}).content
    match=re.compile(r,re.DOTALL).findall (content)
    print 'wwwwwwwwwwwwwwwwwwwwwwwwwwwwwww'
    print match
    for URL , TITLE in match:
        print TITLE
        print URL
        if name.lower() in TITLE.lower():
            if type == 'tv_episodes':
                # episode streams come from a JSON getLink endpoint keyed by series id
                id=URL.split('-')[0]
                LINKURL='http://movietv.to/series/getLink?id=%s&s=%s&e=%s' % (id,season,episode)
                contents=net.http_GET(LINKURL).content
                import json
                match=json.loads(contents)['url']
            else:
                contents=net.http_GET(NEW+URL).content
                match=re.compile('<source src="(.+?)" type=\'video/mp4\'>').findall(contents)[0]
            self.GetFileHosts(match, list, lock, message_queue,URL)
def Resolve(self, url):
    """Resolve an openload embed via the api.openload.io dlticket flow.

    Shows the captcha image in a dialog, prompts the user for the text,
    waits the API-mandated delay, then requests the download link.
    Returns the direct URL (with ?mime=true) or '' when the bandwidth
    limit is hit. Relies on module-level `common`.
    """
    from entertainment.net import Net
    import re ,json,urllib,time,os
    profile_path = common.profile_path
    puzzle_img = os.path.join(profile_path, 'captchas', '%s.png') % self.name
    net = Net(user_agent='Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko')
    # file id appears after /embed/ or /f/ in the url
    id = re.compile('//.+?/(?:embed|f)/([0-9a-zA-Z-_]+)').findall(url)[0]
    url = 'https://api.openload.io/1/file/dlticket?file=%s' % id
    result = net.http_GET(url).content
    result = json.loads(result)
    print result
    if 'bandwidth usage too high' in str(result):
        return ''
    else:
        cap = result['result']['captcha_url']
        import xbmcgui
        img = xbmcgui.ControlImage(550,15,300,57,cap)
        wdlg = xbmcgui.WindowDialog()
        wdlg.addControl(img)
        wdlg.show()
        import xbmc
        kb = xbmc.Keyboard('', 'Please enter the text in the image', False)
        kb.doModal()
        capcode = kb.getText()
        if (kb.isConfirmed()):
            userInput = kb.getText()
        # honour the API-mandated wait before requesting the ticket
        time.sleep(result['result']['wait_time'])
        url = 'https://api.openload.io/1/file/dl?file=%s&ticket=%s' % (id, result['result']['ticket'])
        if not cap == None:
            url += '&captcha_response=%s' % urllib.quote(userInput)
        result = net.http_GET(url).content
        result = json.loads(result)
        url = result['result']['url'] + '?mime=true'
        return url
def Search(self, indexer, keywords, type, list, lock, message_queue, page='', total_pages=''):
    """Search filmstream.me for *keywords*, paging through results and
    adding one content entry per match (title and poster scraped from
    the result grid). Relies on module-level `common`."""
    # stop paging once we've walked past the last known page
    if page and len(page) > 0 and total_pages and len(total_pages) > 0 and int(page) > int(total_pages):
        return
    if page=='':
        page='1'
    from entertainment.net import Net
    net = Net()
    search_url ='%spage/%s/?s=%s' %(self.base_url, page, keywords.replace(' ','+'))
    print search_url
    import re
    html = net.http_GET(search_url).content
    if total_pages == '':
        # discover the page count from the 'last' pagination link
        r= '<a class="last" href="http://filmstream.me/page/(.+?)/'
        try:
            total_pages = re.compile(r).findall(html)[0]
        except:
            total_pages = '1'
    self.AddInfo(list, indexer, 'search', self.base_url, type, str(page), total_pages)
    for item in re.finditer(r'<h2> <a href="(.+?)">(.+?)</a> </h2>\s*<div class=".+?">.+?</div>\s*</div>\s*<a href=".+?" title="">\s*<img src="(.+?)" alt="(.+?)"',html,re.I):
        url=item.group(1)
        name=item.group(4)
        # drop the trailing 'Stream...' suffix from the scraped title
        name=name.split('Stream')[0]
        image=item.group(3)
        name = self.CleanTextForSearch(name)
        self.AddContent(list,indexer,common.mode_File_Hosts,name,'',type, url=url, name=name, img=image, plot='')
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search the site for a movie by name and pass every result whose
    alt text contains the release year to GetFileHosts.

    Fixes: removed the dangling, unterminated ``'''elif …`` dead-code
    string at the end of the original, the duplicate
    ``CleanTextForSearch(name)`` call, the unused ``urllib`` import and
    the stray debug print. Only the 'movies' type was ever handled.
    """
    from entertainment.net import Net
    import re
    name = self.CleanTextForSearch(name)
    name = name.lower()
    net = Net()
    if type == 'movies':
        title = self.CleanTextForSearch(title)
        search_url = self.base_url + '?type=movie&keywords=%s' % name.replace(' ', '+')
        content = net.http_GET(search_url).content
        match = re.compile('href="(.+?)" target="_blank"><img class="image" src=".+?" alt="(.+?)"').findall(content)
        for item_url, item_name in match:
            # the alt text carries the year; use it to disambiguate titles
            if year in item_name:
                self.GetFileHosts(item_url, list, lock, message_queue)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """List seasons or episodes scraped from the show page at *url*.

    Bug fix: the tv_episodes branch referenced ``item_id`` without ever
    assigning it (NameError on the first episode); it is now derived
    from the show name, season and episode number, matching the
    convention used by the sibling tvrage indexer. The episode-loop
    variable no longer shadows the ``url`` parameter.
    """
    import urllib
    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    name = (name).lower()
    import re
    from entertainment.net import Net
    net = Net(cached=False)
    net.set_cookies(self.cookie_file)
    content = net.http_GET(url).content
    if type == 'tv_seasons':
        match = re.compile('<br><br><b>(.+?)x').findall(content)
        for seasonnumber in match:
            item_title = 'Season ' + seasonnumber
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title, item_id, 'tv_episodes', url=url, name=name, season=seasonnumber)
    elif type == 'tv_episodes':
        match = re.compile("<br><b>" + season + "x(.+?)\s-\s<a style=.+?color.+?\shref='/(.+?)'>(.+?)</a>").findall(content)
        for item_v_id_2, ep_path, item_title in match:
            # zero-pad, then normalise ('05' -> '5') to match the site ids
            season = "0%s" % season if len(season) < 2 else season
            item_v_id_2 = "0%s" % item_v_id_2 if len(item_v_id_2) < 2 else item_v_id_2
            item_url = self.base_url + ep_path
            item_v_id_2 = str(int(item_v_id_2))
            # was missing in the original: build a unique id per episode
            item_id = common.CreateIdFromString(name + '_season_' + season + '_episode_' + item_v_id_2)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, item_id, type, url=item_url, name=name, season=season, episode=item_v_id_2)
def GetFileHosts(self, url, list, lock, message_queue):
    """Extract host links from a post page and add one file-host entry
    per link, with quality inferred from the post's release title.

    Bug fix: the quality-boundary character class read
    ``[^a-zA-Z0-0]`` — the range '0-0' covers only the digit 0, so
    quality tags bounded by digits 1-9 were matched incorrectly; it now
    reads ``[^a-zA-Z0-9]``.
    """
    import re
    from entertainment.net import Net
    net = Net()
    content = net.http_GET(url).content
    r = '<li><a href="(http://.+?)">(.+?)</a></li>'
    match = re.compile(r).findall(content)
    match1 = re.compile('rel="bookmark" title=".+?">(.+?)</a></h1>').findall(content)
    urlselect = []
    for url, host in match:
        for res in match1:
            if url not in urlselect:
                urlselect.append(url)   # de-duplicate links across titles
                quality = 'SD'
                # pad with dots so boundary checks work at string edges
                res_lower = '.' + res.lower() + '.'
                for quality_key, quality_value in common.quality_dict.items():
                    if re.search('[^a-zA-Z0-9]' + quality_key + '[^a-zA-Z0-9]', res_lower):
                        quality = quality_value
                        break
                # skip self-links and image hosts
                if 'myvideolinks' in url:
                    url = False
                    continue
                if 'http://i.imgur.com/' in url:
                    url = False
                    continue
                self.AddFileHost(list, quality, url, host=host.upper())
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Scrape one listing page (…/page/N) into *list*, discovering the
    total page count on the first call. The watchlist section bypasses
    the HTTP cache. Relies on module-level `re` and `common`."""
    import urllib
    if section != 'search':
        url = urllib.unquote_plus(url)
    new_url = url
    if not new_url.startswith(self.base_url):
        # stored listing may point at an old domain: swap in base_url
        new_url = re.sub("http\://.*?/", self.base_url, url)
    if page == '':
        page = '1'
    from entertainment.net import Net
    cached = False if section == 'watchlist' else True
    net = Net(cached=cached)
    content = net.http_GET(new_url + '/page/' + page).content
    if total_pages == '' :
        re_page = '<span class=[\'"]{1}pages[\'"]{1}>Page 1 of ([0-9]+)</span>' #'<a class=[\'"]{1}last[\'"]{1}.+?([0-9]+)[\'"]{1}'
        total_pages = re.search(re_page, content)
        if total_pages:
            total_pages = total_pages.group(1)
        else:
            # no pagination marker: distinguish empty from single-page results
            if re.search('0 items found', content):
                page = '0'
                total_pages = '0'
            else:
                page = '1'
                total_pages = '1'
    self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
    item_re = r'(?s)<div class=[\'"]{1}inner[\'"]{1}>.+?<a href=[\'"]{1}(.+?)[\'"]{1}.+?<img src=[\'"]{1}(.+?)[\'"]{1} alt=[\'"]{1}(.+?)[\'"]{1}.+?<p>(.+?)<'
    for item in re.finditer(item_re, content):
        item_url = item.group(1)
        item_img = item.group(2)
        item_alt = item.group(3)
        # alt text is "Name (Year) ..." — split name and year apart
        item_name = re.sub('\([0-9]+\).*', '', item_alt)
        item_year = re.search("\(([0-9]+)", item_alt)
        if item_year:
            item_year = item_year.group(1)
            item_title = item_name + ' (' + item_year + ')'
        else:
            item_year = ''
            item_title = item_name
        if total_pages == '':
            total_pages = '1'
        item_plot = re.sub('^\s', '', common.CleanText(item.group(4), True, True) )
        self.AddContent(list, indexer, common.mode_File_Hosts, item_title, '', type, url=item_url, name=item_name, year=item_year, img=item_img, plot=item_plot)
def GetFileHosts(self, id, other_names, region, language, list, lock, message_queue):
    """Find the filmon.com channel matching *id* (or one of
    *other_names*) and add SD and HD live links keyed by channel id.
    Logs in first when credentials are configured."""
    search_term = id
    from entertainment.net import Net
    net = Net(cached=False)
    user = self.Settings().get_setting('user')
    pwd = self.Settings().get_setting('pwd')
    if user and pwd:
        content = net.http_POST('http://www.filmon.com/user/login', {'login':user, 'password':pwd, 'remember':'1'}, headers={'Referer':self.base_url}).content
        net.save_cookies(self.cookie_file)
    content = net.http_GET(self.base_url).content.encode("utf-8")
    # channel records are embedded JSON fragments: split on their id field
    link = content.split('{"id":')
    import re
    for p in link:
        if '"filmon_' in p:
            title=p.split('"title":"')[1]
            ITEM_TITLE=title.split('"')[0]
            p_id = common.CreateIdFromString( common.CleanTextForSearch(ITEM_TITLE, strip=True) )
            if id == p_id or p_id in other_names :
                channel_id=p.split(',')[0]
                res=['SD','HD']
                for quality in res:
                    # encode quality into the link so the resolver can pick a stream
                    channel_id_with_quality=channel_id + '__' + quality
                    self.AddLiveLink( list, ITEM_TITLE, channel_id_with_quality, language = language.title(),host='FILMON',quality=quality)
                break
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search my.mail.ru's video AJAX API for `name` + `year` and hand every
    long-duration match to GetFileHosts.

    Fix: Python-2-only `print` statement replaced with the call form, which
    behaves identically for a single argument on both Python 2 and 3.
    """
    from entertainment.net import Net
    import re, urllib, json
    net = Net(user_agent='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    name = self.CleanTextForSearch(name)
    search_term = name.lower()
    linkme = 'http://my.mail.ru/cgi-bin/my/ajax?user=&ajax_call=1&func_name=video.get_list&mna=&mnb=&arg_tag=%s&arg_duration=long&arg_sort=&arg_sort_order=desc&arg_hd_exists=&arg_unsafe=0&arg_type=search&arg_offset=0&arg_limit=300'
    new_url = linkme % (name.replace(' ', '+').lower() + '+' + year)
    response = net.http_GET(new_url).content
    # The API answer is a JSON array; the last element carries the item list.
    link = json.loads(response)[-1]
    data = link['items']
    for i in data:
        _url_ = i['ExternalID']
        title = i['Title']
        time = i['DurationFormat']
        # Duration strings longer than 5 chars (e.g. "1:23:45") filter out shorts/clips.
        if len(time) > 5:
            if name.lower() in title.lower():
                if year in title:
                    movie_url = 'http://videoapi.my.mail.ru/videos/' + _url_ + '.json'
                    print(movie_url)
                    self.GetFileHosts(movie_url, list, lock, message_queue, title)
def ResolveLive(self, content, url):
    """Resolve a liveall.tv embed found in `content` into a playable rtmp link.

    Returns a 4-tuple ``(resolved, playable, content_or_link, url)``; when the
    page is not a liveall.tv embed it falls through with
    ``(False, False, content, url)`` unchanged.

    Fix: the rtmp host octets are recovered by dividing four page variables by
    a common factor — the original used `/`, which only works as intended under
    Python 2 integer division; `//` keeps the same result and is Python-3-safe.
    Also removed a dead bare-string statement (leftover commented-out code).
    """
    import re
    new_content = re.search('src=[\'"]{1}http://liveall\.tv/player\.php\?streampage=(.+?)[&\'"]{1}', content)
    if new_content:
        page_url = 'http://liveall.tv/player.php?streampage=%s' % new_content.group(1)
        from entertainment.net import Net
        net = Net()
        content = net.http_GET(page_url, headers={'Referer': url}).content
        # The player page obfuscates the stream host as vars a..d scaled by f.
        var_a = int(re.search('var a = ([0-9]*);', content).group(1))
        var_b = int(re.search('var b = ([0-9]*);', content).group(1))
        var_c = int(re.search('var c = ([0-9]*);', content).group(1))
        var_d = int(re.search('var d = ([0-9]*);', content).group(1))
        var_f = int(re.search('var f = ([0-9]*);', content).group(1))
        var_v_part = re.search('var v_part = [\'"]{1}(.+?)[\'"]{1};', content).group(1)
        swf_url = 'http://wds.liveall.tv/jwplayer.flash.swf'
        streamer = 'rtmp://%s.%s.%s.%s%s' % (
            str(var_a // var_f), str(var_b // var_f),
            str(var_c // var_f), str(var_d // var_f), var_v_part)
        content = streamer + ' swfUrl=' + swf_url + ' pageUrl=' + page_url + ' timeout=15 live=1'
        return (True, True, content, url)
    return (False, False, content, url)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """List movies or TV shows from an iceplayer-style index page and add
    them to `list` (movies as file-host entries, shows as season containers)."""
    import urllib
    import re
    url = urllib.unquote_plus(url)
    custom_url = self.get_url()
    new_url = url
    # Rebase foreign absolute URLs onto the configured site URL.
    if not new_url.startswith(custom_url):
        new_url = re.sub("http\://.*?/", custom_url, url)
    has_sort_token = ('added' not in new_url and 'release' not in new_url
                      and 'popular' not in new_url and 'rating' not in new_url
                      and 'a-z' not in new_url)
    # Default sort, and append sort/section only when the URL has no sort token yet.
    if sort_by == '' and has_sort_token:
        sort_by = 'popular'
    if has_sort_token:
        new_url = new_url + sort_by + '/' + section
    from entertainment.net import Net
    net = Net()
    content = net.http_GET(new_url).content
    self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
    if type == 'movies':
        for item in re.finditer(r"<a href=/ip\.php\?v=(.+?)>(.+?)</a>", content):
            item_v_id = item.group(1)
            item_title = item.group(2)
            year_match = re.search("\(([0-9]+)\)", item_title)
            item_year = year_match.group(1) if year_match else ''
            item_name = re.sub(" \([0-9]+\)", "", item_title)
            item_url = custom_url + 'membersonly/components/com_iceplayer/video.php?vid=' + item_v_id
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, '', type,
                            url=item_url, name=item_name, year=item_year)
    elif type == 'tv_shows':
        for item in re.finditer(r"<a href=/tv/series/(.+?)>(.+?)</a>", content):
            item_v_id = item.group(1)
            item_title = item.group(2)
            year_match = re.search("\(([0-9]+)\)", item_title)
            item_year = year_match.group(1) if year_match else ''
            item_name = re.sub(" \([0-9]+\)", "", item_title)
            item_url = custom_url + 'tv/series/' + item_v_id
            self.AddContent(list, indexer, common.mode_Content, item_title, '', 'tv_seasons',
                            url=item_url, name=item_name, year=item_year)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Expand an iceplayer-style show page into seasons, or a season into
    its episodes, adding each to `list`."""
    import urllib
    import re
    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    custom_url = self.get_url()
    new_url = url
    # Rebase foreign absolute URLs onto the configured site URL.
    if not new_url.startswith(custom_url):
        new_url = re.sub("http\://.*?/", custom_url, url)
    from entertainment.net import Net
    net = Net()
    content = net.http_GET(new_url).content
    if type == 'tv_seasons':
        for item in re.finditer('</a>Season ([0-9]+)', content):
            season_num = item.group(1)
            season_title = 'Season ' + season_num
            season_id = common.CreateIdFromString(title + ' ' + season_title)
            # Seasons live on the same page, so each season reuses new_url.
            self.AddContent(list, indexer, common.mode_Content, season_title, season_id,
                            'tv_episodes', url=new_url, name=name, year=year, season=season_num)
    elif type == 'tv_episodes':
        season_content = re.search('>Season ' + season + ' (.*)', content).group(1)
        episode_re = r"<a href=/ip\.php\?v=(.+?)>" + season + "x([0-9]+) (.+?)</a>"
        for item in re.finditer(episode_re, season_content):
            vid = item.group(1)
            ep_num = str(int(item.group(2)))  # strip leading zeros
            ep_title = item.group(3)
            ep_url = custom_url + 'membersonly/components/com_iceplayer/video.php?vid=' + vid
            ep_id = common.CreateIdFromString(name + '_' + year + '_season_' + season + '_episode_' + ep_num)
            self.AddContent(list, indexer, common.mode_File_Hosts, ep_title, ep_id, type,
                            url=ep_url, name=name, year=year, season=season, episode=ep_num)
def GetSection(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Build the menu tree for the movie indexer; front-page sections
    (latest DVDs / theatres / recent blurays / recent / popular) are scraped
    directly, the rest delegate to ExtractContentAndAddtoList."""
    if indexer != common.indxr_Movies:
        return
    url_type = ''
    content_type = ''
    if section == 'main':
        # Top-level menu entries.
        self.AddSection(list, indexer, 'recent', 'Recently Added')
        self.AddSection(list, indexer, 'popular', 'Most Popular')
        self.AddSection(list, indexer, 'latest_dvds', 'Latest DVDs')
        self.AddSection(list, indexer, 'theatres', 'In Theatres Now')
        self.AddSection(list, indexer, 'recent_blurays', 'Recent Blurays')
        self.AddSection(list, indexer, 'year', 'Year')
        self.AddSection(list, indexer, 'browse', 'Browse')
        self.AddSection(list, indexer, 'blurays', 'Blurays')
        self.AddSection(list, indexer, 'english_subtitled', 'English Subtitled')
    elif section in ('latest_dvds', 'theatres', 'recent_blurays'):
        from entertainment.net import Net
        import re
        net = Net()
        content = net.http_GET(self.base_url).content
        # Map section key to the <h2> heading that delimits it on the front page.
        if section == 'latest_dvds':
            section_header = 'Latest DVDs'
        elif section == 'theatres':
            section_header = 'Now Playing in Theaters'
        elif section == 'recent_blurays':
            section_header = 'Recently Added BluRays'
        section_content = re.search('(?s)<h2><b>' + section_header + '(.+?)<h2><b>', content).group(1)
        item_re = r'(?s)<article.+?<a href=[\'"]{1}(.+?)[\'"]{1}.+?<img src=[\'"]{1}(.+?)[\'"]{1} alt=[\'"]{1}(.+?)[\'"]{1}'
        for item in re.finditer(item_re, section_content):
            item_url = item.group(1)
            item_img = item.group(2)
            item_alt = item.group(3)
            item_name = re.sub('\([0-9]+\).*', '', item_alt)
            item_year = re.search("\(([0-9]+)", item_alt)
            if item_year:
                item_year = item_year.group(1)
                item_title = item_name + ' (' + item_year + ')'
            else:
                item_year = ''
                item_title = item_name
            if total_pages == '':
                total_pages = '1'
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, '', type,
                            url=item_url, name=item_name, year=item_year, img=item_img)
    elif section in ('recent', 'popular'):
        from entertainment.net import Net
        import re
        net = Net()
        content = net.http_GET(self.base_url).content
        if section == 'recent':
            section_header = 'Recently Added'
        elif section == 'popular':
            section_header = 'Most Popular'
        section_content = re.search('(?s)<h3 class="widgettitle">' + section_header + '.+?<ul>(.+?)</ul>', content).group(1)
        item_re = r'(?s)<li.+?<a href=[\'"]{1}(.+?)[\'"]{1}.+?<img src=[\'"]{1}(.+?)[\'"]{1} alt=[\'"]{1}(.+?)[\'"]{1}'
        for item in re.finditer(item_re, section_content):
            item_url = item.group(1)
            item_img = item.group(2)
            item_alt = item.group(3)
            item_name = re.sub('\([0-9]+\).*', '', item_alt)
            item_year = re.search("\(([0-9]+)", item_alt)
            if item_year:
                item_year = item_year.group(1)
                item_title = item_name + ' (' + item_year + ')'
            else:
                item_year = ''
                item_title = item_name
            if total_pages == '':
                total_pages = '1'
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, '', type,
                            url=item_url, name=item_name, year=item_year, img=item_img)
    elif section == 'browse':
        self.AddSection(list, indexer, 'all', 'All', self.base_url + 'browse', indexer)
        self.AddSection(list, indexer, 'hindi', 'Hindi', self.base_url + 'hindi-movies', indexer)
        self.AddSection(list, indexer, 'telugu', 'Telugu', self.base_url + 'telugu-movies', indexer)
        self.AddSection(list, indexer, 'tamil', 'Tamil', self.base_url + 'tamil-movies', indexer)
        self.AddSection(list, indexer, 'malayalam', 'Malayalam', self.base_url + 'malayalam-movies', indexer)
        self.AddSection(list, indexer, 'punjabi', 'Punjabi', self.base_url + 'punjabi-movies', indexer)
        self.AddSection(list, indexer, 'hindi_dubbed', 'Hindi Dubbed', self.base_url + 'category/hindi-dubbed', indexer)
    elif section == 'blurays':
        self.AddSection(list, indexer, 'all', 'All', self.base_url + 'blurays', indexer)
        self.AddSection(list, indexer, 'hindi', 'Hindi', self.base_url + 'hindi-blurays', indexer)
        self.AddSection(list, indexer, 'telugu', 'Telugu', self.base_url + 'telugu-blurays', indexer)
        self.AddSection(list, indexer, 'tamil', 'Tamil', self.base_url + 'tamil-blurays', indexer)
    elif section == 'english_subtitled':
        self.AddSection(list, indexer, 'all', 'All', self.base_url + 'english-subtitles', indexer)
        self.AddSection(list, indexer, 'hindi', 'Hindi', self.base_url + 'hindi-movies-english-subtitles', indexer)
        self.AddSection(list, indexer, 'telugu', 'Telugu', self.base_url + 'telugu-movies-english-subtitles', indexer)
        self.AddSection(list, indexer, 'tamil', 'Tamil', self.base_url + 'tamil-movies-english-subtitles', indexer)
        self.AddSection(list, indexer, 'malayalam', 'Malayalam', self.base_url + 'malayalam-movies-english-subtitles', indexer)
        self.AddSection(list, indexer, 'punjabi', 'Punjabi', self.base_url + 'punjabi-movies-english-subtitles', indexer)
    elif section == 'year':
        from datetime import date
        # One entry per year from the current year down to 2000, then a pre-2000 bucket.
        for i in range(date.today().year, 1999, -1):
            self.AddSection(list, indexer, 'year' + str(i), str(i), self.base_url + '/category/' + str(i), indexer)
        self.AddSection(list, indexer, 'year_pre_2000', 'Pre 2000', self.base_url + '/category/pre-2000', indexer)
    else:
        self.ExtractContentAndAddtoList(indexer, section, url, type, list, page, total_pages, sort_by, sort_order)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Scrape a filmstream.me listing page into `list`.

    Fix: the original had two byte-identical branches ('al-cinema' vs other
    sections) differing only in the pager regex; the generic pager regex also
    matches the al-cinema URL, so the branches are merged. Python-2-only
    `print` statements converted to the call form.
    """
    from entertainment.net import Net
    import re
    new_url = url
    if page == '':
        page = '1'
    else:
        page = str(int(page))
    new_url = new_url + 'page/' + page
    print(new_url)
    net = Net(cached=False)
    html = net.http_GET(new_url).content
    if total_pages == '':
        # Read the total from the pager's "last" link; matches any section path.
        r = '<a class="last" href="http://filmstream.me/.+?/page/(.+?)/">'
        total_pages = re.compile(r).findall(html)[0]
    # NOTE(review): the section key passed to AddInfo was hard-coded to
    # 'al-cinema' for every section in both original branches — looks like a
    # bug, preserved here for compatibility; confirm against AddInfo's usage.
    self.AddInfo(list, indexer, 'al-cinema', url, type, str(page), total_pages)
    item_re = r'<h2> <a href="(.+?)">(.+?)</a> </h2>\s*<div class=".+?">.+?</div>\s*</div>\s*<a href=".+?" title="">\s*<img src="(.+?)" alt="(.+?)"'
    for item in re.finditer(item_re, html, re.I):
        url = item.group(1)
        name = item.group(4)
        # Titles end with "... Stream"; keep only the part before it.
        name = name.split('Stream')[0]
        image = item.group(3)
        name = self.CleanTextForSearch(name)
        self.AddContent(list, indexer, common.mode_File_Hosts, name, '', type,
                        url=url, name=name, img=image, plot='')
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Expand a show into seasons (from <name>/index.html) or a season into
    episodes (from <name>/season_<n>.html) on an mnl-style site."""
    import urllib
    import re
    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    custom_url = self.get_url()
    name = (name).lower()
    # Show pages live under <base>/<show_name_with_underscores>/index.html.
    show_slug = name.lower().replace(' ', '_')
    tv_url = custom_url + '%s/index.html' % (show_slug)
    new_url = url
    from entertainment.net import Net
    net = Net()
    content = net.http_GET(tv_url).content
    if type == 'tv_seasons':
        season_re = '<td width="99%" class="mnlcategorylist"><a href="(.+?)"><b>Season (.+?)</b></a>'
        for url, seasonnumber in re.compile(season_re).findall(content):
            season_base = custom_url + '%s/' % (show_slug)
            season_url = season_base + url
            season_title = 'Season ' + seasonnumber
            season_id = common.CreateIdFromString(title + ' ' + season_title)
            self.AddContent(list, indexer, common.mode_Content, season_title, season_id,
                            'tv_episodes', url=season_url, name=name, season=seasonnumber)
    elif type == 'tv_episodes':
        tv_url2 = custom_url + '%s/season_%s.html' % (show_slug, season)
        from entertainment.net import Net
        net = Net()
        content2 = net.http_GET(tv_url2).content
        episode_re = '<td class="episode"><a name=".+?"></a><b>.+?. (.+?)</b></td>\s*<td class="mnllinklist" align="right"><div class="right">S.+?E(.+?)&'
        for item_title, ep_raw in re.compile(episode_re).findall(content2):
            ep_num = str(int(ep_raw))  # strip leading zeros
            ep_url = tv_url2 + '?episode=' + ep_num
            ep_id = common.CreateIdFromString(name + '_season_' + season + '_episode_' + ep_num)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, ep_id, type,
                            url=ep_url, name=name, season=season, episode=ep_num)
def GetContent(self, indexer, url, title, name, year, season, episode, type, list):
    """Expand an IMDb-style title page into seasons or a season page into
    episodes, skipping unaired content.

    Fixes: py2-only `print` statements → call form; `xrange` → `range`
    (identical iteration semantics); `== None` → `is None`.
    """
    import urllib
    import re
    url = urllib.unquote_plus(url)
    title = urllib.unquote_plus(title)
    name = urllib.unquote_plus(name)
    new_url = url
    if not new_url.startswith(self.get_url()):
        new_url = re.sub("http\://.*?/", self.get_url(), url)
    from entertainment.net import Net
    net = Net(cached=False)
    print(new_url)
    print('########################################')
    content = net.http_GET(new_url).content
    import datetime
    todays_date = datetime.date.today()
    if type == 'tv_seasons':
        check_season = 0
        last_season = 0
        season_url = None
        seasons = re.search('<a href="/(title/.+?/episodes\?season=)([0-9]+)', content)
        if seasons:
            last_season = int(seasons.group(2))
            season_url = seasons.group(1)
        for season_num in range(last_season, 0, -1):
            item_v_id = str(season_num)
            item_url = self.get_url() + season_url + item_v_id
            # Only the two most recent seasons are fetched and validated; if a
            # fetched season has no aired episodes it doesn't count against the
            # two-check budget.
            if check_season < 2:
                check_season += 1
                item_content = net.http_GET(item_url).content
                season_item = re.search('<div>S' + item_v_id + ', Ep([0-9]+)</div>', item_content)
                if not season_item:
                    check_season -= 1
                    continue
                item_item = re.search('(?s)<div class="list_item.+?href="(.+?)".+?title="(.+?)".+?<div>S' + item_v_id + ', Ep([0-9]+)</div>.+?<div class="airdate">(.+?)</div>', item_content)
                if 'unknown' in item_item.group(4).lower():
                    continue
                item_fmtd_air_date = self.get_formated_date(item_item.group(4))
                # NOTE(review): comparing a date to the string '0001-12-01' is
                # always False in the original; preserved as-is — the intended
                # sentinel check probably wanted datetime.date(1, 12, 1).
                if item_fmtd_air_date.date() > todays_date or item_fmtd_air_date.date() == '0001-12-01':
                    continue
            item_title = 'Season ' + item_v_id
            item_id = common.CreateIdFromString(title + ' ' + item_title)
            self.AddContent(list, indexer, common.mode_Content, item_title.strip(), item_id,
                            'tv_episodes', url=item_url, name=name.strip(), year=year, season=item_v_id)
    elif type == 'tv_episodes':
        season_item = re.search('<div>S' + season + ', Ep([0-9]+)</div>', content)
        if not season_item:
            return
        for item in re.finditer('(?s)<div class="list_item.+?href="(.+?)".+?title="(.+?)".+?<div>S' + season + ', Ep([0-9]+)</div>.+?<div class="airdate">(.+?)</div>', content):
            item_fmtd_air_date = self.get_formated_date(item.group(4))
            # Optionally hide episodes that have not aired yet.
            if self.Settings().get_setting('future') == 'false':
                if item_fmtd_air_date.date() > todays_date:
                    break
            item_url = self.get_url() + item.group(1)
            item_v_id = item.group(3)
            item_title = item.group(2).strip()
            if item_title is None:  # defensive; .strip() never returns None
                item_title = ''
            item_id = common.CreateIdFromString(name + '_' + year + '_season_' + season + '_episode_' + item_v_id)
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title.strip(), item_id, type,
                            url=item_url, name=name.strip(), year=year, season=season, episode=item_v_id)
def GetFileHosts(self, url, list, lock, message_queue, type, season, episode):
    """Collect stream links for a moviehdmax title: episodes come from a JSON
    POST endpoint, movies from <source> tags on the watch page."""
    import re, json
    from entertainment.net import Net
    net = Net(cached=False)
    headers = {
        'Host': 'moviehdmax.com',
        'Connection': 'keep-alive',
        'Accept': 'text/plain, */*; q=0.01',
        'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'en-US,en;q=0.8'
    }
    net.set_cookies(self.cookie_file)
    if type == 'tv_episodes':
        # Episode requests need a Referer and hit the getepisode AJAX endpoint.
        headers = {
            'Host': 'moviehdmax.com',
            'Connection': 'keep-alive',
            'Referer': url,
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'en-US,en;q=0.8'
        }
        data = {'p': episode}
        POST = url.replace('/watch/', '/getepisode/')
        content = net.http_POST(POST, data, headers=headers).content
        link = json.loads(content)
        data = link['sources']
        for field in data:
            quality = field['quality'] + 'P'
            FINAL_URL = field['host']
            quality_uc = quality.upper()
            # Normalize site quality labels to the addon's buckets.
            if '1080P' in quality_uc:
                Q = '1080P'
            elif '720P' in quality_uc:
                Q = '720P'
            elif '480P' in quality_uc:
                Q = 'HD'
            else:
                Q = 'SD'
            self.AddFileHost(list, Q, FINAL_URL)
    else:
        content = net.http_GET(url, headers=headers).content
        for FINAL_URL, quality in re.compile('<source src="(.+?)".+?data-res="(.+?)"').findall(content):
            quality_uc = quality.upper()
            if '1080' in quality_uc:
                Q = '1080P'
            elif '720' in quality_uc:
                Q = '720P'
            elif '480' in quality_uc:
                Q = 'HD'
            else:
                Q = 'SD'
            self.AddFileHost(list, Q, FINAL_URL)
def Search(self, indexer, keywords, type, list, lock, message_queue, page='', total_pages=''):
    """Run a site search (Italian index) and add each hit to `list`;
    TV-show searches use the dedicated TV base URL."""
    # Stop paging once the requested page is past the known total.
    if page and len(str(page)) > 0 and total_pages and len(str(total_pages)) > 0 and int(page) > int(total_pages):
        return
    if page == '':
        page = '1'
    from entertainment.net import Net
    import re
    net = Net()
    base_url_for_match = self.base_url
    if indexer == common.indxr_TV_Shows:
        base_url_for_match = self.tv_base_url
    search_url = '%s/page/%s/?s=%s' % (base_url_for_match, page, keywords.replace(' ', '+'))
    content = net.http_GET(search_url).content
    # Site's Italian "no results" message.
    if 'Nessun Film risponde ai criteri di ricerca impostati' in content:
        return
    if total_pages == '':
        page_count = re.search('<li class=[\'"]page_info[\'"]>Pagina 1 di ([0-9]+)</li>', content)
        total_pages = page_count.group(1) if page_count else 1
    self.AddInfo(list, indexer, 'search', self.base_url, type, str(page), str(total_pages))
    mode = common.mode_File_Hosts
    if type == 'tv_shows':
        mode = common.mode_Content
        type = 'tv_seasons'
    item_re = '(?s)<div id=[\'"]item[\'"]>.+?<div id=[\'"]covershot[\'"]><a href=[\'"](.+?)[\'"].+?<img src=[\'"](.+?)[\'"]>.+?<h3>(.+?)</h3>.+?<div id=[\'"]description[\'"]><p>(.+?)</p>'
    for movie in re.finditer(item_re, content):
        movie_url = movie.group(1)
        movie_img = movie.group(2).replace(' ', '%20')
        movie_title = movie.group(3)
        year_match = re.search("\(([0-9]+)\)$", movie_title)
        if year_match:
            movie_year = year_match.group(1)
            movie_name = re.sub("\(([0-9]+)\)$", "", movie_title)
        else:
            movie_year = ''
            movie_name = movie_title
        movie_description = movie.group(4).replace('&nbsp;', '').replace('&nbsp', '')
        self.AddContent(list, indexer, mode, movie_title, '', type,
                        url=movie_url, name=movie_name, year=movie_year,
                        img=movie_img, plot=movie_description)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Page through an Italian-index listing URL and add every item to `list`."""
    from entertainment.net import Net
    import re
    if page == '':
        page = '1'
    base_url_for_match = self.base_url
    if indexer == common.indxr_TV_Shows:
        base_url_for_match = self.tv_base_url
    # Insert the /page/<n>/ segment: query URLs get it spliced before '?',
    # plain URLs get it appended.
    if url.startswith('%s/?' % base_url_for_match):
        new_url = url.replace('%s/?' % base_url_for_match,
                              '%s/page/%s/?' % (base_url_for_match, page))
    elif not url.endswith('/'):
        new_url = url + '/page/' + page
    else:
        new_url = url + 'page/' + page
    net = Net()
    content = net.http_GET(new_url).content
    if total_pages == '':
        page_count = re.search('<li class=[\'"]page_info[\'"]>Pagina 1 di ([0-9]+)</li>', content)
        total_pages = page_count.group(1) if page_count else 1
    self.AddInfo(list, indexer, section, url, type, str(page), str(total_pages))
    mode = common.mode_File_Hosts
    if type == 'tv_shows':
        mode = common.mode_Content
        type = 'tv_seasons'
    item_re = '(?s)<div id=[\'"]item[\'"]>.+?<div id=[\'"]covershot[\'"]><a href=[\'"](.+?)[\'"].+?<img.+?src=[\'"](.+?)[\'"]>.+?<h3>(.+?)</h3>.+?<div id=[\'"]description[\'"]><p>(.+?)</p>'
    for movie in re.finditer(item_re, content):
        movie_url = movie.group(1)
        movie_img = movie.group(2).replace(' ', '%20')
        movie_title = movie.group(3)
        year_match = re.search("\(([0-9]+)\)$", movie_title)
        if year_match:
            movie_year = year_match.group(1)
            movie_name = re.sub("\(([0-9]+)\)$", "", movie_title)
        else:
            movie_year = ''
            movie_name = movie_title
        movie_description = movie.group(4).replace('&nbsp;', '').replace('&nbsp', '')
        self.AddContent(list, indexer, mode, movie_title, '', type,
                        url=movie_url, name=movie_name, year=movie_year,
                        img=movie_img, plot=movie_description)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Search the site for the given title, pick the first result whose name
    contains every keyword (and matching year when both are known), then
    delegate to GetFileHosts; episode requests append season/episode markers."""
    from entertainment.net import Net
    import re
    import urllib
    net = Net()
    if type == 'tv_episodes':
        search_title = self.CleanTextForSearch(name).lower().strip()
        episode = '%02d' % int(episode)  # site uses zero-padded episode numbers
    else:
        search_title = self.CleanTextForSearch(title).lower().strip()
    base_url_for_match = self.base_url
    if type == 'tv_episodes':
        base_url_for_match = self.tv_base_url
    search_url = '%s/?s=%s' % (base_url_for_match, urllib.quote_plus(search_title))
    content = net.http_GET(search_url).content
    # Site's Italian "no results" message.
    if 'Nessun Film risponde ai criteri di ricerca impostati' in content:
        return
    keywords_lower = name.lower().split(' ')
    match_total = float(len(keywords_lower))
    # Season/episode are smuggled to GetFileHosts via '|||||' separators.
    postfix_url = ''
    if type == 'tv_episodes':
        postfix_url = '|||||' + season + '|||||' + episode
    item_re = '(?s)<div id=[\'"]item[\'"]>.+?<div id=[\'"]covershot[\'"]><a href=[\'"](.+?)[\'"].+?<img src=[\'"](.+?)[\'"]>.+?<h3>(.+?)</h3>.+?<div id=[\'"]description[\'"]><p>(.+?)</p>'
    for movie in re.finditer(item_re, content):
        movie_url = None
        movie_title = movie.group(3)
        year_match = re.search("\(([0-9]+)\)$", movie_title)
        if year_match:
            movie_year = year_match.group(1)
            movie_name = re.sub("\(([0-9]+)\)$", "", movie_title)
        else:
            movie_year = ''
            movie_name = movie_title
        movie_name_lower = movie_name.lower()
        match_count = 0
        for kw in keywords_lower:
            if kw in movie_name_lower:
                match_count = match_count + 1
        if match_count >= match_total:
            # All keywords found — accept, but require the year to agree when
            # both the result and the request supply one.
            if movie_year and len(movie_year) > 0 and year and len(year) > 0:
                if movie_year == year:
                    movie_url = movie.group(1)
            else:
                movie_url = movie.group(1)
            if movie_url:
                self.GetFileHosts(movie_url + postfix_url, list, lock, message_queue)
                break
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Query movietv.to's loadmovies endpoint for `name` and resolve the first
    matching title (episode link via the getLink JSON API, movie link from the
    page's <source> tag) into GetFileHosts.

    Fixes: removed unused `import urllib2` (Python-2-only module, never used)
    and the unused `wait` local; py2 `print` statements → call form.
    """
    import re
    from entertainment.net import Net
    net = Net(cached=False, user_agent='Magic Browser')
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    new_url = 'http://movietv.to/index/loadmovies'
    if type == 'tv_episodes':
        types = 'tv'
        r = 'href="/series/(.+?)".+?movie-title">(.+?)</h2>'
        NEW = 'http://movietv.to/series/'
    else:
        types = 'movie'
        r = 'href="/movies/(.+?)".+?movie-title">(.+?)</h2>'
        NEW = 'http://movietv.to/movies/'
    data = {
        'loadmovies': 'showData',
        'page': '1',
        'abc': 'All',
        'genres': '',
        'sortby': 'Popularity',
        'quality': 'All',
        'type': types,
        'q': name
    }
    content = net.http_POST(new_url, data, headers={'Referer': 'http://movietv.to'}).content
    match = re.compile(r, re.DOTALL).findall(content)
    print('wwwwwwwwwwwwwwwwwwwwwwwwwwwwwww')
    print(match)
    for URL, TITLE in match:
        print(TITLE)
        print(URL)
        if name.lower() in TITLE.lower():
            if type == 'tv_episodes':
                # Series slugs start with a numeric id before the first '-'.
                id = URL.split('-')[0]
                LINKURL = 'http://movietv.to/series/getLink?id=%s&s=%s&e=%s' % (id, season, episode)
                contents = net.http_GET(LINKURL).content
                import json
                match = json.loads(contents)['url']
            else:
                contents = net.http_GET(NEW + URL).content
                match = re.compile('<source src="(.+?)" type=\'video/mp4\'>').findall(contents)[0]
            self.GetFileHosts(match, list, lock, message_queue, URL)
def GetFileHosts(self, url, list, lock, message_queue, type, season, episode):
    """Scrape pubfilm EZWebPlayer entries from `url` and add every stream;
    for tv_episodes only the entry matching `episode` is resolved.

    Fix: the per-entry fetch/quality-mapping body was duplicated verbatim in
    both branches — extracted into the private helper _AddPubfilmLinks.
    """
    import re
    from entertainment.net import Net
    net = Net(cached=False)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',
        'Referer': 'http://pubfilm.ac'
    }
    content = net.http_GET(url, headers=headers).content
    # Player API requests need their own header set with the page as Referer.
    headers = {
        'Host': 'player.pubfilm.ac',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Accept': 'text/html, application/xhtml+xml, */*',
        'Referer': url,
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-GB'
    }
    match = re.compile('player.pubfilm.ac/api/get.php\?id=(.+?)".+?arget="EZWebPlayer".+?>(.+?)<', re.DOTALL).findall(content)
    for URL, EP in match:
        URL = 'http://player.pubfilm.ac/api/get.php?id=' + URL
        # Link text like "EPISODE 03:" / "SERVER 2" — strip down to the number.
        EPISODE = EP.replace('EPISODE', '').replace('episode', '').replace(
            'SERVER', '').replace('server', '').replace(':', '').strip()
        if type == 'tv_episodes':
            if EPISODE == '00':
                EPISODE = int(EPISODE) + 1
            EPISODE = int(EPISODE)
            if int(episode) == EPISODE:
                self._AddPubfilmLinks(net, URL, headers, list)
        else:
            self._AddPubfilmLinks(net, URL, headers, list)

def _AddPubfilmLinks(self, net, URL, headers, list):
    """Fetch one player API URL and add each file/label pair as a host link."""
    import re
    CONTENT = net.http_GET(URL, headers=headers).content
    MATCHED = re.compile('file":"(.+?)".+?label":"(.+?)"').findall(CONTENT)
    for FINAL_URL, quality in MATCHED:
        if '//' in FINAL_URL:
            quality = quality.upper()
            # Map raw labels onto the addon's quality buckets.
            if quality == '1080P':
                quality = '1080P'
            elif quality == '720P':
                quality = '720P'
            elif quality == '480P':
                quality = 'HD'
            else:
                quality = 'SD'
            HOST = FINAL_URL.split('//')[1]
            HOST = HOST.split('/')[0]
            self.AddFileHost(list, quality, FINAL_URL, host=HOST.upper())
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Scrape one listing page and add every item (title/year/img/plot) to `list`.

    Pages through ``url + '/page/<page>'``; the total page count is read from
    the site's "Page 1 of N" pager on the first request.
    Fix: the original used `re` without importing it inside the function
    (it relied on a module-level import we cannot see here); import locally.
    """
    import urllib
    import re
    if section != 'search':
        url = urllib.unquote_plus(url)
    new_url = url
    # Rebase foreign absolute URLs onto this indexer's base URL.
    if not new_url.startswith(self.base_url):
        new_url = re.sub("http\://.*?/", self.base_url, url)
    if page == '':
        page = '1'
    from entertainment.net import Net
    # Watchlist content is per-user, so bypass the HTTP cache for it.
    cached = False if section == 'watchlist' else True
    net = Net(cached=cached)
    content = net.http_GET(new_url + '/page/' + page).content
    if total_pages == '':
        re_page = '<span class=[\'"]{1}pages[\'"]{1}>Page 1 of ([0-9]+)</span>'
        total_pages = re.search(re_page, content)
        if total_pages:
            total_pages = total_pages.group(1)
        else:
            if re.search('0 items found', content):
                page = '0'
                total_pages = '0'
            else:
                page = '1'
                total_pages = '1'
    self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
    item_re = r'(?s)<div class=[\'"]{1}inner[\'"]{1}>.+?<a href=[\'"]{1}(.+?)[\'"]{1}.+?<img src=[\'"]{1}(.+?)[\'"]{1} alt=[\'"]{1}(.+?)[\'"]{1}.+?<p>(.+?)<'
    for item in re.finditer(item_re, content):
        item_url = item.group(1)
        item_img = item.group(2)
        item_alt = item.group(3)
        # Title text looks like "Name (Year) ..." — split name and year apart.
        item_name = re.sub('\([0-9]+\).*', '', item_alt)
        item_year = re.search("\(([0-9]+)", item_alt)
        if item_year:
            item_year = item_year.group(1)
            item_title = item_name + ' (' + item_year + ')'
        else:
            item_year = ''
            item_title = item_name
        if total_pages == '':
            total_pages = '1'
        item_plot = re.sub('^\s', '', common.CleanText(item.group(4), True, True))
        self.AddContent(list, indexer, common.mode_File_Hosts, item_title, '', type,
                        url=item_url, name=item_name, year=item_year, img=item_img, plot=item_plot)
def Search(self, indexer, keywords, type, list, lock, message_queue, page='', total_pages=''):
    """Search the site for `keywords` and add results whose name+year matches
    more than half of the keywords; pager info is emitted only once a result
    qualifies, otherwise a terminal AddInfo is sent at the end."""
    from entertainment.net import Net
    import re
    import urllib
    net = Net()
    keywords = self.CleanTextForSearch(keywords)
    if page == '':
        page = '1'
    search_for_url = self.base_url + '/page/' + page + '?s=' + urllib.quote_plus(keywords) + '&submit=Search'
    content = net.http_GET(search_for_url).content
    if "<div class='big-title'>Oops!</div>" in content:
        return
    keywords_lower = keywords.lower().split(' ')
    match_total = float(len(keywords_lower))
    if total_pages == '':
        total_pages = '1'
    # Keep paging open: pretend there is always one page beyond the current one.
    if int(page) == int(total_pages):
        total_pages = str(int(total_pages) + 1)
    item_re = r'(?s)<div class=[\'"]{1}inner[\'"]{1}>.+?<a href=[\'"]{1}(.+?)[\'"]{1}.+?<img src=[\'"]{1}(.+?)[\'"]{1} alt=[\'"]{1}(.+?)[\'"]{1}.+?<p>(.+?)<'
    info_added = False
    for item in re.finditer(item_re, content):
        item_alt = item.group(3)
        if 'trailer' in item_alt.lower():
            continue
        item_name = re.sub('\([0-9]+\).*', '', item_alt)
        item_year = re.search("\(([0-9]+)", item_alt)
        if item_year:
            item_year = item_year.group(1)
            item_title = item_name + ' (' + item_year + ')'
        else:
            item_year = ''
            item_title = item_name
        # Score: count keywords that appear as whole tokens in "name year".
        item_match = '.' + item_name + '.' + item_year + '.'
        item_match_lower = item_match.lower()
        item_match_count = 0
        for kywd in keywords_lower:
            if re.search('[^a-zA-Z0-9]' + kywd + '[^a-zA-Z0-9]', item_match_lower):
                item_match_count += 1
        if item_match_count / match_total > 0.5:
            if info_added == False:
                self.AddInfo(list, indexer, 'search', self.base_url, type, page, total_pages)
                info_added = True
            item_url = item.group(1)
            if 'trailer' in item_url.lower():
                continue
            item_img = item.group(2)
            item_plot = re.sub('^\s', '', common.CleanText(item.group(4), True, True))
            self.AddContent(list, indexer, common.mode_File_Hosts, item_title, '', type,
                            url=item_url, name=item_name, year=item_year,
                            img=item_img, plot=item_plot)
    if info_added == False:
        # No qualifying hits: close paging by reporting page == total_pages.
        self.AddInfo(list, indexer, 'search', self.base_url, type, page, page)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Scrape a listing page and add shows/seasons/episodes/movies to `list`.

    NOTE(review): the 'username'/'password' settings and cookie file suggest
    this targets a Trakt-style site — confirm against the enclosing class.
    Pagination info is registered up front for paged sections; for 'list'
    sections with no page info it is discovered from the page HTML instead.
    """
    from entertainment.net import Net
    net = Net(cached=False)
    trakt_user = self.Settings().get_setting('username')
    trakt_password = self.Settings().get_setting('password')
    # Authenticated requests only when credentials exist (search stays anonymous).
    if trakt_user != "" and trakt_password != "" and section != 'search':
        net.set_cookies(self.cookie_file)
    import re
    new_url = url
    if section != 'list' or total_pages != '':
        if page == '':
            page = '1'
        else:
            page = str(int(page))
        # search URLs already carry a query string, so append with '&'.
        new_url = new_url + ('&' if section == 'search' else '?') + 'page=' + page
        if total_pages == '':
            total_pages = '500'   # default cap when the site gives no count
            if section == 'boxoffice':
                total_pages = '1'
            elif section == 'search':
                total_pages = '5'
        self.AddInfo(list, indexer, section, url, type, str(page), total_pages)
    html = net.http_GET(new_url).content
    # 'list' sections with unknown paging: read the count from the pagination widget.
    if section == 'list' and page == '' and total_pages == '':
        pagination_match = re.search('<ul class="pagination">(.+?)</ul>', html, re.DOTALL)
        if pagination_match:
            if page == '':
                page = '1'
            pagination = pagination_match.group(1)
            page_match = re.compile('<a href=[^>]+?>([^<]+?)<').findall(pagination)
            if page_match:
                # last pagination link text is the highest page number
                total_pages = page_match[-1].strip()
        self.AddInfo(list, indexer, section, url, type, str(page), total_pages)
    # One grid item per match; inner regexes below pick the fields out of it.
    match = re.compile('(<div class="grid-item col.+?<div class="titles[^>]+?>.+?</div>.+?</div>)', re.DOTALL).findall(html)
    for item in match:
        url_match = re.search('<a href="([^"]+?)">', item)
        if url_match:
            url = self.base_url + url_match.group(1)
            item_indexer = ''
            mode = ''
            name = ''
            year = ''
            season = ''
            episode = ''
            item_id = ''
            displayname = ''
            if '/shows/' in url:
                # TV item; skip it when browsing a movies-only list.
                if indexer == common.indxr_Movies and section == 'list':
                    continue
                item_indexer = common.indxr_TV_Shows
                mode = common.mode_Content
                type = 'tv_seasons'
                if section == 'list':
                    name_match = re.search('<h3>(.+?)</h3>', item)
                else:
                    name_match = re.search('<meta content="([^"]+?)" itemprop="name">', item)
                year_match = re.search('<span class="year">(.+?)</span>', item)
                if year_match:
                    year = year_match.group(1)
                if name_match:
                    name = name_match.group(1)
                displayname = name
                if year:
                    displayname = displayname + ' (' + year + ')'
                if '/seasons/' in url:
                    # Season URL: series name comes from the partOfSeries span.
                    type = 'tv_episodes'
                    name_span = re.search('itemprop="partOfSeries"(.+?)</span>', item, re.DOTALL).group(1)
                    name = re.search('<meta content="([^"]+?)" itemprop="name">', name_span).group(1)
                    season = re.search('/seasons/([0-9]+)', url).group(1)
                    displayname = name + ' - Season: ' + season
                if 'episodes/' in url:
                    mode = common.mode_File_Hosts
                    type = 'tv_episode'
                    episode = re.search('/episodes/([0-9]+)', url).group(1)
                    item_id = common.CreateIdFromString(name + '_' + year + '_season_' + season + '_episode_' + episode)
                    episode_name = ''
                    # last itemprop="name" meta in the item is the episode title
                    episode_name_match = re.compile('<meta content="([^"]+?)" itemprop="name">').findall(item)
                    if episode_name_match:
                        episode_name = episode_name_match[-1].strip()
                    displayname = name + ' - S' + season + 'E' + episode + ' - ' + episode_name
                    # NOTE(review): deliberately overwrites the id above with a
                    # year-less variant — confirm which form callers expect.
                    item_id = common.CreateIdFromString(name + '_season_' + season + '_episode_' + episode)
            else:
                # Movie item; skip it when browsing a TV-only list.
                if indexer == common.indxr_TV_Shows and section == 'list':
                    continue
                item_indexer = common.indxr_Movies
                mode = common.mode_File_Hosts
                type = common.indxr_Movies
                if section == 'list':
                    name = re.search('<h3>(.+?)</h3>', item).group(1)
                else:
                    name = re.search('<meta content="([^"]+?)" itemprop="name">', item).group(1)
                displayname = name
                # Movie URLs end in "-YYYY"; use it as the year when present.
                year_match = re.search('\-([0-9]{4})$', url)
                if year_match:
                    year = year_match.group(1)
                    displayname += ' (' + year + ')'
            self.AddContent(list, item_indexer, mode, displayname, item_id, type, url=url, name=name, year=year, season=season, episode=episode)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Scrape an IMDb listing/search/watchlist page and add items to `list`.

    Handles several section flavours with different markup: advanced-search
    result pages, 'theaters', user watchlists (two markup variants keyed on
    whether the URL contains 'watchlist?'), 'watchlist_people' grids,
    celebrity search and a celebrity's filmography ('celeb_result').
    """
    import urllib
    if section != 'search':
        url = urllib.unquote_plus(url)
    if section == 'search_celeb':
        # Prompt the user for a celebrity name via the Kodi on-screen keyboard.
        import xbmc
        search_entered = ''
        keyboard = xbmc.Keyboard(search_entered, '[COLOR blue]i[/COLOR]Stream')
        keyboard.doModal()
        if keyboard.isConfirmed():
            search_entered = keyboard.getText()
        if search_entered == '':
            return
        url = urllib.unquote_plus(url) + search_entered.replace(' ', '+')
    import re
    new_url = url
    # Rebase onto the configured IMDb base URL if needed.
    if not new_url.startswith(self.get_url()):
        new_url = re.sub("http\://.*?/", self.get_url(), url)
    if page == '':
        page = '1'
    # change page length to 100 for watchlists, 50 elsewhere.
    page_item_count = (100 if section == 'watchlist' else 50)
    start = str(((int(page) - 1) * page_item_count) + 1)
    count = str(page_item_count)
    if section != 'watchlist_people':
        # Append start/count paging parameters.
        if not '?' in new_url:
            new_url = new_url + '?start=' + start + '&count=' + count
        else:
            new_url = new_url + '&start=' + start + '&count=' + count
        if sort_by == '' and 'sort' not in new_url:
            sort_by = 'moviemeter'
        if sort_order == '' and 'sort' not in new_url:
            sort_order = 'asc'
        if 'sort' not in new_url:
            # Watchlists use 'title' for alpha sort and ':' as the separator.
            new_url = new_url + '&sort=' + ('title' if section == 'watchlist' and sort_by == 'alpha' else sort_by) + (':' if section == 'watchlist' else ',') + sort_order
    # Recover the sort mode from a URL that already embeds one, and strip it
    # from `url` so AddInfo records the bare URL plus explicit sort fields.
    if sort_by == '' and 'sort=user_rating,desc' in new_url:
        sort_by = 'user_rating'
        sort_order = 'desc'
        url = url.replace("sort=user_rating,desc", "")
    elif sort_by == '' and 'sort=num_votes,desc' in new_url:
        sort_by = 'num_votes'
        sort_order = 'desc'
        url = url.replace("sort=num_votes,desc", "")
    elif sort_by == '' and 'sort=boxoffice_gross_us,desc' in new_url:
        sort_by = 'boxoffice_gross_us'
        sort_order = 'desc'
        url = url.replace("sort=boxoffice_gross_us,desc", "")
    elif sort_by == '' and 'sort=release_date_us,desc' in new_url:
        sort_by = 'release_date_us'
        sort_order = 'desc'
        url = url.replace("sort=release_date_us,desc", "")
    elif sort_by == '' and 'sort=year,desc' in new_url:
        sort_by = 'year'
        sort_order = 'desc'
        url = url.replace("sort=year,desc", "")
    from entertainment.net import Net
    # Watchlists are per-user, so bypass the HTTP cache for them.
    cached = False if section == 'watchlist' else True
    net = Net(cached=cached)
    if self.Settings().get_setting('en_us') == 'true':
        content = net.http_GET(new_url, {'Accept-Language': 'en-US'}).content
    else:
        content = net.http_GET(new_url).content
    if total_pages == '':
        # page problem watchlist solved: the two watchlist markup variants
        # report the total item count differently.
        if section == 'watchlist':
            if not 'watchlist?' in new_url:
                re_page = '<span>\(.+? of ([0-9,]+)'
            else:
                re_page = 'of ([0-9,]+) titles'
        else:
            re_page = 'of ([0-9,]+) titles'
        total_pages = re.search(re_page, content)
        if total_pages:
            total_count = total_pages.group(1)
            total_count = int(total_count.replace(',', ''))
            # Integer division (Python 2) = ceil(total_count / page_item_count).
            total_pages = str(total_count / page_item_count + (1 if total_count % page_item_count > 0 else 0))
        else:
            if re.search('0 items found', content):
                page = '0'
                total_pages = '0'
            else:
                page = '1'
                total_pages = '1'
    self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
    if section == 'search_celeb':
        # Celebrity search results: keep only actors/actresses; bump the
        # thumbnail size by rewriting the SX token in the image URL.
        match = re.compile('<img src="(.+?)" /></a> </td> <td class="result_text"> <a href="(.+?)" >(.+?)<.+?<small>\((.+?),').findall(content)
        for img, url, name, gender in match:
            img = img.split(',')[0]
            if 'Actress' in gender or 'Actor' in gender:
                self.AddSection(list, indexer, 'celeb_result', name + ' (%s)' % gender, self.get_url() + url, indexer, img=img.replace('SX32', 'SX280'))
    if section == 'watchlist_people':
        match = re.compile('<a href="/(.+?)"><img src="(.+?)".+?alt="(.+?)">').findall(content)
        for url, img, name in match:
            img = img.split(',')[0]
            self.AddSection(list, indexer, 'celeb_result', name, self.get_url() + url, indexer, img=img.replace('SX140', 'SX280'))
    mode = common.mode_File_Hosts
    if type == 'tv_shows':
        mode = common.mode_Content
        type = 'tv_seasons'
    # Default item regex for advanced-search pages; overridden per section below.
    item_re = r'<a href="/title/(.+?)/.+?"\n> <img alt="(.+?)"'
    if section == 'theaters':
        item_re = r'<h4 itemprop="name"><a href="/title/(.+?)/.+?title="(.+?)"'
    if section == 'watchlist':
        if not 'watchlist?' in new_url:
            item_re = r'(?s)<b><a.+?href="/title/(.+?)/".+?>(.+?)</a>.+?<span class="year_type">(.+?)<.+?<div class="(.+?)"'
        else:
            # JSON-embedded watchlist variant.
            item_re = r'\{"href":"/title/([^"]+?)","year":\["([^"]+?)"\],"title":"([^"]+?)"\}'
    if section == 'celeb_result':
        # Filmography rows on a celebrity page.
        match = re.compile('<div class="filmo-row .+?" id=".+?">.+?span class="year_column">.+?nbsp;(.+?)</span>.+?<b><a href="/title/(.+?)/.+?>(.+?)</a>(.+?)<br/>', re.DOTALL).findall(content)
        for year, tt, title, id_type in match:
            if 'TV Series' in id_type:
                type = 'tv_seasons'
                mode = common.mode_Content
                indexer = common.indxr_TV_Shows
            else:
                type = common.indxr_Movies
                mode = common.mode_File_Hosts
                indexer = common.indxr_Movies
            item_title = common.addon.unescape(title)
            item_url = self.get_url() + 'title/' + tt + '/'
            year = year.strip()
            # Year ranges like "2001-2005" keep only the start year.
            if '-' in year:
                year = year.split('-')[0]
            self.AddContent(list, indexer, mode, item_title.strip(), '', type, url=item_url, name=item_title.strip(), year=year, imdb_id=tt)
    else:
        for item in re.finditer(item_re, content):
            item_v_id = item.group(1)
            # JSON watchlist variant puts the title in group 3; others in group 2.
            item_title = common.addon.unescape(item.group(3) if section == 'watchlist' and 'watchlist?' in new_url else item.group(2))
            item_type = item_title
            if section == 'watchlist':
                if not 'watchlist?' in new_url:
                    item_type = item.group(3)
                else:
                    item_type = "(" + item.group(2) + ")"
            item_type = item_type.replace(' Video)', ')').replace(' Short Film)', '')
            item_year = re.search("\(([0-9]+)", item_type)
            if item_year:
                item_year = item_year.group(1)
            else:
                # Fallback: locate the year next to the title elsewhere in the page.
                r = '>%s</a>\n <span class=".+?">\((.+?)\)<' % item_title.replace('?', '\?')
                item_year = re.search(r, content)
                # NOTE(review): if the fallback search fails this .group(1)
                # raises AttributeError — confirm whether that can happen here.
                item_year = re.search("([0-9]+)", item_year.group(1))
                if item_year:
                    item_year = item_year.group(1)
                else:
                    item_year = ''
            item_name = item_title if section == 'watchlist' else re.sub(" \([0-9]+.+?\)", "", item_title)
            item_title = item_name.strip()
            if item_year != '':
                item_title = item_title + ' (' + item_year.replace('-', '') + ')'
            item_url = self.get_url() + 'title/' + item_v_id + '/'
            if total_pages == '':
                total_pages = '1'
            if section == 'watchlist':
                # Classify watchlist entries as movie vs series from the type text.
                if 'movie' in item_type.lower() or re.sub("[0-9]+", "", item_type) == "()":
                    type = common.indxr_Movies
                    mode = common.mode_File_Hosts
                    indexer = common.indxr_Movies
                elif 'series' in item_type.lower() or ' ' in item_type:
                    type = 'tv_seasons'
                    mode = common.mode_Content
                    indexer = common.indxr_TV_Shows
                else:
                    type = common.indxr_Movies
                    mode = common.mode_File_Hosts
                    indexer = common.indxr_Movies
            self.AddContent(list, indexer, mode, item_title.strip(), '', type, url=item_url, name=item_name.strip(), year=item_year, imdb_id=item_v_id)
def GetSection(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Build the IMDb browse menus for a given `section`.

    Known sections ('main', 'genres', 'a_z', 'year', 'decade', 'company',
    'award_lists') emit sub-menu entries via AddSection; any other section is
    delegated to ExtractContentAndAddtoList to scrape actual content.

    FIX: the movie award list previously used the section id
    'golden_g lobe_nominees' (stray space) — now 'golden_globe_nominees',
    matching the TV branch and the 'groups=golden_globe_nominees' URL.
    Unused locals `content_type` and `year = []` were removed.
    """
    url_type = ''
    # Pre-built IMDb advanced-search filter fragments (popularity/vote thresholds).
    # request: add some of these threshold values as user settings.
    url_filter = ''
    url_filter_less = ''
    url_filter_rated = ''
    url_filter_small = ''
    if indexer == common.indxr_Movies:
        url_type = 'title_type=feature,tv_movie&'
        url_filter = 'has=technical&moviemeter=,50000&num_votes=3000,&production_status=released&'
        url_filter_less = 'has=technical&moviemeter=,200000&num_votes=1000,&production_status=released&'
        url_filter_small = 'has=technical&moviemeter=,200000&num_votes=200,&production_status=released&'
        url_filter_rated = 'has=technical&moviemeter=,50000&num_votes=25000,&production_status=released&'
    elif indexer == common.indxr_TV_Shows:
        url_type = 'title_type=tv_series,mini_series&'
        url_filter = 'has=technical&moviemeter=,50000&num_votes=1500,&'
        url_filter_less = 'has=technical&moviemeter=,200000&num_votes=500,&'
        url_filter_small = 'has=technical&moviemeter=,200000&num_votes=200,&'
        url_filter_rated = 'has=technical&moviemeter=,50000&num_votes=25000,&'
    elif indexer == common.indxr_Lists:
        url_type = ''
    if section == 'main':
        # Personal watchlist and public user lists (requires an IMDb user number).
        user_number = self.Settings().get_setting('imdb_user_number')
        if user_number:
            list_url_type = ''
            if self.Settings().get_setting('watch_list_main') == 'true':
                self.AddSection(list, indexer, 'watchlist', 'Watchlist', self.get_url() + 'user/' + user_number + '/watchlist?' + list_url_type + 'view=detail', indexer)
            from entertainment.net import Net
            net = Net(cached=False)
            import re
            named_lists_url = self.get_url() + 'user/' + user_number + '/lists?tab=public'
            named_lists = net.http_GET(named_lists_url).content
            match = re.compile('<div class="list_name"><b><a.+?href="(.+?)".+?>(.+?)</a>.+?\n.+?div class="list_meta">(.+?)</div>').findall(named_lists)
            for url, name, TYPE in match:
                custom_name = '%s List' % name
                if 'people' in TYPE:
                    custom_url = self.get_url() + str(url) + '?view=grid&sort=listorian:asc'
                    self.AddSection(list, indexer, 'watchlist_people', '%s' % custom_name, custom_url, indexer, hlevel=1)
                else:
                    custom_url = self.get_url() + str(url) + '?' + list_url_type + 'view=detail'
                    self.AddSection(list, indexer, 'watchlist', '%s' % custom_name, custom_url, indexer, hlevel=1)
        # Separate main menus for movies and TV shows.
        if indexer == common.indxr_Movies:
            self.AddSection(list, indexer, 'moviemeter', 'Most Popular', self.get_url() + 'search/title?' + url_filter + url_type, indexer)
            self.AddSection(list, indexer, 'genres', 'Genres')
            self.AddSection(list, indexer, 'boxoffice_gross_us', 'Box Office', self.get_url() + 'search/title?' + url_filter + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'year', 'Box Office By Year')
            self.AddSection(list, indexer, 'decade', 'Box Office By Decade')
            self.AddSection(list, indexer, 'user_rating', 'Highly Rated', self.get_url() + 'search/title?' + url_filter_rated + url_type + 'sort=user_rating,desc', indexer)
            self.AddSection(list, indexer, 'top_250', 'IMDb Top 250', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=top_250&sort=user_rating,desc', indexer)
            self.AddSection(list, indexer, 'num_votes', 'Most Voted', self.get_url() + 'search/title?' + url_filter + url_type + 'sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, 'kids', 'Kids Zone', self.get_url() + 'search/title?' + url_filter_small + 'certificates=us:g&genres=family&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'now-playing-us', 'Now Playing', self.get_url() + 'search/title?' + url_filter_less + url_type + 'groups=now-playing-us&sort=release_date_us,desc', indexer)
            self.AddSection(list, indexer, 'company', 'Company Lists')
            self.AddSection(list, indexer, 'award_lists', 'Award Lists')
            self.AddSection(list, indexer, 'search_celeb', 'Celebrity Search', self.get_url() + 'find?q=', indexer)
        elif indexer == common.indxr_TV_Shows:
            self.AddSection(list, indexer, 'moviemeter', 'Most Popular', self.get_url() + 'search/title?' + url_filter + url_type, indexer)
            self.AddSection(list, indexer, 'genres', 'Genres')
            self.AddSection(list, indexer, 'num_votes', 'Most Voted', self.get_url() + 'search/title?' + url_filter + url_type + 'sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, 'year', 'Most Voted By Year')
            self.AddSection(list, indexer, 'decade', 'Most Voted By Decade')
            self.AddSection(list, indexer, 'user_rating', 'Highly Rated', self.get_url() + 'search/title?' + url_filter_rated + url_type + 'sort=user_rating,desc', indexer)
            self.AddSection(list, indexer, 'award_lists', 'Award Lists')
            self.AddSection(list, indexer, 'search_celeb', 'Celebrity Search', self.get_url() + 'find?q=', indexer)
    elif section == 'genres':
        import re
        from entertainment.net import Net
        net = Net()
        genre_url = self.get_url()
        genre_re = ''
        genre_url = genre_url + 'genre/'
        # The genre page lists movie and TV genres in different tables.
        if indexer == common.indxr_Movies:
            genre_re = '(?s)<h2>On Amazon Prime Instant Video.+?<table(.+?)</table>'
        elif indexer == common.indxr_TV_Shows:
            genre_re = '(?s)<h2>Television.+?<table(.+?)</table>'
        content = net.http_GET(genre_url).content
        genres = re.search(genre_re, content)
        if genres:
            genres = genres.group(1)
            for genre in re.finditer('<a href=".+?">(.+?)</a>', genres):
                genre_title = genre.group(1)
                genre_section = genre_title.lower()
                # '-' is not valid in the genres= query value; use '_'.
                genre_section = genre_section.replace("-", "_")
                if indexer == common.indxr_TV_Shows and genre_section == 'sitcom':
                    genre_section = 'comedy&keywords=sitcom'
                if indexer == common.indxr_Movies and genre_section == 'documentary':
                    # Documentaries need their own title_type filter.
                    url_type2 = 'title_type=documentary&'
                    genre_url = self.get_url() + 'search/title?' + url_filter_less + url_type2 + 'genres=' + genre_section + '&sort=boxoffice_gross_us,desc'
                elif indexer == common.indxr_Movies:
                    genre_url = self.get_url() + 'search/title?' + url_filter_less + url_type + 'genres=' + genre_section + '&sort=boxoffice_gross_us,desc'
                elif indexer == common.indxr_TV_Shows:
                    genre_url = self.get_url() + 'search/title?' + url_filter_less + url_type + 'genres=' + genre_section + '&sort=num_votes,desc'
                self.AddSection(list, indexer, genre_section, genre_title, genre_url, indexer)
    elif section == 'a_z':
        # NOTE(review): marked "not working" in the original — the plain
        # '?letter=' URLs may no longer be supported by the site.
        self.AddSection(list, indexer, '123', '#123', self.get_url() + '?' + url_type + 'letter=123', indexer)
        A2Z = [chr(i) for i in xrange(ord('A'), ord('Z') + 1)]
        for letter in A2Z:
            self.AddSection(list, indexer, letter.lower(), letter, self.get_url() + '?' + url_type + 'letter=' + letter.lower(), indexer)
    elif section == 'year':
        start = 1900
        import datetime
        end = datetime.datetime.today().year
        # One entry per year from the current year back to 1900; the default
        # sort differs for movies (box office) and TV (votes).
        for yr in range(end, start - 1, -1):
            str_year = str(yr)
            if indexer == common.indxr_Movies:
                self.AddSection(list, indexer, str_year, str_year, self.get_url() + 'search/title?' + url_filter_less + 'year=' + str_year + ',' + str_year + '&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            elif indexer == common.indxr_TV_Shows:
                self.AddSection(list, indexer, str_year, str_year, self.get_url() + 'search/title?' + url_filter_less + 'year=' + str_year + ',' + str_year + '&' + url_type + 'sort=num_votes,desc', indexer)
    elif section == 'decade':
        if indexer == common.indxr_Movies:
            self.AddSection(list, indexer, '2010s', '2010-2016', self.get_url() + 'search/title?' + 'release_date=2010,2016&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '2000s', '2000-2009', self.get_url() + 'search/title?' + 'release_date=2000,2009&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '1990s', '1990-1999', self.get_url() + 'search/title?' + 'release_date=1990,1999&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '1980s', '1980-1989', self.get_url() + 'search/title?' + 'release_date=1980,1989&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '1970s', '1970-1979', self.get_url() + 'search/title?' + 'release_date=1970,1979&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '1960s', '1960-1969', self.get_url() + 'search/title?' + 'release_date=1960,1969&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '1950s', '1950-1959', self.get_url() + 'search/title?' + 'release_date=1950,1959&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '1940s', '1940-1949', self.get_url() + 'search/title?' + 'release_date=1940,1949&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '1930s', '1930-1939', self.get_url() + 'search/title?' + 'release_date=1930,1939&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, '1920s', '1920-1929', self.get_url() + 'search/title?' + 'release_date=1920,1929&' + url_filter_less + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            # 1910s uses the lowest vote threshold — few titles have many votes.
            self.AddSection(list, indexer, '1910s', '1910-1919', self.get_url() + 'search/title?' + 'release_date=1910,1919&' + url_filter_small + url_type + 'sort=boxoffice_gross_us,desc', indexer)
        elif indexer == common.indxr_TV_Shows:
            self.AddSection(list, indexer, '2010s', '2010-2016', self.get_url() + 'search/title?' + 'release_date=2010,2016&' + url_filter_less + url_type + 'sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, '2000s', '2000-2009', self.get_url() + 'search/title?' + 'release_date=2000,2009&' + url_filter_less + url_type + 'sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, '1990s', '1990-1999', self.get_url() + 'search/title?' + 'release_date=1990,1999&' + url_filter_less + url_type + 'sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, '1980s', '1980-1989', self.get_url() + 'search/title?' + 'release_date=1980,1989&' + url_filter_less + url_type + 'sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, '1970s', '1970-1979', self.get_url() + 'search/title?' + 'release_date=1970,1979&' + url_filter_less + url_type + 'sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, '1960s', '1960-1969', self.get_url() + 'search/title?' + 'release_date=1960,1969&' + url_filter_less + url_type + 'sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, '1950s', '1949-1959', self.get_url() + 'search/title?' + 'release_date=1949,1959&' + url_filter_less + url_type + 'sort=num_votes,desc', indexer)
    elif section == 'company':
        if indexer == common.indxr_Movies:
            self.AddSection(list, indexer, 'fox', '20th Century Fox', self.get_url() + 'search/title?' + url_filter_small + 'companies=fox&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'dreamworks', 'DreamWorks', self.get_url() + 'search/title?' + url_filter_small + 'companies=dreamworks&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'mgm', 'MGM', self.get_url() + 'search/title?' + url_filter_small + 'companies=mgm&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'paramount', 'Paramount', self.get_url() + 'search/title?' + url_filter_small + 'companies=paramount&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'columbia', 'Sony', self.get_url() + 'search/title?' + url_filter_small + 'companies=columbia&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'universal', 'Universal', self.get_url() + 'search/title?' + url_filter_small + 'companies=universal&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'disney', 'Walt Disney', self.get_url() + 'search/title?' + url_filter_small + 'companies=disney&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'warner', 'Warner Bros.', self.get_url() + 'search/title?' + url_filter_small + 'companies=warner&' + url_type + 'sort=boxoffice_gross_us,desc', indexer)
    elif section == 'award_lists':
        if indexer == common.indxr_Movies:
            self.AddSection(list, indexer, 'oscar_best_picture_winners', 'Best Picture-Winning', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=oscar_best_picture_winners&sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'oscar_winners', 'Oscar-Winning', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=oscar_winners&sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'oscar_nominees', 'Oscar-Nominated', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=oscar_nominees&sort=boxoffice_gross_us,desc', indexer)
            self.AddSection(list, indexer, 'golden_globe_winners', 'Golden Globe-Winning', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=golden_globe_winners&sort=boxoffice_gross_us,desc', indexer)
            # FIX: was 'golden_g lobe_nominees' (stray space in the section id).
            self.AddSection(list, indexer, 'golden_globe_nominees', 'Golden Globe-Nominated', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=golden_globe_nominees&sort=boxoffice_gross_us,desc', indexer)
        elif indexer == common.indxr_TV_Shows:
            self.AddSection(list, indexer, 'emmy_winners', 'Emmy Award-Winning', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=emmy_winners&sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, 'emmy_nominees', 'Emmy Award-Nominated', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=emmy_nominees&sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, 'golden_globe_winners', 'Golden Globe-Winning', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=golden_globe_winners&sort=num_votes,desc', indexer)
            self.AddSection(list, indexer, 'golden_globe_nominees', 'Golden Globe-Nominated', self.get_url() + 'search/title?' + url_filter_small + url_type + 'groups=golden_globe_nominees&sort=num_votes,desc', indexer)
    else:
        # Unknown section: it's a content URL — scrape it.
        self.ExtractContentAndAddtoList(indexer, section, url, type, list, page, total_pages, sort_by, sort_order)
def GetFileHosts(self, url, list, lock, message_queue, season, episode, type, year, query, RES):
    """Resolve the playable host links for a movie or TV episode page.

    For episodes, first locates the SxxExx entry on the show page; then tries
    self.fetch() on the page, falling back to per-server 'demo.php' requests
    and finally to extracting the raw <source> URL by hand.
    """
    from entertainment.net import Net
    import re
    net = Net(cached=False)
    # Zero-pad season/episode to two digits for the SxxExx comparison below.
    season_pull = "0%s" % season if len(season) < 2 else season
    episode_pull = "0%s" % episode if len(episode) < 2 else episode
    referer = url
    headers = {'User-Agent': self.User_Agent, 'Referer': self.base_url}
    html = net.http_GET(url, headers=headers).content
    if type == 'tv_episodes':
        link = html.split('"target"')
        for p in link:
            try:
                p = p.replace(' ', '')
                name = re.compile('"episode">(.+?)<').findall(p)[0]
                URL = re.compile('href="(.+?)"').findall(p)[0]
                # Normalize "S01-E02"-style labels down to bare digits.
                data = name.replace('-', '').upper()
                data = data.replace('S', '').replace('E', '')
                if ',' in data:
                    data = data.split(',')[0]
                BOTH = season_pull + episode_pull
                if BOTH in data:
                    # Intentionally rebinds `link` to the matching episode's
                    # page HTML; the loop keeps iterating the original split list.
                    link = net.http_GET(URL, headers=headers).content
            except: pass
    else:
        # Movies: follow the "Watch" link to the player page.
        request_url = re.findall(r'href="([^"]+)">Watch', str(html), re.I | re.DOTALL)[0]
        link = net.http_GET(request_url, headers=headers).content
    try:
        self.fetch(list, link)
    except:
        # Fallback 1: query demo.php once per advertised server id.
        request_url2 = '%s/demo.php' % self.base_url
        match2 = re.findall(r'link="([^"]+)".*?>Server .*?</span>', str(link), re.I | re.DOTALL)
        for a in match2:
            link2 = net.http_GET('%s?v=%s' % (request_url2, a), headers=headers).content
            try:
                self.fetch(list, link2)
            except:
                # Fallback 2: pull the raw <source> URL out of the player HTML.
                url = re.findall(r'source.*?"([^"]+)"', str(link2), re.I | re.DOTALL)[0]
                if 'google' in url or 'usercdn' in url or self.base_url in url:
                    final_url = url
                else:
                    final_url = '%s/%s' % (self.base_url, url)
                # Normalize relative view.php references.
                final_url = final_url.replace('../view.php?', 'view.php?')
                final_url = final_url.replace('./view.php?', 'view.php?')
                HOST = final_url.split('//')[1].replace('redirector.', '')
                HOST = HOST.split('/')[0].split('.')[0]
                import urllib
                if 'usercdn' in final_url:
                    # usercdn links only resolve with Referer/Host headers
                    # appended Kodi-style ("url|urlencoded-headers");
                    # e.g. test with the film "Moana".
                    final_url = final_url.replace(':443', '')
                    host = final_url.split('//')[1].split('/')[0]
                    headers = {'Referer': referer, 'Host': host, 'User-Agent': self.User_Agent}
                    final_url = final_url.strip() + '|' + urllib.urlencode(headers)
                res = RES.replace('-', '').strip()
                self.AddFileHost(list, res, final_url)
def Search(self, indexer, keywords, type, list, lock, message_queue, page='', total_pages=''):
    """Search IMDb for `keywords` and add results matching >= 80% of the terms.

    Falls back to the 'aka' alias of a result before rejecting it; gives up
    after two consecutive non-matching results (IMDb orders by relevance).

    FIX: the aka text was read with search_item.group(4), but the result
    pattern only has 3 groups, so that line always raised
    "error: no such group"; the aka text lives in group 3 (the same trailing
    info read later as item_other_info).
    """
    from entertainment.net import Net
    net = Net()
    keywords = self.CleanTextForSearch(keywords)
    keywords_lower = keywords.lower().split(' ')
    match_total = float(len(keywords_lower))
    from entertainment import odict
    search_dict = odict.odict({'s': 'tt', 'q': keywords})
    if indexer == common.indxr_Movies:
        search_dict.update({'ttype': 'ft'})
    elif indexer == common.indxr_TV_Shows:
        search_dict.update({'ttype': 'tv'})
    # Stable parameter order keeps the request URL cache-friendly.
    search_dict.sort(key=lambda x: x[0].lower())
    import urllib
    search_for_url = self.get_url() + 'find?' + urllib.urlencode(search_dict)
    content = net.http_GET(search_for_url).content
    if '<h1 class="findHeader">No results found' in content:
        return
    self.AddInfo(list, indexer, 'search', self.get_url(), type, '1', '1')
    mode = common.mode_File_Hosts
    if type == 'tv_shows':
        mode = common.mode_Content
        type = 'tv_seasons'
    import re
    search_results = re.search('(?s)<table class="findList">(.+?)</table>', content)
    if search_results:
        search_results = search_results.group(1)
    search_term_not_found_count = 0
    # Groups: (1) title id, (2) display name, (3) trailing info (year/type/aka).
    for search_item in re.finditer('<td class="result_text"> <a href="/title/(.+?)/.+?" >(.+?)</a> (.+?) </td>', content):
        item_id = search_item.group(1)
        item_url = self.get_url() + 'title/' + item_id
        item_name = search_item.group(2).strip()
        item_name_lower = item_name.lower().strip()
        match_count = 0
        for kw in keywords_lower:
            if kw in item_name_lower:
                match_count = match_count + 1
        match_fraction = (match_count / match_total)
        if not (match_fraction >= 0.8):
            # Try the result's "aka" alias before rejecting it.
            aka_item = search_item.group(3)  # FIX: was group(4) — pattern has only 3 groups
            aka_name = re.search('aka <i>"(.+?)"</i>', aka_item)
            if aka_name:
                item_name = aka_name.group(1)
                item_name_lower = item_name.lower()
                match_count = 0
                for kw in keywords_lower:
                    if kw in item_name_lower:
                        match_count = match_count + 1
                match_fraction = (match_count / match_total)
                if not (match_fraction >= 0.8):
                    search_term_not_found_count += 1
                    if search_term_not_found_count >= 2:
                        break
                    else:
                        continue
            else:
                search_term_not_found_count += 1
                if search_term_not_found_count >= 2:
                    break
                else:
                    continue
        item_title = item_name.strip()
        item_other_info = search_item.group(3)
        item_year = re.search('\(([0-9]+)\)', item_other_info)
        if item_year:
            item_year = item_year.group(1)
            item_title += ' (' + item_year + ')'
        else:
            item_year = ''
        # Classify the result as movie vs series from its trailing info text.
        if 'movie' in item_other_info.lower():
            type = common.indxr_Movies
            mode = common.mode_File_Hosts
            indexer = common.indxr_Movies
        elif 'series' in item_other_info.lower():
            type = 'tv_seasons'
            mode = common.mode_Content
            indexer = common.indxr_TV_Shows
        self.AddContent(list, indexer, mode, item_title.strip(), '', type, url=item_url, name=item_name.strip(), year=item_year, imdb_id=item_id)
def GetSection(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """Build the filmstream browse menus, or delegate content scraping.

    The 'main' section lists the fixed top-level entries; 'genre' and 'anno'
    scrape their sub-menus live from the site; anything else is treated as a
    content URL and passed to ExtractContentAndAddtoList.
    """
    from entertainment.net import Net
    import re
    net = Net()
    url_type = ''
    content_type = ''
    if indexer == common.indxr_Movies:
        if section == 'main':
            # Fixed top-level menu entries.
            self.AddSection(list, indexer, 'al-cinema', 'Al-Cinema', self.base_url + 'al-cinema/', indexer)
            self.AddSection(list, indexer, 'piu-visti', 'Piu-Visti', self.base_url + 'piu-visti/', indexer)
            self.AddSection(list, indexer, 'sub-ita', 'Sub-Ita', self.base_url + 'film/sub-ita/', indexer)
            self.AddSection(list, indexer, 'anno', 'Anno', 'http://filmstream.me/news/', indexer)
            self.AddSection(list, indexer, 'genre', 'Genere', 'http://filmstream.me/news/', indexer)
        elif section == 'genre':
            # Scrape the genre links out of the news page sidebar.
            # NOTE(review): the literal section id 'genres_title' is passed for
            # every genre (probably meant the variable) — harmless, since
            # unknown sections fall through to ExtractContentAndAddtoList.
            page_html = net.http_GET(url).content
            found = re.findall(r'<li><a href="(http://filmstream.me/film/.+?)">(.+?)</a></li>', page_html, re.I)
            for entry_url, entry_label in found:
                self.AddSection(list, indexer, 'genres_title', entry_label.upper(), entry_url, indexer)
        elif section == 'anno':
            # Scrape the per-year links the same way.
            page_html = net.http_GET(url).content
            found = re.findall(r'<li><a href="(http://filminstreaming.eu/anno/.+?)">(.+?)</a></li>', page_html, re.I)
            for entry_url, entry_label in found:
                self.AddSection(list, indexer, 'anno_title', entry_label.upper(), entry_url, indexer)
        else:
            # Content URL: scrape it.
            self.ExtractContentAndAddtoList(indexer, section, url, type, list, page, total_pages, sort_by, sort_order)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Locate the vidics.ch page for a movie or TV episode and queue its
    hoster links.

    Interface unchanged: links are delivered through self.GetFileHosts(...)
    into `list`; nothing is returned.

    Fixed vs. original: unused `import urllib2` and a dead reassignment of
    `title` removed; bare `except:` narrowed to Exception; the per-language
    link-parsing loop (previously duplicated in the TV and movie branches)
    factored into _add_vidics_hosts().
    """
    import re
    from entertainment.net import Net
    net = Net(cached=False)
    tv_series = name.lower().replace(' ', '_')
    if type == 'tv_episodes':
        if 'the_' in tv_series:
            # Some shows are indexed without the leading "The"; retry without it.
            try:
                new_url = 'http://www.vidics.ch/Serie/%s-Season-%s-Episode-%s' % (tv_series, season, episode)
                content = net.http_GET(new_url).content
            except Exception:
                new_url = 'http://www.vidics.ch/Serie/%s-Season-%s-Episode-%s' % (tv_series.replace('the_', ''), season, episode)
                content = net.http_GET(new_url).content
        else:
            new_url = 'http://www.vidics.ch/Serie/%s-Season-%s-Episode-%s' % (tv_series, season, episode)
            content = net.http_GET(new_url).content
        self._add_vidics_hosts(content, list, lock, message_queue)
    else:
        new_url = 'http://www.vidics.ch/Film/%s' % (name.replace(' ', '_'))
        content = net.http_GET(new_url).content
        if year + ' year' not in content:
            # Wrong film page (year not on it): retry with "Title_(Year)".
            new_url = 'http://www.vidics.ch/Film/%s_(%s)' % (name.replace(' ', '_'), year)
            content = net.http_GET(new_url).content
        self._add_vidics_hosts(content, list, lock, message_queue)

def _add_vidics_hosts(self, content, list, lock, message_queue):
    """Parse one vidics.ch links page: split it per language flag, then
    forward each hoster link (filtered by the configured language, or all
    when the setting is 'All') to self.GetFileHosts with a
    'HOST - [COLOR orange]LANGUAGE[/COLOR]' label."""
    import re
    for chunk in content.split('title="Language'):
        if ' Flag ' not in chunk:
            continue
        language = re.compile(' Flag (.+?)">').findall(chunk)[0]
        links = re.compile('href="(.+?)" target="_blank" rel="nofollow">(.+?)<', re.DOTALL).findall(chunk)
        for link_url, host_name in links:
            label = host_name.upper() + ' - [COLOR orange]' + language.upper() + '[/COLOR]'
            if self.get_lang() == 'All' or self.get_lang().lower() in language.lower():
                self.GetFileHosts('http://www.vidics.ch' + link_url, list, lock, message_queue, label)
# Resolve a kplayer-embedded stream URL to a playable media URL.
# Flow (as far as the collapsed source shows): find the packed
# <script>eval...</script> blob, extract KPlayer root URL and init token,
# POST them back, handle the recaptcha path via common.handle_captcha, then
# let the user pick a quality from the returned 'source' list; otherwise
# fall back to scraping the hidden embedHolder iframe and following the
# 'file' URL (HTTPError redirects yield the final googlevideo URL).
# NOTE(review): `self.decode(encoded_stuff)` discards its result, yet
# `decoded_stuff` is read immediately afterwards — unless decode() injects
# that name into an outer scope this is a NameError; likely intended:
# `decoded_stuff = self.decode(encoded_stuff)`. Confirm before fixing.
# Relies on module-level `json`, `common` and `xbmcgui` imports (not visible
# in this chunk). Python 2 syntax (`print html`, `except ..., error`).
# NOTE(review): the block ends with a dangling `else:` — that branch's body
# is missing/truncated in this copy of the file; code left byte-identical.
def Resolve(self, url): import decrypter from entertainment.net import Net import re net = Net(cached=False) html = net.http_GET(url).content encoded_stuff = re.search('(?s)(<script>eval.+?</script>)', html) if encoded_stuff: encoded_stuff = encoded_stuff.group(1) self.decode(encoded_stuff) kplayer_root_url = re.search('KPlayer.kplayer_root_url = [\'"](.+?)[\'"]', decoded_stuff).group(1).replace('\/','/') kplayer_e = re.search('KPlayer.init\([\'"](.+?)[\'"]\)', decoded_stuff).group(1) html = net.http_POST(kplayer_root_url, {'url':kplayer_e}, headers={'Referer':url}).content j = json.loads(html) captcha_k = "" if j['status'] == False and j['code']==3: captcha_dict = common.handle_captcha(url, '<script type="text/javascript" src="http://www.google.com/recaptcha/api/js/recaptcha_ajax.js">', params={'site':j['k']}) if captcha_dict['status'] == 'ok': html = net.http_POST(kplayer_root_url, {'url':kplayer_e, 'chall':captcha_dict['challenge'], 'res':captcha_dict['captcha']}, headers={'Referer':url}).content print html playfiles = json.loads(html) res_name = [] res_url = [] if playfiles['status']==True and playfiles['code']==1000: for playfile in playfiles['source']: res_name.append(playfile['label']) res_url.append(playfile['file']) dialog = xbmcgui.Dialog() ret = dialog.select('Please Select Stream Quality.',res_name) if ret < 0: return None else: return res_url[ret].replace('\/','/').replace('\u003d','=').replace('\u0026','&') else: headers={'Host':'r20---googlevideo.com', 'Referer': url , 'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36'} html = net.http_GET(url).content if '<div id="embedHolder" style="display:none;"><iframe src="' in html: item_url=re.compile('<div id="embedHolder" style="display:none;"><iframe src="(.+?)"').findall(html)[0] headers={'Host':'r20---googlevideo.com', 'Referer': url , 'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36'} content = net.http_GET(item_url,headers).content new_url = re.search("[\"']{1}file[\"']{1}\:[\"']{1}(.+?)[\"']{1}", content) if new_url: new_url = new_url.group(1).replace("\/", "/") headers.update({'Referer':item_url, 'Accept':'*/*', 'Accept-Encoding':'identity;q=1, *;q=0', 'Range':'bytes=0-'}) import urllib2 try: new_content = net.http_GET(new_url,headers,auto_read_response=False) play_url = new_content.get_url() except urllib2.HTTPError, error: play_url = error.geturl() return play_url else:
# Scrape hoster links from a release page.
# The page <title> sets the default quality (DVDSCR / DVD / HD via
# 'bluray'). "New format" pages are parsed button-by-button: multi-part
# links accumulate into host_url (joined with '|||part|||') and are flushed
# to AddFileHost when a 'source'/'full' entry, a 'premium' entry, or any
# other entry is met. Legacy pages (new_format stays False) follow the
# single adf.ly link and collect every <iframe> source as one
# part-delimited URL.
# NOTE(review): relies on a module-level `import re` — not imported locally
# like this file's other methods; verify it exists at file scope.
# `old_quality` appears to lag one marker behind `quality` so that an
# accumulated part-list is filed under the quality that was current while
# its parts were collected — presumably intentional; confirm before
# simplifying. Source is whitespace-collapsed, so the exact nesting of the
# part/source/premium chain cannot be fully confirmed; code left
# byte-identical.
def GetFileHosts(self, url, list, lock, message_queue): from entertainment.net import Net net = Net() content = net.http_GET(url).content global_quality = 'DVD' pg_title = re.search('<title>(.+?)</title>', content).group(1).lower() if 'dvdscr' in pg_title: global_quality = 'DVDSCR' elif 'bluray' in pg_title: global_quality = 'HD' quality = global_quality old_quality = quality host_url = '' new_format = False for sq in re.finditer( '(?:(?:class=[\'"]{1}btn)|(?:<p style="text\-align: center;"><span style="font\-size: 20px;))(.+?)</p>', content): new_format = True sq_item = sq.group(1) sq_name = re.search('<strong>(.+?)</strong>', sq_item) if sq_name: sq_name = sq_name.group(1).lower() else: sq_name = '' if 'dvdscr' in sq_name: old_quality = quality quality = 'DVDSCR' elif 'dvd' in sq_name: old_quality = quality quality = 'DVD' elif 'bluray' in sq_name: old_quality = quality quality = 'HD' sq_url = re.search('href=[\'"]{1}(.+?)[\'"]{1}', sq_item) if sq_url: sq_url = sq_url.group(1) if 'adf.ly' in sq_url: sq_url = re.sub('http.+?http', 'http', sq_url) if 'part' in sq_name or 'part' in sq_url: host_url += sq_url + '|||part|||' elif 'source' in sq_name or 'full' in sq_name: self.AddFileHost(list, old_quality, sq_url, host=self.display_name) elif 'premium' in sq_name or 'playsominaltv.com' in sq_url: if host_url and len(host_url) > 0: self.AddFileHost(list, old_quality, host_url, host=self.display_name) host_url = '' self.AddFileHost(list, global_quality, sq_url, host=self.display_name + ' (Premium)') else: if host_url and len(host_url) > 0: self.AddFileHost(list, old_quality, host_url, host=self.display_name) old_quality = quality host_url = '' if new_format == False: host_url = re.search( '<a href=[\'"]{1}http://adf.ly/.+?/(.+?)[\'"]{1}', content) if host_url: host_url = host_url.group(1) host_content = net.http_GET(host_url).content video_url = '' for i, video_content in enumerate( re.finditer('<iframe.+?src=[\'"]{1}(.+?)[\'"]{1}', host_content)): if i == 0: video_url = video_content.group(1) else: video_url += '|||part|||' + video_content.group(1) if video_url and len(video_url) > 0: self.AddFileHost(list, global_quality, video_url)
# Build an rtmp command-line style URL for a filmon.com channel.
# `url` arrives as '<channel_id>__<quality>'. The channel API is queried
# with a session key (GET_SESSION_ID) and each stream whose quality is
# 'low' is unpacked into playpath/app/swfUrl via a cascade of regex
# attempts, each guarded by try/except so later matches overwrite earlier
# ones; the last 'low' stream processed wins.
# Returns the assembled 'rtmp... playpath=... app=... swfUrl=... tcUrl=...
# pageurl=... live=true' string; for 'HD' requests 'low' is swapped to
# 'high' in the final string. Python 2 syntax (`print r`).
# NOTE(review): whitespace-collapsed source — the exact nesting of the
# mp4/m4v `if`s and the `else:`/`try:` blocks cannot be confirmed from this
# copy; code left byte-identical.
def Resolve(self, url): resolved_media_url = '' quality=url.split('__')[1] url=url.split('__')[0] from entertainment.net import Net net = Net(cached=False) sess = self.GET_SESSION_ID() net.set_cookies(self.cookie_file) r='http://www.filmon.com/api/channel/%s?session_key=%s' % (url,sess) print r content = net.http_GET(r).content import json data = json.loads(content) channels= data['streams'] for stream in channels: if stream['quality'] == 'low': import re foregex= stream['url']+'<' playpath=stream['name'] name=stream['quality'] if re.search('mp4',playpath ,re.IGNORECASE): regex = re.compile('rtmp://(.+?)/(.+?)/(.+?)/<') match1 = regex.search(foregex) app = '%s/%s/' %(match1.group(2), match1.group(3)) swfUrl='http://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf' url=stream['url']+playpath if re.search('m4v',playpath ,re.IGNORECASE): app = 'vodlast' swfUrl= 'http://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf' url= stream['url']+'/'+playpath else: try: regex = re.compile('rtmp://(.+?)/live/(.+?)id=(.+?)<') match = regex.search(foregex) app = 'live/%sid=%s' %(match.group(2),match.group(3)) url= stream['url'] swfUrl= 'http://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf' except: pass try: regex = re.compile('rtmp://(.+?)/(.+?)id=(.+?)"') match1 = regex.search(foregex) app = '%sid=%s' %(match1.group(2), match1.group(3)) swfUrl='http://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf?v=28' except: pass try: regex = re.compile('rtmp://(.+?)/(.+?)/<') match = regex.search(foregex) app = '%s/' %(match.group(2)) url= stream['url']+'/'+playpath swfUrl= 'http://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf' except: pass tcUrl=stream['url'] pageUrl = 'http://www.filmon.com/' resolved_media_url= str(url)+' playpath='+str(playpath)+' app='+str(app)+' swfUrl='+str(swfUrl)+' tcUrl='+str(tcUrl)+' pageurl='+str(pageUrl)+' live=true' if quality=='HD': return resolved_media_url.replace('low','high') else: return resolved_media_url
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """List TV shows for one browse section of this site.

    The five 'index_last*' sections all share the same
    '<Show> - Season x Episode y' row format, which the original code
    copy-pasted five times; that parsing now lives in
    _add_latest_episode_rows().  Any other section is looked up as an
    <a name="..."> anchor block.  (Two dead locals of the original,
    custom_url and new_url, were dropped.)
    """
    from entertainment.net import Net
    import urllib
    net = Net()
    url = urllib.unquote_plus(url)
    html = net.http_GET(url).content
    if section == 'index_last':
        self._add_latest_episode_rows(html, indexer, list)
    if section == 'index_last_3_days':
        self._add_latest_episode_rows(html, indexer, list)
    if section == 'index_last_7_days':
        self._add_latest_episode_rows(html, indexer, list)
    if section == 'index_last_30_days':
        self._add_latest_episode_rows(html, indexer, list)
    if section == 'index_last_365_days':
        self._add_latest_episode_rows(html, indexer, list)
    else:
        # NOTE: exactly as in the original, this `else` pairs only with the
        # *last* `if`, so every section except 'index_last_365_days' —
        # including the other index_last_* sections — also runs the anchor
        # lookup (which simply finds nothing for them).
        r = re.search('<a name="%s">(.+?)(<a name=|</table>)' % section, html, re.DOTALL)
        if r:
            rows = re.compile('class="mnlcategorylist"><a href="(.+?)"><b>(.+?)</b></a>').findall(r.group(1))
            for show_url, show_name in rows:
                show_name = self.CleanTextForSearch(show_name)
                self.AddContent(list, indexer, common.mode_Content, show_name, '', 'tv_seasons', url=self.base_url_tv + show_url, name=show_name)

def _add_latest_episode_rows(self, html, indexer, list):
    """Parse '<Show> - Season x Episode y' listing rows and add each show as
    a tv_seasons entry (uses the module-level `re`, like the original)."""
    rows = re.compile('<a href="(.+?)"><b>(.+?) - Season .+? Episode .+? <').findall(html)
    for show_url, show_name in rows:
        show_name = self.CleanTextForSearch(show_name)
        self.AddContent(list, indexer, common.mode_Content, show_name, '', 'tv_seasons', url=self.base_url_tv + show_url, name=show_name)
def ExtractContentAndAddtoList(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    """List superchillin.com content for one browse section.

    Logs in (see _superchillin_login), fetches `url`, parses the section's
    listing with the original regexes and feeds every item to AddContent.
    Paged sections go through _superchillin_paginate (25 items per page).

    Interface unchanged.  The login sequence (copy-pasted nine times in the
    original) and the pagination boilerplate (copy-pasted seven times) now
    live in the two private helpers below; every parsing regex, AddContent
    label and popup string is preserved byte-for-byte.
    """
    import re
    # The original titled the failed-login popup "Noobroom" in the 'latest'
    # and 'random' branches and "Superchillin" everywhere else; preserved.
    if section in ('latest', 'random'):
        fail_title = '[B][COLOR blue]I[/B][/COLOR]stream: Noobroom'
    else:
        fail_title = '[B][COLOR blue]I[/B][/COLOR]stream: Superchillin'
    login = self._superchillin_login(url, fail_title)
    if login is None:
        return  # no credentials configured; popup already shown
    net, html = login
    if section == 'azlist':
        match = re.compile("href='/(.+?)'>(.+?)</a><br>").findall(html)
        match, page, total_pages = self._superchillin_paginate(match, list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
        for item_url, name in match:
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_File_Hosts, name, '', type, url=self.base_url + item_url, name=name)
    elif section == 'rating':
        match = re.compile("<br><b>(.+?)</b> - (.+?) - <a[^>]+?href='(.+?)'>(.+?)</a>").findall(html)
        match, page, total_pages = self._superchillin_paginate(match, list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
        for rating, year, item_url, name in match:
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + '[COLOR royalblue]' + rating + '[/COLOR])' + ' (' + '[COLOR red]' + year + '[/COLOR]' + ')', '', type, url=self.base_url + item_url, name=name, year=year)
    elif section == 'kids':
        match = re.compile("<b><a style=\'color:#fff\' href=\'(.+?)\'>(.+?)</a> - (.+?)</div>").findall(html)
        match, page, total_pages = self._superchillin_paginate(match, list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
        for item_url, name, pg in match:
            name = self.CleanTextForSearch(name)
            pg = '[COLOR royalblue]' + pg + '[/COLOR]'
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + pg + ')', '', type, url=self.base_url + item_url, name=name)
    elif section == 'random':
        # Follows each IMDb link on the page to read title/year.  As in the
        # original, every random pick is filed under plain self.base_url.
        for imdb_url in re.compile('<a title="The Internet Movie Database" style="text-decoration: none; color: .+? href="(.+?)"').findall(html):
            imdb_html = net.http_GET(imdb_url).content
            for name, year in re.compile('<title>(.+?) (\([\d]{4}\)) - IMDb</title>').findall(imdb_html):
                name = self.CleanTextForSearch(name)
                year = year.replace('(', '').replace(')', '')
                self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + '[COLOR red]' + year + '[/COLOR]' + ')', '', type, url=self.base_url, name=name, year=year)
    elif section == 'tvshows':
        match = re.compile("href='/(.+?)'>(.+?)</a></b><br><br>\s*<span style='color:.+?;font-size:14px'>.+?Latest: (.+?) - <b>").findall(html)
        match, page, total_pages = self._superchillin_paginate(match, list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
        for item_url, name, eps in match:
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_Content, name + ' (' + '[COLOR red]' + eps + '[/COLOR]' + ')', '', 'tv_seasons', url=self.base_url + item_url, name=name)
    elif section == 'tvshowsadded':
        match = re.compile("<a href='(.+?)'><img style='border:0' src='(.+?)' width='53' height='79'></a>.+?<a style='color:#fff' href='.+?'>(.+?)</a></b><br><br>.+?Latest: (.+?) - <b><span style='color:#fff'>(.+?)</span>", re.DOTALL).findall(html)
        match, page, total_pages = self._superchillin_paginate(match, list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
        for item_url, image, name, eps, latest_title in match:
            name = self.CleanTextForSearch(name)
            # Note: this branch uses an absolute superchillin URL, not base_url.
            self.AddContent(list, indexer, common.mode_Content, name + ' (' + '[COLOR red]' + eps + '[/COLOR]' + ')', '', 'tv_seasons', url='http://superchillin.com' + item_url, name=name)
    else:
        # 'latest', 'year' and any unknown section share the
        # "<year> - <link>" movie listing (their bodies were identical).
        match = re.compile("<br>(.+?) - <a[^>]+?href='(.+?)'>(.+?)</a>").findall(html)
        match, page, total_pages = self._superchillin_paginate(match, list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
        for year, item_url, name in match:
            name = self.CleanTextForSearch(name)
            self.AddContent(list, indexer, common.mode_File_Hosts, name + ' (' + '[COLOR red]' + year + '[/COLOR]' + ')', '', type, url=self.base_url + item_url, name=name, year=year)

def _superchillin_login(self, url, fail_title):
    """Perform the superchillin.com login dance and fetch `url`.

    Returns (net, html) on success, or None when no credentials are
    configured (after showing the settings popup).  A failed login check is
    reported via a popup titled `fail_title`, but the fetched page is still
    returned — matching the original behaviour.
    """
    import re
    from entertainment.net import Net
    net = Net(cached=False)
    tv_user = self.Settings().get_setting('tv_user')
    tv_pwd = self.Settings().get_setting('tv_pwd')
    if tv_user == 'Enter your Superchillin email' or tv_pwd == 'xunity' or tv_user == '' or tv_pwd == '':
        self._remove_cookie_file()
        common.addon.show_small_popup('[B][COLOR blue]I[/B][/COLOR]stream: Superchillin', '[COLOR red]Please Enter Login & Password in Settings[/COLOR]', 7000, self.icon)
        return None
    # Always start from a fresh session.
    self._remove_cookie_file()
    headers = {'Content-Type': 'application/x-www-form-urlencoded',
               'Connection': 'keep-alive',
               'Cache-Control': 'max-age=0',
               'Host': 'superchillin.com',
               'Origin': 'http://superchillin.com',
               'Referer': 'http://superchillin.com/login.php',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'}
    net.http_GET('http://superchillin.com/login.php')
    net.http_POST('http://superchillin.com/login2.php', {'email': str(tv_user), 'password': str(tv_pwd)}, headers, auto_read_response=False).content
    net.save_cookies(self.cookie_file)
    net.set_cookies(self.cookie_file)
    html = net.http_GET(url).content
    if not re.search(r'\"logout.php\"\>Logout\<\/a\>', html, re.I):
        common.addon.show_small_popup(fail_title, '[COLOR red]Please Check Login & Password Are Correct[/COLOR]', 7000, self.icon)
    return net, html

def _superchillin_paginate(self, match, list, indexer, section, url, type, page, total_pages, sort_by, sort_order):
    """Register paging info via AddInfo and slice `match` down to the 25
    entries belonging to `page`.  Returns (page_slice, page, total_pages)."""
    num_items_on_a_page = 25
    if page == '':
        page = '1'
    total_items = len(match)
    # Floor division: identical to the original `/` under Python 2, and
    # still an int under Python 3.
    total_pages = str((total_items // num_items_on_a_page) + (1 if total_items % num_items_on_a_page >= 1 else 0))
    self.AddInfo(list, indexer, section, url, type, page, total_pages, sort_by, sort_order)
    start_index = (int(page) - 1) * num_items_on_a_page
    return match[start_index:start_index + num_items_on_a_page], page, total_pages

def _remove_cookie_file(self):
    """Best-effort removal of the stored session cookie file."""
    import os
    if os.path.exists(self.cookie_file):
        try:
            os.remove(self.cookie_file)
        except OSError:
            pass
def GetFileHosts(self, url, list, lock, message_queue):
    """Scrape streaming-host links from a superchillin content page.

    url           -- content page URL to scrape
    list          -- shared result list, forwarded to AddFileHost
    lock          -- threading lock (part of the caller contract; unused here)
    message_queue -- progress queue (part of the caller contract; unused here)
    """
    from entertainment.net import Net
    import re
    net = Net()

    content = net.http_GET(url).content

    # Page-level quality marker, e.g. "TV-720p". Guard the lookup: the
    # original indexed findall()[0] directly and crashed with IndexError
    # on pages without the marker; default to SD instead.
    qual_hits = re.compile(r'td_col\"\>TV-(.+?)\<\/td\>').findall(content)
    quality = qual_hits[0].lower() if qual_hits else ''
    res = 'HD' if ('720p' in quality or '1080p' in quality or 'hd' in quality) else 'SD'

    sources = []
    # Hoster links opened in a new tab. findall() never raises, so the
    # original bare try/except around these was dead weight and is gone.
    sources.extend(re.compile(r'\'_blank\'\shref=\'(.+?)\'\>',
                              re.I | re.M | re.DOTALL).findall(content))
    # Plain-text URLs listed right after an HH:MM timestamp.
    sources.extend(re.compile(r'\d\d\:\d\d\<\/div\>(http\:.+?)\<\/div\>\<\/li\>',
                              re.I | re.M | re.DOTALL).findall(content))

    for host_url in sources:  # renamed: the original shadowed the `url` parameter
        # Skip archive downloads — they are not streamable.
        if '.rar' not in host_url:
            self.AddFileHost(list, res, host_url)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    # Superchillin provider: log in with the user's stored credentials,
    # search for `name`, then hand matching content-page URLs to
    # self.GetFileHosts. `type` is 'movies' or 'tv_episodes'; `list`,
    # `lock` and `message_queue` are forwarded untouched.
    #if type!= 'movies': return
    from entertainment.net import Net
    import re
    net = Net(cached=False)
    # Stored addon settings; the literal defaults below mean "never configured".
    tv_user = self.Settings().get_setting('tv_user')
    tv_pwd = self.Settings().get_setting('tv_pwd')
    if tv_user == 'Enter your Superchillin email' or tv_pwd == 'xunity' or tv_user == '' or tv_pwd == '':
        # No usable credentials: drop any stale session cookie, tell the
        # user via popup, and bail out without searching.
        if os.path.exists(self.cookie_file):
            try:
                os.remove(self.cookie_file)
            except:
                pass
        common.addon.show_small_popup('[B][COLOR blue]I[/B][/COLOR]stream: Superchillin', '[COLOR red]Please Enter Login & Password in Settings[/COLOR]', 7000, self.icon)
        return
    # Fresh login every call: remove the old cookie jar first (best effort).
    if os.path.exists(self.cookie_file):
        try:
            os.remove(self.cookie_file)
        except:
            pass
    # Browser-like headers; the site appears to expect a Referer from its
    # own login page — TODO confirm which headers are actually required.
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Connection': 'keep-alive', 'Cache-Control': 'max-age=0', 'Host': 'superchillin.com', 'Origin': 'http://superchillin.com', 'Referer': 'http://superchillin.com/login.php', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'}
    # Order matters: GET the login page (seeds session cookies), POST the
    # credentials, then persist and re-arm the cookie jar for later requests.
    net.http_GET('http://superchillin.com/login.php')
    net.http_POST('http://superchillin.com/login2.php', {'email': str(tv_user), 'password': str(tv_pwd)}, headers, auto_read_response=False).content
    net.save_cookies(self.cookie_file)
    net.set_cookies(self.cookie_file)
    name = self.CleanTextForSearch(name)
    name = name.rstrip()
    # import urllib
    movie_url='http://superchillin.com/search.php?q=%s' %(name.replace(' ','+'))
    html = net.http_GET(movie_url).content
    # A "Logout" link in the results proves we are logged in; its absence
    # means the credentials were rejected (warn, but still fall through).
    if not re.search(r'\"logout.php\"\>Logout\<\/a\>', html, re.I):
        common.addon.show_small_popup('[B][COLOR blue]I[/B][/COLOR]stream: Superchillin', '[COLOR red]Please Check Login & Password Are Correct[/COLOR]', 7000, self.icon)
    if type == 'movies':
        # Compare normalized ids (CreateIdFromString) so punctuation/case
        # differences between our name and the site's title don't matter.
        name_lower = common.CreateIdFromString(name)
        for item in re.finditer(r"href='/(.+?)'>(.+?)</a> \((.+?)\)", html):
            item_url = self.base_url + item.group(1)
            item_name = common.CreateIdFromString(item.group(2))
            item_year = item.group(3)
            #item_url = item_url+'&hd=1'
            # Require both title and year to match before scraping hosts;
            # the '__movies' suffix tags the URL for GetFileHosts.
            if item_name == name_lower and item_year == year:
                self.GetFileHosts(item_url + '__movies', list, lock, message_queue)
    elif type == 'tv_episodes':
        name_lower = common.CreateIdFromString(name)
        # Each search hit tagged "TV Series" is a show page; fetch it and
        # look for the wanted episode inside.
        for item in re.finditer(r"<i>TV Series</i></b><br><br>.+? href='/(.+?)'>(.+?)</a>", html):
            item_url = self.base_url + item.group(1)
            item_name = common.CreateIdFromString(item.group(2))
            # NOTE: rebinds `html` to the show page; the outer finditer
            # already iterates over the original search results string.
            html = net.http_GET(item_url).content
            #<b>(.+?)x(.+?) - <a style='text.+? href='/(.+?)'>(.+?)</a></b>
            #<b>(.+?)x(.+?) .+? href='/(.+?)'>(.+?)</a>
            # Zero-pad to two digits; the episode list uses e.g. "1x01"
            # (unpadded season, padded episode) — season_pull is unused here.
            season_pull = "0%s"%season if len(season)<2 else season
            episode_pull = "0%s"%episode if len(episode)<2 else episode
            for item in re.finditer(r"<b>"+season+"x"+episode_pull+" - <a style='text.+? href='/(.+?)'>(.+?)</a></b>", html):
                item_url2 = self.base_url + item.group(1)
                item_title = item.group(2)
                if item_name == name_lower:
                    self.GetFileHosts(item_url2, list, lock, message_queue)
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
    """Moviestorm provider: search for `name`, locate the matching view page,
    and forward each WATCH link (with its quality and host name) to
    self.GetFileHosts. `type` is 'tv_episodes' or 'movies'.
    """
    import re
    from entertainment.net import Net
    net = Net(cached=False)
    title = self.CleanTextForSearch(title)
    name = self.CleanTextForSearch(name)
    if type == 'tv_episodes':
        # The site drops a leading article from titles, so strip it before
        # matching against the result heading.
        search_term = '%s</h1>' % (re.sub('\A(a|A|the|THE|The)\s', '', name))
        episode_get = '?season=%s&episode=%s#searialinks' % (season, episode)
        movie_url = 'http://moviestorm.eu/search'
        data = {'q': name, 'go': 'Search'}
        content = net.http_POST(movie_url, data).content
        # One chunk per search hit.
        for chunk in content.split('<div class="movie_box">'):
            if search_term not in chunk:
                continue
            for url in re.compile('<a href="(.+?)"').findall(chunk):
                if 'http://moviestorm.eu/view' not in url:
                    continue
                link = net.http_GET(url + episode_get).content
                # One section per quality row ("SD", "HD", ...).
                for section in link.split('<td class="quality_td">'):
                    res = section.split('</td>')[0].strip()
                    try:
                        episode_link = re.compile(
                            'href="(.+?)">WATCH</a>').findall(section)[0]
                        # BUG FIX: this branch previously read the undefined
                        # name `movie_link`; the NameError was swallowed by
                        # the bare except, so no episode was ever added.
                        host = episode_link.split('//')[1]
                        host = host.split('/')[0]
                        self.GetFileHosts(episode_link, list, lock,
                                          message_queue, res, host)
                    except:
                        # Section without a WATCH link — skip it.
                        pass
    elif type == 'movies':
        name = name.rstrip()
        search_term = '%s</h1>' % (re.sub('\A(a|A|the|THE|The)\s', '', name))
        movie_url = 'http://moviestorm.eu/search'
        data = {'q': name, 'go': 'Search'}
        content = net.http_POST(movie_url, data).content
        for chunk in content.split('<div class="movie_box">'):
            if search_term not in chunk:
                continue
            # Movies use only the first link of the matching result box.
            new_url = re.compile('<a href="(.+?)"').findall(chunk)[0]
            if 'http://moviestorm.eu/view' not in new_url:
                continue
            link = net.http_GET(new_url).content
            for section in link.split('<td class="quality_td">'):
                res = section.split('</td>')[0].strip()
                try:
                    movie_link = re.compile(
                        'href="(.+?)">WATCH</a>').findall(section)[0]
                    host = movie_link.split('//')[1]
                    host = host.split('/')[0]
                    self.GetFileHosts(movie_link, list, lock,
                                      message_queue, res, host)
                except:
                    pass
def GetSection(self, indexer, section, url, type, list, page='', total_pages='', sort_by='', sort_order=''):
    # Build the navigation tree for this provider: top-level menu entries,
    # the sub-entries of one chosen menu, or (fallback) the actual content
    # listing. `section` identifies which level/entry we are expanding.
    from entertainment.net import Net
    import re
    net = Net()
    # TV shows live on a different base URL than films.
    base_url_for_match = self.base_url
    if indexer == common.indxr_TV_Shows:
        base_url_for_match = self.tv_base_url
    if section == 'main':
        # Root level: add the fixed "home" entry, then one entry per
        # <select name="select..."> dropdown found on the page (using each
        # dropdown's first <option> text as its display title).
        if indexer == common.indxr_TV_Shows:
            self.AddSection(list, indexer, 'serie-tv-home', 'Serie-Tv Home', base_url_for_match, indexer)
        else:
            self.AddSection(list, indexer, 'film-home', 'Film Home', base_url_for_match, indexer)
        content = net.http_GET(base_url_for_match).content
        for menu_item in re.finditer('(?s)<select name=[\'"]select(.+?)</select>', content):
            menu_item_title = re.search('<option.+?>(.+?)</option>', menu_item.group(1)).group(1)
            self.AddSection(list, indexer, common.CreateIdFromString(menu_item_title), menu_item_title, base_url_for_match, indexer)
    elif url == base_url_for_match and section not in ('serie-tv-home', 'film-home'):
        # Second level: find the dropdown whose first option matches
        # `section`, then add one sub-section per remaining option.
        content = net.http_GET(url).content
        for menu_item in re.finditer('(?s)<select name=[\'"]select(.+?)</select>', content):
            # The first <option> of each dropdown is its title, not a real
            # link; this flag skips it (and rejects non-matching dropdowns).
            is_item_menu_title = True
            for menu_sub_item in re.finditer('<option value=[\'"](.*)[\'"]>(.+?)</option>', menu_item.group(1)):
                menu_item_title = menu_sub_item.group(2)
                if is_item_menu_title == True:
                    menu_item_title_id = common.CreateIdFromString(menu_item_title)
                    if menu_item_title_id != section:
                        # Wrong dropdown — move on to the next <select>.
                        break
                    is_item_menu_title = False
                    continue
                # NOTE: sub-section links are built on self.base_url even
                # for TV shows — presumably intentional; verify if TV menu
                # links break.
                self.AddSection(list, indexer, common.CreateIdFromString(menu_item_title), menu_item_title, self.base_url + menu_sub_item.group(1), indexer)
    else:
        # Leaf level: `url` points at a concrete listing page.
        self.ExtractContentAndAddtoList(indexer, section, url, type, list, page, total_pages, sort_by, sort_order)