def __http_get_with_retry_1(self, url, headers):
    """Fetch `url`, retrying on timeouts and temporary HTTP errors.

    Retries up to MAX_RETRIES times on socket timeouts and on HTTP
    status codes listed in TEMP_ERRORS; any other HTTPError is
    propagated immediately.  Cookies are persisted in the profile dir.

    Returns the response body, or re-raises the last error once the
    retry budget is exhausted.
    """
    utils.log('Fetching URL: %s' % url, xbmc.LOGDEBUG)
    net = Net()
    cookiejar = _1CH.get_profile()
    cookiejar = os.path.join(cookiejar, 'cookies')
    net.set_cookies(cookiejar)
    retries = 0
    html = None
    last_error = None  # remembered so the final re-raise has a real exception
    while retries <= MAX_RETRIES:
        try:
            html = net.http_GET(url, headers=headers).content
            # if no exception, jump out of the loop
            break
        except socket.timeout as e:
            last_error = e
            retries += 1
            utils.log('Retry #%s for URL %s because of timeout' % (retries, url), xbmc.LOGWARNING)
            continue
        except urllib2.HTTPError as e:
            # if it's a temporary code, retry
            if e.code in TEMP_ERRORS:
                last_error = e
                retries += 1
                utils.log('Retry #%s for URL %s because of HTTP Error %s' % (retries, url, e.code), xbmc.LOGWARNING)
                continue
            # if it's not, pass it back up the stack
            raise
    else:
        # BUG FIX: the original used a bare `raise` here, outside any
        # except handler, which depends on stale interpreter state;
        # re-raise the last caught error explicitly instead.
        raise last_error
    return html
def PLAYLIST(name, url, iconimage, imdb):
    """Show a hoster-selection dialog for items matching `imdb` and hand
    the chosen stream to HOSTER().

    BUG FIX: the dialog index is now mapped to the stream it was built
    from.  Previously, when only `streamlink2` parsed, menu entry 0
    wrongly dispatched the (empty) first stream, and entries leaked
    across multiple matching items.
    """
    net = Net()
    link = net.http_GET(url).content
    link = link.replace('\r\n', '').replace('"},]', '"}]')
    magic = api.loads(link, encoding='latin1')
    dialog = xbmcgui.Dialog()
    for i in magic:
        if imdb not in i['imdblink'].encode('utf-8'):
            continue
        liste = []    # host names shown in the dialog
        streams = []  # stream URL for each dialog entry (parallel list)
        for key in ('streamlink1', 'streamlink2'):
            try:
                stream = re.search("S*R*C*s*r*c*='(.*?)'.*?", i[key]).group(1)
                vid = re.search("S*R*C*s*r*c*='https*://(.*?)\.*c*o*m*t*o*/.*?'.*?", i[key])
                liste.append(vid.group(1))
                streams.append(stream)
            except:
                pass
        hoster = dialog.select('HOSTER', liste)
        # select() returns -1 when the user cancels
        if 0 <= hoster < len(streams):
            HOSTER(name, streams[hoster], iconimage)
def resolve_cloudyvideos(name, url, iconimage):
    """Resolve a cloudyvideos embed URL and start playback.

    Submits the hidden "free download" form, extracts the file URL and
    plays it; on any failure an error dialog is shown and the exception
    is re-raised.
    """
    # normalise embed-style URLs to the plain file id
    url = re.sub('embed-|-.*?(?:\.html)', '', url)
    net = Net()
    web_url = url
    headers = {'Referer': web_url}
    html = net.http_GET(web_url, headers=headers).content
    data = {}
    time.sleep(3)  # the site enforces a short wait before the form may be posted
    # collect the hidden form fields required by the POST
    for match in re.finditer(r'type="hidden".*?name="([^"]+)".*?value="([^"]+)', html):
        data[match.group(1)] = match.group(2)
    data.update({'method_free': 'Continue'})
    htmla = net.http_POST(web_url, data).content
    r = re.search('file:\s*\'(.*?)\',+', htmla)
    pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    pl.clear()
    try:
        listitem = xbmcgui.ListItem(name, thumbnailImage=iconimage)
        url = r.group(1) + '|Referer=http://cloudyvideos&User-Agent=%s' % (USER_AGENT)
        pl.add(url, listitem)
        xbmc.Player().play(pl)
    except Exception as e:
        # removed: an unused DialogProgress instance was created here
        dialog1 = xbmcgui.Dialog()
        dialog1.ok('error', '[UPPERCASE][B] Sorry but the video is deleted!!![/B][/UPPERCASE]')
        print('**** cloudyvideo Error occured: %s' % e)
        raise
def getnet(url, bypass_cloudflare=False):
    """Download `url` and return its body as a utf-8 str, or None.

    Falls back to requestsurl() when the Net() download fails; all
    errors are logged and reported as None.
    """
    try:
        logdata('url', url)
        from addon.common.net import Net
        net = Net()
        # NOTE(review): the original built a User-Agent/Referer header
        # dict (and a MAX_TRIES constant) here but never passed them to
        # http_GET; the dead locals have been removed.  Pass headers
        # explicitly if they are actually needed.
        try:
            data = net.http_GET(url).content
        except:
            logdata('getnet', "download error")
            data = requestsurl(url)
            if data is None:
                logdata('requests', "error")
        try:
            data = data.encode('utf-8', "ignore")
        except:
            pass
        if not data:
            return None
        return data
    except:
        trace_error()
        return None
    # removed: unreachable trailing `return None`
def __http_get_with_retry_1(self, url, headers):
    """GET `url` with retry handling.

    Timeouts and TEMP_ERRORS status codes are retried up to
    MAX_RETRIES times; other HTTP errors propagate immediately.
    Returns the page content, or re-raises the last retried error
    when the budget runs out.
    """
    utils.log('Fetching URL: %s' % url, xbmc.LOGDEBUG)
    net = Net()
    cookiejar = _1CH.get_profile()
    cookiejar = os.path.join(cookiejar, 'cookies')
    net.set_cookies(cookiejar)
    retries = 0
    html = None
    last_error = None  # kept so exhaustion can re-raise a concrete exception
    while retries <= MAX_RETRIES:
        try:
            html = net.http_GET(url, headers=headers).content
            break  # success - leave the retry loop
        except socket.timeout as e:
            last_error = e
            retries += 1
            utils.log('Retry #%s for URL %s because of timeout' % (retries, url), xbmc.LOGWARNING)
            continue
        except urllib2.HTTPError as e:
            if e.code in TEMP_ERRORS:
                # temporary server-side code - retry
                last_error = e
                retries += 1
                utils.log('Retry #%s for URL %s because of HTTP Error %s' % (retries, url, e.code), xbmc.LOGWARNING)
                continue
            # permanent error - hand it back to the caller
            raise
    else:
        # BUG FIX: was a bare `raise` outside any handler; re-raise the
        # last caught error explicitly.
        raise last_error
    return html
def resolve_cloudyvideos(name, url, iconimage):
    """Resolve a cloudyvideos embed URL and play the extracted file.

    Posts the hidden "free" form, pulls the file URL out of the reply
    and queues it; errors show a dialog and are re-raised.
    """
    # strip the embed- prefix / -.html suffix down to the file id
    url = re.sub('embed-|-.*?(?:\.html)', '', url)
    net = Net()
    web_url = url
    headers = {'Referer': web_url}
    html = net.http_GET(web_url, headers=headers).content
    data = {}
    time.sleep(3)  # mandatory wait before the form submit is accepted
    for match in re.finditer(r'type="hidden".*?name="([^"]+)".*?value="([^"]+)', html):
        data[match.group(1)] = match.group(2)
    data.update({'method_free': 'Continue'})
    htmla = net.http_POST(web_url, data).content
    r = re.search('file:\s*\'(.*?)\',+', htmla)
    pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    pl.clear()
    try:
        listitem = xbmcgui.ListItem(name, thumbnailImage=iconimage)
        url = r.group(1) + '|Referer=http://cloudyvideos&User-Agent=%s' % (USER_AGENT)
        pl.add(url, listitem)
        xbmc.Player().play(pl)
    except Exception as e:
        # removed: unused DialogProgress local
        dialog1 = xbmcgui.Dialog()
        dialog1.ok('error', '[UPPERCASE][B] Sorry but the video is deleted!!![/B][/UPPERCASE]')
        print('**** cloudyvideo Error occured: %s' % e)
        raise
def read_url(url):
    """Fetch `url`, unescape HTML entities and return the body.

    FIX: the utf-8 encode is now guarded - in Python 2, encoding a
    byte string with non-ASCII content triggers an implicit ASCII
    decode and raises UnicodeDecodeError.  On failure the unescaped
    text is returned unchanged, matching the guarded read_url
    variants elsewhere in this file.
    """
    net = Net()
    html = net.http_GET(url).content
    h = HTMLParser.HTMLParser()
    html = h.unescape(html)
    try:
        return html.encode('utf-8')
    except:
        return html
def read_url(url):
    """Download `url` and return its body with HTML entities unescaped."""
    parser = HTMLParser.HTMLParser()
    content = Net().http_GET(url).content
    return parser.unescape(content)
def __init__(self, load = None, disable = None, cache_results=False):
    # Scraper-manager setup: load and enable the scrapers, then refresh
    # the cached debrid-host lists when the cache is older than one day.
    #
    # load:          optional list of scraper names to load
    # disable:       optional list of scraper names to disable
    # cache_results: whether search results should be cached
    self.threadpool_size = 5
    self.cache_results = cache_results
    self._load_list = load
    self._disable_list = disable
    self.enabled_scrapers = 0
    self.active_scrapers = []
    self.supported_scrapers = []
    self._active_scrapers = []
    self._load_scrapers()
    self._enable_scrapers()
    self.search_results = []
    self.show_scraper_progress = ADDON.get_setting('enable_scraper_progress') == 'true'
    expired = True
    self.filters = False
    # Optional result filters, loaded from a cache file when enabled.
    if ADDON.get_setting('enable_result_filters') == 'true':
        cache_file = vfs.join(DATA_PATH, 'filters.cache')
        if vfs.exists(cache_file):
            self.filters = ADDON.load_data(cache_file)
    # The debrid host-list cache is considered fresh for 24 hours.
    cache_file = vfs.join(DATA_PATH, 'debrid_hosts.cache')
    if vfs.exists(cache_file):
        timestamp = int(time.time())
        m_time = vfs.get_stat(cache_file).st_mtime()
        if (timestamp - m_time) < 86400:
            expired = False
    if expired:
        # Re-query every supported debrid service.  Each lookup is
        # best-effort: a failing service just leaves its list empty.
        hosts = {"pm": [], "rd": [], "ad": [], "rp": []}
        net = Net()
        try:
            # premiumize.me needs account credentials taken from urlresolver
            customer_id = xbmcaddon.Addon('script.module.urlresolver').getSetting('PremiumizeMeResolver_username')
            pin = xbmcaddon.Addon('script.module.urlresolver').getSetting('PremiumizeMeResolver_password')
            query = {"method": "hosterlist", "params[login]": customer_id, "params[pass]": pin}
            api_url = "http://api.premiumize.me/pm-api/v1.php?" + urllib.urlencode(query)
            response = net.http_GET(api_url).content
            data = json.loads(response)
            if 'result' in data:
                hosts['pm'] = data['result']['hosterlist']
        except:
            pass
        try:
            # real-debrid returns a comma-separated list of quoted names
            response = Net().http_GET('http://real-debrid.com/api/hosters.php').content
            hosts['rd'] = [x.strip('"') for x in response.split(',')]
        except:
            pass
        try:
            # alldebrid uses one quoted name per line
            response = Net().http_GET('http://alldebrid.com/api.php?action=get_host').content
            hosts['ad'] = [x.strip('"') for x in response.split(',\n')]
        except:
            pass
        try:
            # rpnet serves a JSON document with a 'supported' array
            response = Net().http_GET('http://premium.rpnet.biz/hoster2.json').content
            hosts['rp'] = json.loads(response)['supported']
        except:
            pass
        ADDON.save_data(cache_file, hosts)
def read_url(url):
    """Fetch `url`, unescape HTML entities, and return the result.

    Returns a utf-8 encoded string when encoding succeeds, otherwise
    the unescaped text as-is.
    """
    import HTMLParser
    body = Net().http_GET(url).content
    body = HTMLParser.HTMLParser().unescape(body)
    try:
        return body.encode('utf-8')
    except:
        return body
def read_url(url):
    """Return the HTML-unescaped body of `url`.

    Encodes to utf-8 when possible; falls back to the raw unescaped
    text if encoding fails.
    """
    import HTMLParser
    unescape = HTMLParser.HTMLParser().unescape
    text = unescape(Net().http_GET(url).content)
    try:
        return text.encode("utf-8")
    except:
        return text
def readnet2(url):
    """Download `url` with Net() and return the raw response body."""
    from addon.common.net import Net
    net = Net()
    # NOTE(review): a User-Agent/Referer header dict and a MAX_TRIES
    # constant used to be built here but were never passed to http_GET;
    # the dead locals have been removed.  If the headers were intended,
    # pass headers=... to http_GET explicitly.
    html = net.http_GET(url).content
    return html
def SEARCH(url, name, imdb, move, movegen):
    """Prompt for a search term and list every KinoLeak title containing it."""
    link = Net().http_GET(url).content
    link = link.replace('\r\n', '').replace('"},]', '"}]')
    magic = api.loads(link, encoding='latin1')
    keyboard = xbmc.Keyboard('', 'Search KinoLeak', False)
    keyboard.doModal()
    search = keyboard.getText()
    for e, i in enumerate(magic):
        if search.lower() not in (i['titel'].encode('utf-8')).lower():
            continue
        # pull the imdb id (ttNNNN) out of the item's imdb link
        try:
            imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
        except:
            imdb = ""
        # split "Title (Year)" into its parts when possible
        try:
            sub = re.search("(.*?)\((\d+)\)", i['titel'])
            addDir(sub.group(1), url, 2, i['cover'], imdb, "movie".decode('utf-8'), sub.group(2), None, "")
        except:
            addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.getSetting('MAIN'))
def request(self, uri, params=None, query=None, headers=None, return_soup=False, return_json=False):
    """Issue a request against the service API and return the body.

    POSTs when `params` is given, otherwise GETs.  `query` is
    urlencoded into `uri`; cookies are loaded before and saved after
    the call.  The body is returned as BeautifulSoup, parsed JSON or
    raw text depending on the return_* flags.
    """
    cookie_file = vfs.join(COOKIE_PATH, self.service + ".lwp")
    net = Net()
    net.set_cookies(cookie_file)
    # stamp the standard identification headers onto whatever was passed in
    if headers is None:
        headers = {}
    headers["Referer"] = self.referrer
    headers["Accept"] = self.ACCEPT
    headers["User-Agent"] = self.USER_AGENT
    if query:
        uri = uri % urllib.urlencode(query)
    full_url = self.base_url + uri
    if params:
        html = net.http_POST(full_url, params, headers=headers).content
    else:
        html = net.http_GET(full_url, headers=headers).content
    net.save_cookies(cookie_file)
    if return_soup:
        return BeautifulSoup(html)
    if return_json:
        return json.loads(html)
    return html
def PLAYLIST(name, url, iconimage, imdb):
    """Let the user pick a hoster for the item matching `imdb` and play it.

    BUG FIX: dialog entries are now kept in a list parallel to their
    stream URLs.  Previously, when only `streamlink2` parsed, menu
    index 0 dispatched the empty first stream, and entries from earlier
    matching items leaked into later menus.
    """
    net = Net()
    link = net.http_GET(url).content
    link = link.replace('\r\n', '').replace('"},]', '"}]')
    magic = api.loads(link, encoding='latin1')
    dialog = xbmcgui.Dialog()
    for i in magic:
        if imdb not in i['imdblink'].encode('utf-8'):
            continue
        liste = []    # displayed host names
        streams = []  # matching stream URL per menu entry
        for key in ('streamlink1', 'streamlink2'):
            try:
                stream = re.search("S*R*C*s*r*c*='(.*?)'.*?", i[key]).group(1)
                vid = re.search(
                    "S*R*C*s*r*c*='https*://(.*?)\.*c*o*m*t*o*/.*?'.*?", i[key])
                liste.append(vid.group(1))
                streams.append(stream)
            except:
                pass
        hoster = dialog.select('HOSTER', liste)
        # -1 means the dialog was cancelled
        if 0 <= hoster < len(streams):
            HOSTER(name, streams[hoster], iconimage)
def SEARCH(url, name, imdb, move, movegen):
    """Ask for a search string and add a directory entry per matching title."""
    raw = Net().http_GET(url).content
    raw = raw.replace('\r\n', '').replace('"},]', '"}]')
    entries = api.loads(raw, encoding='latin1')
    kb = xbmc.Keyboard('', 'Search KinoLeak', False)
    kb.doModal()
    needle = kb.getText().lower()
    for e, i in enumerate(entries):
        title = i['titel'].encode('utf-8')
        if needle in title.lower():
            # extract the ttNNNN imdb id when the link carries one
            try:
                imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb = ""
            # prefer the "Title (Year)" split, else use the raw title
            try:
                sub = re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1), url, 2, i['cover'], imdb,
                       "movie".decode('utf-8'), sub.group(2), None, "")
            except:
                addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.getSetting('MAIN'))
def INDEX(url, name, imdb, move, movegen):
    """Build the paged movie listing for a genre / the "new" list.

    BUG FIX: all progress-bar percentages used Python 2 integer
    division (`e / len(...)` is 0 whenever e < len), so the dialog
    always showed 0%.  The numerator is now multiplied by 100 before
    the (floor) division.
    """
    net = Net()
    link = net.http_GET(url).content
    link = link.replace('\r\n', '').replace('"},]', '"}]')
    magic = api.loads(link, encoding='latin1')
    progress = xbmcgui.DialogProgress()
    progress.create('Fortschritt', 'This is a progress bar.')
    genre = []
    neu = []
    sammeln = []
    # the newest entries are at the end of the feed, hence reversed()
    for e, i in enumerate(reversed(magic)):
        if name == "Neu im Programm" and e < 27:
            neu.append(i)
    for e, i in enumerate(magic):
        if name in i['genre'].encode('utf-8'):
            genre.append(i)
    for e, i in enumerate(magic):
        if "sammeln" in name:
            sammeln.append(i)
    #----Neu im Programm----#
    for e, i in enumerate(neu):
        if e < len(neu):
            percent = e * 100 // len(neu)  # was int((e/len(neu))*100) == 0
            message = str(e) + " von " + str(len(neu)) + " Filmen geladen"
            progress.update(percent, message,
                            "Dies passiert bei noch nie eingelesenen Filmen")
            try:
                imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb = ""
            try:
                sub = re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1), url, 2, i['cover'], imdb,
                       "movie".decode('utf-8'), sub.group(2), None, "")
            except:
                addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    #----GENRES die Filme----#
    for e, i in enumerate(sorted(genre, key=lambda genre: genre['titel'])):
        if move <= e < move + 25:
            if e - move < move + 25:
                percent = (e - move) * 100 // 25  # was ((e-move)/25*100) == 0
                message = "FilmInfo des " + str(e - move) + ". von 25 Filmen geladen"
                progress.update(percent, message,
                                "Dies passiert bei noch nie eingelesenen Filmen")
                try:
                    imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
                except:
                    imdb = ""
                try:
                    sub = re.search("(.*?)\((\d+)\)", i['titel'])
                    addDir(sub.group(1), url, 2, i['cover'], imdb,
                           "movie".decode('utf-8'), sub.group(2), None, "")
                except:
                    addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    #----FilmInfo von allen Filmen Sammeln fuer die Datenbank----#
    for e, i in enumerate(sorted(sammeln, key=lambda sammeln: sammeln['titel'])):
        if e < len(sammeln):
            percent = e * 100 // len(sammeln)  # was int((e/len(sammeln))*100) == 0
            message = "FilmInfo des " + str(e) + ". von " + str(
                len(sammeln)) + " Filmen geladen"
            progress.update(percent, message,
                            "Dies passiert bei noch nie eingelesenen Filmen")
            try:
                imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb = ""
            try:
                sub = re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1), url, 2, i['cover'], imdb,
                       "movie".decode('utf-8'), sub.group(2), None, "")
            except:
                addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    #----SEITENNAVIGATION----#
    if len(genre) > 0:
        if move != None and move == 0 and len(genre) > 25:
            print("<<<----OLDU-1---->>>")
            addDir("Next-->>", url, 4, "", "", "folder", "", move + 25, name)
        if move != None and move != 0 and move + 25 <= len(genre) and len(genre) - move > 0:
            print("<<<----OLDU-2---->>>")
            addDir("Next-->>", url, 4, "", "", "folder", "", move + 25, name)
            addDir("<<--Back", url, 4, "", "", "folder", "", move - 25, name)
        if move + 25 >= len(genre) and move != 0:
            print("<<<----OLDU-3---->>>")
            addDir("<<--Back", url, 4, "", "", "folder", "", move - 25, name)
    addDir("Home", "", None, "", "", "folder", "", None, "")
    progress.close()
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.getSetting('MAIN'))
def __init__(self, load=None, disable=None, cache_results=False, is_stream=False):
    # Scraper-manager setup: load/enable the scrapers, then refresh the
    # cached debrid-host lists when the cache is older than one day.
    #
    # load:          optional list of scraper names to load
    # disable:       optional list of scraper names to disable
    # cache_results: whether search results should be cached
    # is_stream:     suppresses the scraper progress dialog for streams
    self.threadpool_size = 5
    self.cache_results = cache_results
    self._load_list = load
    self._disable_list = disable
    self.enabled_scrapers = 0
    self.active_scrapers = []
    self.supported_scrapers = []
    self._active_scrapers = []
    self._load_scrapers()
    self._enable_scrapers()
    self.search_results = []
    if is_stream:
        self.show_scraper_progress = False
    else:
        self.show_scraper_progress = ADDON.get_setting(
            'enable_scraper_progress') == 'true'
    self.skip_second_search = True
    expired = True
    self.filters = False
    self.cache_queue = Queue()
    # Optional result filters, loaded from a cache file when enabled.
    if ADDON.get_setting('enable_result_filters') == 'true':
        cache_file = vfs.join(DATA_PATH, 'filters.cache')
        if vfs.exists(cache_file):
            self.filters = ADDON.load_data(cache_file)
    # The debrid host-list cache is considered fresh for 24 hours.
    cache_file = vfs.join(DATA_PATH, 'debrid_hosts.cache')
    if vfs.exists(cache_file):
        timestamp = int(time.time())
        m_time = vfs.get_stat(cache_file).st_mtime()
        if (timestamp - m_time) < 86400:
            expired = False
    if expired:
        # Re-query every supported debrid service.  Each lookup is
        # best-effort: a failing service just leaves its list empty.
        hosts = {"pm": [], "rd": [], "ad": [], "rp": []}
        net = Net()
        try:
            # premiumize.me needs account credentials from urlresolver
            customer_id = xbmcaddon.Addon(
                'script.module.urlresolver').getSetting(
                    'PremiumizeMeResolver_username')
            pin = xbmcaddon.Addon('script.module.urlresolver').getSetting(
                'PremiumizeMeResolver_password')
            query = {
                "method": "hosterlist",
                "params[login]": customer_id,
                "params[pass]": pin
            }
            api_url = "http://api.premiumize.me/pm-api/v1.php?" + urllib.urlencode(
                query)
            response = net.http_GET(api_url).content
            data = json.loads(response)
            if 'result' in data:
                hosts['pm'] = data['result']['hosterlist']
        except:
            pass
        try:
            # real-debrid returns a comma-separated list of quoted names
            response = Net().http_GET(
                'http://real-debrid.com/api/hosters.php').content
            hosts['rd'] = [x.strip('"') for x in response.split(',')]
        except:
            pass
        try:
            # alldebrid uses one quoted name per line
            response = Net().http_GET(
                'http://alldebrid.com/api.php?action=get_host').content
            hosts['ad'] = [x.strip('"') for x in response.split(',\n')]
        except:
            pass
        try:
            # rpnet serves a JSON document with a 'supported' array
            response = Net().http_GET(
                'http://premium.rpnet.biz/hoster2.json').content
            hosts['rp'] = json.loads(response)['supported']
        except:
            pass
        ADDON.save_data(cache_file, hosts)
youtube = re.search( 'src="([http:|https:]*//www.youtube.com/[v|embed]*/[0-9A-Za-z_\-]+).+?"', html) springboard = re.search( 'src="(http://cms.springboardplatform.com/.+?)"', html) if youtube: if youtube.group(1).startswith("//"): youtube_link = 'http:' + youtube.group(1) else: youtube_link = youtube.group(1) stream_url = urlresolver.HostedMediaFile( url=youtube_link).resolve() elif springboard: html = net.http_GET(springboard.group(1)).content stream_url = re.search( '<meta property="og:video" content="(.+?)" />', html).group(1) else: video = re.search( '<embed.+?src="http://[a.]{0,2}blip.tv/[^#/]*[#/]{1}([^"]*)"', html, re.DOTALL).group(1) api_url = APIPath % video links = [] roles = [] tree = parse(urllib.urlopen(api_url))
if html: #Check for youtube video first youtube = re.search('src="([http:]*//www.youtube.com/[v|embed]*/[0-9A-Za-z_\-]+).+?"',html) springboard = re.search('src="(http://cms.springboardplatform.com/.+?)"', html) if youtube: if youtube.group(1).startswith("//"): youtube_link = 'http:' + youtube.group(1) else: youtube_link = youtube.group(1) stream_url = urlresolver.HostedMediaFile(url=youtube_link).resolve() elif springboard: html = net.http_GET(springboard.group(1)).content stream_url = re.search('<meta property="og:video" content="(.+?)" />', html).group(1) else: video = re.search('<embed.+?src="http://[a.]{0,2}blip.tv/[^#/]*[#/]{1}([^"]*)"',html, re.DOTALL).group(1) api_url = APIPath % video links = [] roles = [] tree = parse(urllib.urlopen(api_url)) for media in tree.getiterator('media'): for link in media.getiterator('link'): links.append(link.get('href')) roles.append(media.findtext('role'))
def INDEX(url, name, imdb, move, movegen):
    """Paged movie listing for a genre / "Neu im Programm" (duplicate variant).

    BUG FIX: the progress percentages were computed with Python 2
    integer division, which floors `e / len(...)` to 0 for every
    iteration; multiply by 100 first so the bar actually advances.
    """
    net = Net()
    link = net.http_GET(url).content
    link = link.replace('\r\n', '').replace('"},]', '"}]')
    magic = api.loads(link, encoding='latin1')
    progress = xbmcgui.DialogProgress()
    progress.create('Fortschritt', 'This is a progress bar.')
    genre = []
    neu = []
    sammeln = []
    # newest entries sit at the end of the feed
    for e, i in enumerate(reversed(magic)):
        if name == "Neu im Programm" and e < 27:
            neu.append(i)
    for e, i in enumerate(magic):
        if name in i['genre'].encode('utf-8'):
            genre.append(i)
    for e, i in enumerate(magic):
        if "sammeln" in name:
            sammeln.append(i)
    #----Neu im Programm----#
    for e, i in enumerate(neu):
        if e < len(neu):
            percent = e * 100 // len(neu)  # fixed integer-division bug
            message = str(e) + " von " + str(len(neu)) + " Filmen geladen"
            progress.update(percent, message, "Dies passiert bei noch nie eingelesenen Filmen")
            try:
                imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb = ""
            try:
                sub = re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1), url, 2, i['cover'], imdb, "movie".decode('utf-8'), sub.group(2), None, "")
            except:
                addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    #----GENRES die Filme----#
    for e, i in enumerate(sorted(genre, key=lambda genre: genre['titel'])):
        if move <= e < move + 25:
            if e - move < move + 25:
                percent = (e - move) * 100 // 25  # fixed integer-division bug
                message = "FilmInfo des " + str(e - move) + ". von 25 Filmen geladen"
                progress.update(percent, message, "Dies passiert bei noch nie eingelesenen Filmen")
                try:
                    imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
                except:
                    imdb = ""
                try:
                    sub = re.search("(.*?)\((\d+)\)", i['titel'])
                    addDir(sub.group(1), url, 2, i['cover'], imdb, "movie".decode('utf-8'), sub.group(2), None, "")
                except:
                    addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    #----FilmInfo von allen Filmen Sammeln fuer die Datenbank----#
    for e, i in enumerate(sorted(sammeln, key=lambda sammeln: sammeln['titel'])):
        if e < len(sammeln):
            percent = e * 100 // len(sammeln)  # fixed integer-division bug
            message = "FilmInfo des " + str(e) + ". von " + str(len(sammeln)) + " Filmen geladen"
            progress.update(percent, message, "Dies passiert bei noch nie eingelesenen Filmen")
            try:
                imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb = ""
            try:
                sub = re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1), url, 2, i['cover'], imdb, "movie".decode('utf-8'), sub.group(2), None, "")
            except:
                addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    #----SEITENNAVIGATION----#
    if len(genre) > 0:
        if move != None and move == 0 and len(genre) > 25:
            print("<<<----OLDU-1---->>>")
            addDir("Next-->>", url, 4, "", "", "folder", "", move + 25, name)
        if move != None and move != 0 and move + 25 <= len(genre) and len(genre) - move > 0:
            print("<<<----OLDU-2---->>>")
            addDir("Next-->>", url, 4, "", "", "folder", "", move + 25, name)
            addDir("<<--Back", url, 4, "", "", "folder", "", move - 25, name)
        if move + 25 >= len(genre) and move != 0:
            print("<<<----OLDU-3---->>>")
            addDir("<<--Back", url, 4, "", "", "folder", "", move - 25, name)
    addDir("Home", "", None, "", "", "folder", "", None, "")
    progress.close()
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.getSetting('MAIN'))