def getChannels(page): x = [] logger.debug("page: " + page) if (str(page) == "0"): html = Peliculasbiz.getContentFromUrl(Peliculasbiz.MAIN_URL) menuHtml = Decoder.extract('<ul class="clearfix">', '</ul>', html) for itemHtml in menuHtml.split("<li>"): if "href=" in itemHtml: item = {} item["title"] = Decoder.extract('">', '<', itemHtml) item["link"] = Decoder.extract('href="', '"', itemHtml) x.append(item) search = {} search["title"] = XBMCUtils.getString(11018) search["link"] = ".search" x.append(search) elif page == '.search': #display keyboard, it will wait for result keyboard = XBMCUtils.getKeyboard() keyboard.doModal() text = "" if (keyboard.isConfirmed()): text = keyboard.getText() x = Peliculasbiz.search(text) elif str(page).isdigit(): logger.debug("numeric detected...") link = Peliculasbiz.extractProviderFromLink(page) element = {} element["link"] = link x.append(element) else: x = Peliculasbiz.extractProvidersFromLink(page) return x
def extractIframe(html, referer):
    iframeUrl = ''
    if '<iframe frameborder="0" marginheight="0" allowfullscreen="true" marginwidth="0" height="555" src="' in html:
        iframeUrl = Decoder.extract(
            '<iframe frameborder="0" marginheight="0" allowfullscreen="true" marginwidth="0" height="555" src="',
            '"', html)
    elif '<iframe frameborder="0" marginheight="0" marginwidth="0" height="490" src="' in html:
        iframeUrl = Decoder.extract(
            '<iframe frameborder="0" marginheight="0" marginwidth="0" height="490" src="',
            '"', html)
    if "'" in iframeUrl:
        iframeUrl = iframeUrl[0:iframeUrl.find("'")]
    if "/update/" in iframeUrl:
        # walk all urls and replace the iframe url with one that does not contain /update/
        for content in html.split(".php"):
            link = content[content.rfind('"') + 1:] + ".php"
            if "http://" in link and "/update/" not in link and "/update/" in iframeUrl:
                logger.debug("link has been updated from: " + iframeUrl + ", to: " + link)
                iframeUrl = link
    logger.debug("level 1, iframeUrl: " + iframeUrl + ", cookie: " + Cricfreetv.cookie)
    if iframeUrl != '':
        html = Cricfreetv.getContentFromUrl(iframeUrl, "", Cricfreetv.cookie, referer)
    file = Cricfreetv.seekIframeScript(html, referer, iframeUrl)
    item = {}
    item["title"] = referer
    item["link"] = file
    return item
def getChannels(page):
    x = []
    if str(page) == '0':
        page = Live9net.MAIN_URL
    html = Live9net.getContentFromUrl(page, "", Live9net.cookie, "")
    #print html
    if html.find('ESPN</') > -1:  # it's a list, needs decode
        table = Decoder.extract('ESPN</', '<div>', html)
        x = Live9net.extractElements(table)
        logger.debug("live9 channels logic done!")
    else:
        iframeUrl = Decoder.extract('src="', '"', html)
        logger.debug("iframe url is: " + iframeUrl)
        html2 = Live9net.getContentFromUrl(iframeUrl, "", Live9net.cookie, page)
        logger.debug("detecting sawlive links...")
        if html2.find('src="http://sawlive.tv/') > -1 or html2.find('src="http://www3.sawlive') > -1:
            logger.debug("Detected sawlive link!")
            if html2.find('src="http://sawlive.tv/') > -1:
                scriptSrc = Decoder.extractWithRegex('http://sawlive', '"></script>', html2).replace('"></script>', "")
            else:
                scriptSrc = Decoder.extractWithRegex('http://www3.sawlive', '"></script>', html2).replace('"></script>', "")
            finalRtmpUrl = Decoder.extractSawlive(scriptSrc, iframeUrl)
            element = {}
            element["link"] = finalRtmpUrl
            element["title"] = "Watch channel"
            element["permalink"] = True
            logger.debug("finished append element!")
            x.append(element)
    return x
def drawCNNNews(url):
    x = CNN.getChannels(url)
    if str(url) == '0':
        level = 4
        for new in x:
            img = icon
            if new.has_key("thumbnail"):
                img = new["thumbnail"]
            add_dir(new["title"], new["link"], level, img, "editioncnn", new["link"])
    else:
        body = x[0]["title"]
        logger.debug("body is: " + body)
        # clean bad html
        body = body.replace('<h4 class="video__end-slate__tertiary-title">MUST WATCH</h4>', "")
        body = body.replace('<div class="video__end-slate__engage__more"><a href="/videos" class="video__end-slate__replay-text">More Videos ...</a></div>', "")
        body = body.replace('<h3 class="cd__headline-title">JUST WATCHED</h3>', "")
        body = body.replace(".", ". \n")
        logger.debug("drawing new...")
        drawNew(textContent=(body))
def searchLists(param): url = "https://www.googleapis.com/customsearch/v1element?" \ "key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY" \ "&rsz=filtered_cse" \ "&num=20" \ "&hl=en" \ "&prettyPrint=false" \ "&source=gcsc" \ "&gss=.com" \ "&sig=8bdfc79787aa2b2b1ac464140255872c" \ "&cx=013305635491195529773:0ufpuq-fpt0" url += "&q="+param+"&sort=date&googlehost=www.google.com&callback=google.search.Search.apiary846" results = Pastebin.getContentFromUrl(url) x = [] jsonString = Decoder.extract(',"results":',']});',results) logger.debug(jsonString) for jsonResult in results.split('{"GsearchResultClass"'): element = {} link = Decoder.extract('"url":"','","',jsonResult) if "pastebin.com" in link and '/raw/' not in link: link = link[:link.rfind('/')]+"/raw/"+link[link.rfind('/')+1:] title = Decoder.extract('"title":"','","titleNoFormatting"',jsonResult) if "http" in link: logger.debug("appending result: "+title+", url: "+link) element["title"] = title element["link"] = link x.append(element) return x
def getChannels():
    x = []
    url = StreamingSports365.HTML_CHANNELS
    logger.debug("html channels url is: " + url)
    bruteResult = StreamingSports365.getContentFromUrl(url=url, referer=StreamingSports365.MAIN_URL)
    logger.debug("html is: " + bruteResult)
    htmlResults = Decoder.extract('<table class="uk-table uk-table-hover uk-table-striped">', "</table>", bruteResult)
    for item in htmlResults.split("<tr>"):
        if "<td>acestream://" in item:
            name = ""
            if '"><strong>' in item:
                name = Decoder.extract('"><strong>', '</strong>', item)
            else:
                name = Decoder.extract('html">', '<', item)
            link = "acestream://" + Decoder.extract('<td>acestream://', '</td>', item)
            logger.info("Added: " + name + ", url: " + link)
            element = {}
            element["title"] = name
            element["link"] = link
            x.append(element)
    return x
def extractElements(table):
    x = []
    for fieldHtml in table.split('<li>'):
        if fieldHtml.find("<a href=") > -1:
            element = {}
            element["link"] = Decoder.extract('<a href="', '"', fieldHtml)
            title = Decoder.extract('alt="', '">', fieldHtml)
            if len(title) == 0 and len(element["link"]) > 0:
                # provider has probably removed the alt content to 'destroy' scripts xD
                link = element["link"]
                title = Decoder.extract("channel/", "/", link)
                logger.debug("alternative title: " + title)
            element["title"] = Decoder.extract('/channel/', '-live', element["link"]).replace("-", " ")
            element["thumbnail"] = Decoder.extract('src="', '" ', fieldHtml)
            logger.debug("found title: " + element["title"] + ", link: " + element["link"] + ", thumbnail: " + element["thumbnail"])
            if len(element["title"]) > 0:
                x.append(element)
    return x
def update():
    # download ZIP file
    start = time.clock()
    localfile = ROOT_DIR + "/update.zip"
    response = urllib2.urlopen(REMOTE_FILE_XML)
    html = response.read()
    remote_file = common.parseDOM(html, "file")[0].encode("utf-8")  # remote version
    downloadtools.downloadfile(remote_file, localfile, notStop=False)
    end = time.clock()
    logger.info("org.harddevelop.kodi.tv Downloaded in %d seconds " % (end - start + 1))
    separatorChar = XBMCUtils.getSeparatorChar()
    # unzip
    unzipper = ziptools.ziptools()
    logger.info("org.harddevelop.kodi.tv destpathname=%s" % ROOT_DIR)
    addons_dir = XBMCUtils.getAddonsDir()
    current_plugin_dir = XBMCUtils.getPathFixedFrom(XBMCUtils.getAddonInfo('path'))
    logger.debug("using dir: " + addons_dir + " to extract content")
    unzipper.extractReplacingMainFolder(localfile, addons_dir, current_plugin_dir)  # github issues
    #unzipper.extract(localfile, ROOT_DIR)
    # clean downloaded zip file
    logger.info("org.harddevelop.kodi.tv clean zip file...")
    os.remove(localfile)
    logger.info("org.harddevelop.kodi.tv clean done!")
def extractSeasons(html, url):
    items = []
    # extract <a href='https://hdfull.me/serie/homeland/temporada-1'>1</a>
    while html.find("<a href='" + url + "/temporada-") > -1:
        item = {}
        aHtml = Decoder.extractWithRegex("<a href='" + url + "/temporada-", "</a>", html)
        html = html[html.find(aHtml) + len(aHtml):]
        item["permalink"] = Decoder.extractWithRegex(url + "/temporada-", "'", aHtml)
        item["permalink"] = item["permalink"][0:item["permalink"].find("'")]
        item["title"] = Decoder.extract('>', '</a>', aHtml)
        logger.debug("found title: " + item["title"] + ", link: " + item["permalink"])
        if item["title"].find('<img class="tooltip" original-title="Temporada ') > -1:
            title = item["title"]
            item["title"] = Decoder.extract('original-title="', '"', title)
            item["thumbnail"] = Decoder.extract('" src="', '" />', title)
            logger.debug("processed title: " + item["title"] + ", thumbnail: " + item["thumbnail"])
        items.append(item)
    return items
def drawSplive(page):
    mode = 4
    jsonChannels = Spliveappcom.getChannels(page)
    image = icon
    for item in jsonChannels:
        try:
            logger.debug("trying splive item...")
            #title = urllib.unquote_plus(item["title"].decode('iso-8859-1', 'ignore'))
            title = item["title"]
            link = item["link"]
            referer = "splive"
            if item.has_key("permaLink"):
                mode = 111
            if item.has_key("referer"):
                referer = item["referer"]
                logger.info("referer is: " + referer)
            if item.has_key("thumbnail"):
                image = item["thumbnail"]
                logger.info("detected img: " + image)
            else:
                image = icon
            add_dir(title, link, mode, image, referer, link)
        except:
            logger.error("Something went wrong with the SPLIVEAPP drawer")
            pass
def extractIframeChannel(contentHtml, referer): logger.debug("proccessing level 2, cookie: " + Cineestrenostv.cookie) iframeUrl2 = "dummy url" if contentHtml.find( '<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="653" height="403" src="' ) > -1: iframeUrl2 = Decoder.extract( '<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="653" height="403" src="', '"></iframe>', contentHtml ).replace( "ñ", "%C3%B1" ) #same case with different width and height: TODO: change to regex!! elif contentHtml.find( '<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="400" src="' ) > -1: iframeUrl2 = Decoder.extract( '<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="400" src="', '"></iframe>', contentHtml).replace( "ñ", "%C3%B1") #normal cases, standar width and height elif '<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" allowfullscreen width="653" height="403" src="' in contentHtml: iframeUrl2 = Decoder.extract( '<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" allowfullscreen width="653" height="403" src="', '"></iframe>', contentHtml).replace( "ñ", "%C3%B1") # normal cases, standar width and height if iframeUrl2 != "dummy url": html3 = Cineestrenostv.getContentFromUrl(iframeUrl2, "", "", referer) return Cineestrenostv.mainLogicExtractIframeChannel( html3, iframeUrl2) else: return Cineestrenostv.mainLogicExtractIframeChannel( contentHtml, referer)
def extractScriptIframeUrl(html, scriptUrl, referer): iframeUrl = "" scriptContent = Cricfreetv.getContentFromUrl(scriptUrl, "", Cricfreetv.cookie, referer) logger.debug("script content is: " + scriptContent) iframeUrl = Decoder.extract('src="', '"', scriptContent) if iframeUrl.find("id='+id+'") > -1: #search id in html id = Decoder.extract("<script type='text/javascript'>id='", "';", html) iframeUrl = iframeUrl[ 0:iframeUrl.find('?id=') + len('?id=')] + id + Cricfreetv.getWidthAndHeightParams( html) + "&stretching=" elif iframeUrl.find("live=") > -1 or '+ fid +' in iframeUrl: if html.find("<script type='text/javascript'>fid='") > -1: id = Decoder.extract("<script type='text/javascript'>fid='", "';", html) elif html.find("<script type='text/javascript'>fid=\"") > -1: id = Decoder.extract("<script type='text/javascript'>fid=\"", "\";", html) else: id = Decoder.extract('<script>fid="', '";', html) if iframeUrl.find("live=") > -1: iframeUrl = iframeUrl[0:iframeUrl.find('?live=') + len( '?live=')] + id + Cricfreetv.getWidthAndHeightParams(html) else: iframeUrl = iframeUrl[ 0:iframeUrl.find('?v=') + len('?v=')] + id + Cricfreetv.getWidthAndHeightParams(html) else: iframeUrl = Decoder.extract("<iframe src='", "' ", scriptContent) return iframeUrl
def getChannels(page):
    x = []
    start = False
    if str(page) == '0':
        start = True
        page = Sports4u.MAIN_URL
    html = Sports4u.getContentFromUrl(page, "", Sports4u.cookie, "")
    #print html
    if start and 'live-channels-list">' in html:  # it's a list, needs decode
        table = Decoder.extract('live-channels-list">', '</li><br>', html)
        logger.debug("using menu table: " + table)
        x = Sports4u.extractElements(table)
        logger.debug("channel list logic done!")
    else:
        iframeUrl = Decoder.extract('<iframe frameborder="0" marginheight="0" marginwidth="0" height="490" ', '"></iframe>', html)
        iframeUrl = Decoder.extract('src="', '"', iframeUrl)
        logger.debug("iframeUrl is: " + iframeUrl)
        html2 = Sports4u.getContentFromUrl(url=iframeUrl, referer=page)
        logger.debug("html is: " + html2)
        file = Cricfreetv.seekIframeScript(html2, page, iframeUrl)
        logger.debug("Finished file logic, obtained file: " + file)
        element = {}
        element["link"] = file
        element["title"] = "Watch streaming"
        x.append(element)
    return x
def extractElements(table): x = [] for fieldHtml in table.split(' href="'): if fieldHtml.find("<li>") > -1 or fieldHtml.find( ' <img src="') > -1: element = {} element["link"] = Zoptvcom.MAIN_URL + fieldHtml[0:fieldHtml. find('"')] if fieldHtml.find(' <img src="') > -1: element["title"] = Decoder.extract("<span>", "</span>", fieldHtml) element["thumbnail"] = Zoptvcom.MAIN_URL + Decoder.extract( '<img src="', '"> <span>', fieldHtml) logger.debug("found thumbnail: " + element["thumbnail"]) else: element["title"] = fieldHtml[fieldHtml.find('">') + 2:].replace( "<li>", "").replace( "</li>", "").replace( "</a>", "").replace( "<a", "" ).rstrip( os.linesep) logger.debug("found title: " + element["title"] + ", link: " + element["link"]) if len(element["title"]) > 0: x.append(element) return x
def extractContentFromLink(page, cookie=''):
    x = []
    html = Elitetorrent.getContentFromUrl(url=page, cookie=cookie, referer=Elitetorrent.MAIN_URL)
    if '<ul class="miniboxs miniboxs-ficha">' in html:
        logger.debug("thumbnails parts...")
        content = Decoder.extract('<ul class="miniboxs miniboxs-ficha">', '</ul>', html)
        i = 0
        for line in content.split("<li>"):
            if i > 0:
                link = Elitetorrent.MAIN_URL + Decoder.extract('<a href="', '"', line)
                img = Elitetorrent.MAIN_URL + "/" + Decoder.extract('<img src="', '"', line)
                title = Decoder.extract(' alt="', '"', line)
                element = {}
                element["link"] = link
                element["thumbnail"] = img
                element["title"] = title
                if len(title) > 0:
                    x.append(element)
            i += 1
    return x
def extractContentFromLink(page):
    x = []
    html = MejorTorrent.getContentFromUrl(url=page, referer=MejorTorrent.MAIN_URL)
    if "<td height='20' width='440' colspan='2'>" in html:
        if "' class='paginar'> << Anterior </a>" in html:
            prevLink = MejorTorrent.MAIN_URL + Decoder.rExtract("<a href='/", "' class='paginar'> << Anterior </a>", html)
            element = {}
            element["link"] = prevLink
            element["title"] = "Anterior"
            x.append(element)
        content = Decoder.extract("<td height='20' width='440' colspan='2'>", "<center><span style='font-size:15px; font-family:arial;'><b>Páginas:</b>", html)
        i = 0
        for line in content.split("<td><div align='justify'><center>"):
            if i > 0:
                img = (MejorTorrent.MAIN_URL + Decoder.extract('<img src="', '"', line)).replace(".com//", ".com/")
                link = (MejorTorrent.MAIN_URL + Decoder.extract('<a href="', '"', line)).replace(".com//", ".com/")
                logger.debug("link is: " + link)
                element = {}
                element["link"] = link
                element["thumbnail"] = img.replace(" ", "%20")
                element["title"] = img[img.rfind("/") + 1:img.find(".jpg")]
                #if len(title)>0:
                x.append(element)
            i += 1
        if "' class='paginar'> Siguiente >> </a>" in html:
            nextLink = MejorTorrent.MAIN_URL + Decoder.rExtract("<a href='/", "' class='paginar'> Siguiente >> </a>", html)
            element = {}
            element["link"] = nextLink
            element["title"] = "Siguiente"
            x.append(element)
    else:
        # download link
        x = MejorTorrent.extractDownloadItem(html)
    return x
def extractProvidersFromLink(page):
    x = []
    html = MejorTorrent.getContentFromUrl(url=page, referer=MejorTorrent.MAIN_URL)
    logger.debug("html is: " + html)
    if '<b>Selecciona los que quieres descargar</b>' in html:
        tableHtml = Decoder.extract('<b>Selecciona los que quieres descargar</b>', '<b>Marcar/Desmarcar Todos</b>', html)
        logger.debug("TABLE HTML IS: " + tableHtml)
        if "<input type='hidden' name='tabla' value='" in html:
            table = Decoder.extract("<input type='hidden' name='tabla' value='", "'", html)
        elif "name='tabla' value='" in html:
            table = Decoder.extract("name='tabla' value='", "'", html)
        logger.debug("table is: " + table)
        i = 0
        for htmlLine in tableHtml.split("<td bgcolor='#C8DAC8' style='border-bottom:1px solid black;'>"):
            if i > 0:
                if '<a ' in htmlLine:
                    text = Decoder.extract("'>", "<", htmlLine)
                else:
                    text = htmlLine[:htmlLine.find("<")]
                logger.debug("target line html is: " + text)
                #text = Decoder.removeHTML(text)
                id = Decoder.extract(" value='", "'", htmlLine)
                link = "http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar&tabla=%s&id=%s&link_bajar=1" % (table, id)
                element = {}
                element["title"] = text
                element["link"] = link
                element["finalLink"] = True
                logger.debug("appending: " + text + ", link: " + link)
                x.append(element)
            i += 1
    else:
        # download
        x = MejorTorrent.extractDownloadItem(html)
    return x
def extractFinalRtmpUrl(url, referer): rtmpUrl = "" html = Mamahdcom.getContentFromUrl(url, "", Mamahdcom.cookie, referer) if 'file:\'' in html: file = Decoder.extract("file:'", '\',', html) rtmp = file[0:file.rfind("/") + 1] playpath = file[file.rfind("/") + 1:] swfUrl = "" secureToken = "SECURET0KEN#yw%.?()@W!" if url.find("hdcast.org") > -1: swfUrl = "http://player.hdcast.org/jws/jwplayer.flash.swf" rtmpUrl = rtmp + " playPath=" + playpath + " swfUrl=" + swfUrl + " pageUrl=" + url + " flashver=WIN/2019,0,0,226 live=true timeout=14 token=" + secureToken logger.debug("built final rtmp link: " + rtmpUrl) elif 'allowtransparency="true" src=' in html: logger.debug("using second way...") secondIframe = Decoder.extract('allowtransparency="true" src=', ' ', html).replace("&", "&") logger.debug("found second way url: " + secondIframe + ", referer: " + url) headers = { "User-Agent": Downloader.USER_AGENT, "Accept-Language": "en-US,en;q=0.8,es-ES;q=0.5,es;q=0.3", "Upgrade-Insecure-Requests": "1", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Referer": url } html2 = Mamahdcom.getContentFromUrl(url=secondIframe, headers=headers) logger.debug("html2 is: " + html2) if 'file:"' in html2: rtmpUrl = Decoder.extract('file:"', '",', html2) logger.debug("using m3u8 for: " + rtmpUrl) return rtmpUrl
def extractProviderFromLink(page, cookie=''):
    html = Elitetorrent.getContentFromUrl(url=page, cookie=cookie, referer=Elitetorrent.MAIN_URL)
    logger.debug("html is: " + html)
    link = "magnet:" + Decoder.extract('<a href="magnet:', '"', html)
    logger.debug("link obtained is: " + link)
    return link
def getWidthAndHeightParams(html): subUrl = "" if html.find("; v_width=") > -1: width = Decoder.extract("; v_width=", ";", html) height = Decoder.extract("; v_height=", ";", html) subUrl = "&vw=" + width + "&vh=" + height logger.debug("width-height subUrl now is: " + subUrl) return subUrl
def getAddonsDir():
    separatorChar = '/'
    if XBMCUtils.isWindowsPlatform():
        logger.debug("Detected Windows system...")
        separatorChar = "\\"
    addons_dir = xbmc.translatePath("special://home" + separatorChar + "addons" + separatorChar)
    return addons_dir
def openMobdro(url, page):
    link = ""  # initialised so a failed decode doesn't raise a NameError below
    try:
        link = base64.decodestring(url)
    except:
        logger.debug("not a valid base64 content...")
        pass
    logger.info("decoded streamgaroo link: " + link)
    open(link, page)
def getChannels(page):
    x = []
    if str(page) == '0':
        html = Acesoplistingin.getContentFromUrl(url=Acesoplistingin.MAIN_URL)
        table = Decoder.extract('<table id="listing" ', '</table></div>', html)
        x = Acesoplistingin.extractElements(table)
    logger.debug("appending " + str(len(x)) + " elements")
    return x
def extractProviderFromLink(page):
    html = TuMejorTorrent.getContentFromUrl(url=page, referer=TuMejorTorrent.MAIN_URL)
    logger.debug("html is: " + html)
    link = TuMejorTorrent.MAIN_URL + "download/" + Decoder.extract('http://tumejorjuego.com/redirect/index.php?link=descargar-torrent/', '/";', html) + ".torrent"
    logger.debug("link obtained is: " + link)
    return link
def getChannels(page):
    x = []
    if str(page) == '0':
        try:
            page = Arenavisionin.MAIN_URL + "guide"
            html = Arenavisionin.getContentFromUrl(page, "", 'beget=begetok; has_js=1', Arenavisionin.MAIN_URL)
            #guideUrl = Decoder.rExtract('"','">EVENTS GUIDE</a></li>',html)
            #html = Arenavisionin.getContentFromUrl(page+guideUrl,"",'beget=begetok; has_js=1',Arenavisionin.MAIN_URL)
        except:
            page = Arenavisionin.MAIN_URL_RU + "guide"
            html = Arenavisionin.getContentFromUrl(page, "", 'beget=begetok; has_js=1', Arenavisionin.MAIN_URL_RU)
            #guideUrl = Decoder.rExtract('"', '">EVENTS GUIDE</a></li>', html)
            #html = Arenavisionin.getContentFromUrl(page+guideUrl, "", 'beget=begetok; has_js=1', Arenavisionin.MAIN_URL_RU)
            pass
        html = Decoder.extract('<table align="center" cellspacing="1" class="auto-style1" style="width: 100%; float: left"><tr><th class="auto-style4" style="width: 190px; height: 39px"><strong>DAY</strong></th>', "</tr></table></div></div></div>", html)
        x = Arenavisionin.extractElements(html)
    else:
        if page.find("-") > -1:
            # show a context menu so the user can decide; if cancelled, use the first one (default action)
            dialog = XBMCUtils.getDialog()
            cmenu = []
            for contextItem in page.split("-"):
                #if len(contextItem)>0:
                cmenu.append(contextItem)
            result = dialog.select(XBMCUtils.getString(11016), cmenu)  # choose
            logger.debug("result was: " + str(result))
            if result is None or result == -1:
                target = page[:page.find("-")]
                page = target
            else:
                logger.debug("user chose " + str(result) + ": " + cmenu[result])
                page = cmenu[result]
            html = Arenavisionin.getContentFromUrl(Arenavisionin.MAIN_URL, "", 'beget=begetok; has_js=1', Arenavisionin.MAIN_URL)
            link = Decoder.rExtract("<a href=\"", "title=\"\">ArenaVision " + page + "</a>", html)
            logger.debug("html is: " + html)
            link = link[:link.find('"')]
            logger.debug("provisional link is: " + link)
        else:
            if "av" not in page:
                page = "av" + page
            link = "http://www.arenavision.us/" + page
        try:
            html = Arenavisionin.getContentFromUrl(link, "", 'beget=begetok; has_js=1', Arenavisionin.MAIN_URL)
            logger.debug("second html is: " + html)
        except:
            logger.error("something went wrong with link: " + link)
            pass
        if html.find("acestream://") > -1:
            link2 = Decoder.extractWithRegex("acestream://", '"', html).replace('"', "")
        else:
            link2 = Decoder.extractWithRegex("sop://", '"', html).replace('"', "")
        element = {}
        element["title"] = page
        element["link"] = link2
        x.append(element)
    return x
def search(text): searchUrl = "q=" + urllib.quote_plus(text) html = TuMejorTorrent.getContentFromUrl( url=TuMejorTorrent.SEARCH, data=searchUrl, referer=TuMejorTorrent.MAIN_URL, launchLocation=True) logger.debug("search html is: " + html) return TuMejorTorrent.extractItems(html)
def drawYoutvgratis(url):
    channels = Youtvgratis.getChannels(url)
    logger.debug("items obtained: " + str(len(channels)))
    level = 119  # stream
    for channel in channels:
        img = ''
        if channel.has_key("thumbnail"):
            img = channel["thumbnail"]
        add_dir(channel["title"], channel["link"], level, img, "youtvgratis", channel["link"])
def drawPastebinCom():
    param = urllib.quote_plus(str(XBMCUtils.getSettingFromContext(sys.argv[1], 'pastebin_param')))
    logger.debug("extracted param to be searched: " + param)
    channels = Pastebin.searchLists(param=param)
    logger.debug("items obtained: " + str(len(channels)))
    level = 1
    for channel in channels:
        add_dir(channel["title"], channel["link"], level, '', "pastebincom", channel["link"])
def drawZonaAppCom():
    channels = ZonaAppCom.getChannelsJSON()
    logger.debug("items obtained: " + str(len(channels)))
    for channel in channels:
        image = ''
        level = 116
        if channel.has_key('thumbnail'):
            image = channel["thumbnail"]
        add_dir(channel["title"], channel["link"], level, image, "zonaappcom", channel["link"])
def openTuMejorTorrent(url, page): logger.info("decoding tumejortorrent link... " + url) link = url if ".torrent" not in url: link = TuMejorTorrent.getChannels(url)[0]["link"] logger.debug("provisional link is: " + link) if ".torrent" not in link: link = TuMejorTorrent.getChannels(link)[0]["link"] logger.info("decoded two times tumejortorrent link: " + link) logger.info("decoded tumejortorrent link: " + link) open(link, page)