def play(params,url,category): logger.info("[peliculasid.py] play") title = unicode( xbmc.getInfoLabel( "ListItem.Title" ), "utf-8" ) thumbnail = urllib.unquote_plus( params.get("thumbnail") ) try: plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" ) except: plot = xbmc.getInfoLabel( "ListItem.Plot" ) server = params["server"] if "|" in url: matches = url.split("|") patronvideos = 'file=([^\&]+)\&' c = 0 listdata = [] for match in matches: c += 1 print match data = scrapertools.downloadpageGzip(match) matches2 = re.compile(patronvideos,re.DOTALL).findall(data) listdata.append(["Parte %d" %c,matches2[0]]) url = xmltoplaylist.MakePlaylistFromList(listdata) elif "iframeplayer.php" in url: #"http://peliculasid.net/iframeplayer.php?url=aHR0cDovL3ZpZGVvLmFrLmZhY2Vib29rLmNvbS9jZnMtYWstc25jNC80MjIxNi82MS8xMjgxMTI4ODgxOTUwXzM5NTAwLm1wNA==" data = scrapertools.downloadpageGzip(url) patronvideos = 'file=([^\&]+)\&' matches = re.compile(patronvideos,re.DOTALL).findall(data) if len(matches)>0: url = matches[0] elif "iframevk.php" in url: data = scrapertools.downloadpageGzip(url) patronvideos = '<iframe src="(http://vk[^/]+/video_ext.php[^"]+)"' matches = re.compile(patronvideos,re.DOTALL).findall(data) if len(matches)>0: import vk server = "Directo" url = vk.geturl(matches[0]) elif "iframemv.php" in url: data = scrapertools.downloadpageGzip(url) patronvideos = 'src="http://www.megavideo.com/mv_player.swf\?v\=([^"]+)"' matches = re.compile(patronvideos,re.DOTALL).findall(data) if len(matches)>0: server = "Megavideo" url = matches[0] xbmctools.playvideo(CHANNELNAME,server,url,category,title,thumbnail,plot)
def play(params, url, category): logger.info("[peliculasid.py] play") title = unicode(xbmc.getInfoLabel("ListItem.Title"), "utf-8") thumbnail = urllib.unquote_plus(params.get("thumbnail")) try: plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8") except: plot = xbmc.getInfoLabel("ListItem.Plot") server = params["server"] if "|" in url: matches = url.split("|") patronvideos = 'file=([^\&]+)\&' c = 0 listdata = [] for match in matches: c += 1 print match data = scrapertools.downloadpageGzip(match) matches2 = re.compile(patronvideos, re.DOTALL).findall(data) listdata.append(["Parte %d" % c, matches2[0]]) url = xmltoplaylist.MakePlaylistFromList(listdata) elif "iframeplayer.php" in url: #"http://peliculasid.net/iframeplayer.php?url=aHR0cDovL3ZpZGVvLmFrLmZhY2Vib29rLmNvbS9jZnMtYWstc25jNC80MjIxNi82MS8xMjgxMTI4ODgxOTUwXzM5NTAwLm1wNA==" data = scrapertools.downloadpageGzip(url) patronvideos = 'file=([^\&]+)\&' matches = re.compile(patronvideos, re.DOTALL).findall(data) if len(matches) > 0: url = matches[0] elif "iframevk.php" in url: data = scrapertools.downloadpageGzip(url) patronvideos = '<iframe src="(http://vk[^/]+/video_ext.php[^"]+)"' matches = re.compile(patronvideos, re.DOTALL).findall(data) if len(matches) > 0: import vk server = "Directo" url = vk.geturl(matches[0]) elif "iframemv.php" in url: data = scrapertools.downloadpageGzip(url) patronvideos = 'src="http://www.megavideo.com/mv_player.swf\?v\=([^"]+)"' matches = re.compile(patronvideos, re.DOTALL).findall(data) if len(matches) > 0: server = "Megavideo" url = matches[0] xbmctools.playvideo(CHANNELNAME, server, url, category, title, thumbnail, plot)
def newlist(params, url, category):
    """Add one XBMC thumbnail folder per movie found on a catalogue page."""
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # One entry per <div class="item">: link, cover image and title.
    patronvideos = '<div class="item">.*?<a href="([^"]+)"[^<]+<img src="([^"]+)".*?<div class="cover boxcaption">[^<]+<h1>([^<]+)</h1>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for link, cover, name in matches:
        scrapedtitle = name
        scrapedurl = urlparse.urljoin(url, link)
        scrapedthumbnail = urlparse.urljoin(url, cover)
        scrapeddescription = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        xbmctools.addthumbnailfolder(CHANNELNAME, scrapedtitle, scrapedurl, scrapedthumbnail, "detail")

    # Window label, sorting off, close directory.
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def MakePlaylistFromXML(xmlurl, title="default"):
    """Download an XML playlist and write it out as a local .pls file.

    Parameters:
        xmlurl -- URL of the XML playlist (expects <title>/<location> pairs).
        title  -- base name for the .pls file; "default" or "" selects the
                  channel's default playlist path.

    Returns (filename, number_of_entries) on success, or "" when the XML
    contains no entries.
    """
    logger.info("[%s.py] MakePlaylistFromXML" % CHANNELNAME)

    # BUG FIX: the original condition `title == ("default" or "")` only
    # compared against "default", because ("default" or "") evaluates to
    # "default"; an empty title therefore never selected the default path.
    if title in ("default", ""):
        nombrefichero = FULL_FILENAME_PATH_XML
    else:
        nombrefichero = os.path.join(downloadtools.getDownloadPath(), title + ".pls")

    xmldata = scrapertools.downloadpageGzip(xmlurl)
    patron = '<title>([^<]+)</title>.*?<location>([^<]+)</location>'
    matches = re.compile(patron, re.DOTALL).findall(xmldata)
    if len(matches) > 0:
        # Write a [playlist] (PLS) file with FileN/TitleN pairs.
        playlistFile = open(nombrefichero, "w")
        try:
            playlistFile.write("[playlist]\n")
            playlistFile.write("\n")
            c = 0
            for entrytitle, location in matches:
                c += 1
                playlistFile.write("File%d=%s\n" % (c, location))
                playlistFile.write("Title%d=%s\n" % (c, entrytitle))
                playlistFile.write("\n")
            playlistFile.write("NumberOfEntries=%d\n" % c)
            playlistFile.write("Version=2\n")
            playlistFile.flush()
        finally:
            # Ensure the handle is released even if a write fails.
            playlistFile.close()
        return nombrefichero, c
    else:
        return ""
def play(params, url, category):
    """Extract the direct video URL from a player page and play it."""
    logger.info("[animeid.py] play")

    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")
    server = params["server"]
    scrapedurl = ""

    # Fetch the page that embeds the player.
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # First try the flashvars passed through so.addParam(...).
    found = re.compile("so.addParam\('flashvars','\&file=([^\&]+)\&", re.DOTALL).findall(data)
    if len(found) > 0:
        scrapedurl = found[0]
        server = "Directo"
    else:
        # Fall back to the <param name="flashvars"> variant.
        found = re.compile('<param name="flashvars" value="file=([^\&]+)&', re.DOTALL).findall(data)
        if len(found) > 0:
            scrapedurl = found[0]
            server = "Directo"

    xbmctools.playvideo(CHANNELNAME, server, scrapedurl, category, title, thumbnail, plot)
def newlist(item):
    """Return one folder Item per movie found on the listing page."""
    page = scrapertools.downloadpageGzip(item.url)
    #logger.info(page)

    # Each entry: link, cover image and title inside <div class="item">.
    pattern = '<div class="item">.*?<a href="([^"]+)"[^<]+<img src="([^"]+)".*?<div class="cover boxcaption">[^<]+<h1>([^<]+)</h1>'
    entries = re.compile(pattern, re.DOTALL).findall(page)
    scrapertools.printMatches(entries)

    itemlist = []
    for link, cover, name in entries:
        folder_url = urlparse.urljoin(item.url, link)
        folder_thumb = urlparse.urljoin(item.url, cover)
        if (DEBUG):
            logger.info("title=[" + name + "], url=[" + folder_url + "], thumbnail=[" + folder_thumb + "]")
        itemlist.append(
            Item(channel=CHANNELNAME, action="detail", title=name, url=folder_url,
                 thumbnail=folder_thumb, plot=""))
    return itemlist
def catlist(item):
    """Return one folder Item per category link found on the page."""
    page = scrapertools.downloadpageGzip(item.url)
    #logger.info(page)

    # Category anchors look like:
    # <a class="accion linkFader" href="../accion-1.html"></a>
    entries = re.compile('<a class="([^"]+)" href="([^"]+)"></a>', re.DOTALL).findall(page)
    scrapertools.printMatches(entries)

    itemlist = []
    for css_class, link in entries:
        # The category name is the class attribute minus the "linkFader" marker.
        name = css_class.replace("linkFader", "").strip()
        cat_url = urlparse.urljoin(item.url, link)
        if (DEBUG):
            logger.info("title=[" + name + "], url=[" + cat_url + "], thumbnail=[]")
        itemlist.append(
            Item(channel=CHANNELNAME, action="newlist", title=name, url=cat_url,
                 thumbnail="", plot=""))
    return itemlist
def catlist(params, url, category):
    """Add one XBMC folder per category link found on the page."""
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Category anchors: <a class="accion linkFader" href="../accion-1.html"></a>
    patronvideos = '<a class="([^"]+)" href="([^"]+)"></a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for css_class, link in matches:
        # Category name is the class attribute without the "linkFader" marker.
        scrapedtitle = css_class.replace("linkFader", "").strip()
        scrapedurl = urlparse.urljoin(url, link)
        scrapedthumbnail = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        xbmctools.addthumbnailfolder(CHANNELNAME, scrapedtitle, scrapedurl, scrapedthumbnail, "newlist")

    # Window label, sorting off, close directory.
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def fulllist(item):
    """Return one folder Item per entry of the full alphabetical index."""
    page = scrapertools.downloadpageGzip(item.url)
    #logger.info(page)

    entries = re.compile('<li><a href="([^"]+)"><span>([^<]+)</span></a></li>', re.DOTALL).findall(page)
    scrapertools.printMatches(entries)

    itemlist = []
    for link, name in entries:
        full_url = urlparse.urljoin(item.url, link)
        if (DEBUG):
            logger.info("title=[" + name + "], url=[" + full_url + "], thumbnail=[]")
        itemlist.append(
            Item(channel=CHANNELNAME, action="detail", title=name, url=full_url,
                 thumbnail="", plot=""))
    return itemlist
def MakePlaylistFromXML(xmlurl, title="default"):
    """Fetch an XML playlist (<title>/<location> pairs) and save it as a
    local .pls file.

    Returns (path, entry_count), or "" when no entries were found.
    """
    logger.info("[%s.py] MakePlaylistFromXML" % CHANNELNAME)

    # BUG FIX: `title == ("default" or "")` only ever compared against
    # "default" — ("default" or "") evaluates to "default" — so an empty
    # title never used the default playlist path. Use a membership test.
    if title in ("default", ""):
        nombrefichero = FULL_FILENAME_PATH_XML
    else:
        nombrefichero = os.path.join(downloadtools.getDownloadPath(), title + ".pls")

    xmldata = scrapertools.downloadpageGzip(xmlurl)
    matches = re.compile('<title>([^<]+)</title>.*?<location>([^<]+)</location>', re.DOTALL).findall(xmldata)
    if len(matches) == 0:
        return ""

    # Write a [playlist] (PLS) file with FileN/TitleN pairs.
    playlistFile = open(nombrefichero, "w")
    try:
        playlistFile.write("[playlist]\n")
        playlistFile.write("\n")
        c = 0
        for entry_title, entry_location in matches:
            c += 1
            playlistFile.write("File%d=%s\n" % (c, entry_location))
            playlistFile.write("Title%d=%s\n" % (c, entry_title))
            playlistFile.write("\n")
        playlistFile.write("NumberOfEntries=%d\n" % c)
        playlistFile.write("Version=2\n")
        playlistFile.flush()
    finally:
        # Release the handle even if a write fails.
        playlistFile.close()
    return nombrefichero, c
def detail2(params, url, category):
    """List the playable mirrors (direct link and Megavideo) for a chapter."""
    logger.info("[animeid.py] detail2")

    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")
    scrapedurl = ""

    # Page that embeds the player.
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Direct link advertised in the flashvars (file=...&).
    found = re.compile('file=([^\&]+)\&', re.DOTALL).findall(data)
    if len(found) > 0:
        scrapedurl = found[0]
        server = 'Directo'
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
        xbmctools.addnewvideo(CHANNELNAME, "play2", category, server,
                              title + " - [%s]" % server, scrapedurl, thumbnail, plot)

    # Megavideo mirror (8-character video id).
    found = re.compile('http://[^\.]+.megavideo.com[^\?]+\?v=([A-Z0-9a-z]{8})', re.DOTALL).findall(data)
    if len(found) > 0:
        scrapedurl = found[0]
        server = 'Megavideo'
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
        xbmctools.addnewvideo(CHANNELNAME, "play2", category, server,
                              title + " - [%s]" % server, scrapedurl, thumbnail, plot)

    # Window label, sorting off, close directory.
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def detail2(params, url, category):
    """List every mirror (multi-part direct links and Megavideo) for a chapter."""
    logger.info("[animeid.py] detail2")

    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = xbmc.getInfoLabel("ListItem.Plot")
    scrapedurl = ""

    # Page that embeds the player.
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Direct links passed as flashvars: file=...& or video_src=...&
    found = re.compile('(?:file=|video_src=)([^\&]+)\&', re.DOTALL).findall(data)
    if len(found) > 0:
        import urlparse
        c = 0
        scrapedurl = ""
        for candidate in found:
            parsedurl = urlparse.urlparse(candidate)
            # Second-level domain of the host, used to number consecutive
            # parts served from the same site.
            hostparts = parsedurl[1].split(".")
            sld = hostparts[len(hostparts) - 2]
            if sld in scrapedurl:
                c += 1
            else:
                c = 1
            scrapedurl = candidate
            server = 'Directo'
            if (DEBUG):
                logger.info("title=[" + title + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
            xbmctools.addnewvideo(CHANNELNAME, "play2", category, server,
                                  title + " - parte %d [%s] [%s]" % (c, parsedurl[1], server),
                                  scrapedurl, thumbnail, plot)

    # Facebook links (disabled in the original source):
    #patronvideos = '(http://www.facebook.com/v/[^"]+)"'
    #matches = re.compile(patronvideos,re.DOTALL).findall(data)
    #... xbmctools.addnewvideo(... " - parte %d [FACEBOOK] [%s]" ...)

    # Megavideo mirror (8-character video id).
    found = re.compile('http://[^\.]+.megavideo.com[^\?]+\?v=([A-Z0-9a-z]{8})', re.DOTALL).findall(data)
    if len(found) > 0:
        scrapedurl = found[0]
        server = 'Megavideo'
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
        xbmctools.addnewvideo(CHANNELNAME, "play2", category, server,
                              title + " - [%s]" % server, scrapedurl, thumbnail, plot)

    # Window label, sorting off, close directory.
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def ListadoCapitulosSeries(params, url, category):
    """List the chapters of a series, resolving each one's embedded iframe."""
    logger.info("[pelisflv.py] ListadoCapitulosSeries")

    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))

    # Download the series page.
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Keep only the post body, then pull the chapter links out of it.
    patron = "<div class='post-body entry-content'>(.*?)<div class='post-footer'>"
    bodies = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(bodies)

    patron = '<a href="([^"]+)"[^>]+><[^>]+>(.*?)<'
    chapters = re.compile(patron, re.DOTALL).findall(bodies[0])
    scrapertools.printMatches(chapters)

    iframe_patron = '<iframe src="([^"]+)"'
    for chapter_url, chapter_title in chapters:
        # Every chapter page embeds the real player inside an iframe.
        chapter_page = scrapertools.downloadpageGzip(chapter_url)
        iframes = re.compile(iframe_patron, re.DOTALL).findall(chapter_page)
        scrapertools.printMatches(iframes)
        scrapedurl = iframes[0]
        if (DEBUG):
            logger.info("title=[" + chapter_title + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
        xbmctools.addnewfolder(CHANNELNAME, "detail", category, chapter_title, scrapedurl, thumbnail, "")

    # Window label, sorting off, close directory.
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def detail2(item):
    """Build the list of playable mirrors (multi-part direct links and
    Megavideo) for a chapter page.

    Returns a list of playable Items (folder=False).
    """
    logger.info("[animeid.py] detail2")
    url = item.url
    title = item.title
    thumbnail = item.thumbnail
    plot = item.plot
    itemlist = []
    scrapedurl = ""

    # Page that embeds the player.
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Direct links passed as flashvars: file=...& or video_src=...&
    patronvideos = '(?:file=|video_src=)([^\&]+)\&'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        import urlparse
        c = 0
        scrapedurl = ""
        for match in matches:
            parsedurl = urlparse.urlparse(match)
            # Second-level domain of the host, used to number consecutive
            # parts served from the same site.
            parsedurl2 = parsedurl[1].split(".")
            parsedurl3 = parsedurl2[len(parsedurl2) - 2]
            if parsedurl3 in scrapedurl:
                c += 1
            else:
                c = 1
            scrapedurl = match
            server = 'Directo'
            scrapedtitle = title + " - parte %d [%s] [%s]" % (c, parsedurl[1], server)
            if (DEBUG):
                logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
            itemlist.append(Item(channel=CHANNELNAME, action="play", title=scrapedtitle, url=scrapedurl,
                                 thumbnail=thumbnail, plot=plot, server=server, folder=False))

    # Megavideo mirror (8-character video id).
    patronvideos = 'http://[^\.]+.megavideo.com[^\?]+\?v=([A-Z0-9a-z]{8})'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = matches[0]
        # BUG FIX: the original built the title BEFORE assigning
        # server = 'Megavideo', so the label showed the previous server
        # ('Directo') — or raised NameError when no direct link was found.
        server = 'Megavideo'
        scrapedtitle = title + " - [%s]" % server
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(Item(channel=CHANNELNAME, action="play", title=scrapedtitle, url=scrapedurl,
                             thumbnail=thumbnail, plot=plot, server=server, folder=False))
    return itemlist
def playmega(params, url, category):
    """Resolve the video behind a player page with servertools and play it."""
    logger.info("[animeid.py] play")

    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")
    server = params["server"]

    logger.info("[animeid.py] url=" + url)
    # Normalise www.animeid.com links to the bare domain.
    if url.startswith("http://www.animeid.com"):
        url = "http://animeid.com" + url[22:]
    logger.info("[animeid.py] url=" + url)

    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Let servertools identify any known video hoster on the page; play the
    # first hit (tuple used here as: [1]=url, [2]=server).
    listavideos = servertools.findvideos(data)
    if len(listavideos) > 0:
        video = listavideos[0]
        xbmctools.playvideo(CHANNELNAME, video[2], video[1], category, title, thumbnail, plot)
def newlist(item):
    """Scrape the movie entries of a listing page into folder Items."""
    html = scrapertools.downloadpageGzip(item.url)
    #logger.info(html)

    patronvideos = '<div class="item">.*?<a href="([^"]+)"[^<]+<img src="([^"]+)".*?<div class="cover boxcaption">[^<]+<h1>([^<]+)</h1>'
    found = re.compile(patronvideos, re.DOTALL).findall(html)
    scrapertools.printMatches(found)

    itemlist = []
    for entry in found:
        # entry = (relative url, cover image, title)
        name = entry[2]
        target = urlparse.urljoin(item.url, entry[0])
        thumb = urlparse.urljoin(item.url, entry[1])
        if (DEBUG):
            logger.info("title=[" + name + "], url=[" + target + "], thumbnail=[" + thumb + "]")
        itemlist.append(Item(channel=CHANNELNAME, action="detail", title=name,
                             url=target, thumbnail=thumb, plot=""))
    return itemlist
def catlist(item):
    """Scrape the category links of the page into folder Items."""
    html = scrapertools.downloadpageGzip(item.url)
    #logger.info(html)

    # e.g. <a class="accion linkFader" href="../accion-1.html"></a>
    found = re.compile('<a class="([^"]+)" href="([^"]+)"></a>', re.DOTALL).findall(html)
    scrapertools.printMatches(found)

    itemlist = []
    for entry in found:
        # Category name = class attribute minus the "linkFader" marker.
        name = entry[0].replace("linkFader", "").strip()
        target = urlparse.urljoin(item.url, entry[1])
        if (DEBUG):
            logger.info("title=[" + name + "], url=[" + target + "], thumbnail=[]")
        itemlist.append(Item(channel=CHANNELNAME, action="newlist", title=name,
                             url=target, thumbnail="", plot=""))
    return itemlist
def fulllist(item):
    """Scrape the alphabetical index (<li><a><span> entries) into Items."""
    html = scrapertools.downloadpageGzip(item.url)
    #logger.info(html)

    found = re.compile('<li><a href="([^"]+)"><span>([^<]+)</span></a></li>', re.DOTALL).findall(html)
    scrapertools.printMatches(found)

    itemlist = []
    for entry in found:
        name = entry[1]
        target = urlparse.urljoin(item.url, entry[0])
        if (DEBUG):
            logger.info("title=[" + name + "], url=[" + target + "], thumbnail=[]")
        itemlist.append(Item(channel=CHANNELNAME, action="detail", title=name,
                             url=target, thumbnail="", plot=""))
    return itemlist
def listvideos(params, url, category):
    """List the movies on a page and add a 'next page' entry when present."""
    logger.info("[peliculasid.py] listvideos")
    if url == "":
        url = "http://www.peliculasid.net/"

    # Download the page.
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Movie entries: title, link and cover image.
    patronvideos = '<div class="item">[^<]+<h1>([^<]+)</h1>[^<]+'
    patronvideos += '<a href="([^"]+)"><img src="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for name, link, cover in matches:
        scrapedtitle = name.replace('’', "'")
        scrapedurl = link
        # Covers sometimes contain stray spaces.
        scrapedthumbnail = cover.replace(" ", "")
        scrapedplot = ""
        if (DEBUG):
            logger.info("scrapedtitle=" + scrapedtitle)
            logger.info("scrapedurl=" + scrapedurl)
            logger.info("scrapedthumbnail=" + scrapedthumbnail)
        xbmctools.addnewfolder(CHANNELNAME, "detail", category, scrapedtitle, scrapedurl,
                               scrapedthumbnail, scrapedplot)

    # Next-page link.
    patronvideos = '<a href="([^"]+)" class="nextpostslink">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) > 0:
        scrapedtitle = "P�gina siguiente"
        scrapedurl = urlparse.urljoin(url, matches[0])
        xbmctools.addnewfolder(CHANNELNAME, "listvideos", category, scrapedtitle, scrapedurl, "", "")

    # Window label, sorting off, close directory.
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def detail(params, url, category): logger.info("[pelisflv.py] detail") title = urllib.unquote_plus(params.get("title")) thumbnail = urllib.unquote_plus(params.get("thumbnail")) plot = urllib.unquote_plus(params.get("plot")) # Descarga la p�gina datafull = scrapertools.cachePage(url) # logger.info(data) patron = "google_ad_section_start(.*?)google_ad_section_end -->" matches = re.compile(patron, re.DOTALL).findall(datafull) data = matches[0] # ------------------------------------------------------------------------------------ # Busca los enlaces a los videos # ------------------------------------------------------------------------------------ """ listavideos = servertools.findvideos(data) for video in listavideos: videotitle = video[0] url = video[1] server = video[2] xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot ) """ # Busca enlaces en el servidor Stagevu - "el modulo servertools.findvideos() no los encuentra" patronvideos = "'(http://stagevu.com[^']+)'" matches = re.compile(patronvideos, re.DOTALL).findall(data) if len(matches) > 0: logger.info(" Servidor Stagevu") for match in matches: scrapedurl = match.replace("&", "&") xbmctools.addnewvideo( CHANNELNAME, "play", category, "Stagevu", title + " - [Stagevu]", scrapedurl, thumbnail, plot ) # Busca enlaces en el servidor Movshare - "el modulo servertools.findvideos() no los encuentra" patronvideos = "'(http://www.movshare.net[^']+)'" matches = re.compile(patronvideos, re.DOTALL).findall(data) if len(matches) > 0: logger.info(" Servidor Movshare") for match in matches: scrapedurl = match.replace("&", "&") xbmctools.addnewvideo( CHANNELNAME, "play", category, "Movshare", title + " - [Movshare]", scrapedurl, thumbnail, plot ) # ------------------------------------------------------------------------------------ # --- Busca los videos Directos patronvideos = "file=(http\:\/\/[^\&]+)\&" matches = re.compile(patronvideos, 
re.DOTALL).findall(data) scrapertools.printMatches(matches) print "link directos encontrados :%s" % matches print data if len(matches) > 0: for match in matches: subtitle = "[FLV-Directo]" if "xml" in match: data2 = scrapertools.cachePage(match) logger.info("data2=" + data2) patronvideos = "<track>.*?" patronvideos += "<title>([^<]+)</title>[^<]+" patronvideos += "<location>([^<]+)</location>(?:[^<]+" patronvideos += '<meta rel="type">video</meta>[^<]+|[^<]+)' patronvideos += '<meta rel="captions">([^<]+)</meta>[^<]+' patronvideos += "</track>" matches2 = re.compile(patronvideos, re.DOTALL).findall(data2) scrapertools.printMatches(matches) for match2 in matches2: sub = "" playWithSubt = "play" if match2[2].endswith(".xml"): # Subtitulos con formato xml son incompatibles con XBMC sub = "[Subtitulo incompatible con xbmc]" if ".mp4" in match2[1]: subtitle = "[MP4-Directo]" scrapedtitle = "%s - (%s) %s" % (title, match2[0], subtitle) scrapedurl = match2[1].strip() scrapedthumbnail = thumbnail scrapedplot = plot if match2[2].endswith(".srt"): scrapedurl = scrapedurl + "|" + match2[2] playWithSubt = "play2" if DEBUG: logger.info( "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]" ) # A�ade al listado de XBMC xbmctools.addnewvideo( CHANNELNAME, playWithSubt, category, "Directo", scrapedtitle, scrapedurl, scrapedthumbnail, scrapedplot, ) else: if match.endswith(".srt"): scrapedurl = scrapedurl + "|" + match xbmctools.addnewvideo( CHANNELNAME, "play2", category, "Directo", title + " (V.O.S) - " + subtitle, scrapedurl, thumbnail, plot, ) if match.endswith(".xml"): sub = "[Subtitulo incompatible con xbmc]" xbmctools.addnewvideo( CHANNELNAME, "play", category, "Directo", title + " (V.O) - %s %s" % (subtitle, sub), scrapedurl, thumbnail, plot, ) scrapedurl = match print scrapedurl # src="http://pelisflv.net63.net/player/videos.php?x=http://pelisflv.net63.net/player/xmls/The-Lord-Of-The-Ring.xml" patronvideos = 
"(http\:\/\/[^\/]+\/[^\/]+\/[^\/]+\/[^\.]+\.xml)" matches = re.compile(patronvideos, re.DOTALL).findall(data) scrapertools.printMatches(matches) # print data if len(matches) > 0: for match in matches: subtitle = "[FLV-Directo]" data2 = scrapertools.cachePage(match) logger.info("data2=" + data2) patronvideos = "<track>.*?" patronvideos += "<title>([^<]+)</title>.*?" patronvideos += "<location>([^<]+)</location>(?:[^<]+" patronvideos += '<meta rel="captions">([^<]+)</meta>[^<]+' patronvideos += "|([^<]+))</track>" matches2 = re.compile(patronvideos, re.DOTALL).findall(data2) scrapertools.printMatches(matches) for match2 in matches2: sub = "" playWithSubt = "play" if match2[2].endswith(".xml"): # Subtitulos con formato xml son incompatibles con XBMC sub = "[Subtitulo incompatible con xbmc]" if match2[1].endswith(".mp4"): subtitle = "[MP4-Directo]" scrapedtitle = "%s - (%s) %s" % (title, match2[0], subtitle) scrapedurl = match2[1].strip() scrapedthumbnail = thumbnail scrapedplot = plot if match2[2].endswith(".srt"): scrapedurl = scrapedurl + "|" + match2[2] playWithSubt = "play2" if DEBUG: logger.info( "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]" ) # A�ade al listado de XBMC xbmctools.addnewvideo( CHANNELNAME, playWithSubt, category, "Directo", scrapedtitle, scrapedurl, scrapedthumbnail, scrapedplot, ) # Busca enlaces en el servidor Videoweed - "el modulo servertools.findvideos() no los encuentra" patronvideos = '(http\:\/\/[^\.]+\.videoweed.com\/[^"]+)"' matches = re.compile(patronvideos, re.DOTALL).findall(data) scrapertools.printMatches(matches) if len(matches) > 0: logger.info(" Servidor Videoweed") for match in matches: scrapedurl = match.replace("&", "&") xbmctools.addnewvideo( CHANNELNAME, "play", category, "Videoweed", title + " - [Videoweed]", scrapedurl, thumbnail, plot ) # Busca enlaces en el servidor Gigabyteupload # 
http://cdn-2.gigabyteupload.com/files/207bb7b658d5068650ebabaca8ffc52d/vFuriadeTitanes_newg.es.avi patronvideos = '(http\:\/\/[^\.]+\.gigabyteupload.com\/[^"]+)"' matches = re.compile(patronvideos, re.DOTALL).findall(data) scrapertools.printMatches(matches) if len(matches) > 0: logger.info(" Servidor Gigabyteupload") for match in matches: xbmctools.addnewvideo( CHANNELNAME, "play", category, "Gigabyteupload", title + " - [Gigabyteupload]", match, thumbnail, plot ) ## --------------------------------------------------------------------------------------## # Busca enlaces de videos para el servidor vk.com # ## --------------------------------------------------------------------------------------## """ var video_host = '447.gt3.vkadre.ru'; var video_uid = '0'; var video_vtag = '2638f17ddd39-'; var video_no_flv = 0; var video_max_hd = '0'; var video_title = 'newCine.NET+-+neWG.Es+%7C+Chicken+Little'; """ patronvideos = '<iframe src="(http://vk.com/video_ext.php[^"]+)"' matches = re.compile(patronvideos, re.DOTALL).findall(data) if len(matches) > 0: print " encontro VK.COM :%s" % matches[0] data2 = scrapertools.downloadpageGzip(matches[0]) print data2 regexp = re.compile(r"vkid=([^\&]+)\&") match = regexp.search(data2) vkid = "" print "match %s" % str(match) if match is not None: vkid = match.group(1) else: print "no encontro vkid" patron = "var video_host = '([^']+)'.*?" patron += "var video_uid = '([^']+)'.*?" patron += "var video_vtag = '([^']+)'.*?" patron += "var video_no_flv = ([^;]+);.*?" 
patron += "var video_max_hd = '([^']+)'" matches2 = re.compile(patron, re.DOTALL).findall(data2) if len(matches2) > 0: for match in matches2: if match[3].strip() == "0" and match[1] != "0": tipo = "flv" videourl = "%s/u%s/video/%s.%s" % (match[0], match[1], match[2], tipo) xbmctools.addnewvideo( CHANNELNAME, "play", category, "Directo", title + " - " + "[VK] [%s]" % tipo, videourl, thumbnail, plot, ) elif ( match[1] == "0" and vkid != "" ): # http://447.gt3.vkadre.ru/assets/videos/2638f17ddd39-75081019.vk.flv tipo = "flv" videourl = "%s/assets/videos/%s%s.vk.%s" % (match[0], match[1], vkid, tipo) xbmctools.addnewvideo( CHANNELNAME, "play", category, "Directo", title + " - " + "[VK] [%s]" % tipo, videourl, thumbnail, plot, ) else: tipo = "360.mp4" videourl = "%s/u%s/video/%s.%s" % (match[0], match[1], match[2], tipo) xbmctools.addnewvideo( CHANNELNAME, "play", category, "Directo", title + " - " + "[VK] [%s]" % tipo, videourl, thumbnail, plot, ) tipo = "240.mp4" videourl = "%s/u%s/video/%s.%s" % (match[0], match[1], match[2], tipo) xbmctools.addnewvideo( CHANNELNAME, "play", category, "Directo", title + " - " + "[VK] [%s]" % tipo, videourl, thumbnail, plot, ) # Label (top-right)... xbmcplugin.setPluginCategory(handle=pluginhandle, category=category) # Disable sorting... xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE) # End of directory... xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def detail2(item):
    """Return the playable mirrors of a chapter page as a list of Items.

    Finds multi-part direct links (flashvars file=/video_src=) and a
    Megavideo mirror; each hit becomes a playable Item (folder=False).
    """
    logger.info("[animeid.py] detail2")
    url = item.url
    title = item.title
    thumbnail = item.thumbnail
    plot = item.plot
    itemlist = []
    scrapedurl = ""

    # Page that embeds the player.
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    patronvideos = '(?:file=|video_src=)([^\&]+)\&'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        import urlparse
        c = 0
        scrapedurl = ""
        for match in matches:
            parsedurl = urlparse.urlparse(match)
            # Second-level domain, used to number consecutive parts hosted
            # on the same site.
            parsedurl2 = parsedurl[1].split(".")
            parsedurl3 = parsedurl2[len(parsedurl2) - 2]
            if parsedurl3 in scrapedurl:
                c += 1
            else:
                c = 1
            scrapedurl = match
            server = 'Directo'
            scrapedtitle = title + " - parte %d [%s] [%s]" % (c, parsedurl[1], server)
            if (DEBUG):
                logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
            itemlist.append(
                Item(channel=CHANNELNAME, action="play", title=scrapedtitle, url=scrapedurl,
                     thumbnail=thumbnail, plot=plot, server=server, folder=False))

    patronvideos = 'http://[^\.]+.megavideo.com[^\?]+\?v=([A-Z0-9a-z]{8})'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = matches[0]
        # BUG FIX: assign the server BEFORE formatting the label; the
        # original interpolated the stale 'Directo' value (or raised
        # NameError when the first pattern found nothing).
        server = 'Megavideo'
        scrapedtitle = title + " - [%s]" % server
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + scrapedurl + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=CHANNELNAME, action="play", title=scrapedtitle, url=scrapedurl,
                 thumbnail=thumbnail, plot=plot, server=server, folder=False))
    return itemlist
def listvideos(params, url, category):
    """Render one index page of peliculasid.net as an XBMC folder listing.

    Adds one folder per movie found on the page and, when the page links to a
    following one, an extra "next page" folder entry.
    """
    logger.info("[peliculasid.py] listvideos")

    # An empty URL means "start from the site root"
    if url == "":
        url = "http://www.peliculasid.net/"

    # Fetch the index page
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # One tuple per movie: (title, detail-page URL, thumbnail URL)
    patronvideos = '<div class="item">[^<]+<h1>([^<]+)</h1>[^<]+'
    patronvideos += '<a href="([^"]+)"><img src="([^"]+)"'
    #patronvideos += '<div class="cover boxcaption">.*?<h6>([^<]+)</h6>'
    #patronvideos += "<img src='(.*?)'"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
        scrapedtitle = scrapedtitle.replace('’', "'")
        scrapedthumbnail = scrapedthumbnail.replace(" ", "")
        scrapedplot = ""
        if (DEBUG):
            logger.info("scrapedtitle=" + scrapedtitle)
            logger.info("scrapedurl=" + scrapedurl)
            logger.info("scrapedthumbnail=" + scrapedthumbnail)
        # Publish the folder entry to XBMC
        xbmctools.addnewfolder(CHANNELNAME, "detail", category, scrapedtitle, scrapedurl, scrapedthumbnail, scrapedplot)

    # "Next page" marker
    matches = re.compile('<a href="([^"]+)" class="nextpostslink">', re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(url, matches[0])
        xbmctools.addnewfolder(CHANNELNAME, "listvideos", category, "P�gina siguiente", scrapedurl, "", "")

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def detail(item):
    """Return the chapter list of a series page as browsable/playable Items.

    Scrapes the plot, every chapter link (double- and single-quoted markup)
    and, when the page embeds a player directly, a final "Directo" entry.
    """
    logger.info("[animeid.py] detail")
    url = item.url
    title = item.title
    thumbnail = item.thumbnail

    # Fetch the series page
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Plot text
    matches = re.compile('<div class="contenido">.*<p>([^<]+)<', re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    plot = matches[0] if len(matches) > 0 else ""

    # Restrict the search to the chapter-list container when it exists
    container_pattern = ('<div class="contenido-titulo">[^<]+'
                         '<h2>Lista de Capitulos de [^<]+</h2>[^<]+'
                         '</div>[^<]+'
                         '<div class="contenido">(.*?)</div>')
    matches = re.compile(container_pattern, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) > 0:
        data = matches[0]

    itemlist = []

    # Chapter links, double-quoted markup
    matches = re.compile('<a href="([^"]+)"[^>]+>([^<]+)</a>', re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for href, text in matches:
        chapter_url = urlparse.urljoin(url, href)
        if (DEBUG):
            logger.info("title=[" + text + "], url=[" + chapter_url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=CHANNELNAME, action="detail2", title=text, url=chapter_url,
                 thumbnail=thumbnail, plot=plot))

    # Chapter links, single-quoted markup with target=_blank
    matches = re.compile("<a href='([^']+)' target='_blank'>([^<]+)</a>", re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for href, text in matches:
        chapter_url = urlparse.urljoin(url, href)
        if (DEBUG):
            logger.info("title=[" + text + "], url=[" + chapter_url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=CHANNELNAME, action="detail2", title=text, url=chapter_url,
                 thumbnail=thumbnail, plot=plot))

    # Directly embedded player
    matches = re.compile('<param name="flashvars" value="file=([^\&]+)&', re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) > 0:
        itemlist.append(
            Item(channel=CHANNELNAME, action="play", title=title, url=matches[0],
                 thumbnail=thumbnail, plot=plot, server="Directo", folder=False))
    return itemlist
def detail(params, url, category):
    """Scrape a peliculasid.net movie page and publish every playable link.

    Extracts the synopsis, then hunts for videos in several forms: a direct
    flashvars "file=" value (possibly an XML playlist with several parts),
    links recognised by servertools, multi-part iframeplayer links, embedded
    VKontakte players and iframevk/iframemv wrappers. Each finding is added
    to the XBMC directory via xbmctools.addnewvideo.

    Parameters:
        params   -- request dict; "title", "thumbnail" and "plot" are read.
        url      -- movie page URL.
        category -- XBMC category label, forwarded to every entry.
    """
    logger.info("[peliculasid.py] detail")
    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = urllib.unquote_plus(params.get("plot"))

    # Download the movie page
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Synopsis: strip markup, normalise whitespace, then try to recode it
    patrondescrip = '<strong>Sinopsis:</strong><br />(.*?)</p>'
    matches = re.compile(patrondescrip, re.DOTALL).findall(data)
    if len(matches) > 0:
        descripcion = matches[0]
        descripcion = descripcion.replace("<br/>", "")
        descripcion = descripcion.replace("\r", "")
        descripcion = descripcion.replace("\n", " ")
        descripcion = descripcion.replace("\t", " ")
        descripcion = re.sub("<[^>]+>", " ", descripcion)
        #logger.info("descripcion="+descripcion)
        descripcion = acentos(descripcion)
        #logger.info("descripcion="+descripcion)
        # FIX: narrowed the original bare "except:" — only encoding failures
        # should fall back to the raw text.
        try:
            plot = unicode(descripcion, "utf-8").encode("iso-8859-1")
        except UnicodeError:
            plot = descripcion
        plot = scrapertools.unescape(plot.strip())

    # --- Direct videos: flashvars "file=" value
    patronvideos = 'flashvars" value="file=([^\&]+)\&'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        if ("xml" in matches[0]):
            # The file is an XML playlist: offer it whole, then one entry per part
            xbmctools.addnewvideo(CHANNELNAME, "play", category, "xml", "Reproducir todas las partes a la vez", matches[0], thumbnail, plot)
            #data = scrapertools.downloadpageGzip(matches[0])
            req = urllib2.Request(matches[0])
            # FIX: narrowed the original bare "except:" so that only network
            # errors trigger the "not available" alert (a bare except also
            # swallowed KeyboardInterrupt/SystemExit).
            try:
                response = urllib2.urlopen(req)
            except (urllib2.URLError, IOError):
                xbmctools.alertnodisponible()
                return
            data = response.read()
            response.close()
            #logger.info("archivo xml :"+data)
            newpatron = '<title>([^<]+)</title>[^<]+<location>([^<]+)</location>'
            newmatches = re.compile(newpatron, re.DOTALL).findall(data)
            if len(newmatches) > 0:
                for match in newmatches:
                    logger.info(" videos = " + match[1])
                    # rtmpe streams are known not to play in XBMC; warn in the title
                    if match[1].startswith("vid"):
                        subtitle = match[0] + " (rtmpe) no funciona en xbmc"
                    else:
                        subtitle = match[0]
                    xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo", title + " - " + subtitle, match[1], thumbnail, plot)
        else:
            logger.info(" matches = " + matches[0])
            xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo", title, matches[0], thumbnail, plot)

    # Links recognised by the generic server detector
    listavideos = servertools.findvideos(data)
    j = 1
    for video in listavideos:
        videotitle = video[0]
        url = video[1]
        server = video[2]
        xbmctools.addnewvideo(CHANNELNAME, "play", category, server, (title.strip() + " (%d) " + videotitle) % j, url, thumbnail, plot)
        j = j + 1

    # Multi-part iframeplayer links: one entry per part plus a "play all"
    # entry whose URL is the parts joined with "|" (resolved later by play())
    patronvideos = '<a href="(http://peliculasid.net/modulos/iframeplayer.php[^"]+)" target="[^"]+">([^<]+)</a>'
    #patronvideos2 = 'file=([^\&]+)\&'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        urllists = ""
        for match in matches:
            if urllists == "":
                urllists = match[0]
            else:
                urllists = urllists + "|" + match[0]
            #data2 = scrapertools.downloadpageGzip(match[0])
            #matches2 = re.compile(patronvideos2,re.DOTALL).findall(data2)
            xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo", title + " - " + match[1], match[0], thumbnail, plot)
        xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo", "(Reproducir todas las partes a la vez...)", urllists, thumbnail, plot)

    ## --------------------------------------------------------------------------------------##
    #   Embedded VKontakte players                                                            #
    ## --------------------------------------------------------------------------------------##
    #"http://vkontakte.ru/video_ext.php?oid=89710542&id=147003951&hash=28845bd3be717e11&hd=1
    # The player page defines javascript vars such as:
    #   var video_host = 'http://cs12916.vkontakte.ru/'; var video_uid = '87155741';
    #   var video_vtag = 'fc697084d3'; var video_no_flv = 1; var video_max_hd = '1'
    patronvideos = '<iframe src="(http://vk[^/]+/video_ext.php[^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        # CONSISTENCY FIX: the original used bare "print"; log like the rest
        # of the file instead.
        logger.info(" encontro VKontakte.ru :%s" % matches[0])
        data2 = scrapertools.downloadpageGzip(matches[0])
        logger.info(data2)
        patron = "var video_host = '([^']+)'.*?"
        patron += "var video_uid = '([^']+)'.*?"
        patron += "var video_vtag = '([^']+)'.*?"
        patron += "var video_no_flv = ([^;]+);.*?"
        patron += "var video_max_hd = '([^']+)'"
        matches2 = re.compile(patron, re.DOTALL).findall(data2)
        if len(matches2) > 0:  #http://cs12385.vkontakte.ru/u88260894/video/d09802a95b.360.mp4
            for match in matches2:
                # video_no_flv == "0" -> legacy flv URL; otherwise mp4 variants
                if match[3].strip() == "0":
                    tipo = "flv"
                    videourl = "%s/u%s/video/%s.%s" % (match[0], match[1], match[2], tipo)
                    xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo", title + " - " + "[VKONTAKTE] [%s]" % tipo, videourl, thumbnail, plot)
                else:
                    tipo = "360.mp4"
                    videourl = "%s/u%s/video/%s.%s" % (match[0], match[1], match[2], tipo)
                    xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo", title + " - " + "[VKONTAKTE] [%s]" % tipo, videourl, thumbnail, plot)
                    tipo = "240.mp4"
                    videourl = "%s/u%s/video/%s.%s" % (match[0], match[1], match[2], tipo)
                    xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo", title + " - " + "[VKONTAKTE] [%s]" % tipo, videourl, thumbnail, plot)

    # iframevk wrapper (resolved later by play())
    patronvideos = '"(http://peliculasid.net/modulos/iframevk.php[^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        logger.info(" encontro VKontakte.ru :%s" % matches[0])
        xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo", title + " - [VKserver]", matches[0], thumbnail, plot)

    # iframemv wrapper -> Megavideo
    patronvideos = '"(http://peliculasid.net/modulos/iframemv.php[^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        logger.info(" encontro Megavideo :%s" % matches[0])
        xbmctools.addnewvideo(CHANNELNAME, "play", category, "Megavideo", title + " - [Megavideo]", matches[0], thumbnail, plot)

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def detail(item):
    """Build the chapter list for a series page.

    Extracts the plot, then every chapter link (both quoting styles) from the
    chapter-list container, and finally a direct player link if the page
    embeds one.

    Returns a list of Item objects.
    """
    logger.info("[animeid.py] detail")
    url = item.url
    title = item.title
    thumbnail = item.thumbnail
    # Download the series page
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)
    # Extract the plot
    patronvideos = '<div class="contenido">.*<p>([^<]+)<'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    plot = ""
    if len(matches)>0:
        plot=matches[0]
    # Narrow down to the chapter-list container (when present)
    patronvideos = '<div class="contenido-titulo">[^<]+'
    patronvideos += '<h2>Lista de Capitulos de [^<]+</h2>[^<]+'
    patronvideos += '</div>[^<]+'
    patronvideos += '<div class="contenido">(.*?)</div>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)>0:
        data = matches[0]
    # Chapter links written with double quotes -> browsable entries (detail2)
    patronvideos = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url,match[0])
        scrapedthumbnail = thumbnail
        scrapedplot = plot
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=CHANNELNAME, action="detail2" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))
    # Chapter links written with single quotes and target=_blank
    patronvideos = "<a href='([^']+)' target='_blank'>([^<]+)</a>"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url,match[0])
        scrapedthumbnail = thumbnail
        scrapedplot = plot
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=CHANNELNAME, action="detail2" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))
    # Directly embedded player (flashvars "file=" value) -> playable entry
    patronvideos = '<param name="flashvars" value="file=([^\&]+)&'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)>0:
        scrapedurl = matches[0]
        itemlist.append( Item(channel=CHANNELNAME, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, server="Directo", folder=False))
    return itemlist
def detail(params,url,category):
    """Scrape a peliculasid.net movie page and publish every playable link.

    Extracts the synopsis, then hunts for videos in several forms: a direct
    flashvars "file=" value (possibly an XML playlist with several parts),
    links recognised by servertools, multi-part iframeplayer links, embedded
    VKontakte players and iframevk/iframemv wrappers.

    Parameters:
        params   -- request dict; "title", "thumbnail" and "plot" are read.
        url      -- movie page URL.
        category -- XBMC category label, forwarded to every entry.
    """
    logger.info("[peliculasid.py] detail")
    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    plot = urllib.unquote_plus( params.get("plot") )
    # Download the movie page
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)
    # Synopsis: strip markup, normalise whitespace, then try to recode it
    patrondescrip = '<strong>Sinopsis:</strong><br />(.*?)</p>'
    matches = re.compile(patrondescrip,re.DOTALL).findall(data)
    if len(matches)>0:
        descripcion = matches[0]
        descripcion = descripcion.replace("<br/>","")
        descripcion = descripcion.replace("\r","")
        descripcion = descripcion.replace("\n"," ")
        descripcion = descripcion.replace("\t"," ")
        descripcion = re.sub("<[^>]+>"," ",descripcion)
        #logger.info("descripcion="+descripcion)
        descripcion = acentos(descripcion)
        #logger.info("descripcion="+descripcion)
        # NOTE(review): bare except — any failure falls back to the raw text
        try :
            plot = unicode( descripcion, "utf-8" ).encode("iso-8859-1")
        except:
            plot = descripcion
        plot = scrapertools.unescape(plot.strip())
    #--- Direct videos: flashvars "file=" value
    patronvideos = 'flashvars" value="file=([^\&]+)\&'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if len(matches)>0:
        if ("xml" in matches[0]):
            # The file is an XML playlist: offer it whole, then one entry per part
            xbmctools.addnewvideo( CHANNELNAME , "play" , category , "xml" , "Reproducir todas las partes a la vez" , matches[0] , thumbnail , plot )
            #data = scrapertools.downloadpageGzip(matches[0])
            req = urllib2.Request(matches[0])
            # NOTE(review): bare except — swallows every error, not just network ones
            try:
                response = urllib2.urlopen(req)
            except:
                xbmctools.alertnodisponible()
                return
            data=response.read()
            response.close()
            #logger.info("archivo xml :"+data)
            newpatron = '<title>([^<]+)</title>[^<]+<location>([^<]+)</location>'
            newmatches = re.compile(newpatron,re.DOTALL).findall(data)
            if len(newmatches)>0:
                for match in newmatches:
                    logger.info(" videos = "+match[1])
                    # rtmpe streams are known not to play in XBMC; warn in the title
                    if match[1].startswith("vid"):
                        subtitle = match[0] + " (rtmpe) no funciona en xbmc"
                    else:
                        subtitle = match[0]
                    xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title + " - "+subtitle, match[1] , thumbnail , plot )
        else:
            logger.info(" matches = "+matches[0])
            xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title, matches[0] , thumbnail , plot )
    # Links recognised by the generic server detector
    listavideos = servertools.findvideos(data)
    j=1
    for video in listavideos:
        videotitle = video[0]
        url = video[1]
        server = video[2]
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , (title.strip() + " (%d) " + videotitle) % j , url , thumbnail , plot )
        j=j+1
    # Multi-part iframeplayer links: one entry per part plus a "play all"
    # entry whose URL is the parts joined with "|" (resolved later by play())
    patronvideos = '<a href="(http://peliculasid.net/modulos/iframeplayer.php[^"]+)" target="[^"]+">([^<]+)</a>'
    #patronvideos2 = 'file=([^\&]+)\&'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if len(matches)>0:
        urllists = ""
        for match in matches:
            if urllists == "":
                urllists = match[0]
            else:
                urllists = urllists + "|" + match[0]
            #data2 = scrapertools.downloadpageGzip(match[0])
            #matches2 = re.compile(patronvideos2,re.DOTALL).findall(data2)
            xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title+" - "+match[1], match[0] , thumbnail , plot )
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , "(Reproducir todas las partes a la vez...)", urllists , thumbnail , plot )
    ## --------------------------------------------------------------------------------------##
    #   Embedded VKontakte players                                                            #
    ## --------------------------------------------------------------------------------------##
    #"http://vkontakte.ru/video_ext.php?oid=89710542&id=147003951&hash=28845bd3be717e11&hd=1
    '''
    var video_host = 'http://cs12916.vkontakte.ru/';
    var video_uid = '87155741';
    var video_vtag = 'fc697084d3';
    var video_no_flv = 1;
    var video_max_hd = '1'
    '''
    patronvideos = '<iframe src="(http://vk[^/]+/video_ext.php[^"]+)"'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if len(matches)>0:
        print " encontro VKontakte.ru :%s" %matches[0]
        data2 = scrapertools.downloadpageGzip(matches[0])
        print data2
        patron  = "var video_host = '([^']+)'.*?"
        patron += "var video_uid = '([^']+)'.*?"
        patron += "var video_vtag = '([^']+)'.*?"
        patron += "var video_no_flv = ([^;]+);.*?"
        patron += "var video_max_hd = '([^']+)'"
        matches2 = re.compile(patron,re.DOTALL).findall(data2)
        if len(matches2)>0: #http://cs12385.vkontakte.ru/u88260894/video/d09802a95b.360.mp4
            for match in matches2:
                # video_no_flv == "0" -> legacy flv URL; otherwise mp4 variants
                if match[3].strip() == "0":
                    tipo = "flv"
                    videourl = "%s/u%s/video/%s.%s" % (match[0],match[1],match[2],tipo)
                    xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title + " - "+"[VKONTAKTE] [%s]" %tipo, videourl , thumbnail , plot )
                else:
                    tipo = "360.mp4"
                    videourl = "%s/u%s/video/%s.%s" % (match[0],match[1],match[2],tipo)
                    xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title + " - "+"[VKONTAKTE] [%s]" %tipo, videourl , thumbnail , plot )
                    tipo = "240.mp4"
                    videourl = "%s/u%s/video/%s.%s" % (match[0],match[1],match[2],tipo)
                    xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title + " - "+"[VKONTAKTE] [%s]" %tipo, videourl , thumbnail , plot )
    # iframevk wrapper (resolved later by play())
    patronvideos = '"(http://peliculasid.net/modulos/iframevk.php[^"]+)"'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if len(matches)>0:
        print " encontro VKontakte.ru :%s" %matches[0]
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title+" - [VKserver]", matches[0] , thumbnail , plot )
    # iframemv wrapper -> Megavideo
    patronvideos = '"(http://peliculasid.net/modulos/iframemv.php[^"]+)"'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if len(matches)>0:
        print " encontro Megavideo :%s" %matches[0]
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Megavideo" , title+" - [Megavideo]", matches[0] , thumbnail , plot )
    # Label (top-right)...
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def detail(params,url,category):
    """List the chapters of a series page and add them to the XBMC directory.

    Extracts the plot, every chapter link (double-quoted links become
    browsable "detail2" folders, single-quoted target=_blank links become
    direct video entries) and, when the page embeds a player, a final
    "playdirecto" entry.

    Parameters:
        params   -- request dict; "title" and "thumbnail" are read.
        url      -- series page URL.
        category -- XBMC category label, forwarded to every entry.
    """
    logger.info("[animeid.py] detail")
    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    # Download the series page
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)
    # Extract the plot
    patronvideos = '<div class="contenido">.*<p>([^<]+)<'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    plot = ""
    if len(matches)>0:
        plot=matches[0]
    # Narrow down to the chapter-list container (when present)
    patronvideos = '<div class="contenido-titulo">[^<]+'
    patronvideos += '<h2>Lista de Capitulos de [^<]+</h2>[^<]+'
    patronvideos += '</div>[^<]+'
    patronvideos += '<div class="contenido">(.*?)</div>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)>0:
        data = matches[0]
    # Chapter links written with double quotes -> folder entries (detail2)
    patronvideos = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url,match[0])
        scrapedthumbnail = thumbnail
        scrapedplot = plot
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        # Add the folder entry to XBMC
        xbmctools.addnewfolder( CHANNELNAME , "detail2" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
    # Chapter links written with single quotes and target=_blank -> direct videos
    patronvideos = "<a href='([^']+)' target='_blank'>([^<]+)</a>"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url,match[0])
        scrapedthumbnail = thumbnail
        scrapedplot = plot
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        # Add the video entry to XBMC
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
    # Directly embedded player (flashvars "file=" value)
    patronvideos = '<param name="flashvars" value="file=([^\&]+)&'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)>0:
        scrapedurl = matches[0]
        xbmctools.addnewvideo( CHANNELNAME , "playdirecto" , category , "Directo" , title , scrapedurl , thumbnail, plot )
    # Disabled legacy Megavideo extraction, kept for reference
    '''
    patronvideos = '<div align="center"><a href="([^"]+)" target="_blank"><img src="([^"]+)" border="0">'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for match in matches:
        # Titulo
        scrapedtitle = match[0]
        # URL
        scrapedurl = urlparse.urljoin(url,match[0])
        # Thumbnail
        scrapedthumbnail = urlparse.urljoin(url,match[1])
        # Argumento
        scrapedplot = ""
        # Depuracion
        if (DEBUG):
            logger.info("scrapedtitle="+scrapedtitle)
            logger.info("scrapedurl="+scrapedurl)
            logger.info("scrapedthumbnail="+scrapedthumbnail)
        # Añade al listado de XBMC
        xbmctools.addnewvideo( CHANNELNAME , "playmega" , category , "Megavideo" , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
    '''
    # Label (top-right)...
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    # Disable sorting...
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    # End of directory...
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )