def detail_1(item):
    logger.info("[filesmonster_catalogue.py] detail_1")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Extract the entries (folders)

    #patronvideos ='class="product_link"><a href="([^"]+)" target="_blank".*?<img src="([^"]+)"'
    patronvideos = '<div class="panel-heading">.*?<a href="([^"]+)">([^<]+).*?</a>.*?<img src="([^"]+)"'
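    # Groups: (1) relative post URL, (2) link text, (3) thumbnail path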

    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = item.title + " [generar enlace]"
        scrapedurl = "http://filesmonster.filesdl.net/" + match[0]
        scrapedthumbnail = "http://filesmonster.filesdl.net/" + match[1]
        imagen = ""
        scrapedplot = match[0]
        tipo = match[1]
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        scrapedplot = strip_tags(scrapedplot)
        itemlist.append(
            Item(channel=__channel__,
                 action="detail",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 plot=scrapedplot,
                 folder=True))

    return itemlist
Example 2
def lista(item):
    logger.info("[Pelisxporno.py] lista")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Extract the entries (folders)
    patronvideos ='<div class="thumb">\n.*?<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)".*?\/>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        #scrapedtitle = scrapedtitle.replace("&#8211;","-")
        #scrapedtitle = scrapedtitle.replace("&#8217;","'")
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        #scrapedplot = match[0]  
        #if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        #scrapedplot=strip_tags(scrapedplot)
        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot="" , folder=True) )
 
 
    # Extract the next-page marker
    next_page = re.compile('<a class="page larger" href="([^"]+)">([^"]+)<\/a>',re.DOTALL).findall(data)
    if next_page:
      scrapedurl = next_page[0][0]
      page = next_page[0][1]
      itemlist.append( Item(channel=item.channel, action="lista", title="Página " + page , url=scrapedurl , plot="" , folder=True) )

    return itemlist
Example 3
def lista(item):
    logger.info("[Pelisxporno.py] lista")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Extract the entries (folders)
    patronvideos ='<div class="thumb">\n.*?<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)".*?\/>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="detail", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , folder=True) )

    # Extract the next-page marker
    patronvideos ='<a class="page larger" href="([^"]+)">([^"]+)<\/a>'
    matches2 = re.compile(patronvideos,re.DOTALL).findall(data)

    # Guard against the last page, where there is no "next" link
    if matches2:
        scrapedurl = matches2[0][0]
        page = matches2[0][1]
        itemlist.append( Item(channel=__channel__, action="lista", title="Página " + page , url=scrapedurl , plot="" , folder=True) )

    return itemlist
Example 4
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("pelisalacarta.servers.vidgg get_video_url(page_url='%s')" %
                page_url)

    file = scrapertools.find_single_match(page_url,
                                          'http://vidgg.to/video/([a-z0-9]+)')
    data = scrapertools.cache_page("http://vidgg.to/embed/?id=%s" % file)

    key = scrapertools.find_single_match(data, 'var fkzd="([^"]+)"')
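    # Query the player API with the extracted file id and key; the
    # response is a querystring whose "url" field carries the media URL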
    url = "http://www.vidgg.to/api/player.api.php?file=%s&key=%s&pass=undefined&cid3=undefined&numOfErrors=0&user=undefined&cid2=undefined&cid=undefined" % (
        file, key)

    data = scrapertools.downloadpageGzip(url)
    mediaurl = scrapertools.find_single_match(data, 'url=(.*?)&')
    video_urls = []
    video_urls.append([
        scrapertools.get_filename_from_url(mediaurl)[-4:] + " [vidgg]",
        mediaurl
    ])

    for video_url in video_urls:
        logger.info("[vidgg.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example 5
def play(item):
    logger.info("[beeg.py] play")
    itemlist = []
    '''
    headers=[]
    headers.append( [ "User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:12.0) Gecko/20100101 Firefox/12.0" ] )
    headers.append( [ "Referer","http://beeg.com/" ] )
    headers.append( [ "Accept" , "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" ] )
    headers.append( [ "Accept-Encoding" , "gzip, deflate" ] )
    headers.append( [ "Accept-Language" , "es-es,es;q=0.8,en-us;q=0.5,en;q=0.3" ] )
    headers.append( [ "Connection" , "keep-alive" ] )
    headers.append( [ "Cookie" , "uniqid=xxxxxxxx; firsttime=1336260347; firsttimeref=direct; lasttime=1338080395; pageview=37;" ] )
    '''

    data = scrapertools.downloadpageGzip(item.url)
    if DEBUG: logger.info("data=" + data)

    #'file': 'http://45.video.mystreamservice.com/480p/4014660.mp4',
    #'file': 'http://02.007i.net/480p/4815411.mp4',
    patron = "'file'\: '([^']+)'"
    url = scrapertools.get_match(data, patron) + "?start=0"
    itemlist.append(
        Item(channel=__channel__,
             action="play",
             title=item.title,
             fulltitle=item.fulltitle,
             url=url,
             thumbnail=item.thumbnail,
             plot=item.plot,
             show=item.title,
             server="directo",
             folder=False))

    return itemlist
Example 6
def play(item):
    logger.info("[beeg.py] play")
    itemlist = []

    '''
    headers=[]
    headers.append( [ "User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:12.0) Gecko/20100101 Firefox/12.0" ] )
    headers.append( [ "Referer","http://beeg.com/" ] )
    headers.append( [ "Accept" , "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" ] )
    headers.append( [ "Accept-Encoding" , "gzip, deflate" ] )
    headers.append( [ "Accept-Language" , "es-es,es;q=0.8,en-us;q=0.5,en;q=0.3" ] )
    headers.append( [ "Connection" , "keep-alive" ] )
    headers.append( [ "Cookie" , "uniqid=xxxxxxxx; firsttime=1336260347; firsttimeref=direct; lasttime=1338080395; pageview=37;" ] )
    '''
    
    data = scrapertools.downloadpageGzip(item.url)
    logger.info("data="+data)

    #'file': 'http://45.video.mystreamservice.com/480p/4014660.mp4',
    #'file': 'http://02.007i.net/480p/4815411.mp4',
    patron = "'file'\: '([^']+)'"
    url = scrapertools.get_match(data,patron)+"?start=0"
    itemlist.append( Item(channel=CHANNELNAME, action="play" , title=item.title, fulltitle=item.fulltitle , url=url, thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))

    return itemlist
Example 7
def lista(item):
    logger.info("[xhamster.py] lista")
    itemlist = []
    data = scrapertools.downloadpageGzip(item.url)
    #data = data.replace("\n","")
    #data = data.replace("\t","")

    if item.title == "Gays":
        data = scrapertools.get_match(
            data, '<div class="title">' + item.title +
            '</div>.*?<div class="list">(.*?)<div id="footer">')
    else:
        data = scrapertools.get_match(
            data, '<div class="title">' + item.title +
            '</div>.*?<div class="list">(.*?)<div class="catName">')
    patron = '(<div.*?</div>)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for match in matches:
        data = data.replace(match, "")
    patron = 'href="([^"]+)">(.*?)</a>'
    data = ' '.join(data.split())
    logger.info(data)
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=__channel__,
                 action="videos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 folder=True))

    sorted_itemlist = sorted(itemlist, key=lambda Item: Item.title)
    return sorted_itemlist
Example 8
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("pelisalacarta.servers.playwire url="+page_url)
    
    data = scrapertools.cachePage(page_url)
    data = jsontools.load_json(data)
    f4m = data['content']['media']['f4m']

    video_urls = []
    data = scrapertools.downloadpageGzip(f4m)
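    # Parse the Adobe f4m manifest; each <media> entry is one quality level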

    xml = ET.fromstring(data)
    base_url = xml.find('{http://ns.adobe.com/f4m/1.0}baseURL').text
    for media in xml.findall('{http://ns.adobe.com/f4m/1.0}media'):
        if ".m3u8" in media.get('url'): continue
        media_url = base_url + "/" + media.get('url')
        try:
            height = media.get('height')
            width = media.get('width')
            label = "("+ width + "x" + height + ")"
        except:
            label = ""
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" "+label+" [playwire]",media_url])


    for video_url in video_urls:
        logger.info("pelisalacarta.servers.playwire %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example 9
def lista(item):
    logger.info("[Myhdlibrary.py] lista")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Extract the entries (folders)
    patronvideos ="<div class='post-thumb in-archive size-large'><a href=\"([^']+)\" title=\"([^']+)\".*?><img width.*?src='([^']+)'"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="detail", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , fanart=scrapedthumbnail , plot="" , folder=True) )
 
    # Extract the next-page marker
    patronvideos ="<span class='current'>.*?</span><a href='([^']+)' class='inactive' >([^']+)</a>"
    matches2 = re.compile(patronvideos,re.DOTALL).findall(data)

    # Guard against the last page, where there is no "next" link
    if matches2:
        scrapedurl = matches2[0][0]
        scrapedtitle = matches2[0][1]
        itemlist.append( Item(channel=__channel__, action="lista", title="Página " + scrapedtitle , url=scrapedurl , folder=True) )
 
    return itemlist
def detail_2(item):
    logger.info("[filesmonster_catalogue.py] detail")
    itemlist = []
    numeros=0
    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    # Keep only the listing between the "benefits_of_membership" and "st__t_7914" markers
    data = data.split("benefits_of_membership")
    data_despues = data[1].split("st__t_7914")
    data = data_despues[0]
  
    patronvideos2  = '<img.*?src="([^"]+).*?/>.*?http://filesmonster.com/download.php([^"]+)'
    matches2 = re.compile(patronvideos2,re.DOTALL).findall(data)
    for match2 in matches2:
        scrapedurl2 ="http://filesmonster.com/download.php"+match2[1] 
        scrapedthumbnail=match2[0]
        scrapedplot=match2[0]
        scrapedplot = scrapedplot.strip()

        itemlist.append( Item(channel=__channel__ , action="play" ,  plot=scrapedplot ,  server="filesmonster", title=item.title+" [ver en filesmonster]" ,url=scrapedurl2, thumbnail=scrapedthumbnail,  folder=False))



    patronvideos2  = 'http://filesmonster.com/folders.php(.*?)".(.*?)'
    matches2 = re.compile(patronvideos2,re.DOTALL).findall(data)
    for match2 in matches2:
        scrapedurl2 ="http://filesmonster.com/folders.php"+match2[0] 
        
        itemlist.append( Item(channel=__channel__ , action="detail" ,  plot=data ,  title=item.title+" [abrir carpeta en filesmonster ]" ,url=scrapedurl2, thumbnail=item.thumbnail, folder=True))


    return itemlist
def detail_2(item):
    logger.info("[filesmonster_catalogue.py] detail")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    data=data.split('<span class="filesmonsterdlbutton">Download from Filesmonster</span>')
    data=data[0]
    # Find the download URL
    patronvideos  = 'href="http://filesmonster.com/download.php(.*?)".(.*?)'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)    
    for match2 in matches:
    	url ="http://filesmonster.com/download.php"+match2[0] 
        title = "Archivo %d: %s [filesmonster]" %(len(itemlist)+1, item.fulltitle)
        itemlist.append( Item(channel=item.channel , action="play" ,  server="filesmonster", title=title, fulltitle= item.fulltitle ,url=url, thumbnail=item.thumbnail, folder=False))



    patronvideos  = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for url in matches: 
      if not url == item.url:
        logger.info(url)
        logger.info(item.url)
        title = "Carpeta %d: %s [filesmonster]" %(len(itemlist)+1, item.fulltitle)
        itemlist.append( Item(channel=item.channel , action="detail" ,  title=title, fulltitle= item.fulltitle ,url=url, thumbnail=item.thumbnail, folder=True))


    return itemlist
def detail(item):

    logger.info("[filesmonster_catalogue.py] detail")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    # Find the download URL
    
    titulo=item.title.replace(" [abrir carpeta en filesmonster ]","")
    contador=0
    patronvideos2  = 'http://filesmonster.com/download.php(.*?)".(.*?)'
    matches2 = re.compile(patronvideos2,re.DOTALL).findall(data)
    for match2 in matches2:
        scrapedurl2 ="http://filesmonster.com/download.php"+match2[0] 
        contador=contador+1
        contador_t=str(contador)
       
        itemlist.append( Item(channel=__channel__ , action="play" ,  plot=data ,  server="filesmonster", title="CONTENIDO "+contador_t+": "+titulo+" [ver en filesmonster]" ,url=scrapedurl2, thumbnail=item.thumbnail, folder=False))



    patronvideos2  = 'http://filesmonster.com/folders.php(.*?)".(.*?)'
    matches2 = re.compile(patronvideos2,re.DOTALL).findall(data)
    for match2 in matches2:
        scrapedurl2 ="http://filesmonster.com/folders.php"+match2[0] 
        
        itemlist.append( Item(channel=__channel__ , action="detail" ,  plot=data ,  title=item.title+" [abrir carpeta en filesmonster ]" ,url=scrapedurl2, thumbnail=item.thumbnail, folder=True))


    return itemlist
def detail_1(item):
    logger.info("[filesmonster_catalogue.py] detail_1")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    
    # Extract the entries (folders)

    #patronvideos ='class="product_link"><a href="([^"]+)" target="_blank".*?<img src="([^"]+)"'
    patronvideos = '<div class="panel-heading">.*?<a href="([^"]+)">([^<]+).*?</a>.*?<img src="([^"]+)"'
    
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = item.title + " [generar enlace]"
        scrapedurl = "http://filesmonster.filesdl.net/" + match[0]
        scrapedthumbnail = "http://filesmonster.filesdl.net/" + match[2]
        imagen = ""
        scrapedplot = match[0]  
        tipo = match[1]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        scrapedplot=strip_tags(scrapedplot)
        itemlist.append( Item(channel=__channel__, action="detail", title=scrapedtitle , url=scrapedurl , thumbnail=item.thumbnail , plot=scrapedplot , folder=True) )
      
    return itemlist
Example 14
def lista(item):
    logger.info("[freejav.py] lista")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Extract the entries (folders)
    patronvideos ='<div class="poster">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        imagen = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="lista2", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , fanart=scrapedthumbnail , plot="" , folder=True) )
 
 
  
    # Extract the next-page marker
    data = scrapertools.find_single_match(data,'<div class="pagination">(.*?)<div class="block topic">')
    patronvideos ='<span class="currentpage"><span>.*?</span>.*?<a href="([^"]+)">([^"]+)</a>'
    matches2 = re.compile(patronvideos,re.DOTALL).findall(data)
    
    for match in matches2:
        scrapedurl = match[0]
        scrapedtitle = match[1]
        url = urlparse.urljoin("http://freejav.us/",scrapedurl)
        if (DEBUG): logger.info("url=["+scrapedurl+"]")
        itemlist.append( Item(channel=__channel__, action="lista", title="Página " + scrapedtitle , url=url , plot="" , folder=True) )
 
    return itemlist
Example 15
def videos(item):
    logger.info()
    itemlist = []
    data = scrapertools.downloadpageGzip(item.url)
    patron = '<div class="item-block[^<]+'
    patron += '<div class="inner-block[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
    patron += '<span class="image".*?'
    patron += '<img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumbnail.replace(" ", "%20")
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" +
                     thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 folder=False))
    next_page_url = scrapertools.find_single_match(
        data, "<a href='([^']+)' class=\"next\">NEXT</a>")
    if next_page_url != "":
        url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(
            Item(channel=item.channel,
                 action="videos",
                 title=">> Página siguiente",
                 url=url,
                 folder=True,
                 viewmode="movie"))
    return itemlist
Example 16
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)

    data = scrapertools.cachePage(page_url)
    data = jsontools.load_json(data)
    f4m = data['content']['media']['f4m']

    video_urls = []
    data = scrapertools.downloadpageGzip(f4m)

    xml = ET.fromstring(data)
    base_url = xml.find('{http://ns.adobe.com/f4m/1.0}baseURL').text
    for media in xml.findall('{http://ns.adobe.com/f4m/1.0}media'):
        if ".m3u8" in media.get('url'): continue
        media_url = base_url + "/" + media.get('url')
        try:
            height = media.get('height')
            width = media.get('width')
            label = "(" + width + "x" + height + ")"
        except:
            label = ""
        video_urls.append([
            scrapertools.get_filename_from_url(media_url)[-4:] + " " + label +
            " [playwire]", media_url
        ])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example 17
def detail_2(item):
    logger.info("[filesmonster_catalogue.py] detail")
    itemlist = []
    numeros = 0
    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    # Keep only the listing between the "benefits_of_membership" and "st__t_7914" markers
    data = data.split("benefits_of_membership")
    data_despues = data[1].split("st__t_7914")
    data = data_despues[0]

    patronvideos2 = '<img.*?src="([^"]+).*?/>.*?http://filesmonster.com/download.php([^"]+)'
    matches2 = re.compile(patronvideos2, re.DOTALL).findall(data)
    for match2 in matches2:
        scrapedurl2 = "http://filesmonster.com/download.php" + match2[1]
        scrapedthumbnail = match2[0]
        scrapedplot = match2[0]
        scrapedplot = scrapedplot.strip()

        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 plot=scrapedplot,
                 server="filesmonster",
                 title=item.title + " [ver en filesmonster]",
                 url=scrapedurl2,
                 thumbnail=scrapedthumbnail,
                 folder=False))

    return itemlist
Example 18
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("pelisalacarta.servers.rutube url=" + page_url)

    data = scrapertools.cachePage(page_url)
    if "embed" in page_url:
        link = scrapertools.find_single_match(
            data,
            '<link rel="canonical" href="https://rutube.ru/video/([\da-z]{32})'
        )
        url = "http://rutube.ru/api/play/options/%s/?format=json" % link
        data = scrapertools.cachePage(url)

    data = jsontools.load_json(data)
    m3u8 = data['video_balancer']['m3u8']
    data = scrapertools.downloadpageGzip(m3u8)
    video_urls = []
    mediaurls = scrapertools.find_multiple_matches(data,
                                                   '(http://.*?)\?i=(.*?)_')
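    # Each match is (stream URL, quality label) parsed from the "?i=" parameter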
    for media_url, label in mediaurls:
        video_urls.append([
            scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label +
            ") [rutube]", media_url
        ])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.rutube %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Example 19
def play(item):
    logger.info("pelisalacarta.channels.seriesdanko play (url="+item.url+", server="+item.server+")" )

    # Download the page
    if "seriesdanko" in item.url:
        data = scrapertools.downloadpageGzip(item.url)
    else:
        data = item.url
    return servertools.find_video_items(data=data)
Example 20
def play(item):
    logger.info("[beeg.py] play")
    itemlist = []
    data = scrapertools.downloadpageGzip(item.url)
    if DEBUG: logger.info(data)
    patron = "'file'\: '([^']+)'"
    url = scrapertools.get_match(data,patron)
    itemlist.append( Item(channel=__channel__, action="play" , title=item.title , url=url, thumbnail=item.thumbnail, server="directo", folder=False))

    return itemlist
Example 21
def listaseries(item):
    logger.info("[newpct.py] listaseries")
    itemlist=[]

    data = scrapertools.downloadpageGzip(item.url)
    patron = "<li[^<]+<a href='([^']+)'>.*?<img src='([^']+)'.*?<h3>([^']+)<\/h3>"
    matches = re.compile(patron,re.DOTALL|re.M).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append( Item(channel=__channel__, action="episodios" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, folder=True))
    return itemlist
Example 22
def play(item):
    logger.info("pelisalacarta.channels.seriesdanko play (url=" + item.url +
                ", server=" + item.server + ")")

    # Download the page
    if "seriesdanko" in item.url:
        data = scrapertools.downloadpageGzip(item.url)
    else:
        data = item.url
    return servertools.find_video_items(data=data)
Example 23
def play(item):
    logger.info("pelisalacarta.yaske play item.url=" + item.url)

    itemlist = []

    data = scrapertools.downloadpageGzip(item.url)
    # logger.info("data="+data)
    itemlist = servertools.find_video_items(data=data)

    return itemlist
Example 24
def listaseries(item):
    logger.info()
    itemlist=[]

    data = scrapertools.downloadpageGzip(item.url)
    patron = "<li[^<]+<a href='([^']+)'>.*?<img src='([^']+)'.*?<h3>([^']+)<\/h3>"
    matches = re.compile(patron,re.DOTALL|re.M).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append( Item(channel=item.channel, action="episodios" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, folder=True))
    return itemlist
Example 25
def novedades(item):
    logger.info("pelisalacarta.channels.seriesdanko novedades")

    itemlist = []
    extra = ""
    # Download the page
    data = scrapertools.downloadpageGzip(item.url).replace("\n", "")
    # print data
    """
    <h3 class='post-title entry-title'>The Good Wife 4x01 Sub.espa&ntildeol</h3>
    <div class='comentariosyfechas'>
    <a href='#D' title='¿Deje un comentario?'>&#191;Deje un comentario?</a>
    <span class='etiquetineditar'>2012-10-01 a las 16:33:04</span>
    </div>
    <div class='post-header'>
    <br /><a href="serie.php?serie=553" title='TODO-TITLE'><img class='ict' style='display: block; border: 3px solid #616161; opacity: 1; margin: 0px auto 10px; text-align: center; cursor: pointer; width: 400px; height: 500px; 'src='http://1.bp.blogspot.com/-YJMaorkbMtU/UGmpQZqIhiI/AAAAAAAAWPA/IXywwgXawFY/s400/the-good-wife-julianna-margulies-4.jpg' alt='TODO-alt' title='TODO-title' /></a><div face='trebuchet ms' style='text-align: center;'><a href='serie.php?serie=553'>
    <span style='font-weight: bold;'> </span>
    <span style='font-weight: bold;'>Ya Disponible en V.O.S.E para ver online y descargar,aqui en SeriesDanko.com</span></a>
    <span style='font-weight: bold;'></span></div><div class='post-header-line-1'></div>
    </div>
    <div class='post-body entry-content'>
    """
    patronvideos = "(<h3 class='post-title entry-title'>.*?<div class='post-body entry-content')"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    totalItems = len(matches)
    for match in matches:
        try:
            scrapedurl = urlparse.urljoin(item.url, re.compile(r"href=\"(serie.+?)\">").findall(match)[0])
        except:
            continue
        try:
            scrapedthumbnail = re.compile(r"src='(.+?)'").findall(match)[0]
        except:
            scrapedthumbnail = ""
        try:
            scrapedtitle = re.compile(r"class='post-title entry-title'>(.+?)<").findall(match)[0]
            scrapedtitle = decodeHtmlentities(scrapedtitle)
        except:
            scrapedtitle = "sin titulo"
        scrapedplot = ""
        itemlist.append(
            Item(
                channel=__channel__,
                action="episodios",
                title=scrapedtitle,
                url=scrapedurl,
                thumbnail=scrapedthumbnail,
                plot=scrapedplot,
                extra=extra,
                folder=True,
                totalItems=totalItems,
            )
        )

    return itemlist
Example 26
def novedades(item):
    logger.info("pelisalacarta.channels.seriesdanko novedades")

    itemlist = []
    extra = ""
    # Download the page
    data = scrapertools.downloadpageGzip(item.url).replace("\n", "")
    #print data
    '''
    <h3 class='post-title entry-title'>The Good Wife 4x01 Sub.espa&ntildeol</h3>
    <div class='comentariosyfechas'>
    <a href='#D' title='¿Deje un comentario?'>&#191;Deje un comentario?</a>
    <span class='etiquetineditar'>2012-10-01 a las 16:33:04</span>
    </div>
    <div class='post-header'>
    <br /><a href="serie.php?serie=553" title='TODO-TITLE'><img class='ict' style='display: block; border: 3px solid #616161; opacity: 1; margin: 0px auto 10px; text-align: center; cursor: pointer; width: 400px; height: 500px; 'src='http://1.bp.blogspot.com/-YJMaorkbMtU/UGmpQZqIhiI/AAAAAAAAWPA/IXywwgXawFY/s400/the-good-wife-julianna-margulies-4.jpg' alt='TODO-alt' title='TODO-title' /></a><div face='trebuchet ms' style='text-align: center;'><a href='serie.php?serie=553'>
    <span style='font-weight: bold;'> </span>
    <span style='font-weight: bold;'>Ya Disponible en V.O.S.E para ver online y descargar,aqui en SeriesDanko.com</span></a>
    <span style='font-weight: bold;'></span></div><div class='post-header-line-1'></div>
    </div>
    <div class='post-body entry-content'>
    '''
    patronvideos = "(<h3 class='post-title entry-title'>.*?<div class='post-body entry-content')"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    totalItems = len(matches)
    for match in matches:
        try:
            scrapedurl = urlparse.urljoin(
                item.url,
                re.compile(r"href=\"(serie.+?)\">").findall(match)[0])
        except:
            continue
        try:
            scrapedthumbnail = re.compile(r"src='(.+?)'").findall(match)[0]
        except:
            scrapedthumbnail = ""
        try:
            scrapedtitle = re.compile(
                r"class='post-title entry-title'>(.+?)<").findall(match)[0]
            scrapedtitle = decodeHtmlentities(scrapedtitle)
        except:
            scrapedtitle = "sin titulo"
        scrapedplot = ""
        itemlist.append(
            Item(channel=__channel__,
                 action="episodios",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 extra=extra,
                 folder=True,
                 totalItems=totalItems))

    return itemlist
Example 27
def allserieslist(item):
    logger.info("pelisalacarta.channels.seriesdanko allserieslist")

    Basechars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    BaseUrl = "http://seriesdanko.com/series.php?id=%s"
    action = "series"

    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    #logger.info(data)

    # Extract the series block
    patronvideos = "Listado de series disponibles</h2>(.*?)<div class='clear'></div>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    data = matches[0]
    scrapertools.printMatches(matches)

    # Extract the entries (folders)
    patronvideos = "<a href='([^']+)'.+?>([^<]+)</a>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    totalItems = len(matches)
    for url, title in matches:
        scrapedtitle = title.replace("\n", "").replace("\r", "")
        scrapedurl = url
        scrapedurl = urlparse.urljoin(
            item.url,
            scrapedurl.replace("\n", "").replace("\r", ""))
        scrapedthumbnail = ""
        scrapedplot = ""

        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        if title in Basechars or title == "0-9":
            action = "series"
            scrapedurl = BaseUrl % title
        else:
            action = "episodios"

        # Add to the XBMC listing
        itemlist.append(
            Item(channel=__channel__,
                 action=action,
                 title=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 fulltitle=scrapedtitle,
                 totalItems=totalItems))

    return itemlist
Example 28
def allserieslist(item):
    logger.info("pelisalacarta.channels.seriesdanko allserieslist")

    Basechars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    BaseUrl = "http://seriesdanko.com/series.php?id=%s"
    action = "series"

    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    # logger.info(data)

    # Extract the series block
    patronvideos = "Listado de series disponibles</h2>(.*?)<div class='clear'></div>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    data = matches[0]
    scrapertools.printMatches(matches)

    # Extract the entries (folders)
    patronvideos = "<a href='([^']+)'.+?>([^<]+)</a>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    totalItems = len(matches)
    for url, title in matches:
        scrapedtitle = title.replace("\n", "").replace("\r", "")
        scrapedurl = url
        scrapedurl = urlparse.urljoin(item.url, scrapedurl.replace("\n", "").replace("\r", ""))
        scrapedthumbnail = ""
        scrapedplot = ""

        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        if title in Basechars or title == "0-9":
            action = "series"
            scrapedurl = BaseUrl % title
        else:
            action = "episodios"

        # Add to the XBMC listing
        itemlist.append(
            Item(
                channel=__channel__,
                action=action,
                title=scrapedtitle,
                show=scrapedtitle,
                url=scrapedurl,
                thumbnail=scrapedthumbnail,
                plot=scrapedplot,
                fulltitle=scrapedtitle,
                totalItems=totalItems,
            )
        )

    return itemlist
Example 29
def lista(item):
    logger.info("[gaypornshare.py] lista")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    #logger.info(data)

    # Extract the entries (folders)
    #<h2 class="posttitle"><a href='http://gaypornshare.org/workshop-bears/' class='entry-title' rel='bookmark' title='Workshop Bears' >Workshop Bears</a></h2>
    patronvideos ="<a href='([^']+)' class='entry-title'.*?>([^']+)</a></h2>"+'.*?<img src="([^"]+)'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedtitle = scrapedtitle.replace("&#8211;","-")
        scrapedtitle = scrapedtitle.replace("&#8217;","'")
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        imagen = ""
        scrapedplot = match[0]  
        tipo = match[1]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        scrapedplot=strip_tags(scrapedplot)
        itemlist.append( Item(channel=item.channel, action="detail", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
 
 
  
    # Extract the next-page marker
    #<span class='current'>8</span><a class="page larger" href="http://gaypornshare.org/page/9/">9</a>
    patronvideos ="<span class='current'.*?</span>"+'<a.*?href="([^"]+)".*?>([^<]+)</a>'

    matches2 = re.compile(patronvideos,re.DOTALL).findall(data)

    for match2 in matches2:
        scrapedtitle = ">> página "+match2[1]
        scrapedurl = match2[0]
        scrapedthumbnail = ""
        imagen = ""
        scrapedplot = match2[0]  
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    itemlist.append( Item(channel=item.channel, action="mainlist", title="<< volver al inicio",  folder=True) )

 
    return itemlist
Example 30
def lista(item):
    logger.info("[gaypornshare.py] lista")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    #logger.info(data)

    # Extract the entries (folders)
    #<h2 class="posttitle"><a href='http://gaypornshare.org/workshop-bears/' class='entry-title' rel='bookmark' title='Workshop Bears' >Workshop Bears</a></h2>
    patronvideos ="<a href='([^']+)' class='entry-title'.*?>([^']+)</a></h2>"+'.*?<img src="([^"]+)'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedtitle = scrapedtitle.replace("&#8211;","-")
        scrapedtitle = scrapedtitle.replace("&#8217;","'")
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        imagen = ""
        scrapedplot = match[0]  
        tipo = match[1]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        scrapedplot=strip_tags(scrapedplot)
        itemlist.append( Item(channel=__channel__, action="detail", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
 
 
  
    # Extract the next-page marker
    #<span class='current'>8</span><a class="page larger" href="http://gaypornshare.org/page/9/">9</a>
    patronvideos ="<span class='current'.*?</span>"+'<a.*?href="([^"]+)".*?>([^<]+)</a>'

    matches2 = re.compile(patronvideos,re.DOTALL).findall(data)

    for match2 in matches2:
        scrapedtitle = ">> página "+match2[1]
        scrapedurl = match2[0]
        scrapedthumbnail = ""
        imagen = ""
        scrapedplot = match2[0]  
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="lista", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    itemlist.append( Item(channel=__channel__, action="mainlist", title="<< volver al inicio",  folder=True) )

 
    return itemlist
Example 31
def listcategorias(item):
    logger.info("[beeg.py] listcategorias")
    data = scrapertools.downloadpageGzip(item.url)
    data = scrapertools.get_match(data,'<div class="block block-tags">(.*?)<!-- /TAGS -->')
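    # Keep only the sidebar TAGS block; the category links are matched inside it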
    patron = '<li><a target="_self" href="([^"]+)" >([^"]+)</a></li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []
    for url, categoria in matches:
      url= "http://beeg.com" + url
      itemlist.append( Item(channel=__channel__, action="videos" , title=categoria, url=url))
      
    return itemlist
Example 32
def videos(item):
    logger.info()
    itemlist = []
    '''
    <div class="item-block item-normal col" >
    <div class="inner-block">
    <a href="http://www.submityourflicks.com/1846642-my-hot-wife-bending-over-and-getting-her-c**t-reamed.html" title="My hot wife bending over and getting her c**t reamed..">
    <span class="image">
    <script type='text/javascript'>stat['56982c566d05c'] = 0;
    pic['56982c566d05c'] = new Array();
    pics['56982c566d05c'] = new Array(1, 1, 1, 1, 1, 1, 1, 1, 1, 1);</script>
    <img src="
    '''

    data = scrapertools.downloadpageGzip(item.url)
    patron = '<div class="item-block[^<]+'
    patron += '<div class="inner-block[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
    patron += '<span class="image".*?'
    patron += '<img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:

        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumbnail.replace(" ", "%20")
        plot = ""

        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" +
                     thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 folder=False))

    next_page_url = scrapertools.find_single_match(
        data, "<a href='([^']+)' class=\"next\">NEXT</a>")
    if next_page_url != "":
        url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(
            Item(channel=item.channel,
                 action="videos",
                 title=">> Página siguiente",
                 url=url,
                 folder=True,
                 viewmode="movie"))

    return itemlist
Example 33
def ListadoCapitulosSeries(params,url,category):
    logger.info("[pelisflv.py] ListadoCapitulosSeries")
    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    
    # Download the page
    data = scrapertools.downloadpageGzip(url)
    #logger.info(data)

    # Pattern for the entries

    patron = "<div class='post-body entry-content'>(.*?)<div class='post-footer'>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    

    patron = '<a href="([^"]+)"[^>]+><[^>]+>(.*?)<'
    matches = re.compile(patron,re.DOTALL).findall(matches[0])
    scrapertools.printMatches(matches)
    patron2 = '<iframe src="([^"]+)"'
    
    # Add the entries found
    for match in matches:
        # Attributes
        scrapedtitle = match[1]
        data2 = scrapertools.downloadpageGzip(match[0])
        matches2 = re.compile(patron2,re.DOTALL).findall(data2)
        scrapertools.printMatches(matches2)    
        scrapedurl = matches2[0]
        scrapedthumbnail = thumbnail
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Add to the XBMC listing
        xbmctools.addnewfolder( __channel__ , "detail" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

    # Set the title, disable sorting, and close the directory
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example 35
def detail(item):
    logger.info("[Myhdlibrary.py] detail")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Find the links to the server-hosted videos
    video_itemlist = servertools.find_video_items(data=data)
    for video_item in video_itemlist:
        itemlist.append( Item(channel=__channel__ , action="play" , server=video_item.server, title=item.title+video_item.title, url=video_item.url, thumbnail=item.thumbnail, plot=item.plot, folder=False))

    return itemlist
def videos(item):
    logger.info("[submityourflicks.py] videos")
    # <div id="movies" style="width: 100%; ">
    data = scrapertools.downloadpageGzip(item.url)
    itemlist = [] 
    matches = re.compile(r"""<div class='content_item'>.*?</span>.*?<br style='clear: both;' />.*?</div>""",re.DOTALL).findall(data)
    for match in matches:
        datos = re.compile(r"""<div class='content_item'>.*?</div>""", re.S).findall(match)
        for vid in datos:
            aRef = re.compile(r"""<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" title="([^"]+)" name="([^"]+)" border="1" id='([^"]+)' width="170" height="130" onmouseover="([^"]+)" onmouseout="([^"]+)" /></a>""", re.S).findall(vid)
            aTime= re.compile(r"""<span class='l5'>([^"]+)Min<br />.*?</span>""", re.S).findall(vid)
            if len(aTime) > 0:
                cTime= aTime[0].replace("\r\n","")
                cTime= cTime.replace(" ","")
            else:
                cTime=""
                
            aPosted = re.compile(r"""<span class='l5'>.*?Posted: ([^"]+)<br /></span>""", re.S).findall(vid)
            if len(aPosted) > 0:
                cPosted = aPosted[0].replace("\r\n","")
                cPosted = cPosted.replace(" ","")
            else:
                cPosted = ""
                
            video = aRef[0]
            try:
                scrapedtitle = unicode( video[2], "utf-8" ).encode("iso-8859-1")
                scrapedtitle = scrapedtitle+"("+cTime+")["+cPosted+"]"
            except:
                scrapedtitle = video[2]
            scrapedurl =  urlparse.urljoin( "http://www.submityourflicks.com/", video[0] )
            scrapedthumbnail = urlparse.urljoin( "http://www.submityourflicks.com/", video[1] )
            scrapedplot = ""
            # Debugging
            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")            
            itemlist.append( Item(channel=__channel__, action="play" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, folder=False))

    # Pager
    print "paginador"
    matches = re.compile('<a href="([^"]+)">Next</a>', re.DOTALL).findall(data)
    if len(matches)>0:
        scrapedurl =  urlparse.urljoin( "http://www.submityourflicks.com/", matches[0] )
        print scrapedurl
        paginador = Item(channel=__channel__, action="videos" , title="!Página siguiente" , url=scrapedurl, thumbnail="", plot="", extra = "" , show=item.show)
    else:
        paginador = None
    
    if paginador is not None:
        itemlist.append( paginador )

    return itemlist
def lista_categoria_2(item):
    logger.info("[filesmonster_catalogue.py] list")
    itemlist = [] 
    url=item.url
    cuantos_videos=0

    # Download the page (plot is the page)
    data= scrapertools.downloadpageGzip(url)
    
    # Split the data to isolate the part we care about
    data_despues=data.split("box-table-a")
    data=data_despues[2]
   
    #logger.info(data)
    
    # Extract the entries (folders)
    
    # patronvideos ='<h1 class="product_title"><a href="([^"]+)">([^<]+).*?</a>.*?<img src="([^"]+)".*?'
    #  http://filesmonster.filesdl.net/posts/view/941244/rblue-eric-jonathan">

    patronvideos ='</td><td><a href="([^"]+)" title=".*?">([^<]+)</a>'
    
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedtitle = scrapedtitle.replace("&#8211;","-")
        scrapedtitle = scrapedtitle.replace("&#8217;","'")
        scrapedtitle =scrapedtitle.strip()
        scrapedurl= "http://filesmonster.biz/"+match[0]
        scrapedthumbnail = match[0]
        imagen = ""
        scrapedplot = match[0]  
        logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        scrapedplot=strip_tags(scrapedplot)     
         
        if scrapedtitle!='': itemlist.append( Item(channel=__channel__, action="detail_2", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail ,   folder=True) )

    # Bump the "page=" parameter to build the next-page URL
    url_antes=url.split("page=")
    url_despues=url_antes[1].split("&")
    pagina_inicial=url_despues[0]
    pagina=int(pagina_inicial)+1
    pagina=str(pagina)
    pagina_inicial=str(pagina_inicial)
    url_siguiente=re.sub(pagina_inicial, pagina, url)
    itemlist.append( Item(channel=__channel__, action="lista_categoria_2", title=">> siguientes [página "+pagina+"]", url=url_siguiente, folder=True) )
    itemlist.append( Item(channel=__channel__, action="mainlist_2", title="<< volver al inicio",  folder=True) )
    return itemlist
Example 38
def detail(item):
    logger.info("[gaypornshare.py] detail")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Find the links to the server-hosted videos
    video_itemlist = servertools.find_video_items(data=data)
    for video_item in video_itemlist:
        itemlist.append( Item(channel=__channel__ , action="play" , server=video_item.server, title=item.title+video_item.title, url=video_item.url, thumbnail=item.thumbnail, plot=item.plot, folder=False))

    return itemlist
def detail(item):
    logger.info("[filesmonster_catalogue.py] detail")
    itemlist = []

    # Download the page
    data2 = scrapertools.downloadpageGzip(item.url)
    # Find the URL
    patronvideos2 ='window.location.href = "([^"]+)"([^"]+)'
    matches2 = re.compile(patronvideos2,re.DOTALL).findall(data2)
    for match2 in matches2:
        scrapedurl2 =match2[0]
        scrapedtitle2 ="ver película ->"+ match2[0]
        itemlist.append( Item(channel=__channel__ , action="play" , server="filesmonster", title="ver en filesmonster",url=scrapedurl2, thumbnail=item.thumbnail, folder=False))


    return itemlist
def lista(item):
    logger.info("[gaypornshare.py] lista")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    #logger.info(data)

    # Extract the entries (folders)
    #<div class="post" id="post-xxx> <a href="http://gaypornshare.org/a-toy-story-2013/" title="A Toy Story (2013)"><img width="240" height="170" src="http://gaypornshare.org/wp-content/uploads/2013/07/18132279_a168223_xlb-300x213.jpg" class="attachment-240x180 wp-post-image" alt="A Toy Story (2013)" title="" /></a>
    patronvideos ='<div class="post" id="post-.*?<a href="([^"]+)".*?<img.*?src="([^"]+)".*?alt="([^"]+)".*?</a>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[2]
        scrapedtitle = scrapedtitle.replace("&#8211;","-")
        scrapedtitle = scrapedtitle.replace("&#8217;","'")
        scrapedurl = match[0]
        scrapedthumbnail = match[1]
        imagen = ""
        scrapedplot = match[0]  
        tipo = match[1]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        scrapedplot=strip_tags(scrapedplot)
        itemlist.append( Item(channel=__channel__, action="detail", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
 
 
  
    # Extract the next-page marker
    #<span class="current">3</span><a href='http://gaypornshare.org/page/4/' class="inactive">
    patronvideos ="<span.*?current.*?</span><a href='([^']+)' class=\"inactive\">([^']+)</a>"
    matches2 = re.compile(patronvideos,re.DOTALL).findall(data)

    for match2 in matches2:
        scrapedtitle = ">> página "+match2[1]
        scrapedurl = match2[0]
        scrapedthumbnail = ""
        imagen = ""
        scrapedplot = match2[0]  
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="lista", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    itemlist.append( Item(channel=__channel__, action="mainlist", title="<< volver al inicio",  folder=True) )
 
    return itemlist
Example 41
def listcategorias(item):
    logger.info("[beeg.py] listcategorias")
    data = scrapertools.downloadpageGzip(item.url)
    data = scrapertools.get_match(
        data, '<div class="block block-tags">(.*?)<!-- /TAGS -->')
    patron = '<li><a target="_self" href="([^"]+)" >([^"]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for url, categoria in matches:
        url = "http://beeg.com" + url
        itemlist.append(
            Item(channel=__channel__,
                 action="videos",
                 title=categoria,
                 url=url))

    return itemlist
Example 42
def categorias(item):
    logger.info("[Pelisxporno.py] categorias")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Extract the entries (folders)
    bloque_cat = scrapertools.find_single_match(data, '<li id="categories-2"(.*?)</ul>')
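    # bloque_cat holds the sidebar "categories-2" widget, one <a> per category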
    patronvideos ='<a href="([^"]+)" >(.*?)</a>'
    matches = re.compile(patronvideos,re.DOTALL).findall(bloque_cat)

    for scrapedurl, scrapedtitle in matches:
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"]")
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl , folder=True) )

    return itemlist
Example 43
def play(item):
    logger.info("[beeg.py] play")
    itemlist = []
    data = scrapertools.downloadpageGzip(item.url)
    if DEBUG: logger.info(data)
    patron = "'file'\: '([^']+)'"
    url = scrapertools.get_match(data, patron)
    itemlist.append(
        Item(channel=__channel__,
             action="play",
             title=item.title,
             url=url,
             thumbnail=item.thumbnail,
             server="directo",
             folder=False))

    return itemlist
Example 44
def categorias_2(item):
    logger.info("[filesmonster_catalogue.py] categorias")
    itemlist = []
    url = "http://filesmonster.biz/index.php"
    cual = item.url

    # Download the front page to recover the categories
    data = scrapertools.downloadpageGzip(url)

    patronvideos = '<li><a href="([^"]+)">([^<]+)</a>'

    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedtitle = scrapedtitle.replace("&#8211;", "-")
        scrapedtitle = scrapedtitle.replace("&#8217;", "'")
        scrapedtitle = scrapedtitle.strip()
        scrapedurl = "http://filesmonster.biz/" + match[0] + "&page=1"
        scrapedthumbnail = match[0]
        imagen = ""
        scrapedplot = match[0]
        logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                    "], thumbnail=[" + scrapedthumbnail + "]")
        scrapedplot = strip_tags(scrapedplot)

        # Filter the results to show only this category's entries
        if cual == 'gay' and (scrapedurl.find("Gay") <= 0
                              and scrapedurl.find("Bisexual") <= 0):
            scrapedtitle = ''
        else:
            if cual == 'hetero' and (scrapedurl.find("Bisexual") >= 0
                                     or scrapedurl.find("Gay") >= 0):
                scrapedtitle = ''

        if scrapedtitle != '':
            itemlist.append(
                Item(channel=__channel__,
                     action="lista_categoria_2",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedurl,
                     folder=True))

    return itemlist
def lista_categoria(item):
    logger.info("[filesmonster_catalogue.py] lista")
    itemlist = []

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)
    #logger.info(data)

    
    # Extract the entries (folders)
    patronvideos ='<h1 class="product_title"><a href="([^"]+)">([^<]+).*?</a>.*?<img src="([^"]+)".*?'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedtitle = scrapedtitle.replace("&#8211;", "-")
        scrapedtitle = scrapedtitle.replace("&#8217;", "'")
        scrapedurl = "http://filesmonster.filesdl.net/" + match[0]
        scrapedthumbnail = match[2]
        scrapedplot = strip_tags(match[0])
        if (DEBUG): logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append( Item(channel=__channel__, action="detail_1", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True) )

    # Extract the next-page marker
    patronvideos = '<a href="([^"]+)">Next</a>([^"]+).*?<div class="product_content">'
    matches2 = re.compile(patronvideos, re.DOTALL).findall(data)

    for match2 in matches2:
        scrapedtitle = ">> página siguiente"
        scrapedurl = "http://filesmonster.filesdl.net/index.php" + match2[0]
        scrapedthumbnail = ""
        scrapedplot = match2[0]
        if (DEBUG): logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append( Item(channel=__channel__, action="lista_categoria", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True) )

    itemlist.append( Item(channel=__channel__, action="mainlist", title="<< volver al inicio", folder=True) )

    return itemlist
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("streamondemand.servers.vidgg get_video_url(page_url='%s')" % page_url)

    file = scrapertools.find_single_match(page_url, 'http://vidgg.to/video/([a-z0-9]+)')
    data = scrapertools.cache_page("http://vidgg.to/embed/?id=%s" % file)

    key = scrapertools.find_single_match(data, 'var fkzd="([^"]+)"')
    url = "http://www.vidgg.to/api/player.api.php?file=%s&key=%s&pass=undefined&cid3=undefined&numOfErrors=0&user=undefined&cid2=undefined&cid=undefined" % (file, key)

    data = scrapertools.downloadpageGzip(url)
    mediaurl = scrapertools.find_single_match(data, 'url=(.*?)&')
    video_urls = []
    video_urls.append( [ scrapertools.get_filename_from_url(mediaurl)[-4:]+" [vidgg]", mediaurl])

    for video_url in video_urls:
        logger.info("[vidgg.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
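
The player.api.php response parsed above is a query-string style payload, so the url= field can also be recovered with the standard library instead of a regex. A sketch assuming the response has the shape shown (the sample values are invented):

import urlparse  # urllib.parse on Python 3

api_response = "url=http://cdn.vidgg.example/flv/abc123.flv&title=Some+video&error=0"
fields = urlparse.parse_qs(api_response)
print(fields["url"][0])  # http://cdn.vidgg.example/flv/abc123.flv
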
def detail(item):
    logger.info("[filesmonster_catalogue.py] detail")
    itemlist = []

    # download the page
    data = scrapertools.downloadpageGzip(item.url)

    # discover the url
    patronvideos2 = 'http://filesmonster.com/download.php(.*?)".(.*?)'
    matches2 = re.compile(patronvideos2, re.DOTALL).findall(data)
    for match2 in matches2:
        scrapedurl2 = "http://filesmonster.com/download.php" + match2[0]
        
        itemlist.append( Item(channel=__channel__ , action="play" ,  plot=data ,  server="filesmonster", title=item.title+" [ver en filesmonster]" ,url=scrapedurl2, thumbnail=item.thumbnail, folder=False))


    return itemlist
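
Because patronvideos2 contains two capture groups, findall returns tuples, which is why the loop indexes match2[0]; a quick check of that behaviour on an invented snippet:

import re

html = '<a href="http://filesmonster.com/download.php?id=abc123">mirror</a>'
matches = re.compile('http://filesmonster.com/download.php(.*?)".(.*?)', re.DOTALL).findall(html)
print(matches)        # [('?id=abc123', '')]
print(matches[0][0])  # ?id=abc123
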
Ejemplo n.º 48
def detail(item):

    logger.info("[filesmonster_catalogue.py] detail")
    itemlist = []

    # download the page
    data = scrapertools.downloadpageGzip(item.url)
    # discover the url

    titulo = item.title.replace(" [abrir carpeta en filesmonster ]", "")
    contador = 0
    patronvideos2 = 'http://filesmonster.com/download.php(.*?)".(.*?)'
    matches2 = re.compile(patronvideos2, re.DOTALL).findall(data)
    for match2 in matches2:
        scrapedurl2 = "http://filesmonster.com/download.php" + match2[0]
        contador = contador + 1
        contador_t = str(contador)

        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 plot=data,
                 server="filesmonster",
                 title="CONTENIDO " + contador_t + ": " + titulo +
                 " [ver en filesmonster]",
                 url=scrapedurl2,
                 thumbnail=item.thumbnail,
                 folder=False))

    patronvideos2 = 'http://filesmonster.com/folders.php(.*?)".(.*?)'
    matches2 = re.compile(patronvideos2, re.DOTALL).findall(data)
    for match2 in matches2:
        scrapedurl2 = "http://filesmonster.com/folders.php" + match2[0]

        itemlist.append(
            Item(channel=__channel__,
                 action="detail",
                 plot=data,
                 title=item.title + " [abrir carpeta en filesmonster ]",
                 url=scrapedurl2,
                 thumbnail=item.thumbnail,
                 folder=True))

    return itemlist
def categorias_2(item):
    logger.info("[filesmonster_catalogue.py] categorias")
    itemlist = []
    url = "http://unusualporn.net/"
    cual = item.url

    # Download the front page to recover the categories
    data = scrapertools.downloadpageGzip(url)

    patronvideos = '<a class="rss_s" title="([^"]+)" target="_blank" href="([^"]+)"></a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[0]
        scrapedtitle = scrapedtitle.replace("&#8211;", "-")
        scrapedtitle = scrapedtitle.replace("&#8217;", "'")
        scrapedtitle = scrapedtitle.replace("RSS feed", "").strip()
        scrapedurl = match[1]
        scrapedurl = scrapedurl.replace("/feed", "")
        scrapedurl = scrapedurl.replace("http://unusualporn.net/category", "")
        scrapedthumbnail = match[0]
        logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        pagina = "1"

        # filter the results to show only those in the requested category
        if cual == 'gay' and ("gay" not in scrapedurl and "Bisexual" not in scrapedurl):
            scrapedtitle = ''
        elif cual == 'hetero' and ("bisexual" in scrapedurl or "gay" in scrapedurl):
            scrapedtitle = ''

        if scrapedtitle != '':
            itemlist.append( Item(channel=__channel__, action="lista_categoria_2", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=pagina, folder=True) )

    return itemlist
Ejemplo n.º 51
def play(item):
    logger.info("pelisalacarta.yaske play item.url="+item.url)
    
    itemlist=[]
    
    if item.url.startswith("http://adf.ly"):
        from servers import adfly
        item.url = adfly.get_long_url(item.url)

    data = scrapertools.downloadpageGzip(item.url)
    #logger.info("data="+data)

    data = data.replace("http://www.yaske.net/archivos/allmyvideos/play.php?v=","http://allmyvideos.net/")

    itemlist = servertools.find_video_items(data=data)
    for newitem in itemlist:
        newitem.fulltitle = item.fulltitle
    
    return itemlist
Ejemplo n.º 52
def videos(item):
    logger.info()
    itemlist = []
    data = scrapertools.downloadpageGzip(item.url)
    patron = '<div class="item-block item-normal col" >.*?'
    patron += '<a href="([^"]+)" title="([^"]+)">.*?'
    patron += 'data-src="([^"]+)".*?'
    patron += '</span> ([^"]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedtime in matches:
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumbnail.replace(" ", "%20")
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                             fanart=thumbnail))
    next_page = scrapertools.find_single_match(data, "<a href='([^']+)' class=\"next\">NEXT</a>")
    if next_page != "":
        url = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="videos", title=">> Página siguiente", url=url))
    return itemlist
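
urlparse.urljoin is what lets the scraped next-page link stay relative in the HTML; a short demonstration of the resolution rules this relies on (URLs are hypothetical):

import urlparse  # urllib.parse on Python 3

base = "http://example.com/videos/page/2/"
print(urlparse.urljoin(base, "/videos/page/3/"))  # http://example.com/videos/page/3/
print(urlparse.urljoin(base, "?page=3"))          # http://example.com/videos/page/2/?page=3
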
Ejemplo n.º 53
def play(item):
    logger.info("pelisalacarta.yaske play item.url=" + item.url)

    itemlist = []

    if item.url.startswith("http://adf.ly"):
        from servers import adfly
        item.url = adfly.get_long_url(item.url)

    data = scrapertools.downloadpageGzip(item.url)
    #logger.info("data="+data)

    data = data.replace(
        "http://www.yaske.net/archivos/allmyvideos/play.php?v=",
        "http://allmyvideos.net/")

    itemlist = servertools.find_video_items(data=data)
    for newitem in itemlist:
        newitem.fulltitle = item.fulltitle

    return itemlist
Ejemplo n.º 54
def detail_2(item):
    logger.info()
    itemlist = []

    # download the page
    data = scrapertools.downloadpageGzip(item.url)
    data = data.split('<span class="filesmonsterdlbutton">Download from Filesmonster</span>')
    data = data[0]
    # discover the url
    patronvideos = 'href="http://filesmonster.com/download.php(.*?)".(.*?)'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match2 in matches:
        url = "http://filesmonster.com/download.php" + match2[0]
        title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
        itemlist.append(
            Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
                 url=url, thumbnail=item.thumbnail, folder=False))
        itemlist.append(Item(channel=item.channel, action="anadir_favorito",
                             title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=match2[0],
                             thumbnail=item.thumbnail, plot="el archivo", folder=True))
        itemlist.append(Item(channel=item.channel, title=""));

    patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for url in matches:
        if not url == item.url:
            logger.info(url)
            logger.info(item.url)
            title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
            itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
                                 thumbnail=item.thumbnail, folder=True))
            itemlist.append(Item(channel=item.channel, action="anadir_favorito",
                                 title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
                                 thumbnail=item.thumbnail, plot="la carpeta", folder=True))
            itemlist.append(Item(channel=item.channel, title=""));

    return itemlist
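
Splitting the page at the "Download from Filesmonster" button and keeping only the first half is a cheap way to stop the regexes from matching links in footers or related-item lists. The same trick in isolation, on an invented page:

marker = '<span class="filesmonsterdlbutton">Download from Filesmonster</span>'
html = ('<a href="http://filesmonster.com/download.php?id=1">file</a>'
        + marker +
        '<a href="http://filesmonster.com/download.php?id=999">related item</a>')

# everything after the marker (ads, footers, related items) is discarded
usable = html.split(marker)[0]
print("id=999" in usable)  # False
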
Ejemplo n.º 55
def episodios(item):
    logger.info("tvalacarta.channels.discoverymax episodios")

    itemlist = []
    fanart = item.fanart
    sinopsis = ""
    clips = False
    if item.title != "Avances":
        data = scrapertools.cache_page(item.url.rsplit("/",2)[0]).replace("\n","")
        sinopsis = scrapertools.find_single_match(data, '<div class="cfct-mod-content">(.*?)</div>')
        sinopsis = scrapertools.htmlclean(sinopsis)
        fanart = scrapertools.find_single_match(data, '<div class="dni-image ">.*?src="([^"]+)"')
        if "kodi" in config.get_platform(): action = "sinopsis"
        else: action = ""
        itemlist.append( Item(channel=CHANNELNAME, title="[COLOR yellow]Sinopsis[/COLOR]", action=action, url="", thumbnail=fanart, plot=sinopsis, fanart=fanart, folder=False) )
        if "<span>Clips</span>" in data and item.title != "[COLOR red]>> Clips de vídeo[/COLOR]":
            clips = True
            url_clips = scrapertools.find_single_match(data, '<a href="([^"]+)"><span>Clips</span>')

    try:
        data = scrapertools.downloadpageGzip(item.url).replace("\n","")
    except:
        data = scrapertools.cache_page(item.url.rsplit("/",2)[0]).replace("\n","")


    patron = '<div class="dni-video-playlist-thumb-box.*?<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?<h3 data-content=.*?>([^<]+)<.*?'
    patron += '<p class="desc".*?>([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        scrapedurl = item.url + scrapedurl
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle, action="play", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, fanart=fanart, show=item.show, folder=False) )

    if len(itemlist) < 2:
        itemlist.append( Item(channel=CHANNELNAME, title="No hay episodios completos disponibles", action="", url="", thumbnail=fanart, plot=sinopsis, fanart=fanart, folder=False) )
    if clips:
        itemlist.append( Item(channel=CHANNELNAME, title="[COLOR red]>> Clips de vídeo[/COLOR]", action="episodios", url=url_clips, thumbnail=fanart, plot=sinopsis, fanart=fanart, folder=True) )
    return itemlist
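
item.url.rsplit("/", 2)[0] walks two path segments up from the episode URL to reach the show's landing page, which is where the synopsis and fanart live. For example (the URL is invented):

url = "http://www.discoverymax.marca.com/series/mi-serie/videos/episodio-1/"
print(url.rsplit("/", 2)[0])  # http://www.discoverymax.marca.com/series/mi-serie/videos
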
Ejemplo n.º 56
def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        # a Cloudflare challenge answers with a Refresh header such as
        # "8;URL=/cdn-cgi/...": wait the indicated seconds, then follow the URL
        time.sleep(int(resp_headers['refresh'][:1]))

        scrapertools.get_headers_from_response(host + '/' + resp_headers['refresh'][7:], headers=headers)

    try:
        # if the plain download now succeeds, the challenge has been solved
        data = scrapertools.downloadpageGzip(url)
        cloudflare = False
    except:
        data = scrapertools.cache_page(url, headers=headers)
        cloudflare = True

    return data, cloudflare
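
The refresh branch above assumes a Refresh header shaped like "8;URL=/cdn-cgi/...": the first character is the delay in seconds and everything from index 7 on is the challenge path (minus its leading slash, which the caller re-adds). A sketch of that parsing on a sample value, with a less position-dependent split alongside:

refresh = "8;URL=/cdn-cgi/l/chk_jschl?answer=42"

print(int(refresh[:1]))  # 8
print(refresh[7:])       # cdn-cgi/l/chk_jschl?answer=42

delay, target = refresh.split(";URL=", 1)
print("%s -> %s" % (delay, target))  # 8 -> /cdn-cgi/l/chk_jschl?answer=42
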

def get_cookie_value(extension=""):
    cookies = os.path.join( config.get_data_path(), 'cookies.dat' )
    cookiedatafile = open(cookies, 'r')
    cookiedata = cookiedatafile.read()
    cookiedatafile.close()
    cfduid = scrapertools.find_single_match(cookiedata, "tv-vip.*?__cfduid\s+([A-Za-z0-9\+\=]+)")
    cf_clearance = scrapertools.find_single_match(cookiedata, "tv-vip.*?cf_clearance\s+([A-Za-z0-9\+\=-]+)")
    cookie = "&Cookie=__cfduid=" + cfduid + extension + "; cf_clearance=" + cf_clearance
    return cookie
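
find_single_match is applied here to a raw dump of cookies.dat, assuming each cookie appears as its name followed by whitespace and its value on a line tagged with the tv-vip domain. A self-contained check of the two regexes on an invented sample:

import re

sample = ("tv-vip.com\tFALSE\t/\tFALSE\t0\t__cfduid\td1a2b3c4\n"
          "tv-vip.com\tFALSE\t/\tFALSE\t0\tcf_clearance\tabc-123=")
print(re.search("tv-vip.*?__cfduid\s+([A-Za-z0-9\+\=]+)", sample).group(1))       # d1a2b3c4
print(re.search("tv-vip.*?cf_clearance\s+([A-Za-z0-9\+\=-]+)", sample).group(1))  # abc-123=
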
Ejemplo n.º 57
def videos(item):
    logger.info("[xhamster.py] videos")
    data = scrapertools.downloadpageGzip(item.url)
    itemlist = []

    if item.extra != "buscar":
        data = scrapertools.get_match(
            data,
            '<div class="boxC videoList clearfix">(.*?)<div id="footer">')
        patron = "<div class='vDate'>.*?<\/div><a href='([^']+)'.*?><img src='([^']+)'.*?><img.*?><b>([^']+)</b><u>([^']+)</u>"
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, time, title in matches:
            scrapedtitle = "" + time + " - " + title
            # Debug
            if (DEBUG):
                logger.info("title=[" + scrapedtitle + "], url=[" +
                            scrapedurl + "], thumbnail=[" + scrapedthumbnail +
                            "]")
            itemlist.append(
                Item(channel=__channel__,
                     action="detail",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     folder=True))

    patron = "<div class='video'><a href='([^']+)'.*?><img src='([^']+)'.*?><img.*?><b>([^']+)</b><u>([^']+)</u>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, time, title in matches:
        scrapedtitle = "" + time + " - " + title
        # Debug
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="detail",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 folder=True))

    # Extract the paginator
    if item.extra == "buscar":
        patron = "<a href='([^']+)' class='last'"
    else:
        patron = "<div class='pager'>.*?<span>.*?<a href='([^']+)'>([^']+)<\/a>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for match in matches:
        if item.extra == "buscar":
            page = scrapertools.find_single_match(match, "page=(\d+)")
            scrapedurl = match.replace("amp;", "&")
            itemlist.append(
                Item(channel=__channel__,
                     action="videos",
                     title=">> Página " + page,
                     url=scrapedurl,
                     extra="buscar",
                     folder=True))
        else:
            page = match[1]
            scrapedurl = match[0]
            itemlist.append(
                Item(channel=__channel__,
                     action="videos",
                     title=">> Página " + page,
                     url=scrapedurl,
                     folder=True))

    return itemlist
Ejemplo n.º 58
def videos(item):
    logger.info("[submityourflicks.py] videos")
    # <div id="movies" style="width: 100%; ">
    data = scrapertools.downloadpageGzip(item.url)
    itemlist = []
    matches = re.compile(
        r"""<div class='content_item'>.*?</span>.*?<br style='clear: both;' />.*?</div>""",
        re.DOTALL).findall(data)
    for match in matches:
        datos = re.compile(r"""<div class='content_item'>.*?</div>""",
                           re.S).findall(match)
        for vid in datos:
            aRef = re.compile(
                r"""<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" title="([^"]+)" name="([^"]+)" border="1" id='([^"]+)' width="170" height="130" onmouseover="([^"]+)" onmouseout="([^"]+)" /></a>""",
                re.S).findall(vid)
            aTime = re.compile(
                r"""<span class='l5'>([^"]+)Min<br />.*?</span>""",
                re.S).findall(vid)
            if len(aTime) > 0:
                cTime = aTime[0].replace("\r\n", "")
                cTime = cTime.replace(" ", "")
            else:
                cTime = ""

            aPosted = re.compile(
                r"""<span class='l5'>.*?Posted: ([^"]+)<br /></span>""",
                re.S).findall(vid)
            if len(aPosted) > 0:
                cPosted = aPosted[0].replace("\r\n", "")
                cPosted = cPosted.replace(" ", "")
            else:
                cPosted = ""

            if not aRef:
                continue
            video = aRef[0]
            try:
                scrapedtitle = unicode(video[2], "utf-8").encode("iso-8859-1")
                scrapedtitle = scrapedtitle + "(" + cTime + ")[" + cPosted + "]"
            except:
                scrapedtitle = video[2]
            scrapedurl = urlparse.urljoin("http://www.submityourflicks.com/",
                                          video[0])
            scrapedthumbnail = urlparse.urljoin(
                "http://www.submityourflicks.com/", video[1])
            scrapedplot = ""
            # Depuracion
            if (DEBUG):
                logger.info("title=[" + scrapedtitle + "], url=[" +
                            scrapedurl + "], thumbnail=[" + scrapedthumbnail +
                            "]")
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     show=scrapedtitle,
                     folder=False))

    # Paginator
    matches = re.compile('<a href="([^"]+)">Next</a>', re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin("http://www.submityourflicks.com/",
                                      matches[0])
        paginador = Item(channel=__channel__,
                         action="videos",
                         title="!Página siguiente",
                         url=scrapedurl,
                         thumbnail="",
                         plot="",
                         extra="",
                         show=item.show)
    else:
        paginador = None

    if paginador is not None:
        itemlist.append(paginador)

    return itemlist