示例#1
0
def search(item, texto):
    """Search repelis.tv for *texto* and return matching movie Items."""
    logger.info(item.url)
    # Build the search url (spaces become '+').
    texto = texto.replace(" ", "+")
    item.url = 'http://www.repelis.tv/buscar/?s=%s' % (texto)
    logger.info(item.url)

    data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
    logger.info("data: " + data)

    # Each result card: link (url + title) followed by its poster image.
    patron = ('<div class="col-xs-2">.*?'
              '<div class="row">.*?'
              '<a href="(.*?)" title="(.*?)">.*?'
              '<img src="(.*?)"')
    logger.info(patron)

    itemlist = []
    for scrapedurl, scrapedtitle, scrapedthumbnail in re.findall(patron, data, re.DOTALL):
        clean_title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
        clean_title = clean_title.replace("Online", "")
        full_url = urlparse.urljoin(item.url, scrapedurl)
        thumb = urlparse.urljoin(item.url, scrapedthumbnail)
        logger.info(full_url)
        itemlist.append(Item(channel=item.channel, action="findvideos", title=clean_title,
                             fulltitle=clean_title, url=full_url, thumbnail=thumb, fanart=thumb))

    return itemlist
示例#2
0
def menuestre(item):
    """List the 'Estrenos' (premieres) section with quality/year/language info.

    Returns Items with action 'findvideos', plus a paging Item when a
    next page link exists.

    Fixes: narrowed the bare ``except:`` (which also swallowed
    SystemExit/KeyboardInterrupt) to ``except Exception``; dropped a
    stray trailing semicolon.
    """
    logger.info(item.url)
    itemlist = []
    data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
    # Restrict scraping to the block between the 'Estrenos' header and </section>.
    patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
    matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
    for bloque_enlaces in matchesenlaces:
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
        patron += '<img src="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
        for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
            title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
            title = title.replace("Online", "")
            url = scrapedurl
            thumbnail = scrapedthumbnail
            # The markup between the link and the poster carries the
            # quality, year and language badges.
            quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
            year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
            language = scrapertools.find_single_match(extra_info, 'class="(latino|espanol|subtitulado)"')

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
                                 thumbnail=thumbnail, fanart=thumbnail, language=language, quality=quality,
                                 infoLabels={'year': year}))

    # Best-effort pagination: when the pattern is missing we simply omit
    # the "next page" entry instead of failing the whole listing.
    try:
        next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
        title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append(Item(channel=item.channel, title=title, url=next_page, action="menuestre", folder=True))
    except Exception:  # narrowed from bare except
        pass
    return itemlist
示例#3
0
def porcateg(item):
    """List movie categories, hiding the erotic one when adult mode is off.

    Fixes: the filter used ``scrapertools.get_match`` + bare ``except`` as
    control flow (appending only when the regex FAILED) and left an unused
    ``erotica`` variable; replaced with a plain substring test, which is
    equivalent since the pattern ``.*?/erotic.*?`` matched any url that
    contains '/erotic'. Also fixed the copy-pasted log label ("poranyo").
    """
    logger.info("[repelis] porcateg")
    logger.info("[repelis] " + item.url)
    itemlist = []

    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
        title = title.replace("Online", "")
        url = urlparse.urljoin(item.url, scrapedurl)
        logger.info("[repelis] " + url)
        # Skip the adult (erotic) category when adult mode is disabled.
        if config.get_setting("enableadultmode") == "false" and "/erotic" in scrapedurl:
            continue
        itemlist.append(Item(channel=item.channel, action="todaspelis", title=title, fulltitle=title,
                             url=url, fanart=item.fanart))

    return itemlist
示例#4
0
def porcateg(item):
    """List movie categories, hiding the erotic one when adult mode is off.

    Fixes: the filter used ``scrapertools.get_match`` + bare ``except`` as
    control flow (appending only when the regex FAILED) and left an unused
    ``erotica`` variable; replaced with a plain substring test, which is
    equivalent since the pattern ``.*?/erotic.*?`` matched any url that
    contains '/erotic'.
    """
    logger.info(item.url)
    itemlist = []

    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')
    patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
        title = title.replace("Online", "")
        url = urlparse.urljoin(item.url, scrapedurl)
        logger.info(url)
        # Skip the adult (erotic) category when adult mode is disabled (0).
        if config.get_setting("adult_mode") == 0 and "/erotic" in scrapedurl:
            continue
        itemlist.append(Item(channel=item.channel, action="todaspelis", title=title, fulltitle=title,
                             url=url, fanart=item.fanart))

    return itemlist
示例#5
0
def poranyo(item):
    """Build the by-year menu from the page's <option> elements."""
    logger.info(item.url)

    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')

    # Every <option> is one selectable year.
    option_pairs = re.findall('<option value="([^"]+)">(.*?)</option>', data, re.DOTALL)
    scrapertools.printMatches(option_pairs)

    itemlist = []
    for opt_url, opt_title in option_pairs:
        clean_title = scrapertools.remove_show_from_title(opt_title, "Ver Película")
        clean_title = clean_title.replace("Online", "")
        full_url = urlparse.urljoin(item.url, opt_url)
        itemlist.append(Item(channel=item.channel, action="todaspelis", title=clean_title,
                             fulltitle=clean_title, url=full_url, fanart=item.fanart))

    return itemlist
示例#6
0
def menudesta(item):
    """List the 'Destacadas' (featured) movies block."""
    logger.info(item.url)

    itemlist = []
    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')

    # Everything between the 'Destacadas' header and the next <h3>.
    for bloque_enlaces in re.findall('<h3>.*?Destacadas.*?>(.*?)<h3>', data, re.DOTALL):
        # Each poster card: link (url + title) followed by its image.
        card_patron = ('<div class="poster-media-card">.*?'
                       '<a href="(.*?)".*?title="(.*?)".*?'
                       '<img src="(.*?)"')
        cards = re.findall(card_patron, bloque_enlaces, re.DOTALL)
        scrapertools.printMatches(cards)
        for card_url, card_title, card_thumb in cards:
            clean_title = scrapertools.remove_show_from_title(card_title, "Ver Película")
            clean_title = clean_title.replace("Online", "")
            full_url = urlparse.urljoin(item.url, card_url)
            thumb = urlparse.urljoin(item.url, card_thumb)
            itemlist.append(Item(channel=item.channel, action="findvideos", title=clean_title,
                                 fulltitle=clean_title, url=full_url, thumbnail=thumb, fanart=thumb))

    return itemlist
示例#7
0
def poranyo(item):
    """Year menu: one Item per <option>, linking to menupelis with extra='year'."""
    logger.info(item.url)

    data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')

    itemlist = []
    # Every <option> is one selectable year; its value is appended to the url.
    for opt_url, opt_title in re.findall('<option value="([^"]+)">(.*?)</option>', data, re.DOTALL):
        clean_title = scrapertools.remove_show_from_title(opt_title, "Ver Película")
        clean_title = clean_title.replace("Online", "")
        itemlist.append(Item(channel=item.channel, action="menupelis", title=clean_title,
                             fulltitle=clean_title, url=item.url + opt_url,
                             fanart=item.fanart, extra='year'))

    return itemlist
示例#8
0
def porcateg(item):
    """Category menu; the erotic category is tagged 'adult' and hidden
    unless adult mode is enabled.

    Bug fix: the original only tagged the erotic category as 'adult' when
    adult mode was already ON (``if adult_mode != 0``), so with adult mode
    OFF the entry kept ``extra = title`` and the append condition
    ``extra != 'adult'`` always passed — the category was never filtered.
    The tag is now applied unconditionally and filtering happens on append.
    """
    logger.info(item.url)
    itemlist = []

    data = httptools.downloadpage(
        item.url).data.decode('iso-8859-1').encode('utf-8')
    patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)

    adult_mode = config.get_setting("adult_mode")
    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.remove_show_from_title(scrapedtitle,
                                                    "Ver Película")
        title = title.replace("Online", "")
        url = scrapedurl
        logger.info(url)
        # Tag the erotic category regardless of the current setting.
        if 'erotic' in scrapedurl:
            extra = 'adult'
        else:
            extra = title

        # Hide adult entries when adult mode is disabled (0).
        if extra != 'adult' or adult_mode != 0:
            itemlist.append(
                Item(channel=item.channel,
                     action="menupelis",
                     title=title,
                     fulltitle=title,
                     url=url,
                     fanart=item.fanart,
                     extra=extra))

    return itemlist
示例#9
0
def menudesta(item):
    """List the 'Destacadas' (featured) movies block.

    Fixes: corrected the copy-pasted log label (it said "menupelis");
    dropped a stray trailing semicolon.
    """
    logger.info("[repelis] menudesta")
    logger.info("[repelis] " + item.url)

    itemlist = []

    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')

    # Everything between the 'Destacadas' header and the next <h3>.
    patronenlaces = '<h3>.*?Destacadas.*?>(.*?)<h3>'
    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)

    for bloque_enlaces in matchesenlaces:
        # Each poster card: link (url + title) followed by its image.
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)".*?'
        patron += '<img src="(.*?)"'

        matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
        scrapertools.printMatches(matches)
        for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
            title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
            title = title.replace("Online", "")
            url = urlparse.urljoin(item.url, scrapedurl)
            thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title,
                                 url=url, thumbnail=thumbnail, fanart=thumbnail))

    return itemlist
示例#10
0
def menupelis(item):
    """List 'Películas Recién Agregadas' plus a best-effort 'next page' Item.

    Fix: narrowed the bare ``except:`` around pagination (it also swallowed
    SystemExit/KeyboardInterrupt) to ``except Exception``.
    """
    logger.info("[repelis] menupelis")
    logger.info("[repelis] " + item.url)

    itemlist = []

    data = scrapertools.cache_page(
        item.url).decode('iso-8859-1').encode('utf-8')

    # Block between the section header and the closing </section>.
    patronenlaces = '<h3>Películas Recién Agregadas</h3>.*?>(.*?)</section>'
    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)

    logger.info("begin ----------")
    scrapertools.printMatches(matchesenlaces)
    logger.info("end ----------")

    for bloque_enlaces in matchesenlaces:
        # Each poster card: link (url + title) followed by its image.
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)".*?'
        patron += '<img src="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
        scrapertools.printMatches(matches)

        for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
            logger.info("He encontrado el segundo bloque")
            title = scrapertools.remove_show_from_title(
                scrapedtitle, "Ver Película")
            title = title.replace("Online", "")
            url = urlparse.urljoin(item.url, scrapedurl)
            thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
            itemlist.append(
                Item(channel=__channel__,
                     action="verpeli",
                     title=title,
                     fulltitle=title,
                     url=url,
                     thumbnail=thumbnail,
                     fanart=thumbnail))

    ## Pagination, e.g.:
    # <span class="current">2</span><a href="http://www.repelis.tv/page/3"
    # Best effort: omit the "next page" entry when the pattern is missing.
    try:
        next_page = scrapertools.get_match(
            data, '<span class="current">\d+</span><a href="([^"]+)"')
        title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append(
            Item(channel=__channel__,
                 title=title,
                 url=next_page,
                 action="menupelis",
                 thumbnail=item.thumbnail,
                 fanart=item.fanart,
                 folder=True))
    except Exception:  # narrowed from bare except
        pass
    return itemlist
示例#11
0
def menupelis(item):
    """Movie listing for the page section selected by ``item.extra``.

    item.extra selects the section header to scrape: '' → recently added,
    'year' → "del Año YYYY", 'adult' → erotic section, anything else →
    "de <extra>". Items are enriched with TMDB info when a year was
    scraped; a best-effort paging Item is appended.

    Fixes: narrowed the bare ``except:`` to ``except Exception``; removed
    dead commented-out code and a stray semicolon.
    """
    logger.info(item.url)

    itemlist = []

    data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')

    if item.extra == '':
        section = 'Recién Agregadas'
    elif item.extra == 'year':
        section = 'del Año \d{4}'
    elif item.extra == 'adult':
        section = 'de Eróticas \+18'
    else:
        section = 'de %s' % item.extra

    patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>' % section

    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)

    for bloque_enlaces in matchesenlaces:
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
        patron += '<img src="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)

        for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
            logger.info("He encontrado el segundo bloque")
            logger.info("extra_info: %s" % extra_info)
            title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
            title = title.replace("Online", "")
            url = urlparse.urljoin(item.url, scrapedurl)
            thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
            # Badges between the link and the poster: quality, year, language.
            quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
            year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
            language = scrapertools.find_multiple_matches(extra_info, 'class="(latino|espanol|subtitulado)"')
            new_item = Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
                            thumbnail=thumbnail, fanart=thumbnail, language=language, quality=quality,
                            infoLabels={'year': year})
            if year:
                # Enrich with TMDB metadata only when a year is available.
                tmdb.set_infoLabels_item(new_item)

            itemlist.append(new_item)

    # Best-effort pagination: omit the entry when the pattern is missing.
    try:
        next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
        title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append(
                Item(channel=item.channel, title=title, url=next_page, action="menupelis", thumbnail=item.thumbnail,
                     fanart=item.fanart, folder=True, extra=item.extra))
    except Exception:  # narrowed from bare except
        pass
    return itemlist
示例#12
0
def todaspelis(item):
    """Generic listing: scrape poster cards under the first <h1> section.

    Fixes: removed a leftover debug ``print data`` that dumped the whole
    page to stdout; narrowed the bare ``except:`` to ``except Exception``.
    """
    logger.info(item.url)

    itemlist = []

    data = scrapertools.cache_page(
        item.url).decode('iso-8859-1').encode('utf-8')
    patronenlaces = '<h1>.*?</h1>.*?>(.*?)</section>'
    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)

    for bloque_enlaces in matchesenlaces:
        # Each poster card: link (url + title) followed by its image.
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)".*?'
        patron += '<img src="(.*?)"'

        matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
        scrapertools.printMatches(matches)
        for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
            title = scrapertools.remove_show_from_title(
                scrapedtitle, "Ver Película")
            title = title.replace("Online", "")
            url = urlparse.urljoin(item.url, scrapedurl)
            thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     fulltitle=title,
                     url=url,
                     thumbnail=thumbnail,
                     fanart=thumbnail))

    ## Pagination, e.g.:
    # <span class="current">2</span><a href="http://www.repelis.tv/page/3"
    # Best effort: omit the "next page" entry when the pattern is missing.
    try:
        next_page = scrapertools.get_match(
            data, '<span class="current">\d+</span><a href="([^"]+)"')
        title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=next_page,
                 action="todaspelis",
                 folder=True))
    except Exception:  # narrowed from bare except
        pass
    return itemlist
示例#13
0
def search(item, texto):
    """Search repelis.tv for *texto* and return matching movie Items.

    Fix: removed a leftover debug ``print`` statement that wrote to stdout.
    """
    logger.info(item.url)
    texto = texto.replace(" ", "+")
    item.url = 'http://www.repelis.tv/buscar/?s=%s' % (texto)
    logger.info(item.url)

    data = scrapertools.cache_page(
        item.url).decode('iso-8859-1').encode('utf-8')

    logger.info("data: " + data)
    # Expected result markup:
    # <div class="col-xs-2">
    # <div class="row">
    # <a href="http://www.repelis.tv/8973/pelicula/contracted-phase-ii.html"  title="Ver Película Contracted: Phase II Online">
    # <img src="http://1.bp.blogspot.com/.../s175/contracted-phase-2.jpg" border="0">

    patron = '<div class="col-xs-2">.*?'
    patron += '<div class="row">.*?'
    patron += '<a href="(.*?)" title="(.*?)">.*?'
    patron += '<img src="(.*?)"'

    logger.info(patron)

    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapertools.printMatches(matches)
    itemlist = []

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        title = scrapertools.remove_show_from_title(scrapedtitle,
                                                    "Ver Película")
        title = title.replace("Online", "")
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        logger.info(url)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 fulltitle=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=thumbnail))

    return itemlist
示例#14
0
def poranyo(item):
    """Build the by-year menu from the page's <option> elements."""
    logger.info("[repelis] poranyo")
    logger.info("[repelis] " + item.url)

    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')

    # Every <option> is one selectable year.
    option_pairs = re.findall('<option value="([^"]+)">(.*?)</option>', data, re.DOTALL)
    scrapertools.printMatches(option_pairs)

    itemlist = []
    for opt_url, opt_title in option_pairs:
        clean_title = scrapertools.remove_show_from_title(opt_title, "Ver Película")
        clean_title = clean_title.replace("Online", "")
        full_url = urlparse.urljoin(item.url, opt_url)
        itemlist.append(Item(channel=item.channel, action="todaspelis", title=clean_title,
                             fulltitle=clean_title, url=full_url, fanart=item.fanart))

    return itemlist
示例#15
0
def menupelis(item):
    """List 'Películas Recién Agregadas' plus a best-effort 'next page' Item.

    Fixes: narrowed the bare ``except: pass`` to ``except Exception``;
    dropped a stray trailing semicolon.
    """
    logger.info("[repelis] menupelis")
    logger.info("[repelis] " + item.url)

    itemlist = []

    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')

    # Block between the section header and the closing </section>.
    patronenlaces = '<h3>Películas Recién Agregadas</h3>.*?>(.*?)</section>'
    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)

    logger.info("begin ----------")
    scrapertools.printMatches(matchesenlaces)
    logger.info("end ----------")

    for bloque_enlaces in matchesenlaces:
        # Each poster card: link (url + title) followed by its image.
        patron = '<div class="poster-media-card">.*?'
        patron += '<a href="(.*?)".*?title="(.*?)".*?'
        patron += '<img src="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
        scrapertools.printMatches(matches)

        for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
            logger.info("He encontrado el segundo bloque")
            title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
            title = title.replace("Online", "")
            url = urlparse.urljoin(item.url, scrapedurl)
            thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title,
                                 url=url, thumbnail=thumbnail, fanart=thumbnail))

    ## Pagination, e.g.:
    # <span class="current">2</span><a href="http://www.repelis.tv/page/3"
    # Best effort: omit the "next page" entry when the pattern is missing.
    try:
        next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
        title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
        itemlist.append(Item(channel=item.channel, title=title, url=next_page, action="menupelis",
                             thumbnail=item.thumbnail, fanart=item.fanart, folder=True))
    except Exception:  # narrowed from bare except
        pass
    return itemlist
示例#16
0
def search(item, texto):
    """Search repelis.tv for *texto* and return matching movie Items.

    Fixes: removed a leftover debug ``print`` statement that wrote to
    stdout; normalised the 3-space indentation to 4 spaces.
    """
    logger.info("[repelis] " + item.url)
    texto = texto.replace(" ", "+")
    item.url = 'http://www.repelis.tv/buscar/?s=%s' % (texto)
    logger.info("[repelis] " + item.url)

    data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8')

    logger.info("repelis data: " + data)

    # Expected result markup:
    # <div class="col-xs-2">
    # <div class="row">
    # <a href="http://www.repelis.tv/8973/pelicula/contracted-phase-ii.html"  title="Ver Película Contracted: Phase II Online">
    # <img src="http://1.bp.blogspot.com/.../s175/contracted-phase-2.jpg" border="0">

    patron = '<div class="col-xs-2">.*?'
    patron += '<div class="row">.*?'
    patron += '<a href="(.*?)" title="(.*?)">.*?'
    patron += '<img src="(.*?)"'

    logger.info("repelis:" + patron)

    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapertools.printMatches(matches)
    itemlist = []

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
        title = title.replace("Online", "")
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        logger.info("[repelis] " + url)
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title,
                             url=url, thumbnail=thumbnail, fanart=thumbnail))

    return itemlist
示例#17
0
def episodios(item):
    """Return the episode Items for a seriesdanko.com show page.

    item.url may be 'url' or 'url|fallback'; when item.extra is set it
    already holds the HTML fragment to parse. When no episode anchors are
    found, falls back to generic server detection and emits playable
    Items instead.
    """
    logger.info("[seriesdanko.py] episodios")

    # Force the icon view on XBMC platforms when "forceview" is enabled.
    if config.get_platform() == "xbmc" or config.get_platform(
    ) == "xbmcdharma":
        import xbmc
        if config.get_setting("forceview") == "true":
            xbmc.executebuiltin("Container.SetViewMode(53)")  #53=icons
            #xbmc.executebuiltin("Container.Content(Movies)")

    # A '|' in the url separates the primary url from a fallback url.
    if "|" in item.url:
        url = item.url.split("|")[0]
        sw = True
    else:
        url = item.url
        sw = False
    # Download the page (unless the caller already provided the contents).
    if item.extra:

        contenidos = item.extra
        #print contenidos
    else:
        data = scrapertools.downloadpageWithoutCookies(url)

        # Extract the entries.
        if sw:
            try:
                # NOTE(review): eval() on downloaded data is a code-injection
                # risk; presumably the response is a JSON-like Blogger feed —
                # confirm and prefer a real parser.
                datadict = eval("(" + data + ")")
                data = urllib.unquote_plus(
                    datadict["entry"]["content"]["$t"].replace("\\u00", "%"))
                matches = []
                matches.append(data)
            except:
                matches = []
        else:
            patronvideos = "entry-content(.*?)<div class='blog-pager' id='blog-pager'>"
            matches = re.compile(patronvideos, re.DOTALL).findall(data)

        if len(matches) > 0:
            # Normalise double quotes to single and drop newlines so the
            # later regexes match consistently.
            contenidos = matches[0].replace('"', "'").replace("\n", "")
        else:
            contenidos = item.url
            if sw:
                # Retry with the fallback url found after the '|'.
                url = item.url.split("|")[1]
                if not url.startswith("http://"):
                    url = urlparse.urljoin("http://seriesdanko.com", url)
                # Download the page.
                data = scrapertools.downloadpageGzip(url)
                patronvideos = "entry-content(.*?)<div class='post-footer'>"
                matches = re.compile(patronvideos, re.DOTALL).findall(data)
                if len(matches) > 0:
                    contenidos = matches[0]

    # Episode anchors followed by a language-flag <img>.
    patronvideos = "<a href='([^']+)'>([^<]+)</a> <img(.+?)/>"
    matches = re.compile(patronvideos,
                         re.DOTALL).findall(contenidos.replace('"', "'"))
    #print contenidos
    try:
        # The plot lives in an "Informac..." block; the second pattern is a
        # fallback; tags are stripped afterwards.
        plot = re.compile(r'(Informac.*?/>)</div>').findall(contenidos)[0]
        if len(plot) == 0:
            plot = re.compile(r"(Informac.*?both;'>)</div>").findall(
                contenidos)[0]
        plot = re.sub('<[^>]+>', " ", plot)
    except:
        plot = ""

    itemlist = []
    for match in matches:
        scrapedtitle = match[1].replace("\n", "").replace("\r", "")
        scrapedtitle = scrapertools.remove_show_from_title(
            scrapedtitle, item.show)

        #[1x01 - Capitulo 01]
        #patron = "(\d+x\d+) - Capitulo \d+"
        #matches = re.compile(patron,re.DOTALL).findall(scrapedtitle)
        #print matches
        #if len(matches)>0 and len(matches[0])>0:
        #    scrapedtitle = matches[0]

        # Map the flag image inside the <img> tag to an audio/subtitle label.
        if "es.png" in match[2]:
            subtitle = " (Español)"
        elif "la.png" in match[2]:
            subtitle = " (Latino)"
        elif "vo.png" in match[2]:
            subtitle = " (Version Original)"
        elif "vos.png" in match[2]:
            subtitle = " (Subtitulado)"
        elif "ca.png" in match[2]:
            subtitle = " (Catalan)"
        elif "ga.jpg" in match[2]:
            subtitle = " (Gallego)"
        elif "eu.jpg" in match[2]:
            subtitle = " (Euskera)"
        elif "ba.png" in match[2]:
            subtitle = " (Bable)"
        else:
            subtitle = ""
        scrapedplot = plot
        scrapedurl = urlparse.urljoin(item.url,
                                      match[0]).replace("\n",
                                                        "").replace("\r", "")
        if not item.thumbnail:
            try:
                # First src='...' in the block is used as the thumbnail.
                scrapedthumbnail = re.compile(r"src=([^']+)'").findall(
                    contenidos)[0]
            except:
                scrapedthumbnail = ""
        else:
            scrapedthumbnail = item.thumbnail
        scrapedthumbnail = scrapedthumbnail.replace("\n", "").replace("\r", "")
        if item.fulltitle == '':
            item.fulltitle = scrapedtitle + subtitle
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        # Add to the XBMC listing.
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle + subtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 context="4",
                 folder=True))

    #xbmc.executebuiltin("Container.Content(Movies)")

    # No episode anchors found: fall back to generic video-server detection.
    if len(itemlist) == 0:
        listvideos = servertools.findvideos(contenidos)

        for title, url, server in listvideos:

            if server == "youtube":
                scrapedthumbnail = "http://i.ytimg.com/vi/" + url + "/0.jpg"
            else:
                scrapedthumbnail = item.thumbnail
            scrapedtitle = title
            scrapedplot = ""
            scrapedurl = url

            if (DEBUG):
                logger.info("title=[" + scrapedtitle + "], url=[" +
                            scrapedurl + "], thumbnail=[" + scrapedthumbnail +
                            "]")

            # Add to the XBMC listing.
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     server=server,
                     title=item.title + " " + scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     fulltitle=scrapedtitle,
                     folder=False))

    return itemlist
def episodios(item):
    """Return the episode Items for a seriesdanko.com show page.

    NOTE(review): this is a second, duplicate definition of episodios()
    in this file and shadows the one defined directly above it.
    """
    logger.info("[seriesdanko.py] episodios")

    # Force the icon view on XBMC platforms when "forceview" is enabled.
    if config.get_platform()=="xbmc" or config.get_platform()=="xbmcdharma":
        import xbmc
        if config.get_setting("forceview")=="true":
            xbmc.executebuiltin("Container.SetViewMode(53)")  #53=icons
            #xbmc.executebuiltin("Container.Content(Movies)")

    # A '|' in the url separates the primary url from a fallback url.
    if "|" in item.url:
        url = item.url.split("|")[0]
        sw = True
    else:
        url = item.url
        sw = False
    # Download the page (unless the caller already provided the contents).
    if item.extra:

        contenidos = item.extra
        #print contenidos
    else:
        data = scrapertools.downloadpageWithoutCookies(url)

    # Extract the entries.
        if sw:
            try:
                # NOTE(review): eval() on downloaded data is a code-injection
                # risk; presumably a JSON-like Blogger feed — confirm.
                datadict = eval( "(" + data + ")" )
                data = urllib.unquote_plus(datadict["entry"]["content"]["$t"].replace("\\u00","%"))
                matches=[]
                matches.append(data)
            except:
                matches = []
        else:
            patronvideos = "entry-content(.*?)<div class='blog-pager' id='blog-pager'>"
            matches = re.compile(patronvideos,re.DOTALL).findall(data)

        if len(matches)>0:
            # Normalise quotes and strip newlines for the later regexes.
            contenidos = matches[0].replace('"',"'").replace("\n","")
        else:
            contenidos = item.url
            if sw:
                # Retry with the fallback url found after the '|'.
                url = item.url.split("|")[1]
                if not url.startswith("http://"):
                    url = urlparse.urljoin("http://seriesdanko.com",url)
                # Download the page.
                data = scrapertools.downloadpageGzip(url)
                patronvideos  = "entry-content(.*?)<div class='post-footer'>"
                matches = re.compile(patronvideos,re.DOTALL).findall(data)
                if len(matches)>0:
                    contenidos = matches[0]

    # Episode anchors followed by a language-flag <img>.
    patronvideos  = "<a href='([^']+)'>([^<]+)</a> <img(.+?)/>"
    matches = re.compile(patronvideos,re.DOTALL).findall(contenidos.replace('"',"'"))
    #print contenidos
    try:
        # The plot lives in an "Informac..." block; second pattern is a
        # fallback; tags are stripped afterwards.
        plot = re.compile(r'(Informac.*?/>)</div>').findall(contenidos)[0]
        if len(plot)==0:
            plot = re.compile(r"(Informac.*?both;'>)</div>").findall(contenidos)[0]
        plot = re.sub('<[^>]+>'," ",plot)
    except:
        plot = ""

    itemlist = []
    for match in matches:
        scrapedtitle = match[1].replace("\n","").replace("\r","")
        scrapedtitle = scrapertools.remove_show_from_title(scrapedtitle,item.show)

        #[1x01 - Capitulo 01]
        #patron = "(\d+x\d+) - Capitulo \d+"
        #matches = re.compile(patron,re.DOTALL).findall(scrapedtitle)
        #print matches
        #if len(matches)>0 and len(matches[0])>0:
        #    scrapedtitle = matches[0]

        # Map the flag image inside the <img> tag to an audio/subtitle label.
        if "es.png" in match[2]:
            subtitle = " (Español)"
        elif "la.png" in match[2]:
            subtitle = " (Latino)"
        elif "vo.png" in match[2]:
            subtitle = " (Version Original)"
        elif "vos.png" in match[2]:
            subtitle = " (Subtitulado)"
        elif "ca.png"  in match[2]:
            subtitle = " (Catalan)"
        elif "ga.jpg"  in match[2]:
            subtitle = " (Gallego)"
        elif "eu.jpg"  in match[2]:
            subtitle = " (Euskera)"
        elif "ba.png"  in match[2]:
            subtitle = " (Bable)"
        else:
            subtitle = ""
        scrapedplot = plot
        scrapedurl = urlparse.urljoin(item.url,match[0]).replace("\n","").replace("\r","")
        if not item.thumbnail:
            try:
                # First src='...' in the block is used as the thumbnail.
                scrapedthumbnail = re.compile(r"src=([^']+)'").findall(contenidos)[0]
            except:
                    scrapedthumbnail = ""
        else:
            scrapedthumbnail = item.thumbnail
        scrapedthumbnail = scrapedthumbnail.replace("\n","").replace("\r","")
        if item.fulltitle == '':
            item.fulltitle = scrapedtitle + subtitle
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Add to the XBMC listing.
        itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle+subtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , fulltitle = item.fulltitle, show = item.show , context="4", folder=True) )

    #xbmc.executebuiltin("Container.Content(Movies)")

    # No episode anchors found: fall back to generic video-server detection.
    if len(itemlist)==0:
        listvideos = servertools.findvideos(contenidos)

        for title,url,server in listvideos:

            if server == "youtube":
                scrapedthumbnail = "http://i.ytimg.com/vi/" + url + "/0.jpg"
            else:
                scrapedthumbnail = item.thumbnail
            scrapedtitle = title
            scrapedplot = ""
            scrapedurl = url

            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

            # Add to the XBMC listing.
            itemlist.append( Item(channel=__channel__, action="play", server=server, title=item.title +" "+ scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot, fulltitle = scrapedtitle , folder=False) )

    return itemlist