# Imports this snippet relies on; the package paths follow the usual
# pelisalacarta/alfa channel layout and are an assumption of this rewrite.
import re
import threading

from core import filetools, httptools, scrapertools
from core.item import Item
from platformcode import config, logger

# adult_content, color2 and color4 are module-level channel settings, and
# download_thumb() is a helper, all defined elsewhere in the channel file.


def listado(item):
    # Build the directory listing for a copiapop list/search page.
    logger.info()
    itemlist = []

    # The "Gallery" view of the same page exposes the thumbnail URLs, so fetch
    # it first and keep it for matching thumbs against the list entries below.
    data_thumb = httptools.downloadpage(
        item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
    if not item.post:
        # GET-style listings carry no POST data; switch the URL to the list view.
        data_thumb = ""
        item.url = item.url.replace("/gallery,", "/list,")

    data = httptools.downloadpage(item.url, item.post).data
    # Flatten whitespace and markup noise so the regexes below work on one line.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    # Thumbnails are cached locally under the addon's data folder.
    folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
    patron = '<div class="size">(.*?)</div></div></div>'
    bloques = scrapertools.find_multiple_matches(data, patron)
    for block in bloques:
        # Skip adult entries unless the adult_content setting is enabled.
        if "adult_info" in block and not adult_content:
            continue
        size = scrapertools.find_single_match(block, '<p>([^<]+)</p>')
        scrapedurl, scrapedtitle = scrapertools.find_single_match(
            block, '<div class="name"><a href="([^"]+)".*?>([^<]+)<')
        scrapedthumbnail = scrapertools.find_single_match(
            block, r"background-image:url\('([^']+)'")
        if scrapedthumbnail:
            try:
                # Escape the "?" so the URL prefix can be used inside a regex.
                thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\\?")
                if data_thumb:
                    # Look up the full-size thumbnail URL in the gallery view.
                    url_thumb = scrapertools.find_single_match(
                        data_thumb, "(%s[^']+)'" % thumb)
                else:
                    url_thumb = scrapedthumbnail
                # Local cache path named after the tail of the thumbnail URL.
                scrapedthumbnail = filetools.join(
                    folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
            except Exception:
                scrapedthumbnail = ""

        if scrapedthumbnail:
            # Fetch the thumbnail in the background so listing stays responsive.
            t = threading.Thread(target=download_thumb,
                                 args=[scrapedthumbnail, url_thumb])
            t.daemon = True
            t.start()
        else:
            # Fall back to the site's generic movie icon.
            scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"

        scrapedurl = item.extra + scrapedurl
        title = "%s (%s)" % (scrapedtitle, size)
        if "adult_info" in block:
            title += " [COLOR %s][+18][/COLOR]" % color4
        plot = scrapertools.find_single_match(block,
                                              '<div class="desc">(.*?)</div>')
        if plot:
            plot = scrapertools.decodeHtmlentities(plot)

        new_item = Item(channel=item.channel,
                        action="findvideos",
                        title=title,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        contentTitle=scrapedtitle,
                        text_color=color2,
                        extra=item.extra,
                        infoLabels={'plot': plot},
                        post=item.post)
        if item.post:
            # POST-based listings carry the parent folder inside each block.
            try:
                new_item.folderurl, new_item.foldername = scrapertools.find_single_match(
                    block, '<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
            except Exception:
                pass
        else:
            # Folder listings inherit the folder info from the parent item.
            new_item.folderurl = item.url.rsplit("/", 1)[0]
            new_item.foldername = item.foldername
            new_item.fanart = item.thumbnail

        itemlist.append(new_item)

    # Pagination: the page embeds the next page number; build the follow-up request.
    next_page = scrapertools.find_single_match(
        data, '<div class="pageSplitterBorder" data-nextpage-number="([^"]+)"')
    if next_page:
        if item.post:
            # POST listings page via the pageNumber field in the POST body.
            post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page,
                          item.post)
            url = item.url
        else:
            # GET listings page via the number embedded in the URL.
            url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page,
                         item.url)
            post = ""
        itemlist.append(
            Item(channel=item.channel,
                 action="listado",
                 title=">> Página Siguiente (%s)" % next_page,
                 url=url,
                 post=post,
                 extra=item.extra))

    return itemlist
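
The listing above hands each thumbnail off to a download_thumb() helper that is defined elsewhere in the channel module and not shown here. As a rough illustration only, a minimal sketch of such a helper could cache the remote image at the local path built in listado(); the body below is an assumption, not the channel's actual implementation:

def download_thumb(path, url):
    # Hypothetical sketch: download the remote thumbnail once and cache it at
    # 'path' so later listings can reuse the local file (assumption).
    try:
        if not filetools.exists(path):
            data = httptools.downloadpage(url).data
            filetools.write(path, data)
    except Exception:
        pass
    return path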