예제 #1
0
def genres(item):
    """Build the genre menu: scrape anchors inside the 'Generi' <ul> block,
    excluding blacklisted service links, and decorate items with thumbnails."""
    scraped_items = support.scrape(
        item,
        '<a href="([^"]+)">([^<]+)<',
        ['url', 'title'],
        action='peliculas',
        patron_block=r'Generi.*?<ul.*?>(.*?)<\/ul>',
        blacklist=['Contattaci', 'Privacy Policy', 'DMCA'])
    return support.thumb(scraped_items)
예제 #2
0
def menu(item):
    """Render the submenu whose <ul class="listSubCat"> id matches item.args."""
    support.log()
    # Restrict scraping to the <ul> whose id equals the requested menu key.
    block_patron = ('<ul class="listSubCat" id="' + str(item.args) +
                    '">(.*?)</ul>')
    entries = support.scrape(item,
                             '<li><a href="(.*?)">(.*?)</a></li>',
                             ['url', 'title'],
                             headers,
                             patron_block=block_patron,
                             action='peliculas')
    return support.thumb(entries)
예제 #3
0
def categories(item):
    """Scrape the site's category list (<ul class="kategori_list">) and
    return thumbnail-decorated menu items."""
    support.log(item)
    category_items = support.scrape(
        item,
        '<li><a href="([^"]+)">(.*?)</a></li>',
        ['url', 'title'],
        headers,
        'Altadefinizione01',
        patron_block='<ul class="kategori_list">(.*?)</ul>',
        action='peliculas',
        url_host=host)
    return support.thumb(category_items)
예제 #4
0
def peliculas_tv(item):
    """List TV-show entries scraped from the page at item.url.

    Downloads the page, extracts (url, title) pairs from the post-meta
    anchors, filters out promotional entries, enriches the items via TMDB
    and appends a "next page" item when pagination is detected.

    Returns:
        list[Item]: channel items with action 'findvideos', plus an
        optional trailing pagination item (action 'peliculas_tv').
    """
    logger.info("icarus serietvsubita peliculas_tv")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    logger.debug(data)
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, slated to become a SyntaxError).
    patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        # Skip social/promotional posts that are not real episodes.
        if "FACEBOOK" in scrapedtitle or "RAPIDGATOR" in scrapedtitle:
            continue
        if scrapedtitle == "WELCOME!":
            continue
        scrapedthumbnail = ""
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        # Strip the season marker (" S0x", " S1x", " S2x") to obtain the
        # bare show name used for TMDB lookup.
        title = scrapedtitle.split(" S0")[0].strip()
        title = title.split(" S1")[0].strip()
        title = title.split(" S2")[0].strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 contentSerieName=title,
                 plot=scrapedplot,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazione
    patron = r'<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        if item.extra == "search_tv":
            # Search URLs come back HTML-entity encoded.
            next_page = next_page.replace('&#038;', '&')
        itemlist.append(
            Item(channel=item.channel,
                 action='peliculas_tv',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992),
                                    'color kod bold'),
                 url=next_page,
                 args=item.args,
                 extra=item.extra,
                 thumbnail=support.thumb()))

    return itemlist
예제 #5
0
def lista_serie(item):
    """Paged list of series scraped from the category page at item.url.

    The current page number is smuggled inside item.url after a '{}'
    separator; PERPAGE entries are shown per page and a "next page" item
    is appended while more matches remain.

    Returns:
        list[Item]: up to PERPAGE series items (action 'episodes') plus an
        optional trailing pagination item (action 'lista_serie').
    """
    support.log(item.channel + " lista_serie")
    itemlist = []

    PERPAGE = 15

    p = 1
    if '{}' in item.url:
        # URL carries "realurl{}page" — split it back apart.
        item.url, p = item.url.split('{}')
        p = int(p)

    # Download the page.
    data = httptools.downloadpage(item.url).data

    # Extract the entries.
    # Raw string: '\d' in a plain literal is an invalid escape sequence.
    patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        scrapedplot = ""
        scrapedthumbnail = ""
        # Keep only the slice of matches belonging to page p.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = cleantitle(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodes",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazione
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 action='lista_serie',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992),
                                    'color kod bold'),
                 url=scrapedurl,
                 args=item.args,
                 thumbnail=support.thumb()))

    return itemlist
예제 #6
0
def episodes(item):
    """List the episodes found on the show page at item.url.

    Extracts (url, title, thumbnail) triples from the post-meta anchors,
    enriches items via TMDB and appends a "next page" item when
    pagination is detected.

    Returns:
        list[Item]: episode items (action 'findvideos') plus an optional
        trailing pagination item (action 'episodes').
    """
    support.log(item.channel + " episodes")
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # Raw strings: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, slated to become a SyntaxError).
    patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
    patron += r'<p><a href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        # Strip the season marker (" S0x", " S1x", " S2x") to obtain the
        # bare show name used for TMDB lookup.
        title = scrapedtitle.split(" S0")[0].strip()
        title = title.split(" S1")[0].strip()
        title = title.split(" S2")[0].strip()

        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 contentSerieName=title,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazionazione
    patron = r'<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action='episodes',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992),
                                    'color kod bold'),
                 url=next_page,
                 args=item.args,
                 thumbnail=support.thumb()))

    # support.videolibrary(itemlist,item,'bold color kod')

    return itemlist
예제 #7
0
파일: cineblog01.py 프로젝트: hypno99/addon
def menu(item):
    """Build a submenu from the <ul> that follows the heading named in
    item.args, returning thumbnail-decorated items."""
    findhost()
    page = httptools.downloadpage(item.url, headers=headers).data
    page = re.sub('\n|\t', '', page)
    # Isolate the <ul> block immediately after the requested heading.
    block = scrapertoolsV2.find_single_match(
        page, item.args + r'<span.*?><\/span>.*?<ul.*?>(.*?)<\/ul>')
    support.log('MENU BLOCK= ', block)
    anchors = re.compile(r'href="?([^">]+)"?>(.*?)<\/a>',
                         re.DOTALL).findall(block)
    entries = [
        Item(channel=item.channel,
             title=label,
             contentType=item.contentType,
             action='peliculas',
             url=host + link)
        for link, label in anchors
    ]

    return support.thumb(entries)
예제 #8
0
def episodios(item):
    """List all episodes of a series, grouped by season.

    First collects every season id from the <option> selector, then for
    each season extracts the episode anchors from the matching
    data-id block. Optionally appends an "add to videolibrary" item.

    Returns:
        list[Item]: episode items titled "SxNN" (action 'findvideos'),
        plus the videolibrary item when library support is enabled.
    """
    support.log(item.channel + " episodios")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<option value="(\d+)"[\sselected]*>.*?</option>'
    seasons = re.compile(patron, re.DOTALL).findall(data)

    for value in seasons:
        patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % value
        blocco = scrapertools.find_single_match(data, patron)

        patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">)[^>]+>[^>]+>([^<]+)<'
        # Use a distinct name for the per-season matches to avoid
        # shadowing the season list iterated above.
        episodes = re.compile(patron, re.DOTALL).findall(blocco)
        for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in episodes:
            number = scrapertools.decodeHtmlentities(
                scrapedtitle.replace("Episodio", "")).strip()
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=value + "x" + number.zfill(2),
                     fulltitle=scrapedtitle,
                     contentType="episode",
                     url=scrapedurl,
                     thumbnail=scrapedimg,
                     extra=scrapedextra,
                     folder=True))

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 # Fix: the style flags must be the second argument of
                 # support.typo (as in every other block), not text
                 # concatenated into the visible title.
                 title=support.typo(config.get_localized_string(30161),
                                    'bold color kod'),
                 thumbnail=support.thumb(),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 contentSerieName=item.fulltitle,
                 show=item.show))

    return itemlist