Example #1
def episodios(item):
    log()
    itemlist = []

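    # the series page embeds an external player iframe; grab its URL and strip the "?seriehd" suffix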
    data = httptools.downloadpage(item.url).data
    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    url = scrapertoolsV2.find_single_match(data,
                                           patron).replace("?seriehd", "")
    seasons = support.match(item, r'<li[^>]+><a href="([^"]+)">(\d+)<',
                            r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers,
                            url)[0]

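    # walk each season page and build one Item per episode (title like 1x01)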
    for season_url, season in seasons:
        season_url = urlparse.urljoin(url, season_url)
        episodes = support.match(item, r'<li[^>]+><a href="([^"]+)">(\d+)<',
                                 '<h3>EPISODIO</h3><ul>(.*?)</ul>', headers,
                                 season_url)[0]
        for episode_url, episode in episodes:
            episode_url = urlparse.urljoin(url, episode_url)
            title = season + "x" + episode.zfill(2)

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="episode",
                     title=support.typo(title + ' - ' + item.show, 'bold'),
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    support.videolibrary(itemlist, item, 'color kod bold')

    return itemlist
Example #2
def episodios(item):
    logger.info("[filmigratis.py] episodios")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

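    # isolate the episode listing block, then pull each url/title pair out of it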
    block = scrapertools.find_single_match(
        data, r'<div class="row">(.*?)<section class="main-content">')

    # the scraped title is followed by a run of whitespace before the closing tag
    patron = r'href="(.*?)".*?(S[^<]+?)\s+<'
    matches = re.compile(patron, re.DOTALL).findall(block)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("S0", "")
        scrapedtitle = scrapedtitle.replace(" - EP ", "x")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType='episode',
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumb,
                 args=item.args,
                 show=item.title))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
Example #3
def findvideos(item):
    support.log()

    itemlist = support.hdpass_get_servers(item)

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    itemlist = filtertools.get_links(itemlist, item, list_language)

    autoplay.start(itemlist, item)
    support.videolibrary(itemlist, item, 'color blue bold')

    return itemlist
Example #4
def findvideos(item):
    logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
    itemlist = []
    # download the page
    # data = scrapertools.cache_page(item.url)  # no longer works?
    data = httptools.downloadpage(item.url, headers=headers).data
    # make the appropriate changes starting from here
    # the page lists the available players in "data-target" anchors
    patron = '<li.*?<a href="#" data-target="(.*?)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    # logger.info("altadefinizione01_linkMATCHES: %s " % matches)

    if matches:
        try:
            # let servertools detect every playable link in the page
            itemlist = servertools.find_video_items(data=data)

            for videoitem in itemlist:
                logger.info("Videoitemlist2: %s" % videoitem)
                videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)
                videoitem.show = item.show
                videoitem.contentTitle = item.contentTitle
                videoitem.contentType = item.contentType
                videoitem.channel = item.channel
                videoitem.year = item.infoLabels['year']
                videoitem.infoLabels['plot'] = item.infoLabels['plot']
        except AttributeError:
            logger.error("data doesn't contain expected URL")

    # Check whether the links are valid
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    # Add to the video library
    if (item.extra != 'findvideos' and item.extra != "library"
            and config.get_videolibrary_support() and len(itemlist) != 0):
        support.videolibrary(itemlist, item)

    return itemlist
Example #5
def episodios(item):
    item.contentType = 'episode'
    itemlist = []

    data = httptools.downloadpage(item.url).data
    matches = scrapertoolsV2.find_multiple_matches(
        data,
        r'(<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>.*?)<div class="spdiv">\[riduci\]</div>'
    )

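    # each expandable spoiler block covers one season; the episode lines sit inside its <p> paragraphs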
    for match in matches:
        support.log(match)
        blocks = scrapertoolsV2.find_multiple_matches(
            match, '(?:<p>)(.*?)(?:</p>|<br)')
        season = scrapertoolsV2.find_single_match(
            match, r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)').strip()

        for block in blocks:
            episode = scrapertoolsV2.find_single_match(
                block, r'([0-9]+(?:&#215;|×)[0-9]+)').strip()
            seasons_n = scrapertoolsV2.find_single_match(
                block, r'<strong>STAGIONE\s+\d+([^<>]+)').strip()

            if seasons_n:
                season = seasons_n

            if not episode: continue

            season = re.sub(r'&#8211;|–', "-", season)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title="[B]" + episode + "[/B] " + season,
                     fulltitle=episode + " " + season,
                     show=episode + " " + season,
                     url=block,
                     extra=item.extra,
                     thumbnail=item.thumbnail,
                     infoLabels=item.infoLabels))

    support.videolibrary(itemlist, item)

    return itemlist
Example #6
def findvideos(item):
    support.log()

    itemlist = support.server(item, headers=headers)

    # Required to filter links
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    support.videolibrary(itemlist, item, 'color kod')

    return itemlist
Example #7
def findvideos(item):
    logger.info('[filmigratis.py] findvideos')

    data = httptools.downloadpage(item.url, headers=headers).data

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title + '[COLOR green][B] - ' + videoitem.title + '[/B][/COLOR]'
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    if item.args == "film":
        support.videolibrary(itemlist, item, 'color kod')

    autoplay.start(itemlist, item)

    return itemlist
Example #8
def findvideos(item):

    if item.quality.lower() in ["ended", "canceled", "returning series"]:
        return episodios(item)

    itemlist = []
    data = scrapertoolsV2.decodeHtmlentities(
        httptools.downloadpage(item.url).data)
    btns = re.compile(
        r'data-tplayernv="Opt.*?><span>([^<]+)</span><span>([^<]+)</span>',
        re.DOTALL).findall(data)
    matches = re.compile(r'<iframe.*?src="([^"]+trembed=[^"]+)',
                         re.DOTALL).findall(data)
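    # the i-th trembed iframe is paired positionally with the i-th server/quality button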
    for i, scrapedurl in enumerate(matches):

        scrapedurl = scrapertoolsV2.decodeHtmlentities(scrapedurl)
        patron = r'<iframe.*?src="([^"]+)"'
        link_data = httptools.downloadpage(scrapedurl).data
        url = scrapertoolsV2.find_single_match(link_data, patron)

        itemlist.append(
            Item(
                channel=item.channel,
                action="play",
                contentType=item.contentType,
                title="[B]" + btns[i][0] + "[/B] - " + btns[i][1],
                fulltitle=btns[i][0] + " " + btns[i][1],
                show=btns[i][0] + " " + btns[i][1],
                url=url,
                extra=item.extra,
                infoLabels=item.infoLabels,
                server=btns[i][0],
                contentQuality=btns[i][1].replace('Italiano - ', ''),
            ))

    if item.contentType == "movie":
        support.videolibrary(itemlist, item)
    autoplay.start(itemlist, item)

    return itemlist
Example #9
def episodios(item):
    logger.info("[toonitalia.py] episodios")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<br /> <a href="([^"]+)"\s*target="_blank"\s*rel[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        if 'Wikipedia' not in scrapedurl:
            scrapedtitle = scrapertools.decodeHtmlentities(
                scrapedtitle).replace("×", "x")
            scrapedtitle = scrapedtitle.replace("_", " ")
            scrapedtitle = scrapedtitle.replace(".mp4", "")
            puntata = scrapertools.find_single_match(scrapedtitle,
                                                     '[0-9]+x[0-9]+')
            # if this episode was already added, just append the mirror URL to the existing item
            for i in itemlist:
                if i.args == puntata:  # already added
                    i.url += " " + scrapedurl
                    break
            else:
                itemlist.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         contentType=item.contentType,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         thumbnail=item.thumbnail,
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         args=puntata,
                         show=item.show,
                         plot=item.plot))

    support.videolibrary(itemlist, item, 'color kod')

    return itemlist
Example #10
def episodios(item):
    logger.info(item.channel + ' episodios')
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(data, r'<table>(.*?)</table>')

    matches = re.compile(r'<tr><td>(.*?)</td><tr>', re.DOTALL).findall(block)

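    # each table row holds one episode; the raw row HTML is kept as url so findvideos can extract its links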
    for episode in matches:
        episode = "<td class=\"title\">" + episode
        logger.info('EPISODE= ' + episode)
        title = scrapertoolsV2.find_single_match(
            episode, '<td class="title">(.*?)</td>')
        title = title.replace(item.title, "")
        if scrapertoolsV2.find_single_match(title, '([0-9]+x[0-9]+)'):
            title = scrapertoolsV2.find_single_match(
                title, '([0-9]+x[0-9]+)') + ' - ' + re.sub(
                    '([0-9]+x[0-9]+)', ' -', title)
        elif scrapertoolsV2.find_single_match(
                title,
                ' ([0-9][0-9])') and not scrapertoolsV2.find_single_match(
                    title, ' ([0-9][0-9][0-9])'):
            title = '1x' + scrapertoolsV2.find_single_match(
                title, ' ([0-9]+)') + ' - ' + re.sub(' ([0-9]+)', ' -', title)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=title,
                 contentType="episode",
                 show=title,
                 title=title,
                 url=episode))

    support.videolibrary(itemlist, item, 'bold color blue')

    return itemlist
Example #11
def findvideos(item):
    findhost()

    if item.contentType == "episode":
        return findvid_serie(item)

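    # helper: scrape the link table that follows the given section header and append one playable Item per link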
    def load_links(itemlist, re_txt, color, desc_txt, quality=""):
        streaming = scrapertoolsV2.find_single_match(data,
                                                     re_txt).replace('"', '')
        support.log('STREAMING=', streaming)
        patron = '<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<'
        matches = re.compile(patron, re.DOTALL).findall(streaming)
        for scrapedurl, scrapedtitle in matches:
            logger.debug("##### findvideos %s ## %s ## %s ##" %
                         (desc_txt, scrapedurl, scrapedtitle))
            title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.fulltitle + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     server=scrapedtitle,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     quality=quality,
                     contentType=item.contentType,
                     folder=False))

    support.log()

    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data
    data = re.sub('\n|\t', '', data)

    # Extract the quality format
    patronvideos = '>([^<]+)</strong></div>'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    QualityStr = ""
    for match in matches:
        QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:]

    # Extract the content - Streaming
    load_links(
        itemlist,
        '<strong>Streaming:</strong>(.*?)<tableclass=cbtable height=30>',
        "orange", "Streaming", "SD")

    # Extract the content - Streaming HD
    load_links(
        itemlist,
        '<strong>Streaming HD[^<]+</strong>(.*?)<tableclass=cbtable height=30>',
        "yellow", "Streaming HD", "HD")

    # Extract the content - Streaming 3D
    load_links(
        itemlist,
        '<strong>Streaming 3D[^<]+</strong>(.*?)<tableclass=cbtable height=30>',
        "pink", "Streaming 3D")

    # Extract the content - Download
    # load_links(itemlist, '<strong>Download:</strong>(.*?)<tableclass=cbtable height=30>', "aqua", "Download")

    # Extract the content - Download HD
    # load_links(itemlist, '<strong>Download HD[^<]+</strong>(.*?)<tableclass=cbtable width=100% height=20>', "azure", "Download HD")

    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    # Required to filter links

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    support.videolibrary(itemlist, item)

    return itemlist