Example #1
def findvideos(item):
    logger.debug()
    data = item.data if item.data else support.match(item.url,
                                                     headers=headers).data
    itemlist = []

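    # Series pages under /serietv/series/names are resolved through hdpass() before extracting servers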
    if '/serietv/series/names' in item.url:
        itemlist.extend(
            support.server(item, itemlist=hdpass(Item(url=item.url))))
    else:
        urls = support.match(data, patron=player_iframe).matches
        if item.otherLinks:
            urls += support.match(item.otherLinks,
                                  patron=r'href="([^"]+)').matches

        logger.debug('URLS', urls)
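        # If one of the links is an hdplayer.casa series page, resolve it through hdpass() and skip the rest;
        # otherwise hand every collected link to support.server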
        for u in urls:
            if 'hdplayer.casa/series/' in u:
                urls.remove(u)
                itemlist.extend(
                    support.server(item, itemlist=hdpass(Item(url=u))))
                break
        else:
            itemlist.extend(support.server(item, urls))
        support.addQualityTag(item, itemlist, data,
                              r'Keywords:\s*(?:<span>)?([^<]+)')
    return itemlist
Example #2
def findvideos(item):
    support.info()
    support.info("ITEMLIST: ", item)
    data = support.match(item.url, headers=headers).data
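    # The "category-film" box reveals the language tag and whether the page is an anime, a series or a movie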
    check = support.match(data, patron=r'<div class="category-film">(.*?)</div>').match
    if 'sub' in check.lower():
        item.contentLanguage = 'Sub-ITA'
    support.info("CHECK : ", check)
    if 'anime' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        support.info('select = ### this is an anime ###')
        try:
            return episodios(item)
        except Exception:
            pass
    elif 'serie' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        return episodios(item)
    else:
        item.contentTitle = item.fulltitle
        item.contentType = 'movie'

    # if 'protectlink' in data:
    #     urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
    #     support.info("SONO QUI: ", urls)
    #     for url in urls:
    #         url = url.decode('base64')
    #         # strip the last character because it doesn't belong
    #         url, c = unshorten_only(url)
    #         if 'nodmca' in url:
    #             page = httptools.downloadpage(url, headers=headers).data
    #             url = '\t' + scrapertools.find_single_match(page, '<meta name="og:url" content="([^=]+)">')
    #         if url:
    #             listurl.add(url)
    # data += '\n'.join(listurl)
    support.info(data)
    itemlist = []
    # support.dbg()

    if '/serietv/series/names' in item.url:
        itemlist.extend(support.server(item, itemlist=hdpass(Item(url=item.url))))
    else:
        urls = support.match(data, patron=player_iframe).matches
        # support.dbg()
        if item.otherLinks:
            urls += support.match(item.otherLinks, patron=r'href="([^"]+)').matches

        support.info('URLS', urls)
        for u in urls:
            if 'hdplayer.casa/series/' in u:
                urls.remove(u)
                itemlist.extend(support.server(item, itemlist=hdpass(Item(url=u))))
                break
        else:
            itemlist.extend(support.server(item, urls))
        support.addQualityTag(item, itemlist, data, r'Keywords:\s*(?:<span>)?([^<]+)')
    return itemlist
Example #3
def findvideos(item):
    if item.contentType == "episode":
        return findvid_serie(item)

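    # Helper: extract the link table that follows the section header matched by re_txt
    # and append a playable Item for each row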
    def load_links(itemlist, re_txt, color, desc_txt, quality=""):
        streaming = scrapertools.find_single_match(data,
                                                   re_txt).replace('"', '')
        support.log('STREAMING', streaming)
        support.log('STREAMING=', streaming)
        matches = support.match(
            streaming, patron=r'<td><a.*?href=([^ ]+) [^>]+>([^<]+)<').matches
        for scrapedurl, scrapedtitle in matches:
            logger.debug("##### findvideos %s ## %s ## %s ##" %
                         (desc_txt, scrapedurl, scrapedtitle))
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=scrapedtitle,
                     url=scrapedurl,
                     server=scrapedtitle,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     quality=quality,
                     contentType=item.contentType,
                     folder=False))

    support.log()

    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data
    data = re.sub('\n|\t', '', data)

    # Extract the content - Streaming
    load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "orange",
               "Streaming", "SD")

    # Extract the content - Streaming HD
    load_links(itemlist, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable',
               "yellow", "Streaming HD", "HD")

    # Extract the content - Streaming 3D
    load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable',
               "pink", "Streaming 3D")

    itemlist = support.server(item, itemlist=itemlist)
    # Extract the quality format
    patronvideos = '>([^<]+)</strong></div>'
    support.addQualityTag(item, itemlist, data, patronvideos)

    return itemlist
Example #4
def findvideos(item):
    if item.serieFolder:
        return support.server(item, data=item.url)
    if item.contentType == "episode":
        return findvid_serie(item)

    def load_links(itemlist, re_txt, desc_txt, quality=""):
        streaming = scrapertools.find_single_match(data,
                                                   re_txt).replace('"', '')
        logger.debug('STREAMING', streaming)
        logger.debug('STREAMING=', streaming)
        matches = support.match(
            streaming, patron=r'<td><a.*?href=([^ ]+) [^>]+>([^<]+)<').matches
        for scrapedurl, scrapedtitle in matches:
            logger.debug("##### findvideos %s ## %s ## %s ##" %
                         (desc_txt, scrapedurl, scrapedtitle))
            itemlist.append(
                item.clone(action="play",
                           title=scrapedtitle,
                           url=scrapedurl,
                           server=scrapedtitle,
                           quality=quality))

    logger.debug()

    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data
    data = re.sub('\n|\t', '', data)

    # Extract the content - Streaming
    load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable',
               "Streaming", "SD")

    # Extract the content - Streaming HD
    load_links(itemlist, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable',
               "Streaming HD", "HD")

    # Extract the content - Streaming 3D
    load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable',
               "Streaming 3D")

    itemlist = support.server(item, itemlist=itemlist)
    # Extract the quality format
    patronvideos = r'([\w.]+)</strong></div></td>'
    support.addQualityTag(item, itemlist, data, patronvideos)

    return itemlist
Example #5
def findvideos(item):
    support.log()
    listurl = set()
    itemlist = []
    support.log("ITEMLIST: ", item)
    data = support.match(item.url, headers=headers).data
    check = support.match(
        data, patron=r'<div class="category-film">(.*?)</div>').match
    if 'sub' in check.lower():
        item.contentLanguage = 'Sub-ITA'
    support.log("CHECK : ", check)
    if 'anime' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        support.log('select = ### this is an anime ###')
        try:
            return episodios(item)
        except Exception:
            pass
    elif 'serie' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        return episodios(item)

    # if 'protectlink' in data:
    #     urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
    #     support.log("SONO QUI: ", urls)
    #     for url in urls:
    #         url = url.decode('base64')
    #         # strip the last character because it doesn't belong
    #         url, c = unshorten_only(url)
    #         if 'nodmca' in url:
    #             page = httptools.downloadpage(url, headers=headers).data
    #             url = '\t' + scrapertools.find_single_match(page, '<meta name="og:url" content="([^=]+)">')
    #         if url:
    #             listurl.add(url)
    # data += '\n'.join(listurl)
    support.log(data)
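    # Collect every embedded iframe player plus any extra links carried on the item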
    urls = support.match(data, patron=r'<iframe.*?src="([^"]+)"').matches
    if item.otherLinks:
        urls += support.match(item.otherLinks, patron=r'href="([^"]+)').matches
    support.log('URLS', urls)

    itemlist = support.server(item, urls)
    support.addQualityTag(item, itemlist, data,
                          r'Keywords:\s*(?:<span>)?([^<]+)')
    return itemlist