Example #1
def anime(item):
    log()
    itemlist = []

    seasons = support.match(item, r'<div class="sp-body[^"]+">(.*?)<\/div>')[0]
    for season in seasons:
        episodes = scrapertoolsV2.find_multiple_matches(
            season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(br|\/p)')
        for url, title, urls, _ in episodes:
            urls = scrapertoolsV2.find_multiple_matches(
                urls, '<a.*?href="([^"]+)"[^>]+>')

            for url2 in urls:
                url += url2 + '\n'

            log('EP URL', url)

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title=support.typo(title + ' - ' + item.fulltitle,
                                        'bold'),
                     url=url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail,
                     args=item.args))

    autorenumber.renumber(itemlist, item, 'bold')
    support.videolibrary(itemlist, item, 'color kod bold')

    return itemlist
Example #2
def hdpass_get_servers(item):
    # Load the page
    data = httptools.downloadpage(item.url).data.replace('\n', '')
    patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
    url = scrapertoolsV2.find_single_match(data, patron).replace("?alta", "")
    url = url.replace("&download=1", "")
    if 'https' not in url:
        url = 'https:' + url

    itemlist = []

    if 'hdpass' in url or 'hdplayer' in url:
        data = httptools.downloadpage(url).data

        start = data.find('<div class="row mobileRes">')
        end = data.find('<div id="playerFront">', start)
        data = data[start:end]

        patron_res = '<div class="row mobileRes">(.*?)</div>'
        patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
        patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed"\s*value="([^"]+)"\s*/>'

        res = scrapertoolsV2.find_single_match(data, patron_res)

        for res_url, res_video in scrapertoolsV2.find_multiple_matches(
                res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):

            data = httptools.downloadpage(urlparse.urljoin(
                url, res_url)).data.replace('\n', '')

            mir = scrapertoolsV2.find_single_match(data, patron_mir)

            for mir_url, server in scrapertoolsV2.find_multiple_matches(
                    mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'):

                data = httptools.downloadpage(urlparse.urljoin(
                    url, mir_url)).data.replace('\n', '')
                for media_label, media_url in scrapertoolsV2.find_multiple_matches(
                        data, patron_media):
                    itemlist.append(
                        Item(channel=item.channel,
                             action="play",
                             title=item.title +
                             typo(server, '-- [] color kod') +
                             typo(res_video, '-- [] color kod'),
                             fulltitle=item.fulltitle,
                             quality=res_video,
                             show=item.show,
                             thumbnail=item.thumbnail,
                             contentType=item.contentType,
                             server=server,
                             url=url_decode(media_url)))
                    log("video -> ", res_video)

    return controls(itemlist, item, AutoPlay=True, CheckLinks=True)
Example #3
def episodios(item):
    logger.info(item.channel + ' episodios')
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    if 'accordion-item' in data:
        block = scrapertoolsV2.find_single_match(
            data, 'accordion-item.*?>(.*?)<div id="disqus_thread">')
        patron = r'<img src="([^"]+)">.*?<li class="season-no">(.*?)<\/li>(.*?)<\/table>'
        matches = scrapertoolsV2.find_multiple_matches(block, patron)

        for scrapedthumb, scrapedtitle, scrapedurl in matches:
            title = scrapedtitle + ' - ' + item.title
            if title[0] == 'x':
                title = '1' + title

            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType=item.contentType,
                     title=title,
                     fulltitle=title,
                     show=title,
                     quality=item.quality,
                     url=scrapedurl,
                     thumbnail=scrapedthumb))

    else:
        block = scrapertoolsV2.find_single_match(
            data, '<div id="info" class="pad">(.*?)<div id="disqus_thread">'
        ).replace('</p>', '<br />').replace('×', 'x')
        matches = scrapertoolsV2.find_multiple_matches(
            block, r'<strong>(.*?)<\/strong>.*?<p>(.*?)<span')
        for lang, seasons in matches:
            lang = re.sub('.*?Stagione[^a-zA-Z]+', '', lang)
            # patron = r'([0-9]+x[0-9]+) (.*?)<br'
            season = scrapertoolsV2.find_multiple_matches(
                seasons, r'([0-9]+x[0-9]+) (.*?)<br')
            for title, url in season:
                title = title + ' - ' + lang
                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         fulltitle=title,
                         show=title,
                         url=url,
                         contentType=item.contentType,
                         action='findvideos'))

    return itemlist
Example #4
def findvideos(item):
    log()
    # itemlist = []

    if item.args == 'anime':
        data = item.url
    else:
        data = httptools.downloadpage(item.url, headers=headers).data

        # Check whether it's a series
        check = scrapertoolsV2.find_single_match(
            data.replace('\t', '').replace('\n', ''),
            r'<div class="category-film"><h3>([^<]+)<\/h3>')
        if 'serie tv' in check.lower():
            return episodios(item)
        elif 'anime' in check.lower():
            return anime(item)

        if 'protectlink' in data:
            urls = scrapertoolsV2.find_multiple_matches(
                data, r'<iframe src="[^=]+=(.*?)"')
            for url in urls:
                url = url.decode('base64')  # Python 2: decode the base64-encoded iframe target
                if '\t' in url:
                    url = url[:-1]
                data += '\t' + url
            if 'nodmca' in data:
                page = httptools.downloadpage(url, headers=headers).data
                data += '\t' + scrapertoolsV2.find_single_match(
                    page, '<meta name="og:url" content="([^=]+)">')

    return support.server(item, data, headers=headers)
Example #5
def dooplay_get_links(item, host):
    # get links from websites using dooplay theme and dooplay_player
    # return a list of dict containing these values: url, title and server

    data = httptools.downloadpage(item.url).data.replace("'", '"')
    patron = r'<li id="player-option-[0-9]".*?data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)".*?<span class="title".*?>([^<>]+)</span>(?:<span class="server">([^<>]+))?'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)

    ret = []

    for type, post, nume, title, server in matches:
        postData = urllib.urlencode({
            "action": "doo_player_ajax",
            "post": post,
            "nume": nume,
            "type": type
        })
        dataAdmin = httptools.downloadpage(host + '/wp-admin/admin-ajax.php',
                                           post=postData,
                                           headers={
                                               'Referer': item.url
                                           }).data
        link = scrapertoolsV2.find_single_match(dataAdmin,
                                                "<iframe.*src='([^']+)'")
        ret.append({'url': link, 'title': title, 'server': server})

    return ret
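
A minimal consumer sketch for the url/title/server dicts returned above, assuming the usual KOD channel imports (Item) and a module-level host; the function name and title formatting are illustrative, not part of the original:

def findvideos(item):
    # Hypothetical usage sketch: wrap each dict returned by
    # dooplay_get_links() in a playable Item. 'host' is assumed to be
    # the channel's base URL defined elsewhere in the module.
    itemlist = []
    for link in dooplay_get_links(item, host):
        if link['url']:
            itemlist.append(
                Item(channel=item.channel,
                     action='play',
                     title=item.title + ' [' + (link['server'] or link['title']) + ']',
                     url=link['url'],
                     server=link['server']))
    return itemlist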
Example #6
def dooplay_get_episodes(item):
    itemlist = []
    item.contentType = "episode"
    data = httptools.downloadpage(item.url).data.replace("'", '"')
    patron = '<li class="mark-[0-9]">.*?<img.*?data-lazy-src="([^"]+).*?([0-9] - [0-9]).*?<a href="([^"]+)">([^<>]+).*?([0-9]{4})'

    for scrapedthumb, scrapedep, scrapedurl, scrapedtitle, scrapedyear in scrapertoolsV2.find_multiple_matches(
            data, patron):
        scrapedep = scrapedep.replace(' - ', 'x')
        infoLabels = {}
        infoLabels['year'] = scrapedyear

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapedep + " " + scrapedtitle,
                 fulltitle=scrapedtitle,
                 show=item.fulltitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumb,
                 infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    videolibrary(itemlist, item)
    return itemlist
Example #7
def findvideos(item):
    logger.info(item.channel + ' findvideos')
    itemlist = []
    logger.info('TYPE= ' + item.contentType)
    if item.contentType == 'movie':
        data = httptools.downloadpage(item.url, headers=headers).data
        logger.info('DATA= ' + data)
        item.url = scrapertoolsV2.find_single_match(data,
                                                    r'<table>(.*?)<\/table>')

    urls = scrapertoolsV2.find_multiple_matches(
        item.url, r"<a href='([^']+)'.*?>.*?>.*?([a-zA-Z]+).*?<\/a>")
    logger.info('EXTRA= ' + item.extra)
    for url, server in urls:
        itemlist.append(
            Item(channel=item.channel,
                 action='play',
                 title=item.title + ' [COLOR blue][' + server + '][/COLOR]',
                 contentType="movie",
                 server=server,
                 url=url))

    autoplay.start(itemlist, item)

    return itemlist
Example #8
def findvideos(item):
    log()
    itemlist = []
   
    matches, data = support.match(item, r'class="tab.*?data-name="([0-9]+)">([^<]+)</span', headers=headers)
    videoData = ''
    
    for serverid, servername in matches:
        block = scrapertoolsV2.find_multiple_matches(data, 'data-id="' + serverid + '">(.*?)<div class="server')
        id = scrapertoolsV2.find_single_match(str(block), r'<a data-id="([^"]+)" data-base="' + item.fulltitle + '"')
        dataJson = httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, id, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
        json = jsontools.load(dataJson)
        log('JSON= ', json)

        videoData += '\n' + json['grabber']

        if serverid == '28':
            itemlist.append(
                Item(
                    channel=item.channel,
                    action="play",
                    title='diretto',
                    quality='',
                    url=json['grabber'],
                    server='directo',
                    show=item.show,
                    contentType=item.contentType,
                    folder=False))

    return support.server(item, videoData, itemlist)
Example #9
def search(item, texto):
    logger.info("[casacinemaInfo.py] " + item.url + " search " + texto)

    item.url = host + "?s=" + texto
    data = httptools.downloadpage(item.url).data

    itemlist = []

    patron = '<li class="col-md-12 itemlist">.*?<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?Film dell\\\'anno: ([0-9]{4}).*?<p class="text-list">([^<>]+)</p>'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches:
        title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        cleantitle = title.replace('[Sub-ITA]', '').strip()

        infoLabels = {
            "plot": scrapertoolsV2.decodeHtmlentities(scrapedplot),
            "year": scrapedyear
        }

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 infoLabels=infoLabels,
                 fulltitle=cleantitle))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #10
def match(item, patron='', patronBlock='', headers='', url='', post=''):
    matches = []
    if isinstance(item, str):
        data = item
    else:
        url = url if url else item.url
        if post:
            data = httptools.downloadpage(url,
                                          headers=headers,
                                          ignore_response_code=True,
                                          post=post).data.replace("'", '"')
        else:
            data = httptools.downloadpage(
                url, headers=headers,
                ignore_response_code=True).data.replace("'", '"')
    data = re.sub(r'\n|\t', ' ', data)
    data = re.sub(r'>\s\s*<', '><', data)
    log('DATA= ', data)

    if patronBlock:
        block = scrapertoolsV2.find_single_match(data, patronBlock)
        log('BLOCK= ', block)
    else:
        block = data

    if patron:
        matches = scrapertoolsV2.find_multiple_matches(block, patron)
        log('MATCHES= ', matches)

    return matches, block
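
For context, a hedged sketch of how this helper would be called from a channel list function; both patterns are placeholders rather than a real site's markup:

def peliculas(item):
    # Illustrative only: patronBlock isolates the list container,
    # patron captures (url, title) pairs inside it.
    itemlist = []
    matches, block = match(item,
                           patron=r'<a href="([^"]+)"[^>]*>([^<]+)</a>',
                           patronBlock=r'<ul class="posts">(.*?)</ul>')
    for url, title in matches:
        itemlist.append(Item(channel=item.channel,
                             action='findvideos',
                             title=title,
                             url=url))
    return itemlist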
Example #11
def peliculas(item):
    support.log()
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
        info = scrapertoolsV2.find_multiple_matches(
            data,
            r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">'
        )
        infoLabels = {}
        # pick the info row whose link matches the current entry
        for infoLabels['year'], duration, scrapedplot, checkUrl in info:
            if checkUrl == scrapedurl:
                break

        infoLabels['duration'] = int(duration.replace(
            ' min', '')) * 60  # convert the duration to seconds
        scrapedthumbnail = host + scrapedthumbnail
        scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        fulltitle = scrapedtitle
        if subDiv:
            fulltitle += support.typo(subText + ' _ () color limegreen')
        fulltitle += support.typo(scrapedquality.strip() + ' _ [] color kod')

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType=item.contentType,
                 contentTitle=scrapedtitle,
                 contentQuality=scrapedquality.strip(),
                 plot=scrapedplot,
                 title=fulltitle,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.nextPage(itemlist, item, data,
                     '<span>[^<]+</span>[^<]+<a href="(.*?)">')

    return itemlist
Example #12
def findvideos(item):
    log()
    itemlist = []
    matches, data = support.match(item, '<iframe class="metaframe rptss" src="([^"]+)"[^>]+>', headers=headers)
    for url in matches:
        html = httptools.downloadpage(url, headers=headers).data
        data += str(scrapertoolsV2.find_multiple_matches(html, '<meta name="og:url" content="([^"]+)">'))
    itemlist = support.server(item, data)
    return itemlist
Example #13
def episodios(item):
    item.contentType = 'episode'
    itemlist = []

    data = httptools.downloadpage(item.url).data
    matches = scrapertoolsV2.find_multiple_matches(
        data,
        r'(<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>.*?)<div class="spdiv">\[riduci\]</div>'
    )

    for match in matches:
        support.log(match)
        blocks = scrapertoolsV2.find_multiple_matches(
            match, '(?:<p>)(.*?)(?:</p>|<br)')
        season = scrapertoolsV2.find_single_match(
            match, r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)').strip()

        for block in blocks:
            episode = scrapertoolsV2.find_single_match(
                block, r'([0-9]+(?:&#215;|×)[0-9]+)').strip()
            seasons_n = scrapertoolsV2.find_single_match(
                block, r'<strong>STAGIONE\s+\d+([^<>]+)').strip()

            if seasons_n:
                season = seasons_n

            if not episode: continue

            season = re.sub(r'&#8211;|–', "-", season)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title="[B]" + episode + "[/B] " + season,
                     fulltitle=episode + " " + season,
                     show=episode + " " + season,
                     url=block,
                     extra=item.extra,
                     thumbnail=item.thumbnail,
                     infoLabels=item.infoLabels))

    support.videolibrary(itemlist, item)

    return itemlist
Example #14
def elenco_aggiornamenti_serietv(item):
    """
    Builds the list of series updates.
    """
    logger.info("%s elenco_aggiornamenti_serietv log: %s" %
                (__channel__, item))
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data

    # Extract the contents
    #bloque = scrapertoolsV2.get_match(data, '<div class="entry">(.*?)<div class="clear"></div>')
    bloque = scrapertoolsV2.find_single_match(
        data, '<div class="entry">(.*?)<div class="clear"></div>')
    patron = '<span class="serieTitle".*?>(.*?)<.*?href="(.*?)".*?>(.*?)<'
    matches = scrapertoolsV2.find_multiple_matches(bloque, patron)

    for scrapedtitle, scrapedurl, scrapedepisodies in matches:
        if "(SUB ITA)" in scrapedepisodies.upper():
            lang = "SUB ITA"
            scrapedepisodies = scrapedepisodies.replace('(SUB ITA)', '')
        else:
            lang = "ITA"
            scrapedepisodies = scrapedepisodies.replace(lang, '')
        #num = scrapertoolsV2.find_single_match(scrapedepisodies, '(-\d+/)')
        #if num:
        #    scrapedurl = scrapedurl.replace(num, "-episodi/")
        scrapedtitle = scrapedtitle.replace("–",
                                            "").replace('\xe2\x80\x93 ',
                                                        '').strip()
        scrapedepisodies = scrapedepisodies.replace('\xe2\x80\x93 ',
                                                    '').strip()
        itemlist.append(
            Item(
                channel=item.channel,
                action="episodios",
                contentType="tvshow",
                title="%s" %
                scrapedtitle,  # %s" % (scrapedtitle, scrapedepisodies),
                fulltitle="%s %s" % (scrapedtitle, scrapedepisodies),
                text_color=color5,
                url=scrapedurl,
                #show = "%s %s" % (scrapedtitle, scrapedepisodies),
                extra=item.extra,
                #lang = lang,
                #data = data,
                folder=True))

    # posters, plot and other data from TMDB; if the year is present the search improves
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')

    return itemlist
Example #15
def episodios(item):
    logger.info("kod.mondoserietv episodios")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, '<table>(.*?)</table>')

    patron = r"<tr><td><b>(.*?)(\d+)((?:x\d+| ))(.*?)<\/b>(.*?<tr>)"
    matches = scrapertoolsV2.find_multiple_matches(blocco, patron)

    for t1, s, e, t2, scrapedurl in matches:

        # when the 'xNN' part is missing, the season group actually holds
        # the episode number and the season defaults to 1
        if "x" not in e:
            e = s
            s = "1"

        if s.startswith('0'):
            s = s.lstrip("0")

        if e.startswith('x'):
            e = e.replace("x", "")

        scrapedtitle = t1 + s + "x" + e + " " + t2
        itemlist.append(
            Item(channel=item.channel,
                 contentType="episode",
                 action="findvideos",
                 items=s,
                 iteme=e,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=item.scrapedthumbnail,
                 plot=item.scrapedplot,
                 folder=True))

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" %
                 config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
Example #16
def findvideos(item):
    log()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    if 'keepem.online' in data:
        urls = scrapertoolsV2.find_multiple_matches(
            data, r'(https://keepem\.online/f/[^"]+)"')
        for url in urls:
            url = httptools.downloadpage(url).url
            itemlist += servertools.find_video_items(data=url)

    return support.server(item, data, itemlist)
Example #17
def peliculas(item):
    logger.info(item.channel + ' peliculas')
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(data,
                                             r'<ul class="posts">(.*)<\/ul>')

    patron = r'<li><a href="([^"]+)" data-thumbnail="([^"]+)">.*?<div class="title">([^<]+)<\/div>'
    matches = scrapertoolsV2.find_multiple_matches(block, patron)

    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        title = re.sub(r'.\(.*?\)|.\[.*?\]', '', scrapedtitle)
        quality = scrapertoolsV2.find_single_match(scrapedtitle, r'\[(.*?)\]')
        if not quality:
            quality = 'SD'

        longtitle = title + ' [COLOR blue][' + quality + '][/COLOR]'

        if item.contentType == 'episode':
            action = 'episodios'
        else:
            action = 'findvideos'

        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 contentType=item.contentType,
                 title=longtitle,
                 fulltitle=title,
                 show=title,
                 quality=quality,
                 url=scrapedurl,
                 thumbnail=scrapedthumb))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    next_page = scrapertoolsV2.find_single_match(data,
                                                 '<a href="([^"]+)">Pagina')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 contentType=item.contentType,
                 title="[COLOR blue]" + config.get_localized_string(30992) +
                 " >[/COLOR]",
                 url=next_page,
                 thumbnail=thumb()))

    return itemlist
Example #18
def peliculas(item):
    logger.info("[casacinemaInfo.py] peliculas")

    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data

    # Extract the contents

    patron = '<div class="col-mt-5 postsh">[^<>]+<div class="poster-media-card">[^<>]+<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)"'

    matches = scrapertoolsV2.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        cleantitle = title.replace('[Sub-ITA]', '').strip()

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=cleantitle))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    ## Pagination
    next_page = scrapertoolsV2.find_single_match(
        data, '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"'
    )  ### <- regex: space removed; previously <li><a href="([^"]+)" >Pagina -> continues at line 221

    if next_page != "":
        itemlist.append(
            Item(
                channel=item.channel,
                action="peliculas",
                title="[COLOR lightgreen]" +
                config.get_localized_string(30992) + "[/COLOR]",
                url=next_page,
                extra=item.extra,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
Example #19
def genre(item):
    logger.info(item.channel + ' genre')
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(data, r'<ul class="table-list">(.*?)<\/ul>')
    matches = scrapertoolsV2.find_multiple_matches(block, r'<a href="([^"]+)">.*?<\/span>(.*?)<\/a>')
    for url, title in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action='peliculas',
                 title=title,
                 url=host+url)
        )
    itemlist = thumb(itemlist)
    return itemlist
Example #20
def az(item):
    logger.info(item.channel + ' az')
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(data, r'<select class="cats">(.*?)<\/select>')
    matches = scrapertoolsV2.find_multiple_matches(block, r'<option data-src="([^"]+)">(.*?)<\/option>')
    for url, title in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action='peliculas',
                 title=title,
                 url=url)
        )
    itemlist = thumb(itemlist)
    return itemlist
Example #21
def match(item, patron='', patron_block='', headers='', url=''):
    matches = []
    url = url if url else item.url
    data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')
    data = re.sub('\n|\t', '', data)
    log('DATA= ', data)

    if patron_block:
        block = scrapertoolsV2.find_single_match(data, patron_block)
        log('BLOCK= ', block)
    else:
        block = data

    if patron:
        matches = scrapertoolsV2.find_multiple_matches(block, patron)
        log('MATCHES= ', matches)

    return matches, data
Example #22
def web_menu():
    itemlist = []

    data = httptools.downloadpage(host, headers=headers).data
    matches = scrapertoolsV2.find_multiple_matches(
        data, r'<li class="page_item.*?><a href="([^"]+)">(.*?)<\/a>')
    blacklist = ['DMCA', 'Contatti', 'Attenzione NON FARTI OSCURARE']

    for url, title in matches:
        if title not in blacklist:
            title = title.replace('Lista ', '') + ' bold'
            if 'film' in title.lower():
                contentType = 'movie'
            else:
                contentType = 'episode'
            support.menu(itemlist,
                         title,
                         'peliculas',
                         url,
                         contentType=contentType)

    return itemlist
Example #23
def findvideos(item):
    support.log()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\t', ' ', data)
    data = re.sub(r'>\s\s*<', '><', data)
    patronBlock = r'LINK STREAMING(?P<block>.*?)LINK DOWNLOAD'
    patron = r'href="(.+?)"'
    block = scrapertoolsV2.find_single_match(data, patronBlock)
    urls = scrapertoolsV2.find_multiple_matches(block, patron)

    for url in urls:

        lang = 'Sub-ITA' if 'sub_ita' in url.lower() else 'ITA'

        if 'keepem.online' in data:
            urls = scrapertoolsV2.find_multiple_matches(
                data, r'(https://keepem\.online/f/[^"]+)"')
            for url in urls:
                url = httptools.downloadpage(url).url
                itemlist += servertools.find_video_items(data=url)
            continue  # these links are already in itemlist; skip the generic append below

        elif 'keepsetsu' in url.lower() or 'woof' in url.lower():
            if 'keepsetsu' in url.lower():
                support.log("keepsetsu url -> ", url)
                data = httptools.downloadpage(url).url
                support.log("LINK-DATA :", data)

            data = httptools.downloadpage(data).data
            support.log("LINK-DATA2 :", data)
            video_urls = scrapertoolsV2.find_single_match(
                data, r'<meta name="description" content="([^"]+)"')

        else:

            data = httptools.downloadpage(url).data
            host_video = scrapertoolsV2.find_single_match(
                data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
            link = scrapertoolsV2.find_single_match(data,
                                                    r'<video src="([^"]+)"')
            video_urls = host_video + link

        title = support.typo(item.fulltitle, '_ bold') + support.typo(
            lang, '_ [] color kod')
        itemlist.append(
            support.Item(
                channel=item.channel,
                action="play",
                contentType=item.contentType,
                title=title,
                fulltitle=title,
                show=title,
                url=video_urls,
                infoLabels=item.infoLabels,
                thumbnail=item.thumbnail,
                contentSerieName=item.contentSerieName,
                contentTitle=title,
                contentLanguage=lang,
                args=item.args,
                server='directo',
            ))
    return itemlist
Example #24
def episodios(item):
    #logger.info("%s episodios log: %s" % (__channel__, item))
    itemlist = []

    if not item.lang:
        lang_season = {'ITA': 0, 'SUB ITA': 0}
        # Download the page
        data = httptools.downloadpage(item.url).data
        #========
        if 'clicca qui per aprire' in data.lower():
            logger.info("%s CLICCA QUI PER APRIRE GLI EPISODI log: %s" %
                        (__channel__, item))
            item.url = scrapertoolsV2.find_single_match(
                data, '"go_to":"(.*?)"')
            item.url = item.url.replace("\\", "")
            # Load the page
            data = httptools.downloadpage(item.url).data
            #logger.info("%s FINE CLICCA QUI PER APRIRE GLI EPISODI log: %s" % (__channel__, item))
        elif 'clicca qui</span>' in data.lower():
            logger.info("%s inizio CLICCA QUI</span> log: %s" %
                        (__channel__, item))
            item.url = scrapertoolsV2.find_single_match(
                data, '<h2 style="text-align: center;"><a href="(.*?)">')
            data = httptools.downloadpage(item.url).data
            #logger.info("%s fine CLICCA QUI</span> log: %s" % (__channel__, item))
        #=========
        data = scrapertoolsV2.decodeHtmlentities(data)
        bloque = scrapertoolsV2.find_single_match(
            data, '<div class="su-accordion">(.*?)<div class="clear"></div>')
        patron = '<span class="su-spoiler-icon"></span>(.*?)</div>'
        matches = scrapertoolsV2.find_multiple_matches(bloque, patron)
        for scrapedseason in matches:
            #logger.info("%s scrapedseason log: %s" % (__channel__, scrapedseason))
            if "(SUB ITA)" in scrapedseason.upper():
                lang = "SUB ITA"
                lang_season['SUB ITA'] += 1
            else:
                lang = "ITA"
                lang_season['ITA'] += 1
            #logger.info("%s lang_dict log: %s" % (__channel__, lang_season))

        for lang in sorted(lang_season):
            if lang_season[lang] > 0:
                itemlist.append(
                    Item(
                        channel=item.channel,
                        action="episodios",
                        #contentType = "episode",
                        contentSerieName=item.title,
                        title='%s (%s)' % (item.title, lang),
                        url=item.url,
                        fulltitle=item.title,
                        data=data,
                        lang=lang,
                        show=item.show,
                        folder=True,
                    ))

        # posters, plot and other data from TMDB; if the year is present the search improves
        tmdb.set_infoLabels_itemlist(itemlist,
                                     seekTmdb=True,
                                     idioma_busqueda='it')

        return itemlist

    else:
        # the season's episodes are handled here
        html = item.data
        logger.info("%s else log: [%s]" % (__channel__, item))

        if item.lang == 'SUB ITA':
            item.lang = r'\(SUB ITA\)'
            logger.info("%s item.lang log: %s" % (__channel__, item.lang))
        bloque = scrapertoolsV2.find_single_match(
            html, '<div class="su-accordion">(.*?)<div class="clear"></div>')
        patron = '<span class="su-spoiler-icon"></span>.*?' + item.lang + '</div>(.*?)</div>'  # read all the seasons
        #logger.info("%s patronpatron log: %s" % (__channel__, patron))
        matches = scrapertoolsV2.find_multiple_matches(bloque, patron)
        for scrapedseason in matches:
            #logger.info("%s scrapedseasonscrapedseason log: %s" % (__channel__, scrapedseason))
            scrapedseason = scrapedseason.replace('<strong>',
                                                  '').replace('</strong>', '')
            patron = r'(\d+)×(\d+)(.*?)<(.*?)<br />'  # season - episode - title - link group
            matches = scrapertoolsV2.find_multiple_matches(
                scrapedseason, patron)
            for scrapedseason, scrapedpuntata, scrapedtitolo, scrapedgroupurl in matches:
                #logger.info("%s finale log: %s" % (__channel__, patron))
                scrapedtitolo = scrapedtitolo.replace('–', '')
                itemlist.append(
                    Item(
                        channel=item.channel,
                        action="findvideos",
                        contentType="episode",
                        #contentSerieName = item.contentSerieName,
                        contentTitle=scrapedtitolo,
                        title='%sx%s %s' %
                        (scrapedseason, scrapedpuntata, scrapedtitolo),
                        url=scrapedgroupurl,
                        fulltitle=item.fulltitle,
                        #show = item.show,
                        #folder = True,
                    ))

        logger.info("%s itemlistitemlist log: %s" % (__channel__, itemlist))

        # "Add this movie to the KODI video library" option
        if item.extra != "library":
            if config.get_videolibrary_support(
            ) and len(itemlist) > 0 and item.extra != 'findvideos':
                itemlist.append(
                    Item(channel=item.channel,
                         title="%s" % config.get_localized_string(30161),
                         text_color="green",
                         extra="episodios",
                         action="add_serie_to_library",
                         url=item.url,
                         thumbnail=get_thumb('videolibrary', auto=True),
                         contentTitle=item.contentSerieName,
                         lang=item.lang,
                         show=item.show,
                         data=html
                         #, infoLabels = item.infoLabels
                         ))

        return itemlist
Example #25
def scrape(item,
           patron='',
           listGroups=[],
           headers="",
           blacklist="",
           data="",
           patron_block="",
           patronNext="",
           action="findvideos",
           url_host="",
           addVideolibrary=True):
    # patron: the patron to use for scraping the page; every capturing group must match an entry in listGroups
    # listGroups: a list naming the scraping info captured by your patron, in order
    # accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating

    # headers: values to pass to the request header
    # blacklist: titles that you want to exclude (service articles, for example)
    # data: if you want to pass data manually, maybe because you need some custom replacement
    # patron_block: patron to get parts of the page (to scrape with the patron attribute);
    #               if you need a "block inside another block" you can create a list, please note that all matches
    #               will be packed as a string
    # patronNext: patron for scraping the next-page link
    # action: if you want results to perform an action other than "findvideos", useful when scraping films by genre
    # url_host: string to prepend to scrapedurl, useful when the url doesn't contain the host
    # example usage:
    #   import support
    #   patron = 'blablabla'
    #   headers = [['Referer', host]]
    #   blacklist = 'Request a TV serie!'
    #   return support.scrape(item, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot'],
    #                         headers=headers, blacklist=blacklist)

    itemlist = []

    if not data:
        data = httptools.downloadpage(item.url,
                                      headers=headers).data.replace("'", '"')
        data = re.sub('\n|\t', ' ', data)
        # all ' were replaced with " and newlines removed, so patterns don't need to worry about either
        log('DATA =', data)

        block = data

        if patron_block:
            if isinstance(patron_block, str):
                patron_block = [patron_block]

            for n, regex in enumerate(patron_block):
                blocks = scrapertoolsV2.find_multiple_matches(block, regex)
                block = ""
                for b in blocks:
                    block += "\n" + b
                log('BLOCK ', n, '=', block)
    else:
        block = data
    if patron and listGroups:
        matches = scrapertoolsV2.find_multiple_matches(block, patron)
        log('MATCHES =', matches)

        for match in matches:
            if len(listGroups) > len(match):  # pad short matches so every listGroups entry has a value
                match = list(match)
                match.extend([''] * (len(listGroups) - len(match)))

            scrapedurl = url_host + match[listGroups.index(
                'url')] if 'url' in listGroups else ''
            scrapedtitle = match[listGroups.index(
                'title')] if 'title' in listGroups else ''
            scrapedthumb = match[listGroups.index(
                'thumb')] if 'thumb' in listGroups else ''
            scrapedquality = match[listGroups.index(
                'quality')] if 'quality' in listGroups else ''
            scrapedyear = match[listGroups.index(
                'year')] if 'year' in listGroups else ''
            scrapedplot = match[listGroups.index(
                'plot')] if 'plot' in listGroups else ''
            scrapedduration = match[listGroups.index(
                'duration')] if 'duration' in listGroups else ''
            scrapedgenre = match[listGroups.index(
                'genre')] if 'genre' in listGroups else ''
            scrapedrating = match[listGroups.index(
                'rating')] if 'rating' in listGroups else ''

            title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
            plot = scrapertoolsV2.decodeHtmlentities(scrapedplot)
            if scrapedquality:
                longtitle = '[B]' + title + '[/B] [COLOR blue][' + scrapedquality + '][/COLOR]'
            else:
                longtitle = '[B]' + title + '[/B]'

            if item.infoLabels[
                    "title"] or item.fulltitle:  # if title is set, probably this is a list of episodes or video sources
                infolabels = item.infoLabels
            else:
                infolabels = {}
                if scrapedyear:
                    infolabels['year'] = scrapedyear
                if scrapedplot:
                    infolabels['plot'] = plot
                if scrapedduration:
                    matches = scrapertoolsV2.find_multiple_matches(
                        scrapedduration,
                        r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
                    for h, m in matches:
                        scrapedduration = int(h) * 60 + int(m)
                    infolabels['duration'] = int(scrapedduration) * 60
                if scrapedgenre:
                    genres = scrapertoolsV2.find_multiple_matches(
                        scrapedgenre, '[A-Za-z]+')
                    infolabels['genre'] = ", ".join(genres)
                if scrapedrating:
                    infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(
                        scrapedrating)

            if scrapedtitle not in blacklist:
                itemlist.append(
                    Item(channel=item.channel,
                         action=action,
                         contentType=item.contentType,
                         title=longtitle,
                         fulltitle=title,
                         show=title,
                         quality=scrapedquality,
                         url=scrapedurl,
                         infoLabels=infolabels,
                         thumbnail=scrapedthumb))

        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

        if patronNext:
            nextPage(itemlist, item, data, patronNext, 2)

        if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
            item.fulltitle = item.infoLabels["title"]
            videolibrary(itemlist, item)

    return itemlist
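
A hedged call sketch for scrape(): the one hard constraint is that the capturing groups in patron line up, in order, with the names in listGroups. The pattern below is a placeholder, not a real site's markup:

def peliculas(item):
    # group 1 -> 'url', group 2 -> 'title', group 3 -> 'thumb'
    patron = r'<a href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"'
    return scrape(item,
                  patron=patron,
                  listGroups=['url', 'title', 'thumb'],
                  blacklist=['Request a TV serie!'],
                  patronNext=r'<a href="([^"]+)">Next')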
Example #26
def scrapeBlock(item, args, block, patron, headers, action, pagination, debug,
                typeContentDict, typeActionDict, blacklist, search, pag,
                function, lang):
    itemlist = []
    log("scrapeBlock", block, patron)
    matches = scrapertoolsV2.find_multiple_matches_groups(block, patron)
    log('MATCHES =', matches)

    if debug:
        regexDbg(item, patron, headers, block)

    known_keys = [
        'url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year',
        'plot', 'duration', 'genere', 'rating', 'type', 'lang'
    ]
    # lang = ''  # added to handle sites whose TV-series pages list both ITA and Sub-ITA videos
    for i, match in enumerate(matches):
        if pagination and (pag - 1) * pagination > i: continue  # pagination
        if pagination and i >= pag * pagination: break  # pagination
        listGroups = match.keys()
        match = match.values()

        if len(listGroups) > len(match):  # pad short matches so every listGroups entry has a value
            match = list(match)
            match.extend([''] * (len(listGroups) - len(match)))

        scraped = {}
        for kk in known_keys:
            val = match[listGroups.index(kk)] if kk in listGroups else ''
            if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
                val = scrapertoolsV2.find_single_match(
                    item.url, 'https?://[a-z0-9.-]+') + val
            scraped[kk] = val

        episode = re.sub(r'\s-\s|-|x|&#8211|&#215;', 'x',
                         scraped['episode']) if scraped['episode'] else ''
        title = cleantitle(scraped['title']) if scraped['title'] else ''
        title2 = cleantitle(scraped['title2']) if scraped['title2'] else ''
        quality = scraped['quality'].strip() if scraped['quality'] else ''
        Type = scraped['type'] if scraped['type'] else ''
        plot = cleantitle(scraped["plot"]) if scraped["plot"] else ''

        # make formatted Title [longtitle]
        s = ' - '
        title = episode + (s if episode and title else '') + title
        longtitle = title + (s if title and title2 else '') + title2
        longtitle = typo(longtitle, 'bold')
        longtitle += (typo(Type, '_ () bold') if Type else '') + (typo(
            quality, '_ [] color kod') if quality else '')

        # # to strip the [ITA] tag from lists that are not titles (e.g. genre)
        # if action != 'peliculas':
        #     lang, longtitle = scrapeLang(scraped, lang, longtitle)
        # else:
        #     longtitle = longtitle.replace('[ITA]','')
        #     lang = ''

        lang, longtitle = scrapeLang(scraped, lang, longtitle)

        # if title is set, probably this is a list of episodes or video sources
        # the == scraped["title"] comparison is needed, otherwise groups after the categories are not picked up
        if item.infoLabels["title"] == scraped["title"]:
            infolabels = item.infoLabels
        else:
            infolabels = {}
            if scraped['year']:
                infolabels['year'] = scraped['year']
            if scraped["plot"]:
                infolabels['plot'] = plot
            if scraped['duration']:
                matches = scrapertoolsV2.find_multiple_matches(
                    scraped['duration'],
                    r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
                for h, m in matches:
                    scraped['duration'] = int(h) * 60 + int(m)
                if not matches:
                    scraped['duration'] = scrapertoolsV2.find_single_match(
                        scraped['duration'], r'(\d+)')
                infolabels['duration'] = int(scraped['duration']) * 60
            if scraped['genere']:
                genres = scrapertoolsV2.find_multiple_matches(
                    scraped['genere'], '[A-Za-z]+')
                infolabels['genere'] = ", ".join(genres)
            if scraped["rating"]:
                infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(
                    scraped["rating"])

        AC = CT = ''
        if typeContentDict:
            CT = item.contentType
            for name, variants in typeContentDict.items():
                if str(scraped['type']).lower() in variants:
                    CT = name
                    break  # stop at the first matching variant so later keys don't overwrite it
        if typeActionDict:
            AC = action
            for name, variants in typeActionDict.items():
                if str(scraped['type']).lower() in variants:
                    AC = name
                    break

        if (scraped["title"] not in blacklist) and (search.lower()
                                                    in longtitle.lower()):
            it = Item(
                channel=item.channel,
                action=AC if AC else action,
                contentType='episode'
                if function == 'episodios' else CT if CT else item.contentType,
                title=longtitle,
                fulltitle=item.fulltitle if function == 'episodios' else title,
                show=item.show if function == 'episodios' else title,
                quality=quality,
                url=scraped["url"],
                infoLabels=infolabels,
                thumbnail=item.thumbnail
                if function == 'episodios' else scraped["thumb"],
                args=item.args,
                contentSerieName=title
                if item.contentType != 'movie' and function != 'episodios' else
                item.fulltitle if function == 'episodios' else '',
                contentTitle=title if item.contentType == 'movie' else '',
                contentLanguage=lang,
                ep=episode if episode else '')

            for lg in list(set(listGroups).difference(known_keys)):
                it.__setattr__(lg, match[listGroups.index(lg)])

            if 'itemHook' in args:
                it = args['itemHook'](it)
            itemlist.append(it)

    return itemlist, matches
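
Because this variant matches with find_multiple_matches_groups, the patron must use named groups, and the names must come from known_keys; any extra group name is copied onto the resulting Item as a custom attribute. A hypothetical patron for illustration:

# Named groups map straight onto known_keys ('url', 'episode', 'title', ...)
patron = (r'<a href="(?P<url>[^"]+)"[^>]*>\s*'
          r'(?:(?P<episode>\d+x\d+)\s*-\s*)?(?P<title>[^<]+)</a>')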
Example #27
def scrape(item,
           patron='',
           listGroups=[],
           headers="",
           blacklist="",
           data="",
           patron_block="",
           patronNext="",
           action="findvideos",
           addVideolibrary=True,
           type_content_dict={},
           type_action_dict={}):
    # patron: the patron to use for scraping the page; every capturing group must match an entry in listGroups
    # listGroups: a list naming the scraping info captured by your patron, in order
    # accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating, episode, lang

    # headers: values to pass to the request header
    # blacklist: titles that you want to exclude (service articles, for example)
    # data: if you want to pass data manually, maybe because you need some custom replacement
    # patron_block: patron to get parts of the page (to scrape with the patron attribute);
    #               if you need a "block inside another block" you can create a list, please note that all matches
    #               will be packed as a string
    # patronNext: patron for scraping the next-page link
    # action: if you want results to perform an action other than "findvideos", useful when scraping films by genre
    # example usage:
    #   import support
    #   patron = 'blablabla'
    #   headers = [['Referer', host]]
    #   blacklist = 'Request a TV serie!'
    #   return support.scrape(item, patron, ['thumb', 'quality', 'url', 'title', 'title2', 'year', 'plot', 'episode', 'lang'],
    #                         headers=headers, blacklist=blacklist)
    # listGroups meanings:
    #    thumb = image, quality = video quality, url = single link or group of links, title = film or series title, title2 = extra title
    #    year = year of the film or series, plot = film or series description, episode = season and episode numbers for series,
    #    lang = language of the video
    # 'type' is a check for typologies of content, e.g. Film or TV Series
    # 'episode' is a key to grab episode numbers if it is separated from the title
    # IMPORTANT: 'type' is a special key; for it to work you need type_content_dict={} and type_action_dict={}

    itemlist = []

    if not data:
        data = httptools.downloadpage(item.url,
                                      headers=headers,
                                      ignore_response_code=True).data.replace(
                                          "'", '"')
        data = re.sub('\n|\t', ' ', data)
        # all ' were replaced with " and newlines removed, so patterns don't need to worry about either
        log('DATA =', data)

        block = data

        if patron_block:
            if isinstance(patron_block, str):
                patron_block = [patron_block]

            for n, regex in enumerate(patron_block):
                blocks = scrapertoolsV2.find_multiple_matches(block, regex)
                block = ""
                for b in blocks:
                    block += "\n" + str(b)
                log('BLOCK ', n, '=', block)
    else:
        block = data
    if patron and listGroups:
        matches = scrapertoolsV2.find_multiple_matches(block, patron)
        log('MATCHES =', matches)

        known_keys = [
            'url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year',
            'plot', 'duration', 'genere', 'rating', 'type', 'lang'
        ]  # 'episode' added by greko
        lang = ''  # added to handle sites whose TV-series pages list both ITA and Sub-ITA videos

        for match in matches:
            if len(listGroups) > len(match):  # pad short matches so every listGroups entry has a value
                match = list(match)
                match.extend([''] * (len(listGroups) - len(match)))

            scraped = {}
            for kk in known_keys:
                val = match[listGroups.index(kk)] if kk in listGroups else ''
                if val and (kk == "url"
                            or kk == 'thumb') and 'http' not in val:
                    val = scrapertoolsV2.find_single_match(
                        item.url, 'https?://[a-z0-9.-]+') + val
                scraped[kk] = val

            title = scrapertoolsV2.htmlclean(
                scrapertoolsV2.decodeHtmlentities(scraped["title"])).replace(
                    '’', '\'').replace('"', "'").strip()  # fix by greko: " to '
            plot = scrapertoolsV2.htmlclean(
                scrapertoolsV2.decodeHtmlentities(scraped["plot"]))

            longtitle = typo(title, 'bold')
            if scraped['quality']:
                longtitle = longtitle + typo(scraped['quality'],
                                             '_ [] color kod')
            if scraped['episode']:
                scraped['episode'] = re.sub(r'\s-\s|-|x|&#8211', 'x',
                                            scraped['episode'])
                longtitle = typo(scraped['episode'] + ' - ',
                                 'bold') + longtitle
            if scraped['title2']:
                title2 = scrapertoolsV2.htmlclean(
                    scrapertoolsV2.decodeHtmlentities(
                        scraped["title2"])).replace('"', "'").strip()
                longtitle = longtitle + typo(title2, 'bold _ -- _')

            ##    Added/modified to handle sites that carry both the ITA and
            ##    Sub-ITA versions of a TV series on the same page
            if scraped['lang']:
                if 'sub' in scraped['lang'].lower():
                    lang = 'Sub-ITA'
                else:
                    lang = 'ITA'
            if lang != '':
                longtitle += typo(lang, '_ [] color kod')

            if item.infoLabels[
                    "title"] or item.fulltitle:  # if title is set, probably this is a list of episodes or video sources
                infolabels = item.infoLabels
            else:
                infolabels = {}
                if scraped["year"]:
                    infolabels['year'] = scraped["year"]
                if scraped["plot"]:
                    infolabels['plot'] = plot
                if scraped["duration"]:
                    matches = scrapertoolsV2.find_multiple_matches(
                        scraped["duration"],
                        r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
                    for h, m in matches:
                        scraped["duration"] = int(h) * 60 + int(m)
                    if not matches:
                        scraped["duration"] = scrapertoolsV2.find_single_match(
                            scraped["duration"], r'(\d+)')
                    infolabels['duration'] = int(scraped["duration"]) * 60
                if scraped["genere"]:
                    genres = scrapertoolsV2.find_multiple_matches(
                        scraped["genere"], '[A-Za-z]+')
                    infolabels['genere'] = ", ".join(genres)
                if scraped["rating"]:
                    infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(
                        scraped["rating"])

            if type_content_dict:
                for name, variants in type_content_dict.items():
                    if scraped['type'] in variants:
                        item.contentType = name
            if type_action_dict:
                for name, variants in type_action_dict.items():
                    if scraped['type'] in variants:
                        action = name

            if inspect.stack()[1][3] == 'episodios':
                item.contentType = 'episode'

            if scraped["title"] not in blacklist:
                it = Item(channel=item.channel,
                          action=action,
                          contentType=item.contentType,
                          title=longtitle,
                          fulltitle=title,
                          show=title,
                          language=lang if lang != '' else '',
                          quality=scraped["quality"],
                          url=scraped["url"],
                          infoLabels=infolabels,
                          thumbnail=scraped["thumb"],
                          args=item.args)

                # copy any extra (non-known_keys) named groups onto the Item
                for lg in list(set(listGroups).difference(known_keys)):
                    setattr(it, lg, match[listGroups.index(lg)])

                itemlist.append(it)
        checkHost(item, itemlist)
        if (item.contentType == "tvshow" and (action != "findvideos" and action != "play")) \
                or (item.contentType == "episode" and action != "play") \
                or (item.contentType == "movie" and action != "play"):
            tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
        else:
            for it in itemlist:
                it.infoLabels = item.infoLabels

        if patronNext:
            nextPage(itemlist, item, data, patronNext, 2)

        if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
            item.fulltitle = item.infoLabels["title"]
            videolibrary(itemlist, item)

    return itemlist
Exemplo n.º 28
0
def scrapeBlock(item, args, block, patron, headers, action, pagination, debug,
                typeContentDict, typeActionDict, blacklist, search, pag,
                function, lang):
    itemlist = []
    log("scrapeBlock qui", block, patron)
    matches = scrapertoolsV2.find_multiple_matches_groups(block, patron)
    log('MATCHES =', matches)

    if debug:
        regexDbg(item, patron, headers, block)

    known_keys = [
        'url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality',
        'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'
    ]
    # Legend of known_keys for the named groups used in the patron patterns:
    # url = relative or absolute link to the film/series title page
    # title = title of the Film/Series/Anime/Other
    # title2 = episode title for Series/Anime/Other
    # season = season number, in numeric format
    # episode = episode number, in numeric format
    # thumb = relative or absolute link to the Film/Series/Anime/Other poster
    # quality = video quality as stated by the site
    # year = year in numeric format (4 digits)
    # duration = runtime of the Film/Series/Anime/Other
    # genere = genre of the Film/Series/Anime/Other, e.g. adventure, comedy
    # rating = score/vote in numeric format
    # type = video type, e.g. 'movie' for films or 'tvshow' for series;
    #        usually these are discriminators used by the site
    # lang = video language, e.g. ITA, Sub-ITA, Sub, SUB ITA
    # WARNING: if the title is found via the TMDB/TVDB/other lookup, the
    # posters and other info will not be the ones scraped from the site!
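    # A minimal sketch (hypothetical HTML, not from any real site) of a patron
    # whose named groups line up with known_keys; any extra group name is
    # copied onto the resulting Item as an attribute:
    #   patron = (r'<a href="(?P<url>[^"]+)"[^>]*>\s*'
    #             r'<img src="(?P<thumb>[^"]+)"[^>]*>\s*'
    #             r'<h2>(?P<title>[^<]+)</h2>\s*'
    #             r'<span class="quality">(?P<quality>[^<]+)</span>')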

    stagione = ''  # for sites that carry the season in the block but not in each episode
    for i, match in enumerate(matches):
        if pagination and (pag - 1) * pagination > i: continue  # pagination
        if pagination and i >= pag * pagination: break  # pagination
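        # e.g. with pagination = 50 and pag = 2, matches 0-49 are skipped and
        # the loop stops before match 100, so only the second page is built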
        listGroups = list(match.keys())
        match = list(match.values())

        if len(listGroups) > len(match):  # pad missing groups so indices align
            match.extend([''] * (len(listGroups) - len(match)))

        scraped = {}
        for kk in known_keys:
            val = match[listGroups.index(kk)] if kk in listGroups else ''
            if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
                val = scrapertoolsV2.find_single_match(
                    item.url, 'https?://[a-z0-9.-]+') + val
            scraped[kk] = val

        if scraped['season']:
            stagione = scraped['season']
        if stagione:
            episode = stagione + 'x' + scraped['episode']
        elif item.contentType == 'tvshow' and (scraped['episode'] == ''
                                               and stagione == ''):
            item.args = 'season_completed'
            episode = ''
        else:
            episode = re.sub(r'\s-\s|-|x|&#8211|&#215;', 'x',
                             scraped['episode']) if scraped['episode'] else ''
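            # e.g. scraped['episode'] = '2 - 05' or '2-05' -> episode = '2x05'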

        title = cleantitle(scraped['title']) if scraped['title'] else ''
        title2 = cleantitle(scraped['title2']) if scraped['title2'] else ''
        quality = scraped['quality'].strip() if scraped['quality'] else ''
        Type = scraped['type'] if scraped['type'] else ''
        plot = cleantitle(scraped["plot"]) if scraped["plot"] else ''

        # make formatted Title [longtitle]
        s = ' - '
        title = episode + (s if episode and title else '') + title
        longtitle = title + (s if title and title2 else '') + title2
        longtitle = typo(longtitle, 'bold')
        longtitle += (typo(Type, '_ () bold') if Type else '') + (typo(
            quality, '_ [] color kod') if quality else '')
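        # e.g. episode = '1x02', title = 'Foo', title2 = 'Pilot', quality = 'HD'
        # -> '1x02 - Foo - Pilot' in bold, followed by a '[HD]' quality tag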

        lang1, longtitle = scrapeLang(scraped, lang, longtitle)

        # if title is set, probably this is a list of episodes or video sources
        # the == scraped["title"] comparison is needed, otherwise the groups
        # after the categories are not picked up
        if item.infoLabels["title"] == scraped["title"]:
            infolabels = item.infoLabels
        else:
            infolabels = {}
            if scraped['year']:
                infolabels['year'] = scraped['year']
            if scraped["plot"]:
                infolabels['plot'] = plot
            if scraped['duration']:
                # 'h m' forms like '1h 42' or '1:42' -> total minutes
                matches = scrapertoolsV2.find_multiple_matches(
                    scraped['duration'],
                    r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
                for h, m in matches:
                    scraped['duration'] = int(h) * 60 + int(m)
                if not matches:  # plain minutes only, e.g. '102'
                    scraped['duration'] = scrapertoolsV2.find_single_match(
                        scraped['duration'], r'(\d+)')
                if scraped['duration']:  # guard: int('') would raise
                    infolabels['duration'] = int(scraped['duration']) * 60
            if scraped['genere']:
                genres = scrapertoolsV2.find_multiple_matches(
                    scraped['genere'], '[A-Za-z]+')
                infolabels['genere'] = ", ".join(genres)
            if scraped["rating"]:
                infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(
                    scraped["rating"])

        AC = CT = ''
        if typeContentDict:
            CT = item.contentType  # fallback when no variant matches
            for name, variants in typeContentDict.items():
                if str(scraped['type']).lower() in variants:
                    CT = name
                    break
        if typeActionDict:
            AC = action  # fallback when no variant matches
            for name, variants in typeActionDict.items():
                if str(scraped['type']).lower() in variants:
                    AC = name
                    break

        if (scraped["title"] not in blacklist) and (search.lower()
                                                    in longtitle.lower()):
            it = Item(
                channel=item.channel,
                action=AC if AC else action,
                contentType='episode'
                if function == 'episodios' else CT if CT else item.contentType,
                title=longtitle,
                fulltitle=item.fulltitle if function == 'episodios' else title,
                show=item.show if function == 'episodios' else title,
                quality=quality,
                url=scraped["url"],
                infoLabels=infolabels,
                thumbnail=item.thumbnail
                if function == 'episodios' else scraped["thumb"],
                args=item.args,
                contentSerieName=scraped['title']
                if item.contentType or (CT != 'movie' and function != 'episodios')
                else item.fulltitle if function == 'episodios' else '',
                contentTitle=scraped['title']
                if item.contentType or (CT == 'movie') else '',
                contentLanguage=lang1,
                contentEpisodeNumber=episode if episode else '')

            # copy any extra (non-known_keys) named groups onto the Item
            for lg in list(set(listGroups).difference(known_keys)):
                setattr(it, lg, match[listGroups.index(lg)])

            if 'itemHook' in args:
                it = args['itemHook'](it)
            itemlist.append(it)
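            # note: an 'itemHook' in args lets the caller post-process each
            # Item before it is appended, e.g. args = {'itemHook': my_hook}
            # (hypothetical callable)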

    return itemlist, matches