Example #1
def renumeration(itemlist, item, typography, dict_series, ID, SEASON, EPISODE,
                 MODE, TITLE):
    log()
    # If ID is 0, skip the renumbering
    if ID == '0':
        return itemlist

    # Numbering for the Specials
    elif SEASON == '0':
        EpisodeDict = {}
        for item in itemlist:
            number = scrapertoolsV2.find_single_match(item.title, r'\d+')
            item.title = typo('0x' + number + ' - ', typography) + item.title

    # Use the episode list if it exists in the JSON

    elif EPISODE:
        EpisodeDict = json.loads(base64.b64decode(EPISODE))

        # Check that the episode list is at least as long as itemlist
        if EpisodeDict == 'none':
            return error(itemlist)
        if len(EpisodeDict) >= len(itemlist):
            for item in itemlist:
                number = scrapertoolsV2.find_single_match(item.title,
                                                          r'\d+').lstrip("0")
                item.title = typo(EpisodeDict[str(number)] + ' - ',
                                  typography) + item.title
        else:
            make_list(itemlist, item, typography, dict_series, ID, SEASON,
                      EPISODE, MODE, TITLE)

    else:
        make_list(itemlist, item, typography, dict_series, ID, SEASON, EPISODE,
                  MODE, TITLE)
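
A minimal sketch of how the EPISODE argument appears to be built, inferred from the decoding above (Python 2, as elsewhere in these examples; the titles and numbering are hypothetical):

import base64
import json

# Hypothetical mapping of episode numbers to titles
episode_titles = {'1': 'Pilot', '2': 'The Second One'}
EPISODE = base64.b64encode(json.dumps(episode_titles))
# renumeration() recovers it with json.loads(base64.b64decode(EPISODE))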
Example #2
def nextPage(itemlist,
             item,
             data='',
             patron='',
             function_level=1,
             next_page='',
             resub=[]):
    # function_level is useful when this function is called by another function;
    # if the call is direct, leave it at the default
    if next_page == '':
        next_page = scrapertoolsV2.find_single_match(data, patron)

    if next_page != "":
        if resub: next_page = re.sub(resub[0], resub[1], next_page)
        if 'http' not in next_page:
            next_page = scrapertoolsV2.find_single_match(
                item.url, 'https?://[a-z0-9.-]+') + next_page
        next_page = re.sub('&amp;', '&', next_page)  # unescape &amp;
        log('NEXT= ', next_page)
        itemlist.append(
            Item(channel=item.channel,
                 action=item.action,
                 contentType=item.contentType,
                 title=typo(config.get_localized_string(30992),
                            'color kod bold'),
                 url=next_page,
                 args=item.args,
                 thumbnail=thumb()))

    return itemlist
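
A minimal usage sketch for nextPage, assuming a channel module where support, httptools and the names below are available; the listing function and the patron are hypothetical:

def peliculas(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # ... append the movie Items parsed from data ...
    # hand the raw page to nextPage with a hypothetical "next page" link pattern
    support.nextPage(itemlist, item, data=data,
                     patron=r'<a class="next" href="([^"]+)"')
    return itemlist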
Example #3
def play(item):
    support.log()
    itemlist = []
    ### Handling new cb01 wrapper
    if host[9:] + "/film/" in item.url:
        iurl = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
        support.log("/film/ wrapper: ", iurl)
        if iurl:
            item.url = iurl

    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')

    item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')

    logger.debug("##############################################################")
    if "go.php" in item.url:
        data = httptools.downloadpage(item.url).data
        if "window.location.href" in data:
            try:
                data = scrapertoolsV2.find_single_match(data, 'window.location.href = "([^"]+)";')
            except IndexError:
                data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
            data, c = unshortenit.unwrap_30x_only(data)
        else:
            data = scrapertoolsV2.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
        
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    else:
        data = support.swzz_get_url(item)

    return servertools.find_video_items(data=data)
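
A small illustration of the '/goto/' branch above, which carries the target URL base64-encoded in the path (Python 2 str.decode('base64'); the URL is hypothetical):

url = 'http://example.org/goto/aHR0cDovL2V4YW1wbGUub3JnL2ZpbG0v'
real_url = url.split('/goto/')[-1].decode('base64')
# real_url -> 'http://example.org/film/'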
Example #4
def episodios(item):
    support.log("episodios: %s" % item)
    action = 'findvideos'
    item.contentType = 'episode'
    # Load the page
    data = httptools.downloadpage(item.url,
                                  headers=headers).data.replace("'", '"')
    #========
    if 'clicca qui per aprire' in data.lower():
        item.url = scrapertoolsV2.find_single_match(data, '"go_to":"([^"]+)"')
        item.url = item.url.replace("\\", "")
        # Load the page
        data = httptools.downloadpage(item.url,
                                      headers=headers).data.replace("'", '"')
    elif 'clicca qui</span>' in data.lower():
        item.url = scrapertoolsV2.find_single_match(
            data, '<h2 style="text-align: center;"><a href="([^"]+)">')
        # Load the page
        data = httptools.downloadpage(item.url,
                                      headers=headers).data.replace("'", '"')
    #=========
    data = re.sub('\n|\t', ' ', data)
    patronBlock = r'(?P<block>STAGIONE\s\d+ (?:\()?(?P<lang>ITA|SUB ITA)(?:\))?<\/div>.*?)</div></div>'
    patron = r'(?:\s|\Wn)?(?:|<strong>)?(?P<episode>\d+&#\d+;\d+)(?:|</strong>) (?P<title>.*?)(?:|–)?<a\s(?P<url>.*?)<\/a><br\s\/>'

    return locals()  # consumed by the support.scrape wrapper (cf. the explicit call in Example #8)
Example #5
def findvideos(item):
    log()
    # itemlist = []

    if item.args == 'anime':
        data = item.url
    else:
        data = httptools.downloadpage(item.url, headers=headers).data

        # Check if it is a series
        check = scrapertoolsV2.find_single_match(
            data.replace('\t', '').replace('\n', ''),
            r'<div class="category-film"><h3>([^<]+)<\/h3>')
        if 'serie tv' in check.lower(): return episodios(item)
        elif 'anime' in check.lower(): return anime(item)

        if 'protectlink' in data:
            urls = scrapertoolsV2.find_multiple_matches(
                data, r'<iframe src="[^=]+=(.*?)"')
            for url in urls:
                url = url.decode('base64')
                if '\t' in url:
                    url = url[:-1]
                data += '\t' + url
            if 'nodmca' in data:
                page = httptools.downloadpage(url, headers=headers).data
                data += '\t' + scrapertoolsV2.find_single_match(
                    page, '<meta name="og:url" content="([^=]+)">')

    return support.server(item, data, headers=headers)
Example #6
def hdpass_get_servers(item):
    # Load the page
    data = httptools.downloadpage(item.url).data.replace('\n', '')
    patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
    url = scrapertoolsV2.find_single_match(data, patron).replace("?alta", "")
    url = url.replace("&download=1", "")
    if 'https' not in url:
        url = 'https:' + url

    if 'hdpass' in url or 'hdplayer' in url:
        data = httptools.downloadpage(url).data

        start = data.find('<div class="row mobileRes">')
        end = data.find('<div id="playerFront">', start)
        data = data[start:end]

        patron_res = '<div class="row mobileRes">(.*?)</div>'
        patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
        patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed"\s*value="([^"]+)"\s*/>'

        res = scrapertoolsV2.find_single_match(data, patron_res)

        itemlist = []

        for res_url, res_video in scrapertoolsV2.find_multiple_matches(
                res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):

            data = httptools.downloadpage(urlparse.urljoin(
                url, res_url)).data.replace('\n', '')

            mir = scrapertoolsV2.find_single_match(data, patron_mir)

            for mir_url, server in scrapertoolsV2.find_multiple_matches(
                    mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'):

                data = httptools.downloadpage(urlparse.urljoin(
                    url, mir_url)).data.replace('\n', '')
                for media_label, media_url in scrapertoolsV2.find_multiple_matches(
                        data, patron_media):
                    itemlist.append(
                        Item(channel=item.channel,
                             action="play",
                             title=item.title +
                             typo(server, '-- [] color kod') +
                             typo(res_video, '-- [] color kod'),
                             fulltitle=item.fulltitle,
                             quality=res_video,
                             show=item.show,
                             thumbnail=item.thumbnail,
                             contentType=item.contentType,
                             server=server,
                             url=url_decode(media_url)))
                    log("video -> ", res_video)

    return controls(itemlist, item, AutoPlay=True, CheckLinks=True)
Example #7
def last(item):
    support.log()
    
    itemlist = []
    infoLabels = {}
    quality = ''
    PERPAGE = 20
    page = 1
    if item.page:
        page = item.page

    if item.contentType == 'tvshow':
        matches = support.match(item, r'<a href="([^">]+)".*?>([^(:(|[)]+)([^<]+)<\/a>', '<article class="sequex-post-content.*?</article>', headers)[0]
    else:
        matches = support.match(item, r'<a href=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>', r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>', headers)[0]

    for i, (url, title, info) in enumerate(matches):
        if (page - 1) * PERPAGE > i: continue
        if i >= page * PERPAGE: break
        add = True
        title = title.rstrip()
        if item.contentType == 'tvshow':
            for existing in itemlist:
                if existing.url == url:  # drop duplicates (same url already listed)
                    add = False
        else:
            infoLabels['year'] = scrapertoolsV2.find_single_match(info, r'\(([0-9]+)\)')
            quality = scrapertoolsV2.find_single_match(info, r'\[([A-Z]+)\]')

        if quality:
            longtitle = title + support.typo(quality,'_ [] color kod')
        else:
            longtitle = title

        if add:
            itemlist.append(
                    Item(channel=item.channel,
                        action='findvideos' if item.contentType == 'movie' else 'episodios',
                        contentType=item.contentType,
                        title=longtitle,
                        fulltitle=title,
                        show=title,
                        quality=quality,
                        url=url,
                        infoLabels=infoLabels
                        )
                )
    support.pagination(itemlist, item, page, PERPAGE)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #8
def episodios(item):
    support.log("episodios")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url).data
    #========
    if 'clicca qui per aprire' in data.lower():
        item.url = scrapertoolsV2.find_single_match(data, '"go_to":"([^"]+)"')
        item.url = item.url.replace("\\", "")
        # Carica la pagina
        data = httptools.downloadpage(item.url).data
    elif 'clicca qui</span>' in data.lower():
        item.url = scrapertoolsV2.find_single_match(
            data, '<h2 style="text-align: center;"><a href="([^"]+)">')
        # Carica la pagina
        data = httptools.downloadpage(item.url).data
    #=========
    patron = r'(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?<\/div>'\
             '<div class="su-spoiler-content su-clearfix" style="display:none">|'\
             '(?:\s|\Wn)?(?:<strong>)?(\d+&#.*?)(?:|–)?<a\s(.*?)<\/a><br\s\/>)'
    listGroups = ['lang', 'title', 'url']
    itemlist = support.scrape(item,
                              data=data,
                              patron=patron,
                              listGroups=listGroups,
                              action='findvideos')

    # Allows configuring the video library without opening the dedicated menu,
    # so the settings can be enabled/disabled directly from the episodes page
    itemlist.append(
        Item(
            channel='setting',
            action="channel_config",
            title=support.typo("Configurazione Videoteca color lime"),
            plot=
            'Filtra per lingua utilizzando la configurazione della videoteca.\
                     Escludi i video in sub attivando "Escludi streams... " e aggiungendo sub in Parole',
            config='videolibrary',  #item.channel,
            folder=False,
            thumbnail=channelselector.get_thumb('setting_0.png')))

    itemlist = filtertools.get_links(itemlist, item, list_language)
    return itemlist
Example #9
def episodios(item):
    log()
    itemlist = []
    if item.args == 'anime': return anime(item)

    data = httptools.downloadpage(item.url).data

    # Check if it is a series
    check = scrapertoolsV2.find_single_match(
        data.replace('\t', '').replace('\n', ''),
        r'<div class="category-film"><h3>([^<]+)<\/h3>')

    if 'serie tv' not in check.lower(): return findvideos(item)

    elif 'anime' in check.lower(): return findvideos(item)

    patron = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">'
    url = scrapertoolsV2.find_single_match(data, patron)
    log('URL =', url)
    seasons = support.match(
        item,
        r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>',
        r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>',
        headers=headers,
        url=url)[0]

    for season_url, season in seasons:
        season_url = urlparse.urljoin(url, season_url)
        episodes = support.match(
            item,
            r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>',
            r'Episodio<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>',
            headers=headers,
            url=season_url)[0]
        for episode_url, episode in episodes:
            episode_url = urlparse.urljoin(url, episode_url)
            title = season + "x" + episode.zfill(2)

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title=support.typo(title + ' - ' + item.fulltitle,
                                        'bold'),
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    support.videolibrary(itemlist, item, 'color kod bold')

    return itemlist
Example #10
def episodios(item):
    logger.info(item.channel + 'episodios')
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    if 'accordion-item' in data:
        block = scrapertoolsV2.find_single_match(
            data, 'accordion-item.*?>(.*?)<div id="disqus_thread">')
        patron = r'<img src="([^"]+)">.*?<li class="season-no">(.*?)<\/li>(.*?)<\/table>'
        matches = scrapertoolsV2.find_multiple_matches(block, patron)

        for scrapedthumb, scrapedtitle, scrapedurl in matches:
            title = scrapedtitle + ' - ' + item.title
            if title[0] == 'x':
                title = '1' + title

            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType=item.contentType,
                     title=title,
                     fulltitle=title,
                     show=title,
                     quality=item.quality,
                     url=scrapedurl,
                     thumbnail=scrapedthumb))

    else:
        block = scrapertoolsV2.find_single_match(
            data, '<div id="info" class="pad">(.*?)<div id="disqus_thread">'
        ).replace('</p>', '<br />').replace('×', 'x')
        matches = scrapertoolsV2.find_multiple_matches(
            block, r'<strong>(.*?)<\/strong>.*?<p>(.*?)<span')
        for lang, seasons in matches:
            lang = re.sub('.*?Stagione[^a-zA-Z]+', '', lang)
            # patron = r'([0-9]+x[0-9]+) (.*?)<br'
            season = scrapertoolsV2.find_multiple_matches(
                seasons, r'([0-9]+x[0-9]+) (.*?)<br')
            for title, url in season:
                title = title + ' - ' + lang
                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         fulltitle=title,
                         show=title,
                         url=url,
                         contentType=item.contentType,
                         action='findvideos'))

    return itemlist
Example #11
def episodios(item):
    logger.info("%s - %s" % (item.title, item.url))

    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    fanart = scrapertoolsV2.find_single_match(
        data, "background-image[^'\"]+['\"]([^'\"]+)")
    plot = scrapertoolsV2.find_single_match(
        data, "id=['\"]profile2['\"]>\s*(.*?)\s*</div>")

    # logger.debug("fanart: %s" % fanart)
    # logger.debug("plot: %s" % plot)

    episodes = re.findall(
        "<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>",
        data, re.MULTILINE | re.DOTALL)
    for url, title, flags in episodes:
        title = re.sub("<span[^>]+>", "", title).replace("</span>", "")
        idiomas = " ".join([
            "[%s]" % IDIOMAS.get(language, "OVOS") for language in re.findall(
                "banderas/([^\.]+)", flags, re.MULTILINE)
        ])
        filter_lang = idiomas.replace("[", "").replace("]", "").split(" ")
        display_title = "%s - %s %s" % (item.show, title, idiomas)

        season_episode = scrapertoolsV2.get_season_and_episode(title).split(
            'x')
        item.infoLabels['season'] = season_episode[0]
        item.infoLabels['episode'] = season_episode[1]
        # logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url)))
        itemlist.append(
            item.clone(title=display_title,
                       url=urlparse.urljoin(HOST, url),
                       action="findvideos",
                       plot=plot,
                       fanart=fanart,
                       language=filter_lang))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca",
                       action="add_serie_to_library",
                       extra="episodios"))

    return itemlist
Example #12
def lista_anime(item):
    log()
    itemlist = []
    matches, data = support.match(
        item,
        r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>'
    )
    for scrapedurl, scrapedthumb, scrapedoriginal, scrapedtitle, scrapedplot in matches:

        if scrapedoriginal == scrapedtitle:
            scrapedoriginal = ''
        else:
            scrapedoriginal = support.typo(scrapedoriginal, ' -- []')

        year = ''
        lang = ''
        infoLabels = {}
        if '(' in scrapedtitle:
            year = scrapertoolsV2.find_single_match(scrapedtitle,
                                                    r'(\([0-9]+\))')
            lang = scrapertoolsV2.find_single_match(scrapedtitle,
                                                    r'(\([a-zA-Z]+\))')

        infoLabels['year'] = year
        title = scrapedtitle.replace(year, '').replace(lang, '').strip()
        original = scrapedoriginal.replace(year, '').replace(lang, '').strip()
        if lang: lang = support.typo(lang, '_ color kod')
        longtitle = '[B]' + title + '[/B]' + lang + original

        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 contentType="episode",
                 action="episodios",
                 title=longtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumb,
                 fulltitle=title,
                 show=title,
                 infoLabels=infoLabels,
                 plot=scrapedplot,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)

    # Next page
    support.nextPage(itemlist, item, data,
                     r'<a class="page-link" href="([^"]+)" rel="next"')

    return itemlist
Example #13
def peliculas(item):
    logger.info(item.channel + 'peliculas')
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(data,
                                             r'<ul class="posts">(.*)<\/ul>')

    patron = r'<li><a href="([^"]+)" data-thumbnail="([^"]+)">.*?<div class="title">([^<]+)<\/div>'
    matches = scrapertoolsV2.find_multiple_matches(block, patron)

    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        title = re.sub(r'.\(.*?\)|.\[.*?\]', '', scrapedtitle)
        quality = scrapertoolsV2.find_single_match(scrapedtitle, r'\[(.*?)\]')
        if not quality:
            quality = 'SD'

        longtitle = title + ' [COLOR blue][' + quality + '][/COLOR]'

        if item.contentType == 'episode':
            action = 'episodios'
        else:
            action = 'findvideos'

        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 contentType=item.contentType,
                 title=longtitle,
                 fulltitle=title,
                 show=title,
                 quality=quality,
                 url=scrapedurl,
                 thumbnail=scrapedthumb))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    next_page = scrapertoolsV2.find_single_match(data,
                                                 '<a href="([^"]+)">Pagina')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 contentType=item.contentType,
                 title="[COLOR blue]" + config.get_localized_string(30992) +
                 " >[/COLOR]",
                 url=next_page,
                 thumbnail=thumb()))

    return itemlist
Example #14
def typo(string, typography=''):

    kod_color = '0xFF65B3DA'  #'0xFF0081C2'

    # Check if the typographic attributes are in the string or outside
    if typography:
        string = string + ' ' + typography
    if config.get_localized_string(30992) in string:
        string = string + ' >'

    # If there are no attributes, it applies the default ones
    attribute = [
        '[]', '()', '{}', 'submenu', 'color', 'bold', 'italic', '_', '--',
        '[B]', '[I]', '[COLOR]'
    ]

    movie_word_list = ['film', 'serie', 'tv', 'anime', 'cinema', 'sala']
    search_word_list = ['cerca']
    categories_word_list = [
        'genere', 'categoria', 'categorie', 'ordine', 'lettera', 'anno',
        'alfabetico', 'a-z', 'menu'
    ]

    if not any(word in string for word in attribute):
        if any(word in string.lower() for word in search_word_list):
            string = '[COLOR ' + kod_color + ']' + string + '[/COLOR]'
        elif any(word in string.lower() for word in categories_word_list):
            string = ' > ' + string
        elif any(word in string.lower() for word in movie_word_list):
            string = '[B]' + string + '[/B]'

    # Otherwise it uses the typographical attributes of the string
    else:
        if '[]' in string:
            string = '[' + re.sub(r'\s\[\]', '', string) + ']'
        if '()' in string:
            string = '(' + re.sub(r'\s\(\)', '', string) + ')'
        if '{}' in string:
            string = '{' + re.sub(r'\s\{\}', '', string) + '}'
        if 'submenu' in string:
            string = u"\u2022\u2022 ".encode('utf-8') + re.sub(
                r'\ssubmenu', '', string)
        if 'color' in string:
            color = scrapertoolsV2.find_single_match(string, 'color ([a-z]+)')
            if color == 'kod' or color == '': color = kod_color
            string = '[COLOR ' + color + ']' + re.sub(r'\scolor\s([a-z]+)', '',
                                                      string) + '[/COLOR]'
        if 'bold' in string:
            string = '[B]' + re.sub(r'\sbold', '', string) + '[/B]'
        if 'italic' in string:
            string = '[I]' + re.sub(r'\sitalic', '', string) + '[/I]'
        if '_' in string:
            string = ' ' + re.sub(r'\s_', '', string)
        if '--' in string:
            string = ' - ' + re.sub(r'\s--', '', string)
        if 'bullet' in string:
            string = '[B]' + u"\u2022".encode('utf-8') + '[/B] ' + re.sub(
                r'\sbullet', '', string)

    return string
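
A few illustrative calls, with the results traced from the branches above (kod_color is '0xFF65B3DA'; this assumes the localized string 30992 does not occur in the inputs):

typo('Film')                  # -> '[B]Film[/B]'  (movie_word_list branch)
typo('Cerca film')            # -> '[COLOR 0xFF65B3DA]Cerca film[/COLOR]'  (search branch)
typo('Novita', 'bold')        # -> '[B]Novita[/B]'
typo('HD', '_ [] color kod')  # -> ' [COLOR 0xFF65B3DA][HD][/COLOR]'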
Example #15
def episodios(item):
    log()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    url = scrapertoolsV2.find_single_match(data,
                                           patron).replace("?seriehd", "")
    seasons = support.match(item, r'<li[^>]+><a href="([^"]+)">(\d+)<',
                            r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers,
                            url)[0]

    for season_url, season in seasons:
        season_url = urlparse.urljoin(url, season_url)
        episodes = support.match(item, r'<li[^>]+><a href="([^"]+)">(\d+)<',
                                 '<h3>EPISODIO</h3><ul>(.*?)</ul>', headers,
                                 season_url)[0]
        for episode_url, episode in episodes:
            episode_url = urlparse.urljoin(url, episode_url)
            title = season + "x" + episode.zfill(2)

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="episode",
                     title=support.typo(title + ' - ' + item.show, 'bold'),
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    support.videolibrary(itemlist, item, 'color kod bold')

    return itemlist
Example #16
def findvideos(item):
    log()
    itemlist = []
   
    matches, data = support.match(item, r'class="tab.*?data-name="([0-9]+)">([^<]+)</span', headers=headers)
    videoData = ''
    
    for serverid, servername in matches:
        block = scrapertoolsV2.find_multiple_matches(
            data, 'data-id="' + serverid + '">(.*?)<div class="server')
        id = scrapertoolsV2.find_single_match(
            str(block), r'<a data-id="([^"]+)" data-base="' + item.fulltitle + '"')
        dataJson = httptools.downloadpage(
            '%s/ajax/episode/info?id=%s&server=%s&ts=%s' %
            (host, id, serverid, int(time.time())),
            headers=[['x-requested-with', 'XMLHttpRequest']]).data
        json = jsontools.load(dataJson)
        log('JSON= ', json)

        videoData += '\n' + json['grabber']

        if serverid == '28':
            itemlist.append(
                Item(
                    channel=item.channel,
                    action="play",
                    title='diretto',
                    quality='',
                    url=json['grabber'],
                    server='directo',
                    show=item.show,
                    contentType=item.contentType,
                    folder=False))

    return support.server(item, videoData, itemlist)
Example #17
def episodios(item):
    log()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    block = scrapertoolsV2.find_single_match(data, r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>')

    itemlist.append(
        Item(channel=item.channel,
             action='findvideos',
             contentType='episode',
             title=support.typo('Episodio 1 bold'),
             fulltitle=item.title,
             url=item.url,
             thumbnail=item.thumbnail))

    if block:
        matches = re.compile(r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>', re.DOTALL).findall(data)
        for url, number in matches:
            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType='episode',
                     title=support.typo('Episodio ' + number,'bold'),
                     fulltitle=item.title,
                     url=url,
                     thumbnail=item.thumbnail))
    
    autorenumber.renumber(itemlist, item)
    support.videolibrary(itemlist, item)
    return itemlist
Example #18
def match(item, patron='', patronBlock='', headers='', url='', post=''):
    matches = []
    if type(item) == str:
        data = item
    else:
        url = url if url else item.url
        if post:
            data = httptools.downloadpage(url,
                                          headers=headers,
                                          ignore_response_code=True,
                                          post=post).data.replace("'", '"')
        else:
            data = httptools.downloadpage(
                url, headers=headers,
                ignore_response_code=True).data.replace("'", '"')
    data = re.sub(r'\n|\t', ' ', data)
    data = re.sub(r'>\s\s*<', '><', data)
    log('DATA= ', data)

    if patronBlock:
        block = scrapertoolsV2.find_single_match(data, patronBlock)
        log('BLOCK= ', block)
    else:
        block = data

    if patron:
        matches = scrapertoolsV2.find_multiple_matches(block, patron)
        log('MATCHES= ', matches)

    return matches, block
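
A minimal usage sketch for match from inside a channel function; the patron and patronBlock below are hypothetical:

matches, block = support.match(
    item,
    patron=r'<a href="([^"]+)">([^<]+)</a>',      # one (url, title) tuple per link
    patronBlock=r'<ul class="posts">(.*?)</ul>',  # optional enclosing block
    headers=headers)
for url, title in matches:
    itemlist.append(Item(channel=item.channel, action='findvideos',
                         title=title, url=url))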
Example #19
def alfabetico(item):
    logger.info("[animeworld.py] alfabetico")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\t', '', data)
    data = re.sub(r'>\s*<', '><', data)

    block = scrapertoolsV2.find_single_match(
        data, r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>')

    matches = re.compile('<a href="([^"]+)" title="([^"]+)">',
                         re.DOTALL).findall(block)
    scrapertoolsV2.printMatches(matches)

    for url, title in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action='lista_anime',
                 contentType="tvshow",
                 title=title,
                 fulltitle=title,
                 show=title,
                 url=url,
                 plot=""))

    return itemlist
Example #20
def dooplay_get_links(item, host):
    # Get links from sites that use the dooplay theme and dooplay_player.
    # Returns a list of dicts with the keys: url, title, server.

    data = httptools.downloadpage(item.url).data.replace("'", '"')
    patron = r'<li id="player-option-[0-9]".*?data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)".*?<span class="title".*?>([^<>]+)</span>(?:<span class="server">([^<>]+))?'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)

    ret = []

    for type, post, nume, title, server in matches:
        postData = urllib.urlencode({
            "action": "doo_player_ajax",
            "post": post,
            "nume": nume,
            "type": type
        })
        dataAdmin = httptools.downloadpage(host + '/wp-admin/admin-ajax.php',
                                           post=postData,
                                           headers={
                                               'Referer': item.url
                                           }).data
        link = scrapertoolsV2.find_single_match(dataAdmin,
                                                "<iframe.*src='([^']+)'")
        ret.append({'url': link, 'title': title, 'server': server})

    return ret
Example #21
def findhost():
    data = httptools.downloadpage('https://seriehd.nuovo.link/').data
    global host, headers
    host = scrapertoolsV2.find_single_match(
        data, r'<div class="elementor-button-wrapper"> <a href="([^"]+)"')
    headers = [['Referer', host]]
    return host
Example #22
def findvideos(item):
    itemlist = []
    for link in support.dooplay_get_links(item, host):
        if link['title'] != 'Trailer':
            logger.info(link['title'])
            server, quality = scrapertoolsV2.find_single_match(
                link['title'], '([^ ]+) ?(HD|3D)?')
            if quality:
                title = server + " [COLOR blue][" + quality + "][/COLOR]"
            else:
                title = server
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     url=link['url'],
                     server=server,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     quality=quality,
                     contentType=item.contentType,
                     folder=False))

    autoplay.start(itemlist, item)

    return itemlist
Example #23
def findvideos(item):
    logger.info(item.channel + 'findvideos')
    itemlist = []
    logger.info('TYPE= ' + item.contentType)
    if item.contentType == 'movie':
        data = httptools.downloadpage(item.url, headers=headers).data
        logger.info('DATA= ' + data)
        item.url = scrapertoolsV2.find_single_match(data,
                                                    r'<table>(.*?)<\/table>')

    urls = scrapertoolsV2.find_multiple_matches(
        item.url, r"<a href='([^']+)'.*?>.*?>.*?([a-zA-Z]+).*?<\/a>")
    logger.info('EXTRA= ' + item.extra)
    for url, server in urls:
        itemlist.append(
            Item(channel=item.channel,
                 action='play',
                 title=item.title + ' [COLOR blue][' + server + '][/COLOR]',
                 contentType="movie",
                 server=server,
                 url=url))

    autoplay.start(itemlist, item)

    return itemlist
Example #24
def episodios(item):
    domain, id, season, episode = scrapertoolsV2.find_single_match(
        item.url, r'(https?://[a-z0-9.-]+).*?/([^-/]+)-S([0-9]+)-([0-9]+)$')
    itemlist = []
    for n in range(1, int(episode)):
        url = domain + '/play_s.php?s=' + id + '-S' + season + '&e=' + str(n)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=str(int(season)) + 'x' + str(n) +
                 support.typo(item.quality, '-- [] color kod'),
                 url=url,
                 fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 quality=item.quality,
                 contentType=item.contentType,
                 folder=False,
                 args={
                     'id': id,
                     'season': season,
                     'episode': episode
                 }))

    support.videolibrary(itemlist, item)
    return itemlist
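
The URL layout this function expects can be checked by exercising its regex directly; a small sketch with a hypothetical URL:

import re

url = 'https://example.org/serie/abc123-S01-10'
domain, id, season, episode = re.search(
    r'(https?://[a-z0-9.-]+).*?/([^-/]+)-S([0-9]+)-([0-9]+)$', url).groups()
# -> ('https://example.org', 'abc123', '01', '10')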
Example #25
def findvideos(item):
    logger.info()
    itemlist = list()
    sublist = list()

    # Download the page
    data = httptools.downloadpage(item.url).data

    if not item.plot:
        item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
        item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)

    patron = '<option value="([^"]+)"[^>]+'
    patron += '>([^<]+).*?</i>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, idioma, calidad in matches:
        if 'yaske' in url:
            data = httptools.downloadpage(url).data
            url_enc = scrapertoolsV2.find_single_match(data, "eval.*?'(.*?)'")
            url_dec = base64.b64decode(url_enc)
            url = scrapertoolsV2.find_single_match(url_dec, 'iframe src="(.*?)"')
        sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
                                  language=idioma.strip()))

    sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)

    # Add the servers found, grouping them by language
    for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
        lista_idioma = filter(lambda i: i.language == k, sublist)
        if lista_idioma:
            itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
                                 text_color=color2, text_bold=True, thumbnail=thumbnail_host))
            itemlist.extend(lista_idioma)

    # Insert the "Buscar trailer" and "Añadir a la videoteca" items
    if itemlist and item.extra != "library":
        title = "%s [Buscar trailer]" % (item.contentTitle)
        itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
                                      text_color=color3, title=title, viewmode="list"))

        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
                                 action="add_pelicula_to_library", url=item.url, text_color="green",
                                 contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))

    return itemlist
Example #26
def swzz_get_url(item):
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0'
    }

    if "/link/" in item.url:
        data = httptools.downloadpage(item.url, headers=headers).data
        if "link =" in data:
            data = scrapertoolsV2.find_single_match(data, 'link = "([^"]+)"')
            if 'http' not in data:
                data = 'https:' + data
        else:
            match = scrapertoolsV2.find_single_match(
                data, r'<meta name="og:url" content="([^"]+)"')
            match = scrapertoolsV2.find_single_match(
                data, r'URL=([^"]+)">') if not match else match

            if not match:
                from lib import jsunpack

                try:
                    data = scrapertoolsV2.find_single_match(
                        data.replace('\n', ''),
                        r"(eval\s?\(function\(p,a,c,k,e,d.*?)</script>")
                    data = jsunpack.unpack(data)

                    logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
                except:
                    logger.debug(
                        "##### The content is yet unpacked ##\n%s\n##" % data)

                data = scrapertoolsV2.find_single_match(
                    data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
                data, c = unshortenit.unwrap_30x_only(data)
            else:
                data = match
        if data.startswith('/'):
            data = urlparse.urljoin("http://swzz.xyz", data)
            if not "vcrypt" in data:
                data = httptools.downloadpage(data).data
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    else:
        data = item.url

    return data
Example #27
def serietv(item):

    logger.info("%s serietv log: %s" % (__channel__, item))
    itemlist = []
    # Load the page
    data = httptools.downloadpage(item.url).data

    # Extract the contents
    patron = '<div class="post-thumb">\s*<a href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        #scrapedplot = ""
        scrapedtitle = scrapertoolsV2.decodeHtmlentities(
            scrapedtitle)  #.replace("Streaming", ""))
        if scrapedtitle.startswith("Link to "):
            scrapedtitle = scrapedtitle[8:]
        num = scrapertoolsV2.find_single_match(scrapedurl, '(-\d+/)')
        if num:
            scrapedurl = scrapedurl.replace(num, "-episodi/")
        itemlist.append(
            Item(
                channel=item.channel,
                action="episodios",
                #contentType="tvshow",
                contentSerieName=scrapedtitle,
                title=scrapedtitle,
                #text_color="azure",
                url=scrapedurl,
                thumbnail=scrapedthumbnail,
                #plot=scrapedplot,
                show=item.show,
                extra=item.extra,
                folder=True))

    # Posters, plot and other data from TMDb; if the year is present, the lookup is more accurate
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')

    # Pagination
    patronvideos = '<a class="next page-numbers" href="?([^>"]+)">Avanti &raquo;</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(
                channel=item.channel,
                action="serietv",
                title="[COLOR lightgreen]" +
                config.get_localized_string(30992) + "[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                extra=item.extra,
                folder=True))

    return itemlist
Example #28
def episodios(item):
    item.contentType = 'episode'
    itemlist = []

    data = httptools.downloadpage(item.url).data
    matches = scrapertoolsV2.find_multiple_matches(
        data,
        r'(<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>.*?)<div class="spdiv">\[riduci\]</div>'
    )

    for match in matches:
        support.log(match)
        blocks = scrapertoolsV2.find_multiple_matches(
            match, '(?:<p>)(.*?)(?:</p>|<br)')
        season = scrapertoolsV2.find_single_match(
            match, r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)').strip()

        for block in blocks:
            episode = scrapertoolsV2.find_single_match(
                block, r'([0-9]+(?:&#215;|×)[0-9]+)').strip()
            seasons_n = scrapertoolsV2.find_single_match(
                block, r'<strong>STAGIONE\s+\d+([^<>]+)').strip()

            if seasons_n:
                season = seasons_n

            if not episode: continue

            season = re.sub(r'&#8211;|–', "-", season)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title="[B]" + episode + "[/B] " + season,
                     fulltitle=episode + " " + season,
                     show=episode + " " + season,
                     url=block,
                     extra=item.extra,
                     thumbnail=item.thumbnail,
                     infoLabels=item.infoLabels))

    support.videolibrary(itemlist, item)

    return itemlist
Example #29
def findhost():
    global host, headers
    data = httptools.downloadpage('https://casacinema.nuovo.link').data
    host = scrapertoolsV2.find_single_match(
        data,
        r'<div class="elementor-widget-container"><div class="elementor-button-wrapper"> <a href="([^"]+)"'
    )
    headers = [['Referer', host]]
    if host.endswith('/'):
        host = host[:-1]
Example #30
def episodios(item):
    logger.info("[animeworld.py] episodios")
    itemlist = []

    data = httptools.downloadpage(item.url).data.replace('\n', '')
    data = re.sub(r'>\s*<', '><', data)
    block = scrapertoolsV2.find_single_match(
        data, r'<div class="widget servers".*?>(.*?)<div id="download"')
    block = scrapertoolsV2.find_single_match(
        block, r'<div class="server.*?>(.*?)<div class="server.*?>')

    patron = r'<li><a.*?href="([^"]+)".*?>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(block)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = '[B] Episodio ' + scrapedtitle + '[/B]'
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapedtitle,
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
                 thumbnail=item.thumbnail))

    autorenumber.renumber(itemlist, item, 'bold')

    # Add to the video library
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" %
                 config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist