Example #1
0
def peliculas_tv(item):
    """Build the episode list for the TV section.

    Scrapes url/title pairs from the post-meta anchors, skips known
    non-content entries, extracts season/episode numbers from the title
    and fills the remaining metadata via TMDB.
    """
    log()
    itemlist = []

    # Anchor carrying both the episode url and its title.
    patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'

    html = support.match(item, patron=patron, headers=headers)
    matches = html.matches
    data = html.data

    for scrapedurl, scrapedtitle in matches:
        # Site navigation/banner entries that also match the pattern.
        if scrapedtitle in ["FACEBOOK", "RAPIDGATOR", "WELCOME!"]:
            continue

        scrapedthumbnail = ""
        scrapedplot = ""
        scrapedtitle = cleantitle(scrapedtitle)
        infoLabels = {}
        # SxE markers like '2x05'; note \d* also matches a bare 'x' — TODO confirm
        episode = scrapertools.find_multiple_matches(scrapedtitle,
                                                     r'((\d*)x(\d*))')
        if episode:  # workaround for when whole seasons or other stuff are posted, those should be intercepted TODO
            episode = episode[0]
            # Strip the season suffix; only covers " S0".." S2" prefixes.
            title = scrapedtitle.split(" S0")[0].strip()
            title = title.split(" S1")[0].strip()
            title = title.split(" S2")[0].strip()

            infoLabels['season'] = episode[1]
            infoLabels['episode'] = episode[2].zfill(2)

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title=title + " - " + episode[0] + " " +
                     support.typo("Sub-ITA", '_ [] color kod'),
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     contentSerieName=title,
                     contentLanguage='Sub-ITA',
                     plot=scrapedplot,
                     infoLabels=infoLabels,
                     folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    patron = r'<strong class="on">\d+</strong>\s?<a href="([^<]+)">\d+</a>'
    support.nextPage(itemlist, item, data, patron)

    return itemlist
Example #2
0
def lista_serie(item):
    """List TV series, paginated locally PERPAGE entries at a time.

    ``item.url`` may carry the page number after a ``{}`` marker, and may
    itself hold a pre-built list of entries ('url||title' paragraphs).
    """
    log()
    itemlist = []

    PERPAGE = 15

    # Page number travels in the url after a '{}' marker.
    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)

    if '||' in item.url:
        # Entries embedded in the url itself: one per paragraph.
        matches = [entry.split('||') for entry in item.url.split('\n\n')]
    else:
        # Scrape the category entries from the page.
        patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
        matches = support.match(item, patron, headers=headers)[0]

    first = (page - 1) * PERPAGE
    for idx, (url, raw_title) in enumerate(matches):
        if idx < first:
            continue
        if idx >= page * PERPAGE:
            break
        title = cleantitle(raw_title)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=url,
                 thumbnail="",
                 fulltitle=title,
                 show=title,
                 plot="",
                 contentType='episode',
                 originalUrl=url,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination: only offer a next page when this one was full.
    if len(matches) >= page * PERPAGE:
        support.nextPage(itemlist,
                         item,
                         next_page=(item.url + '{}' + str(page + 1)))

    return itemlist
Example #3
0
def lista_serie(item):
    """Paginated series list.

    The page number may be appended to ``item.url`` after a ``{}`` marker;
    the url itself may be a pre-built '||'-separated entry list. PERPAGE
    is presumably a module-level constant — TODO confirm.
    """
    log()
    itemlist = []

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    if '||' in item.url:
        # Entries embedded in the url: one per paragraph, 'url||title'.
        series = item.url.split('\n\n')
        matches = []
        for i, serie in enumerate(series):
            # NOTE(review): .decode('utf-8') only exists on Python 2 byte
            # strings; on Python 3 'serie' is str and this raises — verify.
            matches.append(serie.decode('utf-8').split('||'))
        series = matches
        support.log("SERIE ALF :", series)
    else:
        series = serietv()
        support.log("SERIE ALF 2 :", series)

    for i, (scrapedurl, scrapedtitle) in enumerate(series):
        # Local pagination window.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break

        scrapedplot = ""
        scrapedthumbnail = ""

        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=scrapedtitle,
                 extra=item.extra,
                 contentType='tvshow',
                 originalUrl=scrapedurl,
                 folder=True))

    support.checkHost(item, itemlist)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Offer a next page only when the current one was full.
    if len(series) >= p * PERPAGE:
        next_page = item.url + '{}' + str(p + 1)
        support.nextPage(itemlist, item, next_page=next_page)

    return itemlist
def peliculas(item):
    """Scrape the movie list page and return a list of 'findvideos' Items.

    The fragment of the page that is selected depends on ``item.args``;
    the language flag is derived from the quality/dub column.
    """
    logger.info("%s mainlist peliculas log: %s" % (__channel__, item))
    itemlist = []
    # Download the page.
    data = httptools.downloadpage(item.url, headers=headers).data
    if item.args != 'orderalf':
        # Select the relevant fragment of the page per section.
        # NOTE(review): 'bloque' is never used below — the regex scans the
        # whole 'data'. Confirm whether the match should run on 'bloque'.
        if item.args == 'pellicola' or item.args == 'years':
            bloque = scrapertools.find_single_match(
                data,
                '<div class="cover boxcaption">(.*?)<div id="right_bar">')
        elif item.args == "search":
            bloque = scrapertools.find_single_match(
                data, '<div class="cover boxcaption">(.*?)</a>')
        else:
            bloque = scrapertools.find_single_match(
                data,
                '<div class="cover boxcaption">(.*?)<div class="page_nav">')
        patron = '<h2>.<a href="(.*?)".*?src="(.*?)".*?class="trdublaj">(.*?)<div class="ml-item-hiden".*?class="h4">(.*?)<.*?label">(.*?)</span'
        matches = scrapertools.find_multiple_matches(data, patron)
        for scrapedurl, scrapedimg, scrapedqualang, scrapedtitle, scrapedyear in matches:
            # Language flag comes from the quality/dub label.
            if 'sub ita' in scrapedqualang.lower():
                scrapedlang = 'Sub-Ita'
            else:
                scrapedlang = 'ITA'
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentTitle=scrapedtitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     infoLabels={'year': scrapedyear},
                     contentType="movie",  # fixed kwarg typo: was 'contenType'
                     thumbnail=host + scrapedimg,
                     title="%s [%s]" % (scrapedtitle, scrapedlang),
                     language=scrapedlang))

    # The site's year matches TMDB, so the title-year lookup is exact and
    # scraping posters/plots from the site (often wrong) is unnecessary.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    support.nextPage(itemlist, item, data,
                     '<span>[^<]+</span>[^<]+<a href="(.*?)">')

    return itemlist
Example #5
0
def peliculas(item):
    """Build the movie list for the current page.

    Matches each cover block, pairs it with the year/duration/plot info
    block via the watch url, and decorates the title with quality and
    Sub-ITA flags.
    """
    support.log()
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)

    # The year/duration/plot list covers the whole page: scrape it once
    # here instead of re-scraping it on every loop iteration.
    info = scrapertoolsV2.find_multiple_matches(
        data,
        r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">'
    )

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
        infoLabels = {}
        duration = ''
        scrapedplot = ''
        # Pair this movie with its info entry via the watch url. When no
        # entry matches, leave the metadata empty instead of inheriting
        # the values of an unrelated entry (and instead of crashing with
        # NameError when 'info' is empty, as the previous code did).
        for year, dur, plot, checkUrl in info:
            if checkUrl == scrapedurl:
                infoLabels['year'] = year
                duration = dur
                scrapedplot = plot
                break

        if duration:
            # The site reports minutes ("NN min"); Kodi expects seconds.
            infoLabels['duration'] = int(duration.replace(' min', '')) * 60
        scrapedthumbnail = host + scrapedthumbnail
        scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        fulltitle = scrapedtitle
        if subDiv:
            fulltitle += support.typo(subText + ' _ () color limegreen')
        fulltitle += support.typo(scrapedquality.strip() + ' _ [] color kod')

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 # NOTE(review): 'contenType' looks like a typo for
                 # 'contentType' — kept as-is; verify what mainlist sets.
                 contentType=item.contenType,
                 contentTitle=scrapedtitle,
                 contentQuality=scrapedquality.strip(),
                 plot=scrapedplot,
                 title=fulltitle,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.nextPage(itemlist, item, data,
                     '<span>[^<]+</span>[^<]+<a href="(.*?)">')

    return itemlist
Example #6
0
def lista_anime(item):
    """Anime catalogue listing: one Item per show, with TMDB lookup and
    automatic episode renumbering."""
    log()
    itemlist = []
    matches, data = support.match(item, r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>')

    for url, thumb, jtitle, raw_title, plot in matches:
        # Show the original (Japanese) title only when it differs.
        original = '' if jtitle == raw_title else support.typo(jtitle, ' -- []')

        # Year and language markers are parenthesized inside the title.
        year = ''
        lang = ''
        if '(' in raw_title:
            year = scrapertoolsV2.find_single_match(raw_title, r'(\([0-9]+\))')
            lang = scrapertoolsV2.find_single_match(raw_title, r'(\([a-zA-Z]+\))')

        infoLabels = {'year': year}
        title = raw_title.replace(year, '').replace(lang, '').strip()
        original = original.replace(year, '').replace(lang, '').strip()
        if lang:
            lang = support.typo(lang, '_ color kod')
        longtitle = '[B]' + title + '[/B]' + lang + original

        itemlist.append(
                Item(channel=item.channel,
                     extra=item.extra,
                     contentType="episode",
                     action="episodios",
                     title=longtitle,
                     url=url,
                     thumbnail=thumb,
                     fulltitle=title,
                     show=title,
                     infoLabels=infoLabels,
                     plot=plot,
                     folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)

    # Next page
    support.nextPage(itemlist, item, data, r'<a class="page-link" href="([^"]+)" rel="next"')

    return itemlist
Example #7
0
def peliculas(item):
    """List movies from the JSON API (hydra collection), appending a
    'next page' entry when the API advertises one."""
    support.log()
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    json_object = jsontools.load(data)

    for movie in json_object['hydra:member']:
        itemlist.extend(get_itemlist_movie(movie, item))

    # 'hydra:view'/'hydra:next' are absent on the last page, and 'newest'
    # callers never paginate. Catch only the lookups that can fail rather
    # than swallowing every exception with a bare except.
    try:
        if support.inspect.stack()[1][3] not in ['newest']:
            support.nextPage(itemlist,
                             item,
                             next_page=json_object['hydra:view']['hydra:next'])
    except (KeyError, TypeError, IndexError):
        pass

    return itemlist
Example #8
0
def lista_serie(item):
    """Paginated series list; the page number travels in ``item.args``."""
    info()
    itemlist = []

    PERPAGE = 15

    page = 1 if not item.args else int(item.args)

    if '||' in item.data:
        # item.data already holds the entries: paragraphs of 'url||title'.
        matches = [entry.split('||') for entry in item.data.split('\n\n')]
    else:
        # Scrape the category list from the page.
        patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>'
        matches = support.match(item, patron=patron, headers=headers).matches

    start = (page - 1) * PERPAGE
    for idx, (url, raw_title) in enumerate(matches):
        if idx < start:
            continue
        if idx >= page * PERPAGE:
            break
        title = cleantitle(raw_title)
        itemlist.append(
            item.clone(action="episodios",
                       title=title,
                       url=url,
                       thumbnail="",
                       fulltitle=title,
                       show=title,
                       plot="",
                       contentType='episode',
                       originalUrl=url))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination: offer a next page only when this one was full.
    if len(matches) >= page * PERPAGE:
        item.args = page + 1
        support.nextPage(itemlist, item, next_page=item.url)

    return itemlist
Example #9
0
def peliculas(item):
    """List movies or shows from the hydra JSON API endpoint in item.url."""
    support.info()
    itemlist = []

    data = support.match(item.url, headers=headers).data
    json_object = jsontools.load(data)

    # The content type depends only on the endpoint url, not on the
    # element: set it once instead of on every loop iteration.
    item.contentType = 'tvshow' if 'shows' in item.url else 'movie'
    for element in json_object['hydra:member']:
        itemlist.extend(get_itemlist_element(element, item))

    # 'hydra:view'/'hydra:next' are missing on the last page, and 'newest'
    # callers never paginate. Catch only the expected failures instead of
    # a bare except that hides real bugs.
    try:
        if support.inspect.stack()[1][3] not in ['newest']:
            support.nextPage(itemlist, item, next_page=json_object['hydra:view']['hydra:next'])
    except (KeyError, TypeError, IndexError):
        pass

    return itemlist
Example #10
0
def peliculas(item):
    """Anime list page: distinguishes series from movies/OAVs by the
    keywords found in each entry title."""
    log()
    itemlist = []

    blacklist = ['top 10 anime da vedere']
    matches, data = support.match(item, r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+')

    for url, title, thumb in matches:
        title = scrapertoolsV2.decodeHtmlentities(title.strip()).replace("streaming", "")
        lang = scrapertoolsV2.find_single_match(title, r"((?:SUB ITA|ITA))")

        # Entry kind comes from keywords in the title ('ova' wins).
        lowered = title.lower()
        videoType = ''
        if 'movie' in lowered:
            videoType = ' - (MOVIE)'
        if 'ova' in lowered:
            videoType = ' - (OAV)'

        cleaned = (title.replace(lang, "")
                        .replace('(Streaming & Download)', '')
                        .replace('( Streaming & Download )', '')
                        .replace('OAV', '')
                        .replace('OVA', '')
                        .replace('MOVIE', '')
                        .strip())

        if videoType:
            contentType, action = "movie", "findvideos"
        else:
            contentType, action = "tvshow", "episodios"

        if lowered not in blacklist:
            itemlist.append(
                Item(channel=item.channel,
                    action=action,
                    contentType=contentType,
                    title=support.typo(cleaned + videoType, 'bold') + support.typo(lang, '_ [] color kod'),
                    fulltitle=cleaned,
                    show=cleaned,
                    url=url,
                    thumbnail=thumb))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)
    support.nextPage(itemlist, item, data, r'<a class="next page-numbers" href="([^"]+)">')

    return itemlist
Example #11
0
def latest_added(item):
    """Latest released episodes from the JSON API, one Item per episode."""
    itemlist = []
    page = item.page if item.page else 0
    url = '{}/api/home/latest-episodes?page={}'.format(host, page)
    js = httptools.downloadpage(url).json

    for ep in js:
        ep_title = ep['title'] if ep['title'] else ''
        show_title, lang = get_lang(ep['animeTitle'])
        quality = 'Full HD' if ep['fullHd'] else 'HD'
        number = int(float(ep['episodeNumber']))

        # "<n>. <episode title> - <show title>" plus language/quality tags.
        prefix = ep_title + ' - ' if ep_title else ''
        long_title = (support.typo('{}. {}{}'.format(number, prefix, show_title), 'bold')
                      + support.typo(lang, '_ [] color kod')
                      + support.typo(quality, '_ [] color kod'))
        image = get_thumbnail(ep, 'episodeImages')

        itemlist.append(
            item.clone(
                title=long_title,
                fulltitle=ep_title,
                animeId=ep['animeId'],
                id=ep['id'],
                contentType='episode',
                contentTitle=ep_title,
                contentSerieName=show_title,
                contentLanguage=lang,
                quality=quality,
                contentEpisodeNumber=number,
                animeUrl='{}/api/anime/{}'.format(host, ep['animeId']),
                thumbnail=image,
                fanart=image,
                action='findvideos'))

    # No pagination when invoked from the global 'newest' hook.
    if stack()[1][3] not in ['newest']:
        support.nextPage(itemlist, item.clone(page=page + 1))

    return itemlist
Example #12
0
def peliculas(item):
    """Catalogue listing from the paged JSON API; movies get 'findvideos',
    series get 'episodios'."""
    logger.debug()

    itemlist = []
    page = item.page if item.page else 0
    js = httptools.downloadpage(
        '{}?page={}&size={}{}&sort={},{}&sort=id'.format(
            item.url, page, perpage, item.variable, sort, order)).json

    for entry in js:
        title, lang = get_lang(entry['title'])
        long_title = support.typo(title, 'bold') + support.typo(lang, '_ [] color kod')
        is_movie = entry['type'] == 'Movie'

        itemlist.append(
            item.clone(
                title=long_title,
                fulltitle=title,
                show=title,
                contentLanguage=lang,
                contentType='movie' if is_movie else 'tvshow',
                contentTitle=title,
                contentSerieName=title if entry['type'] == 'Serie' else '',
                action='findvideos' if is_movie else 'episodios',
                plot=entry['storyline'],
                url='{}/api/anime/{}'.format(host, entry['id']),
                thumbnail=get_thumbnail(entry),
                fanart=get_thumbnail(entry, 'horizontalImages')))

    autorenumber.start(itemlist)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # A full page implies more results are available.
    if len(itemlist) == perpage:
        support.nextPage(itemlist, item.clone(page=page + 1))
    return itemlist
Example #13
0
def lista_anime(item):
    """Anime catalogue with local pagination.

    The page number may follow a '{}' marker in ``item.url``; the url may
    also embed a pre-built '||'-separated entry list. A couple of film
    titles are special-cased with a hard-coded year.
    """
    log()
    itemlist = []

    PERPAGE = 15

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    if '||' in item.url:
        # Entries embedded in the url: one per paragraph, 'url||title'.
        series = item.url.split('\n\n')
        matches = []
        for i, serie in enumerate(series):
            matches.append(serie.split('||'))
    else:
        # Extract the entries
        patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
        matches = support.match(item, patron, headers=headers)[0]

    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Local pagination window.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)')
        movie = False
        showtitle = title
        # '(ITA)' marks dubbed shows; everything else is tagged Sub-ITA.
        if '(ITA)' in title:
            title = title.replace('(ITA)', '').strip()
            showtitle = title
        else:
            title += ' ' + support.typo('Sub-ITA', '_ [] color kod')

        infoLabels = {}
        # Hard-coded workarounds: these titles are films, not series.
        if 'Akira' in title:
            movie = True
            infoLabels['year'] = 1988

        if 'Dragon Ball Super Movie' in title:
            movie = True
            infoLabels['year'] = 2019

        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios" if movie == False else 'findvideos',
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=showtitle,
                 show=showtitle,
                 contentTitle=showtitle,
                 plot=scrapedplot,
                 contentType='episode' if movie == False else 'movie',
                 originalUrl=scrapedurl,
                 infoLabels=infoLabels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)

    # Pagination: offer a next page only when this one was full.
    if len(matches) >= p * PERPAGE:
        support.nextPage(itemlist,
                         item,
                         next_page=(item.url + '{}' + str(p + 1)))

    return itemlist
Example #14
0
def video(item):
    """Anime list page: builds show/movie Items from the widget grid.

    Extra markers scraped alongside each entry (ep/ova/ona/movie/special)
    are appended to the displayed title; the 'movie' marker switches the
    entry from series ('episodios') to film ('findvideos').
    """
    log()
    itemlist = []

    matches, data = support.match(
        item,
        r'<a href="([^"]+)" class[^>]+><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>',
        '<div class="widget-body">(.*?)<div id="sidebar"',
        headers=headers)

    for scrapedurl, scrapedthumb, scrapedinfo, scrapedoriginal, scrapedtitle in matches:
        # Look for info such as year or language inside the title
        year = ''
        lang = ''
        if '(' in scrapedtitle:
            year = scrapertoolsV2.find_single_match(scrapedtitle,
                                                    r'( \([0-9]+\))')
            lang = scrapertoolsV2.find_single_match(scrapedtitle,
                                                    r'( \([a-zA-Z]+\))')

        # Strip year and language from the title
        title = scrapedtitle.replace(year, '').replace(lang, '').strip()
        original = scrapedoriginal.replace(year, '').replace(lang, '').strip()

        # Compare the title with the original (Japanese) one
        if original == title:
            original = ''
        else:
            original = support.typo(scrapedoriginal, '-- []')

        # Look for supplementary info markers
        ep = ''
        ep = scrapertoolsV2.find_single_match(scrapedinfo,
                                              '<div class="ep">(.*?)<')
        if ep != '':
            ep = ' - ' + ep

        ova = ''
        ova = scrapertoolsV2.find_single_match(scrapedinfo,
                                               '<div class="ova">(.*?)<')
        if ova != '':
            ova = ' - (' + ova + ')'

        ona = ''
        ona = scrapertoolsV2.find_single_match(scrapedinfo,
                                               '<div class="ona">(.*?)<')
        if ona != '':
            ona = ' - (' + ona + ')'

        movie = ''
        movie = scrapertoolsV2.find_single_match(scrapedinfo,
                                                 '<div class="movie">(.*?)<')
        if movie != '':
            movie = ' - (' + movie + ')'

        special = ''
        special = scrapertoolsV2.find_single_match(
            scrapedinfo, '<div class="special">(.*?)<')
        if special != '':
            special = ' - (' + special + ')'

        # Concatenate the gathered information

        lang = support.typo(
            'Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else ''

        info = ep + lang + year + ova + ona + movie + special

        # Build the display title
        long_title = '[B]' + title + '[/B]' + info + original

        # Series vs film: the 'movie' marker decides
        if movie == '':
            contentType = 'tvshow'
            action = 'episodios'
        else:
            contentType = 'movie'
            action = 'findvideos'

        itemlist.append(
            Item(channel=item.channel,
                 contentType=contentType,
                 action=action,
                 title=long_title,
                 url=scrapedurl,
                 fulltitle=title,
                 show=title,
                 thumbnail=scrapedthumb,
                 context=autoplay.context,
                 number='1'))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)

    # Next page
    support.nextPage(itemlist,
                     item,
                     data,
                     r'href="([^"]+)" rel="next"',
                     resub=['&amp;', '&'])
    return itemlist
Example #15
0
 def fullItemlistHook(itemlist):
     """Hook run on the finished itemlist: remembers the category id and
     appends the 'next page' entry.

     NOTE(review): relies on closure variables ``data`` and ``item`` from
     the enclosing scope — presumably a channel listing function; verify.
     """
     cat_id = support.match(data, patron=r''''cat':"(\d+)"''').match
     if cat_id: item.cat_id = cat_id
     item.page += 1
     support.nextPage(itemlist, item, function_or_level='peliculas')
     return itemlist
Example #16
0
def peliculas(item):
    """Build the programme list from a Mediaset Play API response.

    Entries without the 'MediasetPlay_ANY' right are skipped; series get
    an 'epmenu' action, everything else is treated as a movie.
    """
    itemlist = []
    res = get_programs(item)
    video_id = ''

    for it in res['items']:
        # Skip content not playable on this platform (missing rights key
        # defaults to playable).
        if not 'MediasetPlay_ANY' in it.get('mediasetprogram$channelsRights',
                                            ['MediasetPlay_ANY']):
            continue
        thumb = ''
        fanart = ''
        contentSerieName = ''
        # Page url fallback chain: season page, video page, generic page.
        url = 'https:' + it.get(
            'mediasettvseason$pageUrl',
            it.get('mediasetprogram$videoPageUrl',
                   it.get('mediasetprogram$pageUrl')))
        title = it.get('mediasetprogram$brandTitle', it.get('title'))
        title2 = it['title']
        # Show 'brand - episode' when the two titles differ.
        if title != title2:
            title = '{} - {}'.format(title, title2)
        plot = it.get(
            'longDescription',
            it.get('description',
                   it.get('mediasettvseason$brandDescription', '')))

        if it.get('seriesTitle') or it.get('seriesTvSeasons'):
            contentSerieName = it.get('seriesTitle', it.get('title'))
            contentType = 'tvshow'
            action = 'epmenu'
        else:
            contentType = 'movie'
            video_id = it['guid']
            action = 'findvideos'
        # Pick the first vertical image as thumbnail and the first header
        # poster as fanart; '@3' requests the higher-resolution variant.
        for k, v in it['thumbnails'].items():
            if 'image_vertical' in k and not thumb:
                thumb = v['url'].replace('.jpg', '@3.jpg')
            if 'image_header_poster' in k and not fanart:
                fanart = v['url'].replace('.jpg', '@3.jpg')
            if thumb and fanart:
                break

        itemlist.append(
            item.clone(title=support.typo(title, 'bold'),
                       fulltitle=title,
                       contentTitle=title,
                       contentSerieName=contentSerieName,
                       action=action,
                       contentType=contentType,
                       thumbnail=thumb,
                       fanart=fanart,
                       plot=plot,
                       url=url,
                       video_id=video_id,
                       seriesid=it.get('seriesTvSeasons', it.get('id', '')),
                       disable_videolibrary=True,
                       forcethumb=True))
    # The API reports the next page token directly.
    if res['next']:
        item.page = res['next']
        support.nextPage(itemlist, item)

    return itemlist
Example #17
0
                 title=title,
                 fulltitle=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=serie,
                 extra=item.extra,
                 contentSerieName=serie,
                 contentLanguage='Sub-ITA',
                 infoLabels=infoLabels,
                 folder=True))
    support.checkHost(item, itemlist)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Paginazione
    support.nextPage(itemlist, item, data, '<a class="next page-numbers" href="(.*?)">Successivi')

    return itemlist


def serietv():
    log()

    itemlist = []
    matches = support.match(Item(), r'<option class="level-([0-9]?)" value="([^"]+)">([^<]+)</option>',
                            r'<select\s*?name="cat"\s*?id="cat"\s*?class="postform"\s*?>(.*?)</select>', headers,
                            url="%s/" % host)[0]
    index = 0

    for level, cat, title in matches:
        title = cleantitle(title)