Example #1
def Browse_ItemSenpai(html, page, metamethod='', content='movies', view='515'):
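    # Build the directory of anime series from senpai.com.pl: cut the series
    # list out of the page, optionally fetch cover/plot through the scraper
    # cache, and add one folder per series pointing at the EpisodesSenpai mode.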
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, '<h4>Lista anime</h4>',
                                 '<footer class="page-footer indigo">',
                                 False)[1]
    data = re.findall(
        'href="\/anime\/' + page +
        '(.+?)">\n(\s+)<img src="/Resources/anime/covers/(.+?)"', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + page + item[0]
        name = urllib2.unquote(page + item[0].encode("utf-8"))
        name = ParseDescription(name)
        ### scraper
        if (tfalse(addst("senpai-thumbs")) == True):
            import scraper
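            # The scraper module acts as a local cache: scraper_check() returns the
            # stored record (scrap[1] = cover image, scrap[2] = plot) and
            # scraper_add() stores a freshly scraped one.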
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    img = 'http://senpai.com.pl/Resources/anime/covers/' + urllib2.quote(
                        item[2])
                    plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


###
        pars = {
            'mode': 'EpisodesSenpai',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'senpai':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemAnimecentrum(html, url, metamethod='', content='movies', view='515'):
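    # List anime series from anime-centrum.pl: parse the cover grid, optionally
    # scrape cover image and plot from each series page (cached via the scraper
    # module), and add folders for the EpisodesAnimecentrum mode.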
    if (len(html) == 0):
        return
    data = re.findall('tb-cell"><a href="(.+?)"><img src="(.+?)" alt="(.+?)"><\/a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + item[0]
        strona = strona + '?page=1'
        name = item[2].encode("utf-8")
        name = ParseDescription(name)
### scraper
        if (tfalse(addst("acentr-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    html = nURL(strona)
                    html = GetDataBeetwenMarkers(html, '<article class="content-1">', '<section class="gap-2">', False)[1]
                    data = re.findall('<img src="(.+?)" alt=', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            img = item + "|Referer=http://anime-centrum.pl/"
                    else:
                        img = ''
                    data = re.findall('<p>(.+?)</p>', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            plot = item
                            plot = ParseDescription(item)
                    else:
                        plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        pars = {'mode': 'EpisodesAnimecentrum', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animecentrum':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_Itemslist(url, page='', content='episodes', view='515'):
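    # List series from inne.wbijam.pl. The menu block to parse, the base link
    # and the target episode-listing mode are chosen from the category name
    # passed in `url`.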

    data = 'http://www.inne.wbijam.pl/'
    html = nURL(data)
    html = html.encode('utf-8', '')
    if 'Polecane serie anime' in url:
        data1 = '<div class="pmenu_naglowek_blue">Polecane serie anime</div>'
        data2 = '<a href="http://www.inne.wb'
        link = ''
        mode = 'Browse_Episodeswijam'
    elif 'Lżejsze klimaty' in url:
        data1 = '<div class="pmenu_naglowek_red">Lżejsze klimaty</div>'
        data2 = '<div class="pmenu_naglowek_blue">Polecane serie anime</div>'
        link = 'http://www.inne.wbijam.pl/'
        mode = 'Browse_Episodeswijaminne'
    elif 'Akcja' in url:
        data1 = '<div class="pmenu_naglowek_red">Akcja</div>'
        data2 = '<div class="pmenu_naglowek_red">Lżejsze klimaty</div>'
        link = 'http://www.inne.wbijam.pl/'
        mode = 'Browse_Episodeswijaminne'
    else:
        # unrecognised category: bail out instead of raising NameError below
        eod()
        return

    data = GetDataBeetwenMarkers(html, data1, data2, False)[1]
    data = re.findall('<a href="(.+?)">(.+?)</a></li>', data)
    data.sort()
    ItemCount = len(data)
    if len(data) > 0:
        for item in data:
            strona = link + item[0]
            name = item[1].encode('utf-8', '')
### scraper
            if (tfalse(addst("wbij-thumbs")) == True):
                import scraper
                scrap = scraper.scraper_check(host, name)
                try:
                    if (name not in scrap):
                        html = nURL(strona)
                        data = re.findall('<img src="grafika/(.+?)">', html)
                        ItemCount = len(data)
                        if len(data) > 0:
                            for item in data:
                                img = url + '/grafika/' + item

                        else:
                            img = ''
                        plot = ''
                        scraper.scraper_add(host, name, img, plot, '')
                        scrap = scraper.scraper_check(host, name)
                except:
                    scrap = ''
                try:
                    img = scrap[1]
                except:
                    img = ''
                try:
                    plot = scrap[2]
                except:
                    plot = ''
            else:
                img = ''
                plot = ''
            fanart = fanartAol
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
    ##
            pars = {'mode': mode, 'site': site, 'section': section, 'title': name, 'url': strona, 'page': url, 'img': img, 'fanart': fanart}
            contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            contextMenuItems = ContextMenu_Series(contextLabs)
            labs['title'] = name
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
    eod()
def Browse_ItemAol(html, page, metamethod='', content='movies', view='515'):
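    # List a-o.ninja series for the given letter `page`: parse the title table,
    # optionally scrape cover and plot from each series page, and add folders
    # for the EpisodesAnime mode.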
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, 'Tytu', '</table>', False)[1]
    page = page.lower()
    data = re.findall('<a href="https://a-o.ninja/anime/' + page+ '(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona =  'https://a-o.ninja/anime/' + page + item[0]
        name = item[1].encode("utf-8")
        name = ParseDescription(name)
### scraper
        if (tfalse(addst("aodc-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    if '?page=0' in strona:
                        strona2 = strona.replace('?page=0', '')
                    else:
                        strona2 = strona
                    html = nURL(strona2)
                    html = GetDataBeetwenMarkers(html, 'field-name-field-okladka field-type-image field-label-above', '<p>&nbsp;</p>', False)[1]
                    data = re.findall('<img src="(.+?)"', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            img = item
                    else:
                        img = ''
                    data = re.findall('<p><p>(.+?)</p>', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            plot = ParseDescription(item)
                    else:
                        plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        pars = {'mode': 'EpisodesAnime', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animeonline':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
Example #5
def Browse_ItemAol(html, url, metamethod='', content='movies', view='515'):
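    # List cartoon series from the kreskoweczki site: normalise the HTML, parse
    # the series grid, optionally scrape a cover image per series, and add
    # folders for the Episodeskresk mode.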
    if (len(html) == 0):
        return
    html = html.replace('\'', '')
    html = html.replace('\n', '')
    html = html.replace('\r', '')
    html = html.replace('\t', '')
    html = GetDataBeetwenMarkers(html, 'zaczynaj', '<header>Kresk', False)[1]
    #    print html.encode('ascii','ignore')
    data = re.findall(
        'href="/kreskowka/(.+?)"><div(.+?)<b class="larger white">(.+?)</b>',
        html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + '/kreskowka/' + item[0]
        name = item[2].encode("utf-8")
        name = ParseDescription(name)
        ### scraper
        if (tfalse(addst("kresk-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    html = nURL(strona)
                    html = GetDataBeetwenMarkers(html, '>Serie</a>',
                                                 '<div class="info-basic">',
                                                 False)[1]
                    #print html.encode('ascii','ignore')
                    data = re.findall("/upload/cats/(.+?).jpg", html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            img = mainSite + '/upload/cats/' + item + '.jpg'
                    else:
                        img = ''
                    plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


###
        pars = {
            'mode': 'Episodeskresk',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'kreskoweczki':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
    eod()
Example #6
def Browse_Itemscen(html, name, metamethod='', content='movies', view='515'):
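    # List anime-joy.tv series whose titles start with `name`: optionally scrape
    # cover and summary from each series page and add folders for the
    # Episodesjoy mode.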
    if (len(html) == 0):
        return
    data = re.compile('<div class="anim"><a href="(.+?)">  ' + re.escape(name) +
                      '(.+?)</a>').findall(html)
    ItemCount = len(data)
    for item in data:
        strona = 'http://anime-joy.tv/' + item[0]
        name2 = name + item[1]
        ### scraper
        if (tfalse(addst("ajoy-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name2)
            try:
                if (name2 not in scrap):
                    html = nURL(strona)
                    htmlimg = GetDataBeetwenMarkers(
                        html, '<div class="animedetay">',
                        '<div class="addcolumn centerbutton">', False)[1]
                    data = re.findall('<img src="(.+?)"/>', htmlimg)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            img = item
                    else:
                        img = ''
                    htmlplot = GetDataBeetwenMarkers(
                        html, '<strong>Summary:</strong> </br>', '</div>',
                        False)[1]
                    if len(htmlplot) > 0:
                        plot = clean_html(htmlplot)
                    else:
                        plot = ''
                    scraper.scraper_add(host, name2, img, plot, '')
                    scrap = scraper.scraper_check(host, name2)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


##
        pars = {
            'mode': 'Episodesjoy',
            'site': site,
            'section': section,
            'title': name2,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name2,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'animejoy':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name2
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('links-view'))
    eod()
def Browse_ItemAol(html, url, metamethod='', content='movies', view='515'):
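    # List series from an animezone letter page: parse the alphabetical index,
    # optionally scrape the og:image cover and the summary from each series
    # page, add folders for the Episodeszone mode, then append a "Next page" item.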
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, 'Anime na liter',
                                 '<ul class="pagination">', False)[1]
    data = re.findall('<a href="(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite7 + item[0]
        name = item[1].encode("utf-8")
        name = ParseDescription(name)
        ### scraper
        if (tfalse(addst("zone-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    html = nURL(strona)
                    html = GetDataBeetwenMarkers(html, 'og:image',
                                                 '<h5>Odcinki</h5>', False)[1]
                    html = html.replace('\'', '')
                    html = html.replace('\n', '')
                    html = html.replace('\r', '')
                    html = html.replace('\t', '')
                    # print html.encode('ascii', 'ignore')  # debug output
                    data = re.findall('content="(.+?)"></head>', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            img = item
                    else:
                        img = ''
                    data = re.findall('summary">(.+?)<div class', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            plot = ParseDescription(item)
                    else:
                        plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        pars = {
            'mode': 'Episodeszone',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'animezone':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)


# next page (naively increments the last character of the URL as a digit)
    npage = url[:-1] + str(int(url[-1:]) + 1)
    #    if -1 != html.find("do strony "):
    _addon.add_directory(
        {
            'mode': 'Pagezone',
            'site': site,
            'section': section,
            'url': npage,
            'page': npage
        }, {'title': "Next page"},
        is_folder=True,
        fanart=fanartAol,
        img=nexticon)
    set_view(content, view_mode=addst('tvshows-view'))