def ListTitles():
    """Directory of Anime-Odcinki titles for the selected section.

    'Aired' is read from a block view; 'All' and letter sections come from
    table rows.  Every entry opens AOListEpisodes.
    """
    section = params['section']
    name = params['name']
    url = params['url']
    html = CleanHTML(requests.get(url, timeout=15).text)
    if section == 'Aired':
        block = parseDOM(html, 'section', attrs={'id': 'block-views-anime-emitowane-block'})
        links = parseDOM(block, 'a', ret='href')
        titles = parseDOM(block, 'a')
    else:
        if section == 'All':
            rows = parseDOM(html, 'tr', attrs={'class': 'list-item'})
        else:
            rows = parseDOM(html, 'tr', attrs={'data-fl': str(section).lower()})
        links = [parseDOM(row, 'a', ret='href')[0] for row in rows]
        titles = [parseDOM(row, 'a')[0] for row in rows]
    for entry_title, entry_link in zip(titles, links):
        addon.addDir(str(entry_title), str(entry_link), mode='AOListEpisodes',
                     section=section, thumb=str(fanartAodc),
                     fanart=custom_background, subdir=name)
def PageAnimeShinden():
    """Top-level Shinden.pl menu: fixed list of browse/search entries."""
    entries = (
        ("[Anime] Alfabetycznie", 'series', 'SHAlfabetycznie', 'Alfabetycznie', None),
        ("[Anime] Wszystkie", 'series', 'SHListTitles', 'All', None),
        ("Wyszukiwarka", 'series?search=', 'SHSearch', 'search', searchicon),
        ("Gatunki", 'series?', 'SHGatunki', 'gatunki', searchicon),
        ("[Anime] Emitowane", 'series?series_status[0]=Currently+Airing', 'SHListTitles', 'Aired', None),
    )
    for label, path, mode, section, thumb in entries:
        # Entries without a custom icon keep the addDir default thumb.
        if thumb is None:
            addon.addDir(label, mainLink + path, mode=mode,
                         fanart=default_background, section=section)
        else:
            addon.addDir(label, mainLink + path, mode=mode,
                         fanart=default_background, section=section, thumb=thumb)
def Search():
    """Anime-Odcinki search.

    section == 'search': prompt via on-screen keyboard and extend the URL;
    section == 'nextpage': the URL is already complete.
    Lists matching titles and queues a next-page entry when present.
    """
    section = params['section']
    name = params['name']
    url = params['url']
    if section == 'search':
        keyb = xbmc.Keyboard('', "Wyszukiwarka anime")
        keyb.doModal()
        if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
            search = keyb.getText()
            url = url + '%s' % search.replace(" ", "+")
        else:
            # BUGFIX: previously fell through and fetched the bare search URL
            # after showing the main page (the Shinden handler returns here).
            PageAnimeOdcinki(mainLink)
            return
    elif section == 'nextpage':
        url = url
    html = requests.get(url, timeout=15).text
    result = parseDOM(html, 'li', attrs={'class': 'search-result'})
    for item in result:
        nazwa = CleanHTML(str(parseDOM(item, 'a')[0]))
        link = str(parseDOM(item, 'a', ret='href')[0])
        plot = CleanHTML(str(parseDOM(item, 'p')[0]))
        addon.addDir(nazwa, link, mode='AOListEpisodes', thumb=fanartAodc,
                     plot=plot, fanart=custom_background, section='search', subdir=name)
    if 'nextpostslink' in html:
        nextpage = parseDOM(html, 'a', attrs={'class': 'nextpostslink'}, ret='href')[0]
        addon.addDir('[I]następna strona[/I]', nextpage, mode='AOSearch',
                     thumb=nexticon, fanart=custom_background, section='nextpage')
def Kategorie():
    """Dramaqueen tag-cloud categories; item count shown via label2 mask."""
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    rG = requests.get(url, headers=headersget, timeout=15).text
    # LoginCheck(url=rG)
    result = parseDOM(rG, 'div', attrs={'class': 'tagcloud'})[0]
    links = parseDOM(result, 'a', ret='href')
    label = parseDOM(result, 'a')
    # Item count is the first number in each aria-label.
    # FIX: raw string for the regex ('\d' is an invalid escape in a plain str).
    count = [re.findall(r'\d+', i)[0] for i in parseDOM(result, 'a', ret='aria-label')]
    for item in zip(label, links, count):
        addon.addDir(str(item[0]), str(item[1]), mode=5, fanart='', plot='', thumb='',
                     code='[B][COLOR %s]%s[/COLOR][/B]' % ('green', str(item[2]) + ' pozycji'))
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def Kategorie():
    """Dramaqueen tag-cloud categories; count appended to the label (no sort mask)."""
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    rG = requests.get(url, headers=headersget, timeout=15).content
    # LoginCheck(url=rG)
    result = parseDOM(rG, 'div', attrs={'class': 'tagcloud'})[0]
    links = parseDOM(result, 'a', ret='href')
    label = parseDOM(result, 'a')
    # Item count is the first number in each aria-label.
    # FIX: raw string for the regex ('\d' is an invalid escape in a plain str).
    count = [re.findall(r'\d+', i)[0] for i in parseDOM(result, 'a', ret='aria-label')]
    for item in zip(label, links, count):
        addon.addDir(str(item[0]) + ' ' + '[COLOR %s]%s[/COLOR]' % ('green', str(item[2]) + ' pozycji'),
                     str(item[1]), mode=7, fanart='', plot='', thumb='')
def KategorieLista():
    """List content-slider entries: dramas become folders, films playable links."""
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    page = requests.get(url, headers=headersget, timeout=15).content
    page = CleanHTML(page)
    # LoginCheck(url=page)
    slider = parseDOM(page, 'div', attrs={'class': 'avia-content-slider-inner'})[0]
    headings = parseDOM(slider, 'h3')
    titles = [parseDOM(h, 'a', ret='title')[0] for h in headings]
    images = parseDOM(slider, 'img', ret='src')
    hrefs = [parseDOM(h, 'a', ret='href')[0] for h in headings]
    for title, href, img in zip(titles, hrefs, images):
        target = str(href)
        if '/drama/' in target:
            addon.addDir(str(title) + ' ' + '[COLOR %s]Drama[/COLOR]' % 'green',
                         target, mode=4, fanart=str(img), thumb=str(img))
        elif '/film/' in target:
            addon.addLink(str(title) + ' ' + '[COLOR %s]Film[/COLOR]' % 'green',
                          target, mode=5, fanart=str(img), thumb=str(img))
def Alfabetyczna(url):
    """Anime-Odcinki letter index; each entry carries its item count in the label."""
    # 'name' is read for parity with the other handlers (and to fail fast if
    # the param is missing); the old 'Anime' check on it was a dead branch.
    name = params['name']
    url = params['url']
    result = requests.get(url, timeout=15).content
    result = parseDOM(result, 'div', attrs={'id': 'letter-index'})[0]
    # FIX: raw regex string; also the 'if "Anime" in name' / 'else' branches
    # were byte-identical, so the conditional was removed.
    lista = re.findall(r'data-index.*?">\s(.+?)</a>\s\((.+?)\)\s', result)
    for litera in lista:
        addon.addDir(str(litera[0]) + ' ' + '[COLOR %s]%s[/COLOR]' % ('green', str(litera[1]) + ' pozycji'),
                     url, mode='AOListTitles', section=str(litera[0])[0:1],
                     thumb=str(Letter[str(litera[0])[0:1]]), fanart=custom_background)
def Browse_Seasons():
    """Expand a wbijam title into seasons, or jump straight to the episode list.

    'polecane' titles have a season sub-menu; 'other' titles either expose
    several section headers (multi) or go directly to List_Episodes().
    """
    url = params['url']
    section = params['section']
    page = params['page']
    img = params['img']
    if section == 'polecane':
        html = requests.get(url, timeout=15).content
        menu = parseDOM(html, 'ul', attrs={'class': 'pmenu'})[1]
        for entry in parseDOM(menu, 'li'):
            href = parseDOM(entry, 'a', ret='href')[0]
            label = parseDOM(entry, 'a')[0]
            # Skip the chronological-order pseudo entry.
            if "Kolejno" in str(label):
                continue
            addon.addDir(str(label), url + str(href), mode='List_Episodes',
                         isFolder=True, thumb=fanartAol, fanart=default_background,
                         page=str(url), section='polecane')
    elif section == 'other':
        html = requests.get(url, timeout=15).content
        headers = parseDOM(html, 'h1', attrs={'class': 'pod_naglowek'})
        if len(headers) > 1:
            for header in headers:
                addon.addDir(str(header), url, mode='List_Episodes', isFolder=True,
                             thumb=str(img), fanart=default_background,
                             page=str(header), section='multi')
        else:
            List_Episodes()
def Browse_Titles():
    """List wbijam titles for a category.

    'Featured' titles match the first anchor pattern; otherwise each subpage
    is fetched for its cover image.
    """
    url = params['url']
    name = params['name']
    html = requests.get(url, timeout=15).content
    if name in html:
        mark1 = '>' + name + '</div>'
        mark2 = '</ul>'
        data = GetDataBeetwenMarkers(html, mark1, mark2, False)[1]
        data = re.findall('<a href="(.+?)"(.+?)">(.+?)</a></li>', data)
        data.sort()
        ##### Featured #####
        if len(data) > 0:
            for item in data:
                link = item[0]
                title = item[2]
                # Skip cross-links back to the portal itself.
                if 'inne.wbijam' in str(item[0]).lower():
                    continue
                addon.addDir(title, link, mode='Browse_Seasons', thumb=fanartAol,
                             fanart=default_background, section='polecane', page=str(url))
        ##### Remaining #####
        elif len(data) == 0:
            data2 = GetDataBeetwenMarkers(html, mark1, mark2, False)[1]
            data2 = re.findall('<a href="(.+?)">(.+?)</a></li>', data2)
            data2.sort()
            for item in data2:
                link = url + item[0]
                # BUGFIX: local was named 'set', shadowing the builtin.
                subpage = requests.get(link, timeout=15).content
                image = parseDOM([i for i in parseDOM(subpage, 'center') if 'img' in i][0],
                                 'img', ret='src')[0]
                title = item[1]
                addon.addDir(title, link, mode='Browse_Seasons', thumb=url + str(image),
                             fanart=default_background, section='other', page=str(url))
def ListDramas():
    """Directory of dramas scraped from the #av_section_1 grid."""
    url = params['url']
    page = requests.get(url, timeout=15).content
    page = CleanHTML(page)
    section_html = parseDOM(page, 'div', attrs={'id': 'av_section_1'})[0]
    cards = re.findall('flex_column av_one_fourth(.+?)</div></div></div>', section_html)
    titles = re.findall('><p>(.+?)</p>', section_html)
    plots = re.findall('/p>[\s,\S,.]<p>(.+?)</p>', section_html)
    images = parseDOM(cards, 'img', ret='src')
    hrefs = list(parseDOM(cards, 'a', ret='href'))
    for href, title, img, plot in zip(hrefs, titles, images, plots):
        addon.addDir(str(title), str(href), mode=4, plot=str(plot), fanart=str(img),
                     isFolder=True, thumb=str(img), section='')
def Gatunki():
    """Anime-Odcinki genre browser.

    section == 'gatunki': show a multiselect of genre checkboxes and build a
    filtered URL; section == 'nextpage': URL is already complete. Then list
    results exactly like the search handler.
    """
    section = params['section']
    name = params['name']
    url = params['url']
    if section == 'gatunki':
        result = requests.get(url, timeout=15).text
        result = parseDOM(result, 'div', attrs={'class': 'panel-body'})[0]
        taglist = [item for item in parseDOM(result, 'div', attrs={'class': r'.+?' + 'checkbox'}) if len(item) > 0]
        tagname = [CleanHTML(item) for item in parseDOM(taglist, 'label')]
        tagcat = [item for item in parseDOM(taglist, 'input', ret='name')]
        tagcode = ['=' + i for i in parseDOM(taglist, 'input', ret='value')]
        # Pair each input name with its value to build 'name=value' filters.
        taglink = [str(cat) + str(code) for cat, code in zip(tagcat, tagcode)]
        d = xbmcgui.Dialog()
        select = d.multiselect('Wybór Gatunku', tagname)
        # FIX: identity comparison with None (was 'select == None').
        if select is None:
            PageAnimeOdcinki(mainLink)
            return
        seltags = [taglink[idx] for idx in select]
        url = url + '?' + '&'.join(seltags)
    elif section == 'nextpage':
        url = url
    html = requests.get(url, timeout=15).text
    result = parseDOM(html, 'li', attrs={'class': 'search-result'})
    for item in result:
        nazwa = CleanHTML(str(parseDOM(item, 'a')[0]))
        link = str(parseDOM(item, 'a', ret='href')[0])
        plot = CleanHTML(str(parseDOM(item, 'p')[0]))
        addon.addDir(nazwa, link, mode='AOListEpisodes', thumb=fanartAodc,
                     plot=plot, fanart=custom_background, section='search', subdir=name)
    if 'nextpostslink' in html:
        nextpage = parseDOM(html, 'a', attrs={'class': 'nextpostslink'}, ret='href')[0]
        addon.addDir('[I]następna strona[/I]', nextpage, mode='AOGatunki',
                     thumb=nexticon, fanart=custom_background, section='nextpage')
def ListTitles():
    """List Shinden series for the current page and queue the next-page entry.

    Reads 'name', 'url' and 'section' from the plugin params; every result
    row becomes a directory item pointing at SHListEpisodes.
    """
    name = params['name']
    url = params['url']
    section = params['section']
    html = requests.get(url, timeout=15).content
    result = str(parseDOM(html, 'section', attrs={'class': 'anime-list box'})[0])
    # Only rows containing an <h3> are real series entries.
    results = [item for item in parseDOM(result, 'ul', attrs={'class': 'div-row'}) if 'h3' in item]
    for item in results:
        # anchor[0] is the cover-image link, anchor[1] the series page link.
        link = mainLink + re.sub('/series/', 'series/', parseDOM(item, 'a', ret='href')[1])
        obraz = mainLink + re.sub('/res/', 'res/', parseDOM(item, 'a', ret='href')[0])
        title = parseDOM(item, 'a')[1]
        # Map <em> emphasis to Kodi italics markup.
        title = title.replace('<em>', '[I]')
        title = title.replace('</em>', '[/I]')
        addon.addDir(str(title), link, mode='SHListEpisodes', section='episodes', thumb=str(obraz), fanart=custom_background)
    try:
        next = parseDOM(html, 'a', attrs={'rel': 'next'}, ret='href')[0]
        if len(next) > 0:
            nextpage = mainLink + re.sub('/', '', next)
            nextpage = CleanHTML(nextpage)
            # Strip the r307 redirect marker from the pagination link.
            if '&r307=1' in nextpage:
                nextpage = str(nextpage).replace('&r307=1', '')
            elif 'r307=1' in nextpage:
                nextpage = str(nextpage).replace('r307=1', '')
            addon.addDir(
                '[I]następna strona[/I]',
                str(nextpage),
                mode='SHListTitles',
                section='nextpage',
                thumb=str(nexticon),
                fanart=custom_background,
            )
    except:
        # Last page: no rel="next" anchor present.
        pass
def Pagewbijam(url):
    """wbijam.pl landing menu: one folder per category header block.

    BUGFIX: the empty-URL guard previously ran only *after* the page had been
    fetched; validate before issuing the request.
    """
    if len(url) == 0:
        return
    result = requests.get(url, timeout=15).content
    # Drop the navigation/ad headers so they are not picked up below.
    result = re.sub('>Menu główne', '', result)
    result = re.sub('>Reklama', '', result)
    data = [item for item in parseDOM(result, 'div', attrs={'class': 'pmenu_naglowek_' + r'.'}) if len(item) > 0]
    for name in data:
        addon.addDir(str(name), url, mode='Browse_Titles', thumb=fanartAol,
                     fanart=default_background)
def Alfabetyczna(url):
    """Anime-Odcinki letter index (sorted view; count shown via label2 mask)."""
    # 'name' is read for parity with the other handlers (and to fail fast if
    # the param is missing); the old 'Anime' check on it was a dead branch.
    name = params['name']
    url = params['url']
    result = requests.get(url, timeout=15).text
    result = parseDOM(result, 'div', attrs={'id': 'letter-index'})[0]
    # FIX: raw regex string; also the 'if "Anime" in name' / 'else' branches
    # were byte-identical, so the conditional was removed.
    lista = re.findall(r'data-index.*?">\s(.+?)</a>\s\((.+?)\)\s', result)
    for litera in lista:
        addon.addDir(str(litera[0]), url, mode='AOListTitles',
                     section=str(litera[0])[0:1],
                     thumb=str(Letter[str(litera[0])[0:1]]),
                     fanart=custom_background,
                     code='[B][COLOR %s]%s[/COLOR][/B]' % ('green', str(litera[1]) + ' pozycji'))
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def MainMenu():
    """Root menu: fixed source list plus the settings folder."""
    sources = (
        ('Wbijam.pl', 'https://inne.wbijam.pl/', 'Pagewbijam', default_background, iconWbijam),
        ('Anime Odcinki', 'https://anime-odcinki.pl/', 'AnimeOdcinki', Odcinkifanart, iconOdcinki),
        ('Shinden.pl', 'https://shinden.pl/series', 'Shinden', default_background, iconShinden),
        ('StrefaDB.pl', 'https://strefadb.pl/', 'Dragonball', DBfanart, iconstrefadb),
    )
    for label, link, mode, fanart, thumb in sources:
        addon.addDir('[COLOR=%s]%s[/COLOR]' % ('blue', label), link, mode=mode,
                     fanart=fanart, thumb=thumb, isFolder=True)
    addon.addDir('Ustawienia', '', 'Settings', fanart=default_background,
                 thumb=iconsettings, isFolder=True)
def Alfabetyczna():
    """Shinden letter index: one folder per letter of the letter-list."""
    name = params['name']
    url = params['url']
    page = requests.get(url, timeout=15).content
    page = CleanHTML(page)
    index = parseDOM(page, 'ul', attrs={'class': 'letter-list' + r'.+?'})[0]
    # Drop the redirect marker from each letter link.
    hrefs = [str(h).replace('r307=1&', '') for h in parseDOM(index, 'a', ret='href')]
    letters = parseDOM(index, 'a')
    for letter, href in zip(letters, hrefs):
        addon.addDir(str(letter), url + str(href), mode='SHListTitles',
                     section=str(letter), thumb=str(Letter[str(letter)]),
                     fanart=custom_background)
def KategorieLista():
    """Slider entries with a colored type tag shown via label2 (sorted by title)."""
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    page = requests.get(url, headers=headersget, timeout=15).text
    page = CleanHTML(page)
    # LoginCheck(url=page)
    slider = parseDOM(page, 'div', attrs={'class': 'avia-content-slider-inner'})[0]
    headings = parseDOM(slider, 'h3')
    titles = [parseDOM(h, 'a', ret='title')[0] for h in headings]
    images = parseDOM(slider, 'img', ret='src')
    hrefs = [parseDOM(h, 'a', ret='href')[0] for h in headings]
    for title, href, img in zip(titles, hrefs, images):
        target = str(href)
        if '/drama/' in target:
            addon.addDir(str(title), target, mode=2, fanart=str(img), thumb=str(img),
                         code='[B][COLOR %s]Drama[/COLOR][/B]' % 'green')
        elif '/film/' in target:
            addon.addLink(str(title), target, mode=3, fanart=str(img), thumb=str(img),
                          code='[B][COLOR %s]Film[/COLOR][/B]' % 'green')
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def Browse_Titles():
    """List wbijam titles with metadata from the external scraper.

    Featured entries are enriched via scraper.scraper_check / scraper.Scrap
    with page-level fallbacks; pending scraper threads are handed to
    ScrapInfo in a background thread.  Non-featured entries require fetching
    each subpage for its cover image.
    """
    url = params['url']
    name = params['name']
    html = requests.get(url, timeout=15).text
    if name in html:
        mark1 = '>' + name + '</div>'
        mark2 = '</ul>'
        data = GetDataBeetwenMarkers(html, mark1, mark2, False)[1]
        data = re.findall('<a href="(.+?)"(.+?)">(.+?)</a></li>', data)
        data.sort()
        ##### Featured #####
        thread = False
        if len(data) > 0:
            threads = []
            for item in data:
                # Skip portal self-links and the generic "Anime online" entry.
                if 'Anime online' in str(item[2]):
                    continue
                elif 'inne.wbijam' in str(item[0]).lower():
                    continue
                link = item[0]
                tytul = item[2]
                # Scraper title exceptions.
                if tytul == 'Decadence':
                    tytul = 'Deca-dence'
                title, poster, plot, fanart, genre, year, thread = scraper.scraper_check(
                    tytul, type='TV')
                # Any empty scraper field falls back to a local default.
                if title == '':
                    title = tytul
                if plot == '':
                    plot = ''
                if poster == '':
                    poster, fanart = scraper.Scrap(tytul, type='TV', poster=True)
                if fanart == '':
                    fanart = default_background
                if genre == '':
                    genre = ''
                if year == '':
                    year = ''
                if thread:
                    threads.append(thread)
                addon.addDir(title, link, mode='Browse_Seasons', thumb=str(poster), fanart=str(fanart), section='polecane', page=str(url), subdir=title, genre=genre, year=year, plot=plot)
            if thread:
                # NOTE(review): a freshly constructed Thread is never alive,
                # so the join() branch looks unreachable — confirm intent.
                t = threading.Thread(target=ScrapInfo, args=(threads, ))
                if t.is_alive():
                    t.join()
                else:
                    t.start()
        ##### Remaining #####
        elif len(data) == 0:
            data2 = GetDataBeetwenMarkers(html, mark1, mark2, False)[1]
            data2 = re.findall('<a href="(.+?)">(.+?)</a></li>', data2)
            data2.sort()
            for item in data2:
                link = url + item[0]
                # NOTE: 'set' shadows the builtin here.
                set = requests.get(link, timeout=15).text
                image = parseDOM(
                    [i for i in parseDOM(set, 'center') if 'img' in i][0],
                    'img',
                    ret='src')[0]
                title = item[1]
                addon.addDir(title, link, mode='Browse_Seasons', thumb=url + str(image), fanart=default_background, section='other', page=str(url), subdir=title)
def Pagedragon():
    """StrefaDB menu: one folder per Dragon Ball series/movies listing."""
    entries = (
        ("Filmy Kinowe", 'filmy-kinowe.html', DBMOVIEthumb),
        ("DragonBall", 'odcinki/dragon-ball.html', DBthumb),
        ("DragonBall Z", 'odcinki/dragon-ball-z.html', DBZthumb),
        ("DragonBall KAI", '/odcinki/dragon-ball-kai.html', DBKAIthumb),
        ("DragonBall GT", '/odcinki/dragon-ball-gt.html', DBGTthumb),
        ("DragonBall Super", '/odcinki/dragon-ball-super.html', DBSUPERthumb),
        ("DragonBall Super Heroes", '/odcinki/dragon-ball-super-heroes.html', DBSUPERHEROthumb),
        ("DragonBall Z Abridged", '/odcinki/dragon-ball-z-abridged.html', DBZABRIDGthumb),
    )
    # NOTE(review): some paths carry a leading slash and some do not — kept
    # byte-identical to the original; confirm against mainLink's format.
    for label, path, thumb in entries:
        addon.addDir(label, mainLink + path, mode='DBListTitles',
                     fanart=DBAllfanart, section='ListTitles', thumb=thumb)
def PageAnimeOdcinki(url):
    """Anime-Odcinki menu: browse modes for anime/films plus genres and search."""
    entries = (
        ("[Anime] Alfabetycznie", 'anime', 'AOAlfabetycznie', None, None),
        ("[Anime] Emitowane", 'anime', 'AOListTitles', 'Aired', None),
        ("[Anime] Wszystkie", 'anime', 'AOListTitles', 'All', None),
        ("[Filmy] Alfabetycznie", 'filmy', 'AOAlfabetycznie', None, None),
        ("[Filmy] Wszystkie", 'filmy', 'ListTitles', 'All', None),
        ("Gatunki", 'gatunki', 'AOGatunki', 'gatunki', searchicon),
        ("Wyszukiwarka", 'szukaj/', 'AOSearch', 'search', searchicon),
    )
    for label, path, mode, section, thumb in entries:
        # Only pass the optional kwargs the original call supplied.
        kwargs = {'fanart': default_background}
        if section is not None:
            kwargs['section'] = section
        if thumb is not None:
            kwargs['thumb'] = thumb
        addon.addDir(label, mainLink + path, mode=mode, **kwargs)
def ListTitles(url=''):
    """List Shinden series with per-title metadata (plot/genre/year scraped
    from each series page) and queue the next-page entry.

    url: optional direct URL; falls back to params['url'] when empty.
    """
    # name = params['name']
    if url == '':
        url = params['url']
    section = params['section']
    html = requests.get(url, timeout=15).text
    result = str(parseDOM(html, 'section', attrs={'class': 'anime-list box'})[0])
    # Only rows containing an <h3> are real series entries.
    results = [item for item in parseDOM(result, 'ul', attrs={'class': 'div-row'}) if 'h3' in item]
    for item in results:
        # anchor[0] is the cover-image link, anchor[1] the series page link.
        link = mainLink + re.sub('/series/', 'series/', parseDOM(item, 'a', ret='href')[1])
        obraz = mainLink + re.sub('/res/', 'res/', parseDOM(item, 'a', ret='href')[0])
        title = parseDOM(item, 'a')[1]
        title = title.replace('<em>', '')
        title = title.replace('</em>', '')
        # BUGFIX: 'datasite' was referenced in the later try-blocks even when
        # the first request failed (the NameError was silently swallowed by a
        # bare except); initialize it so the fallbacks degrade deliberately.
        datasite = ''
        try:
            datasite = requests.get(link, timeout=10).text
            plotdata = parseDOM(datasite, 'div', attrs={'id': 'description'})[0]
            plot = CleanHTML(parseDOM(plotdata, 'p')[0])
        except Exception:
            plot = ''
        try:
            genredata = [i for i in parseDOM(datasite, 'tr') if 'Gatunki' in i][0]
            genre = ', '.join(parseDOM(genredata, 'a'))
        except Exception:
            genre = ''
        try:
            yeardata = parseDOM(datasite, 'section', attrs={'class': 'title-small-info'})[0]
            year = re.findall(r'[1-2][0-9]{3}', yeardata)[0]
        except Exception:
            year = ''
        addon.addDir(str(title), link, mode='SHListEpisodes', section='episodes',
                     thumb=str(obraz), fanart=custom_background, subdir=str(title),
                     plot=str(plot), genre=str(genre), year=str(year))
    try:
        # FIX: renamed from 'next' to avoid shadowing the builtin.
        next_href = parseDOM(html, 'a', attrs={'rel': 'next'}, ret='href')[0]
        if len(next_href) > 0:
            nextpage = CleanHTML(mainLink + re.sub('/', '', next_href))
            # Strip the r307 redirect marker from the pagination link.
            if '&r307=1' in nextpage:
                nextpage = str(nextpage).replace('&r307=1', '')
            elif 'r307=1' in nextpage:
                nextpage = str(nextpage).replace('r307=1', '')
            addon.addDir('[I]następna strona[/I]', str(nextpage), mode='SHListTitles',
                         section='nextpage', thumb=str(nexticon), fanart=custom_background)
    except Exception:
        # Last page: no rel="next" anchor present.
        pass
def MainMenu():
    """Root menu: one entry per source enabled in settings, plus settings."""
    sources = (
        ('Wbijam', 'Wbijam.pl', 'https://inne.wbijam.pl/', 'Pagewbijam', iconWbijam),
        ('AnimeOdcinki', 'Anime Odcinki', 'https://anime-odcinki.pl/', 'AnimeOdcinki', iconOdcinki),
        ('Shinden', 'Shinden.pl', 'https://shinden.pl/series', 'Shinden', iconShinden),
        ('AnimeZone', 'Animezone.pl', 'https://www.animezone.pl/', 'AnimeZone', iconAnimezone),
        ('Dragonball', 'StrefaDB.pl', 'https://strefadb.pl/', 'Dragonball', iconstrefadb),
        ('AnimeOn', 'Animeon.pl', 'https://animeon.pl/', 'Animeon', iconstrefadb),
        ('Kreskoweczki', 'Kreskóweczki', 'http://www.kreskoweczki.pl', 'Animeon', iconkresk),
    )
    for key, label, link, mode, thumb in sources:
        if setting(key) == 'true':
            addon.addDir('[COLOR=%s]%s[/COLOR]' % ('blue', label), link, mode=mode,
                         fanart=default_background, thumb=thumb, isFolder=True)
    addon.addDir('Ustawienia', '', 'Settings', fanart=default_background, isFolder=True)
def ListTitles():
    """List dramaqueen titles enriched with metadata from dqscraper.

    Dramas become folders (mode=2), films playable links (mode=3).  Pending
    scraper threads are collected and handed to ScrapInfo in a background
    thread.
    """
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    html = requests.get(url, headers=headersget, timeout=15).text
    html = CleanHTML(html)
    result = parseDOM(html, 'div', attrs={'id': 'av_section_1'})[0]
    # One match per title card in the section grid.
    results = re.findall(
        'flex_column ' + r'.+?' + 'av_one_fourth(.+?)</div></div></div>',
        result)
    titles = re.findall('><p>(.+?)</p>', result)
    linki = [item for item in parseDOM(results, 'a', ret='href')]
    Plot = re.findall('/p>[\s,\S,.]<p>(.+?)</p>', result)
    obrazy = parseDOM(results, 'img', ret='src')
    threads = []
    for item in zip(linki, titles, obrazy, Plot):
        # Scraper may return any field empty; fall back to page-scraped values.
        title, poster, plot, banner, fanart, genre, year, thread = dqscraper.scraper_check(
            item[1], item[0], item[2])
        if title == '':
            title = item[1]
        if plot == '':
            plot = item[3]
        if poster == '':
            poster = item[2]
        if banner == '':
            banner = item[2]
        if fanart == '':
            fanart = item[2]
        if genre == '':
            genre = ''
        if year == '':
            year = ''
        if thread:
            threads.append(thread)
        if str(item[0]).__contains__('/drama/'):
            addon.addDir(str(title), str(item[0]), mode=2, plot=(str(plot)), fanart=(str(fanart)), isFolder=True, thumb=(str(poster)), banner=(str(banner)), genre=str(genre), year=str(year), section='')
        elif str(item[0]).__contains__('/film/'):
            addon.addLink(str(title), str(item[0]), mode=3, plot=str(plot), fanart=str(fanart), thumb=str(poster), banner=str(banner))
    if thread:
        # NOTE(review): 'thread' here holds the value from the *last* loop
        # iteration, and a freshly constructed Thread is never alive, so the
        # join() branch looks unreachable — confirm intent.
        t = threading.Thread(target=ScrapInfo, args=(threads, ))
        if t.is_alive():
            t.join()
        else:
            t.start()
def Gatunki():
    """Shinden genre browser.

    section == 'gatunki': show a multiselect of genres and build a filtered
    URL; section == 'nextpage': URL is already complete. Then list the
    matching series and queue the next-page entry.
    """
    section = params['section']
    name = params['name']
    url = params['url']
    if section == 'gatunki':
        html = requests.get(url, timeout=10).content
        # Strip the inline <i> icon markup from each genre label.
        tagname = [
            re.sub('<i(.+?)</i>', '', item)
            for item in parseDOM(html, 'a', attrs={'class': 'genre-item'})
        ]
        tagcode = [
            'i' + item for item in parseDOM(
                html, 'a', attrs={'class': 'genre-item'}, ret='data-id')
        ]
        d = xbmcgui.Dialog()
        select = d.multiselect('Wybór Gatunku', tagname)
        # FIX: identity comparison with None (was 'select == None'); also
        # removed the unused 'taglink' list.
        if select is None:
            PageAnimeShinden()
            return
        seltags = [tagcode[idx] for idx in select]
        url = url + 'genres-type=all&genres=' + ';'.join(seltags)
    elif section == 'nextpage':
        url = url
    html = requests.get(url, timeout=15).content
    result = str(parseDOM(html, 'section', attrs={'class': 'anime-list box'})[0])
    # Only rows containing an <h3> are real series entries.
    results = [item for item in parseDOM(result, 'ul', attrs={'class': 'div-row'}) if 'h3' in item]
    for item in results:
        link = mainLink + re.sub('/series/', 'series/', parseDOM(item, 'a', ret='href')[1])
        obraz = mainLink + re.sub('/res/', 'res/', parseDOM(item, 'a', ret='href')[0])
        title = parseDOM(item, 'a')[1]
        title = title.replace('<em>', '[I]')
        title = title.replace('</em>', '[/I]')
        addon.addDir(str(title), link, mode='SHListEpisodes', section='episodes',
                     thumb=str(obraz), fanart=custom_background)
    try:
        next_href = parseDOM(html, 'a', attrs={'rel': 'next'}, ret='href')[0]
        if len(next_href) > 0:
            nextpage = CleanHTML(mainLink + re.sub('/', '', next_href))
            # Strip the r307 redirect marker from the pagination link.
            if '&r307=1' in nextpage:
                nextpage = str(nextpage).replace('&r307=1', '')
            elif 'r307=1' in nextpage:
                nextpage = str(nextpage).replace('r307=1', '')
            addon.addDir('[I]następna strona[/I]', str(nextpage), mode='SHListTitles',
                         section='nextpage', thumb=str(nexticon), fanart=custom_background)
    except Exception:
        # Last page: no rel="next" anchor present.
        pass
def Szukaj():
    """Dramaqueen search: prompt, filter result headings, enrich via dqscraper."""
    url = params['url']
    keyb = xbmc.Keyboard('', "Wyszukiwarka")
    keyb.doModal()
    if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
        search = keyb.getText()
        url = url + '%s' % search.replace(" ", "+")
    else:
        # BUGFIX: previously fell through after showing the categories and
        # queried the bare search URL; stop here instead.
        CATEGORIES(False)
        return
    html = requests.get(url, timeout=15).text
    result = str(parseDOM(html, 'main', attrs={'role': 'main'})[0])
    results = [CleanHTML(item) for item in parseDOM(result, 'h2')]
    # Category headings are excluded; only drama/film links are listed.
    excludelist = ['Japońsk', 'Koreańsk', 'Pozostałe']
    includelist = ['/drama/', '/film/']
    for item in results:
        if any(exclude in item for exclude in excludelist):
            continue
        if not any(include in item for include in includelist):
            continue
        Title = parseDOM(item, 'a')[0]
        link = parseDOM(item, 'a', ret='href')[0]
        data = requests.get(link, timeout=10).text
        title, poster, plot, banner, fanart, genre, year, thread = dqscraper.scraper_check(
            Title, link, poster='')
        # poster, fanart = scraper.Scrap(title, type='drama')
        # Any empty scraper field falls back to page-scraped values.
        # FIX: raw string for the regex (backslash escapes in a plain str).
        if fanart == '':
            fanart = re.findall(r'background-image: url\((.+?)\);', data)[1]
        if poster == '':
            poster = parseDOM(data, 'img', attrs={'itemprop': 'thumbnailUrl'}, ret='src')[0]
        if plot == '':
            plot = parseDOM(data, 'em')[0]
            plot = CleanHTML(plot)
        if title == '':
            title = Title
        if '/drama/' in item:
            addon.addDir(str(title), str(link), mode=2, fanart=str(fanart),
                         thumb=str(poster), poster=str(poster), plot=str(plot),
                         code='[B][COLOR=green]drama[/COLOR][/B]',
                         genre=str(genre), year=str(year))
        else:
            addon.addLink(str(title), str(link), mode=3, fanart=str(fanart),
                          thumb=str(poster), poster=str(poster), plot=str(plot),
                          code='[B][COLOR=green]film[/COLOR][/B]',
                          genre=str(genre), year=str(year))
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def Szukaj():
    """Dramaqueen search (legacy variant without the scraper)."""
    url = params['url']
    keyb = xbmc.Keyboard('', "Wyszukiwarka")
    keyb.doModal()
    if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
        search = keyb.getText()
        url = url + '%s' % search.replace(" ", "+")
    else:
        # BUGFIX: missing return — previously fell through and queried the
        # bare search URL after showing the categories.
        CATEGORIES(False)
        return
    html = requests.get(url, timeout=15).content
    result = str(parseDOM(html, 'main', attrs={'role': 'main'})[0])
    results = [CleanHTML(item) for item in parseDOM(result, 'h2')]
    for item in results:
        # Skip the category headings.
        if 'Japońsk' in item or 'Koreańsk' in item:
            continue
        if '/drama/' in item:
            title = parseDOM(item, 'a')[0]
            link = parseDOM(item, 'a', ret='href')[0]
            data = requests.get(link, timeout=10).content
            # FIX: raw string for the regex.
            fanart = re.findall(r'background-image: url\((.+?)\);', data)[1]
            poster = parseDOM(data, 'img', attrs={'itemprop': 'thumbnailUrl'}, ret='src')[0]
            plot = CleanHTML(parseDOM(data, 'em')[0])
            addon.addDir(str(title) + '[COLOR=green] drama[/COLOR]', str(link), mode=4,
                         fanart=str(poster), thumb=str(poster), poster=str(poster),
                         plot=str(plot))
        elif '/film/' in item:
            title = parseDOM(item, 'a')[0]
            link = parseDOM(item, 'a', ret='href')[0]
            # BUGFIX: the film branch fetched the same page twice (one result
            # was discarded); a single request suffices.
            data = requests.get(link, timeout=10).content
            fanart = re.findall(r'background-image: url\((.+?)\);', data)[1]
            poster = parseDOM(data, 'img', attrs={'itemprop': 'thumbnailUrl'}, ret='src')[0]
            plot = CleanHTML(parseDOM(data, 'em')[0])
            addon.addLink(str(title) + '[COLOR=green] film[/COLOR]', str(link), mode=5,
                          fanart=str(poster), thumb=str(poster), poster=str(poster),
                          plot=str(plot))
def CATEGORIES(login):
    """Dramaqueen root menu; triggers the login routine when requested."""
    addon.addDir('[COLOR=%s]Gatunki[/COLOR]' % 'yellow', base_link + '#gatunki/',
                 mode=6, fanart=korea_background)
    entries = (
        (str(DKorea), 'drama/koreanska/', 1, korea_background, korea_thumb),
        (str(DJapan), 'drama/japonska/', 1, japan_background, japan_thumb),
        ('Dramy Inne', 'drama/pozostale/', 1, china_background, inne_thumb),
        ('Film Korea', 'film/koreanski/', 2, korea_background, korea_thumb),
        ('Film Japonia', 'film/japonski/', 2, japan_background, japan_thumb),
        ('Filmy Pozosta\xc5\x82e', 'film/pozostale/', 2, china_background, inne_thumb),
    )
    for label, path, mode, fanart, thumb in entries:
        addon.addDir(label, base_link + path, mode=mode, fanart=fanart, thumb=thumb)
    addon.addDir("Wyszukiwanie", 'https://www.dramaqueen.pl/?s=', mode=8,
                 fanart=default_background, thumb=search_icon)
    if login == True:
        Logowanie()
def Search():
    """Shinden search handler.

    section == 'search': prompt via the on-screen keyboard and extend the URL
    with the query (returns to the main Shinden menu on cancel).
    section == 'nextpage': the URL is already complete.
    Then list the matching series and, when present, a next-page entry.
    """
    section = params['section']
    name = params['name']
    url = params['url']
    if section == 'search':
        keyb = xbmc.Keyboard('', "Wyszukiwarka anime")
        keyb.doModal()
        if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
            search = keyb.getText()
            url = url + '%s' % search.replace(" ", "+")
        else:
            PageAnimeShinden()
            return
    elif section == 'nextpage':
        url = url
    html = requests.get(url, timeout=15).content
    result = str(parseDOM(html, 'section', attrs={'class': 'anime-list box'})[0])
    # Only rows containing an <h3> are real series entries.
    results = [item for item in parseDOM(result, 'ul', attrs={'class': 'div-row'}) if 'h3' in item]
    for item in results:
        # anchor[0] is the cover-image link, anchor[1] the series page link.
        link = mainLink + re.sub('/series/', 'series/', parseDOM(item, 'a', ret='href')[1])
        obraz = mainLink + re.sub('/res/', 'res/', parseDOM(item, 'a', ret='href')[0])
        title = parseDOM(item, 'a')[1]
        # Map <em> emphasis to Kodi italics markup.
        title = title.replace('<em>', '[I]')
        title = title.replace('</em>', '[/I]')
        addon.addDir(
            str(title),
            link,
            mode='SHListEpisodes',
            section='episodes',
            thumb=str(obraz),
            fanart=custom_background,
        )
    try:
        next = parseDOM(html, 'a', attrs={'rel': 'next'}, ret='href')[0]
        if len(next) > 0:
            nextpage = mainLink + re.sub('/', '', next)
            nextpage = CleanHTML(nextpage)
            # Strip the r307 redirect marker from the pagination link.
            if '&r307=1' in nextpage:
                nextpage = str(nextpage).replace('&r307=1', '')
            elif 'r307=1' in nextpage:
                nextpage = str(nextpage).replace('r307=1', '')
            addon.addDir(
                '[I]następna strona[/I]',
                str(nextpage),
                mode='SHListTitles',
                section='nextpage',
                thumb=str(nexticon),
                fanart=custom_background,
            )
    except:
        # Last page: no rel="next" anchor present.
        pass