def ListMovies():
    """List all dramaqueen movies from the 'av_section_1' grid.

    Reads the target URL from the global ``params`` dict, fetches the page
    with the cached login cookie, and adds one playable link per movie.
    """
    # Attach the cached session cookie so the site serves the logged-in page.
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    # BUG FIX: str(resp.content) produced "b'...'"-wrapped garbage on
    # Python 3; use .text so the regexes below operate on decoded HTML.
    rM = requests.get(url, headers=headersget, timeout=15).text
    rM = CleanHTML(rM)
    result = parseDOM(rM, 'div', attrs={'id': 'av_section_1'})[0]
    results = re.findall('flex_column av_one_fourth(.+?)</div></div></div>',
                         result)
    Titles = re.findall('><p>(.+?)</p>', result)
    Plot = re.findall('/p>[\s,\S,.]<p>(.+?)</p>', result)
    obrazy = parseDOM(results, 'img', ret='src')
    # parseDOM accepts a list of fragments; no need for a copying loop.
    linki = parseDOM(results, 'a', ret='href')
    for link, title, img, plot in zip(linki, Titles, obrazy, Plot):
        addon.addLink(str(title), str(link), mode=5, thumb=str(img),
                      fanart=str(img), plot=str(plot))
def ListEpisodes():
    """List episodes for a title parsed from its 'anime-header' section.

    Expects 'section', 'name', 'url' and 'subdir' in the global ``params``.
    Falls back to an empty plot when no plot paragraph is found.
    """
    section = params['section']
    name = params['name']
    url = params['url']
    subdir = params['subdir']
    result = requests.get(url, timeout=15).text
    result = CleanHTML(result)
    results = parseDOM(result, 'section', attrs={'id': 'anime-header'})
    poster = parseDOM(results, 'img', ret='src')[0]
    link = parseDOM(results, 'a', ret='href')
    title = parseDOM(results, 'a')
    tags = parseDOM(result, 'div',
                    attrs={'class': 'field field-name-field-tags'})
    try:
        plot = re.findall('p><p>(.+?)</p>', result)[0]
        # BUG FIX: the original tested `len(findall(...)) >= 0`, which is
        # always true; strip <span> markup only when it is actually present
        # (the substitution is a no-op otherwise, so behavior is unchanged).
        if '<span' in plot:
            plot = re.sub('<span(.+?)/span>', '', plot)
    except IndexError:
        # No plot paragraph on the page - use an empty plot.
        plot = ''
    for ep_title, ep_link in zip(title, link):
        addon.addLink(str(ep_title), str(ep_link), mode='AOListLinks',
                      section='links', thumb=str(poster), plot=str(plot),
                      fanart=custom_background, subdir=subdir)
def KategorieLista():
    """List dramaqueen category entries, tagging each as Drama or Film.

    Dramas become folders (mode=4); films become playable links (mode=5).
    """
    # Attach the cached login cookie to the shared GET headers.
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    # BUG FIX: use .text (decoded str) rather than .content (bytes) so the
    # string-based parsing below works under Python 3.
    rG = requests.get(url, headers=headersget, timeout=15).text
    rG = CleanHTML(rG)
    # LoginCheck(url=rG)
    result = parseDOM(rG, 'div',
                      attrs={'class': 'avia-content-slider-inner'})[0]
    headings = parseDOM(result, 'h3')
    label = [parseDOM(i, 'a', ret='title')[0] for i in headings]
    obraz = parseDOM(result, 'img', ret='src')
    links = [parseDOM(i, 'a', ret='href')[0] for i in headings]
    for title, link, img in zip(label, links, obraz):
        # Idiom fix: use the `in` operator instead of calling __contains__.
        if '/drama/' in str(link):
            addon.addDir(str(title) + ' ' + '[COLOR %s]Drama[/COLOR]' % 'green',
                         str(link), mode=4, fanart=str(img), thumb=str(img))
        elif '/film/' in str(link):
            addon.addLink(str(title) + ' ' + '[COLOR %s]Film[/COLOR]' % 'green',
                          str(link), mode=5, fanart=str(img), thumb=str(img))
def ListTitles():
    """Build the episode list from the page's 'lista-odcinkow' table.

    Rows containing a <span> mark filler episodes, which get a green tag
    appended to their label.
    """
    url = params['url']
    section = params['section']
    name = params['name']
    thumb = params['img']
    html = requests.get(url, timeout=10).text
    table = parseDOM(html, 'table', attrs={'id': 'lista-odcinkow'})[0]
    rows = parseDOM(table, 'tr')
    # Keep only rows that actually link to an episode page.
    episodes = [parseDOM(row, 'td') for row in rows if 'href' in row]
    for number, cell, _s, _r, _t in episodes:
        ep_name = parseDOM(cell, 'a')[0]
        label = number + ' ' + ep_name
        if '<span' in cell:
            label += '[COLOR=green] Filler[/COLOR]'
        href = mainLink + re.sub('^/', '', parseDOM(cell, 'a', ret='href')[0])
        addon.addLink(label, href, mode='DBListLinks', fanart=DBAllfanart,
                      thumb=thumb, section='ListLinks', subdir=name)
def ListEpisodes():
    """List shinden episodes of a title with online/offline status.

    Status comes from the FontAwesome icon class: 'check' means the
    episode is watchable online; anything else is shown as offline.
    """
    section = params['section']
    name = params['name']
    url = params['url'] + '/episodes'
    thumb = params['img']
    Logowanie()
    cookie = cache.cache_get('shinden_cookie')['value']
    headersget.update({'Cookie': cookie})
    headers = headersget
    # BUG FIX / consistency: use .text (decoded str) instead of .content
    # (bytes) - the str-pattern regexes below fail on bytes under Python 3,
    # and the newer variant of this listing already uses .text.
    html = requests.get(url, headers=headers, timeout=15).text
    result = parseDOM(html, 'tbody',
                      attrs={'class': 'list-episode-checkboxes'})[0]
    rows = parseDOM(result, 'tr')
    epNo = [parseDOM(row, 'td')[0] for row in rows]
    epTitle = [parseDOM(row, 'td', attrs={'class': 'ep-title'})[0]
               for row in rows]
    epstatus = [re.findall('<i class="fa fa-fw fa-(.+?)"></i>', row)[0]
                for row in rows]
    epDate = [parseDOM(row, 'td', attrs={'class': 'ep-date'})[0]
              for row in rows]
    link = [mainLink + re.sub('^/', '', parseDOM(row, 'a', ret='href')[0])
            for row in rows]
    for no, ep_title, date, href, status in zip(epNo, epTitle, epDate,
                                                link, epstatus):
        if str(status) == 'check':
            title = (str(no) + ' : ' + str(ep_title)
                     + '[COLOR=blue]%s[/COLOR]' % (' - ' + str(date)))
            section = 'online'
        else:
            # The explicit 'times' status and the catch-all branch were
            # identical - collapse the duplicated code into one branch.
            title = str(no) + ' ' + '[COLOR=red] offline [/COLOR]'
            section = 'offline'
        addon.addLink(title, str(href), mode='SHListLinks',
                      fanart=str(thumb), thumb=str(thumb), section=section)
def KategorieLista():
    """List dramaqueen slider categories with a Drama/Film code label.

    Dramas become folders (mode=2); films become playable links (mode=3).
    Registers title sorting for the resulting directory listing.
    """
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    rG = requests.get(url, headers=headersget, timeout=15).text
    rG = CleanHTML(rG)
    # LoginCheck(url=rG)
    result = parseDOM(rG, 'div',
                      attrs={'class': 'avia-content-slider-inner'})[0]
    headings = parseDOM(result, 'h3')
    label = [parseDOM(i, 'a', ret='title')[0] for i in headings]
    obraz = parseDOM(result, 'img', ret='src')
    links = [parseDOM(i, 'a', ret='href')[0] for i in headings]
    for title, link, img in zip(label, links, obraz):
        # Idiom fix: use the `in` operator instead of calling __contains__.
        if '/drama/' in str(link):
            addon.addDir(str(title), str(link), mode=2, fanart=str(img),
                         thumb=str(img),
                         code='[B][COLOR %s]Drama[/COLOR][/B]' % 'green')
        elif '/film/' in str(link):
            addon.addLink(str(title), str(link), mode=3, fanart=str(img),
                          thumb=str(img),
                          code='[B][COLOR %s]Film[/COLOR][/B]' % 'green')
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def ListEpisodes():
    """List dramaqueen episodes for a title, flagging unfinished ones.

    Episodes still marked as being translated ('tłumaczenie') or in
    correction ('korekta') are shown with a red label and routed to
    mode=8; finished episodes are playable via mode=3.
    """
    Logowanie(False)
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    name = params['name']
    thumb = params['img']
    url = params['url']
    rE = str(requests.get(url, headers=headersget, timeout=15).text)
    LoginCheck(rE)
    # Normalise typographic dashes/spaces coming from the CMS.
    # Idiom fix: call .replace() on the instance instead of str.replace(rE, ...).
    rE = rE.replace('–', '-')
    rE = rE.replace(' ', ' ')
    result = parseDOM(rE, 'div',
                      attrs={'class': 'togglecontainer ' + r'.+?'})[0]
    results = re.findall('av_toggle_section(.+?)<span', result)
    # parseDOM already returns a list; no need for a copying loop.
    episodes = parseDOM(results, 'p')
    plot = parseDOM(rE, 'section',
                    attrs={'class': 'av_textblock_section '})[1]
    if '<em>' in plot:
        plot = CleanHTML(parseDOM(plot, 'em')[0])
    else:
        plot = CleanHTML(parseDOM(plot, 'p')[0])
    fanart = ''  # re.findall('background-image: url\((.+?)\);', rE)[1]
    inprogress = '[COLOR=red][I] w tłumaczeniu[/COLOR][/I]'
    incorrection = '[COLOR=red][I] korekta[/COLOR][/I]'
    for item in episodes:
        # Idiom fix: `in` operator instead of calling __contains__.
        if 'tłumaczenie' in item:
            addon.addLink(str(inprogress), url, mode=8, fanart=str(fanart),
                          plot=str(plot), thumb=str(thumb))
        elif 'korekta' in item:
            addon.addLink(str(incorrection), url, mode=8, fanart=str(fanart),
                          plot=str(plot), thumb=str(thumb))
        else:
            addon.addLink(str(item), url, mode=3, fanart=str(fanart),
                          plot=str(plot), thumb=str(thumb))
def List_Episodes():
    """List episodes for an aol title, branching on the 'section' param.

    'polecane' lists the recommended table with air dates, 'multi' slices
    a single season out of a multi-season page, anything else lists a
    flat single-season table.

    BUG FIX throughout: use .text (decoded str) instead of .content
    (bytes) so string parsing works under Python 3 (the newer variant of
    this function already does).
    """
    url = params['url']
    section = params['section']
    page = params['page']
    img = params['img']
    ### Listowanie Polecanych
    if section == 'polecane':
        result = requests.get(url).text
        result = parseDOM(result, 'table', attrs={'class': 'lista'})[0]
        result = parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
        link = [page + parseDOM(item, 'a', ret='href')[0] for item in result]
        tytul = [str(parseDOM(item, 'img')[0]).split("</a>")[0]
                 for item in result]
        data = [parseDOM(item, 'td', attrs={'class': 'center'})[1]
                for item in result]
        for href, title, date in zip(link, tytul, data):
            addon.addLink(str(title) + ' ' + str(date), str(href),
                          mode='List_Links', thumb=fanartAol,
                          fanart=default_background, page=str(page),
                          section='polecane')
    #### Listowanie pozostalych z wieloma sezonami
    elif section == 'multi':
        result = requests.get(url).text
        # Cut out only the table belonging to the requested season header.
        season = GetDataBeetwenMarkers(
            result, '<h1 class="pod_naglowek">' + page, '</table>', False)[1]
        results = parseDOM(season, 'tr', attrs={'class': 'lista_hover'})
        tytul = [str(parseDOM(item, 'img')[0]).split('</td>')[0]
                 for item in results]
        link = mainLink
        for title in tytul:
            addon.addLink(str(title), str(url), mode='List_Links',
                          thumb=str(img), fanart=default_background,
                          page=str(page), section='multi')
    #### Listowanie pozostalych pojedynczych
    else:
        result = requests.get(url).text
        result = parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
        tytul = [str(parseDOM(item, 'img')[0]).split('</td>')[0]
                 for item in result]
        for title in tytul:
            addon.addLink(str(title), str(url), mode='List_Links',
                          thumb=str(img), fanart=default_background,
                          page=str(page), section='other')
def Szukaj():
    """Search dramaqueen and list matching drama/film results.

    Opens an on-screen keyboard; if the user cancels, falls back to the
    main categories menu instead of querying the site.
    """
    url = params['url']
    keyb = xbmc.Keyboard('', "Wyszukiwarka")
    keyb.doModal()
    if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
        search = keyb.getText()
        url = url + '%s' % search.replace(" ", "+")
    else:
        # BUG FIX: return after showing the categories - the original fell
        # through and queried the unmodified search URL anyway.
        CATEGORIES(False)
        return
    # BUG FIX: .text (decoded str) instead of .content (bytes) so the
    # str-pattern parsing below works under Python 3.
    html = requests.get(url, timeout=15).text
    result = str(parseDOM(html, 'main', attrs={'role': 'main'})[0])
    results = [CleanHTML(item) for item in parseDOM(result, 'h2')]
    for item in results:
        if 'Japońsk' in item or 'Koreańsk' in item:
            continue
        if '/drama/' in item or '/film/' in item:
            title = parseDOM(item, 'a')[0]
            link = parseDOM(item, 'a', ret='href')[0]
            # Single fetch per hit (the film branch previously fetched the
            # same page twice); the unused fanart regex - which could raise
            # IndexError on pages with fewer backgrounds - is dropped.
            data = requests.get(link, timeout=10).text
            poster = parseDOM(data, 'img',
                              attrs={'itemprop': 'thumbnailUrl'},
                              ret='src')[0]
            plot = CleanHTML(parseDOM(data, 'em')[0])
            if '/drama/' in item:
                addon.addDir(str(title) + '[COLOR=green] drama[/COLOR]',
                             str(link), mode=4, fanart=str(poster),
                             thumb=str(poster), poster=str(poster),
                             plot=str(plot))
            else:
                addon.addLink(str(title) + '[COLOR=green] film[/COLOR]',
                              str(link), mode=5, fanart=str(poster),
                              thumb=str(poster), poster=str(poster),
                              plot=str(plot))
def Szukaj():
    """Search dramaqueen; enrich hits with scraper metadata and list them.

    Opens an on-screen keyboard; if the user cancels, falls back to the
    main categories menu. For each accepted hit, any field the scraper
    leaves empty falls back to values parsed from the result page.
    """
    url = params['url']
    keyb = xbmc.Keyboard('', "Wyszukiwarka")
    keyb.doModal()
    if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
        search = keyb.getText()
        url = url + '%s' % search.replace(" ", "+")
    else:
        # BUG FIX: return after showing the categories - the original fell
        # through and queried the unmodified search URL anyway.
        CATEGORIES(False)
        return
    html = requests.get(url, timeout=15).text
    result = str(parseDOM(html, 'main', attrs={'role': 'main'})[0])
    results = [CleanHTML(item) for item in parseDOM(result, 'h2')]
    excludelist = ['Japońsk', 'Koreańsk', 'Pozostałe']
    includelist = ['/drama/', '/film/']
    for item in results:
        if any(exclude in item for exclude in excludelist):
            continue
        elif any(include in item for include in includelist):
            Title = parseDOM(item, 'a')[0]
            link = parseDOM(item, 'a', ret='href')[0]
            data = requests.get(link, timeout=10).text
            title, poster, plot, banner, fanart, genre, year, thread = \
                dqscraper.scraper_check(Title, link, poster='')
            # poster, fanart = scraper.Scrap(title, type='drama')
            # Fall back to page-scraped values when the scraper is empty.
            if fanart == '':
                fanart = re.findall('background-image: url\((.+?)\);',
                                    data)[1]
            if poster == '':
                poster = parseDOM(data, 'img',
                                  attrs={'itemprop': 'thumbnailUrl'},
                                  ret='src')[0]
            if plot == '':
                plot = parseDOM(data, 'em')[0]
                plot = CleanHTML(plot)
            if title == '':
                title = Title
            if '/drama/' in item:
                addon.addDir(str(title), str(link), mode=2,
                             fanart=str(fanart), thumb=str(poster),
                             poster=str(poster), plot=str(plot),
                             code='[B][COLOR=green]drama[/COLOR][/B]',
                             genre=str(genre), year=str(year))
            else:
                addon.addLink(str(title), str(link), mode=3,
                              fanart=str(fanart), thumb=str(poster),
                              poster=str(poster), plot=str(plot),
                              code='[B][COLOR=green]film[/COLOR][/B]',
                              genre=str(genre), year=str(year))
        else:
            continue
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def ListTitles():
    """List dramaqueen titles, enriched with metadata from dqscraper.

    Any field the scraper leaves empty falls back to the value parsed
    from the listing page itself. Titles whose metadata still needs
    fetching are queued and scraped on a single background thread.
    """
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    html = requests.get(url, headers=headersget, timeout=15).text
    html = CleanHTML(html)
    result = parseDOM(html, 'div', attrs={'id': 'av_section_1'})[0]
    results = re.findall(
        'flex_column ' + r'.+?' + 'av_one_fourth(.+?)</div></div></div>',
        result)
    titles = re.findall('><p>(.+?)</p>', result)
    # parseDOM already returns a list; no need for a copying loop.
    linki = parseDOM(results, 'a', ret='href')
    Plot = re.findall('/p>[\s,\S,.]<p>(.+?)</p>', result)
    obrazy = parseDOM(results, 'img', ret='src')
    threads = []
    for link, page_title, img, page_plot in zip(linki, titles, obrazy, Plot):
        title, poster, plot, banner, fanart, genre, year, thread = \
            dqscraper.scraper_check(page_title, link, img)
        # Fall back to page-derived values for anything the scraper missed.
        # (The original's `if genre == '': genre = ''` no-ops are dropped.)
        if title == '':
            title = page_title
        if plot == '':
            plot = page_plot
        if poster == '':
            poster = img
        if banner == '':
            banner = img
        if fanart == '':
            fanart = img
        if thread:
            threads.append(thread)
        if '/drama/' in str(link):
            addon.addDir(str(title), str(link), mode=2, plot=str(plot),
                         fanart=str(fanart), isFolder=True,
                         thumb=str(poster), banner=str(banner),
                         genre=str(genre), year=str(year), section='')
        elif '/film/' in str(link):
            addon.addLink(str(title), str(link), mode=3, plot=str(plot),
                          fanart=str(fanart), thumb=str(poster),
                          banner=str(banner))
    # BUG FIX: the original gated on the *last* loop item's `thread` and
    # had a dead `is_alive()` branch (a Thread is never alive before
    # .start()). Start the background scraper whenever anything was queued.
    if threads:
        threading.Thread(target=ScrapInfo, args=(threads, )).start()
def ListEpisodes():
    """List shinden episodes with online/offline status and air-date code.

    Status comes from the FontAwesome icon class: 'check' means the
    episode is watchable online; anything else is shown as offline.
    """
    section = params['section']
    url = params['url'] + '/all-episodes'
    thumb = params['img']
    subdir = params['subdir']
    Logowanie()
    cookie = cache.cache_get('shinden_cookie')['value']
    headersget.update({'Cookie': cookie})
    headers = headersget
    html = requests.get(url, headers=headers, timeout=15).text
    result = parseDOM(html, 'tbody',
                      attrs={'class': 'list-episode-checkboxes'})[0]
    rows = parseDOM(result, 'tr')
    epNo = [parseDOM(row, 'td')[0] for row in rows]
    epTitle = [parseDOM(row, 'td', attrs={'class': 'ep-title'})[0]
               for row in rows]
    epstatus = [re.findall('<i class="fa fa-fw fa-(.+?)"></i>', row)[0]
                for row in rows]
    epDate = [parseDOM(row, 'td', attrs={'class': 'ep-date'})[0]
              for row in rows]
    link = [mainLink + re.sub('^/', '', parseDOM(row, 'a', ret='href')[0])
            for row in rows]
    for no, ep_title, date, href, status in zip(epNo, epTitle, epDate,
                                                link, epstatus):
        # The date code was built identically in every branch - hoist it.
        code = '[B][COLOR=blue]%s[/COLOR][/B]' % (str(date))
        if str(status) == 'check':
            title = str(no) + ' : ' + str(ep_title)
            section = 'online'
        else:
            # The explicit 'times' status and the catch-all branch were
            # identical - collapse the duplicated code into one branch.
            title = str(no) + ' ' + '[COLOR=red] offline [/COLOR]'
            section = 'offline'
        addon.addLink(title, str(href), mode='SHListLinks',
                      fanart=str(thumb), thumb=str(thumb), section=section,
                      subdir=subdir, code=code)
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def List_Episodes():
    """List aol episodes; behavior depends on the 'section' parameter.

    'polecane' -> recommended table with an air-date code label,
    'multi'    -> one season sliced out of a multi-season page,
    otherwise  -> a flat single-season table.
    """
    url = params['url']
    section = params['section']
    page = params['page']
    img = params['img']
    subdir = params['subdir']
    if section == 'polecane':
        ### Listowanie Polecanych
        html = requests.get(url).text
        table = parseDOM(html, 'table', attrs={'class': 'lista'})[0]
        rows = parseDOM(table, 'tr', attrs={'class': 'lista_hover'})
        hrefs = [page + parseDOM(row, 'a', ret='href')[0] for row in rows]
        names = [str(parseDOM(row, 'img')[0]).split("</a>")[0]
                 for row in rows]
        dates = [parseDOM(row, 'td', attrs={'class': 'center'})[1]
                 for row in rows]
        for href, name, date in zip(hrefs, names, dates):
            addon.addLink(str(name), str(href), mode='List_Links',
                          thumb=img, fanart=default_background,
                          page=str(page), section='polecane', subdir=subdir,
                          code='[B][COLOR=blue]%s[/COLOR][/B]' % str(date))
        xbmcplugin.addSortMethod(int(sys.argv[1]),
                                 sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                                 label2Mask='%P')
    elif section == 'multi':
        #### Listowanie pozostalych z wieloma sezonami
        html = requests.get(url).text
        # Cut out only the table belonging to the requested season header.
        season = GetDataBeetwenMarkers(
            html, '<h1 class="pod_naglowek">' + page, '</table>', False)[1]
        rows = parseDOM(season, 'tr', attrs={'class': 'lista_hover'})
        names = [str(parseDOM(row, 'img')[0]).split('</td>')[0]
                 for row in rows]
        link = mainLink
        for name in names:
            addon.addLink(str(name), str(url), mode='List_Links',
                          thumb=str(img), fanart=default_background,
                          page=str(page), section='multi', subdir=subdir)
    else:
        #### Listowanie pozostalych pojedynczych
        html = requests.get(url).text
        rows = parseDOM(html, 'tr', attrs={'class': 'lista_hover'})
        names = [str(parseDOM(row, 'img')[0]).split('</td>')[0]
                 for row in rows]
        for name in names:
            addon.addLink(str(name), str(url), mode='List_Links',
                          thumb=str(img), fanart=default_background,
                          page=str(page), section='other', subdir=subdir)