def WyswietlanieLinkow():
    """Show the source-selection dialog for a DramaQueen episode.

    Reads 'url'/'name' from the global params, fetches the episode page with
    the cached session cookie, and collects player buttons/links from the
    toggle section matching the episode number.
    """
    Logowanie(False)
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    name = params['name']
    html = requests.get(url, headers=headersget, timeout=15).text
    LoginCheck(html)
    results = parseDOM(html, 'section',
                       attrs={'class': 'av_toggle_section ' + r'.+?'})
    if name.startswith('Odcinek '):
        # The episode number selects the matching toggle section (1-based).
        index = int(re.findall(r'\d+', name)[0])
        avlinks = [parseDOM(item, 'a', ret='href') for item in results][index - 1]
        avplayers = [parseDOM(item, 'button') for item in results][index - 1]
    elif 'tłumaczeni' in name:
        # Still being translated - nothing playable yet.
        # FIX: return instead of falling through to SourceSelect with
        # avlinks/avplayers undefined (NameError in the original).
        return
    elif 'korekta' in name:
        # Still in proofreading - nothing playable yet.
        return
    else:
        avlinks = [parseDOM(item, 'a', ret='href') for item in results][0]
        avplayers = [parseDOM(item, 'button') for item in results][0]
    addon.SourceSelect(players=avplayers, links=avlinks, title=name)
def ShindenGetVideoLink(url):
    """Resolve the direct player URL for a Shinden 'player_load' endpoint.

    Performs the initial player_load request, waits the server-enforced
    delay, then fetches player_show and extracts the embedded video URL:
    iframe src, anchor href, or a raw src="..." attribute, in that order.
    Returns '' when nothing could be extracted.
    """
    headers = {
        'Accept': '*/*',
        'Origin': 'https://shinden.pl',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.46 Safari/537.36',
        'DNT': '1',
    }
    if str(url).startswith("//"):
        url = "https://" + url
    session = requests.session()
    session.get(url, headers=headers, timeout=15)
    time.sleep(5)  # presumably the server rejects player_show issued too quickly - TODO confirm
    video = session.get(url.replace("player_load", "player_show") + "&width=508",
                        timeout=5).text
    video_url = ''
    try:
        video_url = parseDOM(video, 'iframe', ret='src')[0]
    except Exception:  # FIX: no bare except (would swallow SystemExit/KeyboardInterrupt)
        pass
    if not video_url:
        try:
            video_url = parseDOM(video, 'a', ret='href')[0]
        except Exception:
            pass
    if not video_url:
        try:
            video_url = re.findall(r'src="(.*?)"', video)[0]
        except Exception:
            pass
    if str(video_url).startswith("//"):
        video_url = "http:" + video_url
    return video_url
def ListTitles():
    """List episodes from the 'lista-odcinkow' table, marking filler entries."""
    url = params['url']
    section = params['section']
    name = params['name']
    thumb = params['img']
    page = requests.get(url, timeout=10).text
    table = parseDOM(page, 'table', attrs={'id': 'lista-odcinkow'})[0]
    rows = parseDOM(table, 'tr')
    for cells in (parseDOM(row, 'td') for row in rows if 'href' in row):
        number, link_cell, _s, _r, _t = cells
        ep_name = parseDOM(link_cell, 'a')[0]
        # Rows carrying a <span> are filler episodes.
        suffix = '[COLOR=green] Filler[/COLOR]' if '<span' in link_cell else ''
        label = number + ' ' + ep_name + suffix
        href = mainLink + re.sub('^/', '', parseDOM(link_cell, 'a', ret='href')[0])
        addon.addLink(label,
                      href,
                      mode='DBListLinks',
                      fanart=DBAllfanart,
                      thumb=thumb,
                      section='ListLinks',
                      subdir=name)
def Search():
    """Anime-Odcinki search: prompt for a query (or continue paging) and list results."""
    section = params['section']
    name = params['name']
    url = params['url']
    if section == 'search':
        keyb = xbmc.Keyboard('', "Wyszukiwarka anime")
        keyb.doModal()
        if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
            search = keyb.getText()
            url = url + '%s' % search.replace(" ", "+")
        else:
            # Dialog cancelled / empty query: fall back to the main listing.
            PageAnimeOdcinki(mainLink)
            return  # FIX: original fell through and queried the bare search URL
    elif section == 'nextpage':
        pass  # url already points at the next results page
    html = requests.get(url, timeout=15).text
    result = parseDOM(html, 'li', attrs={'class': 'search-result'})
    for item in result:
        nazwa = CleanHTML(str(parseDOM(item, 'a')[0]))
        link = str(parseDOM(item, 'a', ret='href')[0])
        plot = CleanHTML(str(parseDOM(item, 'p')[0]))
        addon.addDir(nazwa, link, mode='AOListEpisodes', thumb=fanartAodc,
                     plot=plot, fanart=custom_background, section='search',
                     subdir=name)
    if 'nextpostslink' in html:
        nextpage = parseDOM(html, 'a', attrs={'class': 'nextpostslink'},
                            ret='href')[0]
        addon.addDir('[I]następna strona[/I]', nextpage, mode='AOSearch',
                     thumb=nexticon, fanart=custom_background,
                     section='nextpage')
def ListEpisodes():
    """List episodes of an Anime-Odcinki title together with its poster/plot."""
    section = params['section']
    name = params['name']
    url = params['url']
    subdir = params['subdir']
    result = requests.get(url, timeout=15).text
    result = CleanHTML(result)
    results = parseDOM(result, 'section', attrs={'id': 'anime-header'})
    poster = parseDOM(results, 'img', ret='src')[0]
    link = parseDOM(results, 'a', ret='href')
    title = parseDOM(results, 'a')
    try:
        plot = re.findall('p><p>(.+?)</p>', result)[0]
        # FIX: the original tested `len(...) >= 0`, which is always true;
        # strip the inline <span> markup only when it is actually present
        # (re.sub was a no-op otherwise, so output is unchanged).
        if re.findall('<span', plot):
            plot = re.sub('<span(.+?)/span>', '', plot)
    except Exception:
        plot = ''
    for i in zip(title, link):
        addon.addLink(str(i[0]), str(i[1]), mode='AOListLinks',
                      section='links', thumb=str(poster), plot=str(plot),
                      fanart=custom_background, subdir=subdir)
def Kategorie():
    """List DramaQueen tag-cloud categories with their entry counts."""
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    rG = requests.get(url, headers=headersget, timeout=15).text
    result = parseDOM(rG, 'div', attrs={'class': 'tagcloud'})[0]
    links = parseDOM(result, 'a', ret='href')
    label = parseDOM(result, 'a')
    # First number found in each aria-label - presumably the item count
    # for the tag; TODO confirm against the live page.
    # FIX: raw string for the regex (non-raw '\d' is a DeprecationWarning).
    count = [
        re.findall(r'\d+', i)[0]
        for i in parseDOM(result, 'a', ret='aria-label')
    ]
    for item in zip(label, links, count):
        addon.addDir(str(item[0]),
                     str(item[1]),
                     mode=5,
                     fanart='',
                     plot='',
                     thumb='',
                     code='[B][COLOR %s]%s[/COLOR][/B]' %
                     ('green', str(item[2]) + ' pozycji'))
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def Gatunki():
    """Shinden genre filter: multiselect genres, build the query URL, list titles."""
    section = params['section']
    url = params['url']
    if section == 'gatunki':
        html = requests.get(url, timeout=10).text
        # Genre names with the inline icon markup stripped.
        tagname = [
            re.sub('<i(.+?)</i>', '', item)
            for item in parseDOM(html, 'a', attrs={'class': 'genre-item'})
        ]
        # Genre ids as the site encodes them: 'i' + data-id.
        tagcode = [
            'i' + item for item in parseDOM(
                html, 'a', attrs={'class': 'genre-item'}, ret='data-id')
        ]
        d = xbmcgui.Dialog()
        select = d.multiselect('Wybór Gatunku', tagname)
        if select is None:  # FIX: identity comparison instead of `== None`
            PageAnimeShinden()
            return
        seltags = [tagcode[idx] for idx in select]
        url = url + 'genres-type=all&genres=' + ';'.join(seltags)
    elif section == 'nextpage':
        pass  # url already points at the next page
    ListTitles(url)
def ListTitles():
    """List Anime-Odcinki titles for the chosen section (All/Aired/letter)."""
    section = params['section']
    name = params['name']
    url = params['url']
    html = CleanHTML(requests.get(url, timeout=15).text)
    if section == 'All':
        rows = parseDOM(html, 'tr', attrs={'class': 'list-item'})
        links = [parseDOM(row, 'a', ret='href')[0] for row in rows]
        titles = [parseDOM(row, 'a')[0] for row in rows]
    elif section == 'Aired':
        block = parseDOM(html, 'section',
                         attrs={'id': 'block-views-anime-emitowane-block'})
        links = parseDOM(block, 'a', ret='href')
        titles = parseDOM(block, 'a')
    else:
        # Any other section is an alphabetical letter carried in data-fl.
        rows = parseDOM(html, 'tr', attrs={'data-fl': str(section).lower()})
        links = [parseDOM(row, 'a', ret='href')[0] for row in rows]
        titles = [parseDOM(row, 'a')[0] for row in rows]
    for title, link in zip(titles, links):
        addon.addDir(str(title),
                     str(link),
                     mode='AOListEpisodes',
                     section=section,
                     thumb=str(fanartAodc),
                     fanart=custom_background,
                     subdir=name)
def ListEpisodes():
    """List DramaQueen episodes for a title, flagging entries still in
    translation or proofreading (those link back to the title page, mode=8)."""
    Logowanie(False)
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    name = params['name']
    thumb = params['img']
    url = params['url']
    rE = str(requests.get(url, headers=headersget, timeout=15).text)
    LoginCheck(rE)
    # Normalise the typographic dash and the space variant before parsing.
    rE = rE.replace('–', '-')
    rE = rE.replace(' ', ' ')
    result = parseDOM(rE, 'div', attrs={'class': 'togglecontainer ' + r'.+?'})[0]
    results = re.findall('av_toggle_section(.+?)<span', result)
    episodes = parseDOM(results, 'p')
    plot = parseDOM(rE, 'section', attrs={'class': 'av_textblock_section '})[1]
    if '<em>' in plot:
        plot = CleanHTML(parseDOM(plot, 'em')[0])
    else:
        plot = CleanHTML(parseDOM(plot, 'p')[0])
    fanart = ''
    inprogress = '[COLOR=red][I] w tłumaczeniu[/COLOR][/I]'
    incorrection = '[COLOR=red][I] korekta[/COLOR][/I]'
    for item in episodes:
        # FIX idiom: `in` instead of the dunder call item.__contains__(...).
        if 'tłumaczenie' in item:
            addon.addLink(str(inprogress), url, mode=8, fanart=(str(fanart)),
                          plot=(str(plot)), thumb=str(thumb))
        elif 'korekta' in item:
            addon.addLink(str(incorrection), url, mode=8, fanart=(str(fanart)),
                          plot=(str(plot)), thumb=str(thumb))
        else:
            addon.addLink(str(item), url, mode=3, fanart=(str(fanart)),
                          plot=(str(plot)), thumb=str(thumb))
def Gatunki():
    """Anime-Odcinki genre filter: pick genres via multiselect (or follow
    pagination) and list the matching titles."""
    section = params['section']
    name = params['name']
    url = params['url']
    if section == 'gatunki':
        result = requests.get(url, timeout=15).text
        result = parseDOM(result, 'div', attrs={'class': 'panel-body'})[0]
        taglist = [
            item for item in parseDOM(result, 'div',
                                      attrs={'class': r'.+?' + 'checkbox'})
            if len(item) > 0
        ]
        tagname = [CleanHTML(item) for item in parseDOM(taglist, 'label')]
        tagcat = parseDOM(taglist, 'input', ret='name')
        tagcode = ['=' + i for i in parseDOM(taglist, 'input', ret='value')]
        # Query fragments: "<input name>=<input value>".
        taglink = [str(cat) + str(code) for cat, code in zip(tagcat, tagcode)]
        d = xbmcgui.Dialog()
        select = d.multiselect('Wybór Gatunku', tagname)
        if select is None:  # FIX: identity comparison instead of `== None`
            PageAnimeOdcinki(mainLink)
            return
        seltags = [taglink[idx] for idx in select]
        url = url + '?' + '&'.join(seltags)
    elif section == 'nextpage':
        pass  # url already points at the next results page
    html = requests.get(url, timeout=15).text
    result = parseDOM(html, 'li', attrs={'class': 'search-result'})
    for item in result:
        nazwa = CleanHTML(str(parseDOM(item, 'a')[0]))
        link = str(parseDOM(item, 'a', ret='href')[0])
        plot = CleanHTML(str(parseDOM(item, 'p')[0]))
        addon.addDir(nazwa, link, mode='AOListEpisodes', thumb=fanartAodc,
                     plot=plot, fanart=custom_background, section='search',
                     subdir=name)
    if 'nextpostslink' in html:
        nextpage = parseDOM(html, 'a', attrs={'class': 'nextpostslink'},
                            ret='href')[0]
        addon.addDir('[I]następna strona[/I]', nextpage, mode='AOGatunki',
                     thumb=nexticon, fanart=custom_background,
                     section='nextpage')
def ListLinks():
    """Open source selection for an Anime-Odcinki episode's player controls."""
    section = params['section']
    name = params['name']
    url = params['url']
    subdir = params['subdir']
    html = requests.get(url, timeout=15).text
    control = parseDOM(html, 'div', attrs={'id': 'video-player-control'})[0]
    # Player labels: the mode divs with their inline <img> tag stripped.
    players = [
        re.sub('<img src(.+?)">', '', mode)
        for mode in parseDOM(control, 'div',
                             attrs={'class': 'video-player-mode'})
    ]
    # Player links: each mode's data-hash decoded by the addon helper.
    links = [
        encryptPlayerUrl(h)
        for h in parseDOM(control, 'div',
                          attrs={'class': 'video-player-mode'},
                          ret='data-hash')
    ]
    addon.SourceSelect(players=players, links=links, title=name, subdir=subdir)
def Browse_Seasons():
    """Browse wbijam seasons: submenu entries for 'polecane' titles, season
    headers for the rest (or list episodes directly when there is only one)."""
    url = params['url']
    name = params['name']
    section = params['section']
    page = params['page']
    img = params['img']
    subdir = params['subdir']
    pic = params['pic']
    if section == 'polecane':
        html = requests.get(url, timeout=15).text
        menu = parseDOM(html, 'ul', attrs={'class': 'pmenu'})[1]
        for entry in parseDOM(menu, 'li'):
            href = parseDOM(entry, 'a', ret='href')[0]
            label = parseDOM(entry, 'a')[0]
            if "Kolejno" in str(label):
                continue  # skip the "watch order" pseudo-entry
            addon.addDir(str(label),
                         url + str(href),
                         mode='List_Episodes',
                         isFolder=True,
                         thumb=str(img),
                         fanart=pic,
                         page=str(url),
                         section='polecane',
                         subdir=subdir + ' ' + label)
    elif section == 'other':
        html = requests.get(url, timeout=15).text
        season_headers = parseDOM(html, 'h1', attrs={'class': 'pod_naglowek'})
        if len(season_headers) > 1:
            for header in season_headers:
                addon.addDir(str(header),
                             url,
                             mode='List_Episodes',
                             isFolder=True,
                             thumb=str(img),
                             fanart=default_background,
                             page=str(header),
                             section='multi',
                             subdir=subdir + ' ' + str(header))
        elif len(season_headers) <= 1:
            List_Episodes()
def ListLinks():
    """Build the Shinden player list for an episode and open source selection.

    Only handled for section 'online'; player API links are signed with the
    _Storage.basic auth token scraped from the page.
    """
    name = params['name']
    url = params['url']
    section = params['section']
    subdir = params['subdir']
    if section != 'online':
        return
    Logowanie()
    cookie = cache.cache_get('shinden_cookie')['value']
    headersget.update({'Cookie': cookie})
    headers = headersget
    html = requests.get(url, headers=headers, timeout=15).text
    result = [item for item in parseDOM(html, 'tbody') if 'player' in item]
    results = parseDOM(result, 'tr')
    playerinfo = [
        re.findall('data-episode=\'(.+?)\' ', item) for item in results
    ]
    code = re.findall(r"_Storage\.basic.*=.*'(.*?)'", html)[0]
    playerdata = [json.loads(item[0]) for item in playerinfo]
    playerlink = []
    player = []
    for i in playerdata:
        # FIX: membership test replaces the `== ''` / `== None` pair.
        subs = ('' if i['lang_subs'] in ('', None)
                else ' SUB ' + i['lang_subs'])
        title = i['player'] + '[COLOR=green]%s[/COLOR]' % (
            ' ' + 'Audio' + ' ' + i['lang_audio'] + subs)
        player.append(title)
        ID = (i['online_id'])
        link = "https://api4.shinden.pl/xhr/%s/player_load?auth=%s" % (
            ID, code)
        playerlink.append(link)
    addon.SourceSelect(player, playerlink, name, subdir)
def Alfabetyczna():
    """List the Shinden alphabetical letter index as directories."""
    url = params['url']
    html = CleanHTML(requests.get(url, timeout=15).text)
    index = parseDOM(html, 'ul', attrs={'class': 'letter-list' + r'.+?'})[0]
    # Drop the r307 redirect marker from each letter link.
    hrefs = [
        str(h).replace('r307=1&', '')
        for h in parseDOM(index, 'a', ret='href')
    ]
    letters = parseDOM(index, 'a')
    for letter, href in zip(letters, hrefs):
        addon.addDir(str(letter),
                     url + str(href),
                     mode='SHListTitles',
                     section=str(letter),
                     thumb=str(Letter[str(letter)]),
                     fanart=custom_background)
def ListLinks():
    """List DB player sources for an episode, marking VIP-only ones as
    unsupported; green entries show subtitle/audio/quality details."""
    Logowanie()
    url = params['url']
    section = params['section']
    name = params['name']
    subdir = params['subdir']
    cookie = cache.cache_get('db_cookie')['value']
    headersget.update({'Cookie': cookie})
    headers = headersget
    html = requests.get(url, headers=headers, timeout=10).text
    table = parseDOM(html, 'table', attrs={'id': 'video-table'})[0]
    rows = parseDOM(table, 'tr', attrs={'title': 'Kliknij' + r'.+?'})
    playerlink = [
        mainLink + re.sub('^/', '', parseDOM(row, 'a', ret='href')[0])
        for row in rows
    ]
    player = []
    for row in rows:
        cells = parseDOM(row, 'td')
        label = parseDOM(row, 'a')[0]
        subs = re.sub('<span(.+?)span>', '', cells[2])
        audio = cells[1]
        quality = cells[4]
        if 'VIP' in label:
            entry = label + ' ' + '[COLOR=red] brak obsługi [/COLOR]'
        else:
            entry = label + ' ' + \
                '[COLOR=green] napisy %s - audio %s - %s [/COLOR]' % (
                    subs, audio, quality)
        player.append(entry)
    addon.SourceSelect(player, playerlink, name, subdir)
def Alfabetyczna(url):
    """List the Anime-Odcinki letter index with per-letter entry counts.

    NOTE: the `url` argument is immediately overwritten from params; the
    parameter is kept only for caller compatibility.
    """
    name = params['name']
    url = params['url']
    result = requests.get(url, timeout=15).text
    result = parseDOM(result, 'div', attrs={'id': 'letter-index'})[0]
    lista = re.findall(r'data-index.*?">\s(.+?)</a>\s\((.+?)\)\s', result)
    for litera in lista:
        # FIX: the original `if 'Anime' in name` had two byte-identical
        # branches - the dead conditional is collapsed to one call.
        addon.addDir(str(litera[0]),
                     url,
                     mode='AOListTitles',
                     section=str(litera[0])[0:1],
                     thumb=str(Letter[str(litera[0])[0:1]]),
                     fanart=custom_background,
                     code='[B][COLOR %s]%s[/COLOR][/B]' %
                     ('green', str(litera[1]) + ' pozycji'))
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def Pagewbijam(url):
    """Add a directory entry for each section header found on a wbijam page.

    Returns immediately for an empty url.
    """
    # FIX: guard BEFORE the network call - requests.get('') raises, so the
    # original post-request length check was unreachable.
    if (len(url) == 0):
        return
    result = requests.get(url, timeout=15).text
    # Drop menu/advert headers so they are not listed as sections.
    result = re.sub('>Menu główne', '', result)
    result = re.sub('>Reklama', '', result)
    data = [
        item for item in parseDOM(
            result, 'div', attrs={'class': 'pmenu_naglowek_' + r'.'})
        if len(item) > 0
    ]
    for name in data:
        addon.addDir(str(name),
                     url,
                     mode='Browse_Titles',
                     thumb=fanartWb,
                     fanart=default_background)
def KategorieLista():
    """List DramaQueen titles from a category slider: dramas as folders,
    films as playable links."""
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    page = CleanHTML(requests.get(url, headers=headersget, timeout=15).text)
    slider = parseDOM(page, 'div',
                      attrs={'class': 'avia-content-slider-inner'})[0]
    headings = parseDOM(slider, 'h3')
    titles = [parseDOM(h, 'a', ret='title')[0] for h in headings]
    images = parseDOM(slider, 'img', ret='src')
    hrefs = [parseDOM(h, 'a', ret='href')[0] for h in headings]
    for title, href, image in zip(titles, hrefs, images):
        if '/drama/' in str(href):
            addon.addDir(str(title),
                         str(href),
                         mode=2,
                         fanart=str(image),
                         thumb=str(image),
                         code='[B][COLOR %s]Drama[/COLOR][/B]' % 'green')
        elif '/film/' in str(href):
            addon.addLink(str(title),
                          str(href),
                          mode=3,
                          fanart=str(image),
                          thumb=str(image),
                          code='[B][COLOR %s]Film[/COLOR][/B]' % 'green')
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def scraper_add(url, name, poster):
    """Scrape DramaQueen metadata for *name* and insert it into the DramaQueen
    table.

    Fetches fanart/banner/plot/genre/year from the title page, then inserts
    a row unless one with the same name already exists; the DB write is
    retried while sqlite reports the database is locked.
    """
    db = database.connect(scraperFile)
    cur = db.cursor()
    try:
        data = requests.get(url, timeout=10).text
        fanart = re.findall(r'background-image: url\((.+?)\);', data)[1]
        banner = parseDOM(data, 'img', attrs={'itemprop': 'thumbnailUrl'},
                          ret='src')[0]
        plot1 = [
            item for item in parseDOM(
                data, 'section', attrs={'class': 'av_textblock_section '})
            if '<em>' in item
        ]
        plot2 = [
            item for item in parseDOM(
                data, 'section', attrs={'class': 'av_textblock_section '})
            if not '<em>' in item
        ]
        plot = ''  # FIX: keep `plot` bound even when neither branch assigns it
        # NOTE(review): `'<a>' in plot1` tests list MEMBERSHIP (plot1 is a
        # list), not substring presence - kept as-is to preserve behavior.
        if not '<a>' in plot1:
            plot = CleanHTML(parseDOM(plot1, 'em')[0])
        if '<strong>' in plot:
            try:
                plot = CleanHTML(parseDOM(plot2, 'p')[0])
            except Exception:  # FIX: no bare except
                pass
        details = GetDataBeetwenMarkers(data, 'Gatunki:', 'Upload:', True)[1]
        gen = [
            item[1] for item in re.findall('<a href(.+?)">(.+?)</a>', details)
            if '/gatunek/' in item[0]
        ]
        genre = ', '.join(gen)
        year = re.findall(r'[1-2][0-9]{3}', details)[0]
        while True:
            time.sleep(1)  # brief back-off between attempts while locked
            try:
                cur.execute("SELECT count(*) FROM DramaQueen WHERE name = ?",
                            (name, ))
                data = cur.fetchone()[0]
                if not data:
                    print('There is no component named %s' % name)
                    cur.execute(
                        "INSERT INTO DramaQueen (name, poster, plot, banner, fanart, genre, year) VALUES(?,?,?,?,?,?,?)",
                        (name, poster, plot, banner, fanart, genre, year))
                    db.commit()
                    break
                else:
                    print('Component %s found in rows' % (name))
                    break
            except sqlite3.OperationalError as e:
                print(e)
                continue
    except Exception as e:
        print(e)
        raise e
    finally:
        db.close()
def List_Episodes():
    """List wbijam episodes for the chosen section.

    Sections: 'polecane' (featured table with air dates), 'multi' (one
    season slice of a multi-season page, selected by the `page` header),
    and anything else (single-season page).
    """
    url = params['url']
    section = params['section']
    page = params['page']
    img = params['img']
    subdir = params['subdir']
    # Featured titles: one row per episode with link and air-date column.
    if section == 'polecane':
        result = requests.get(url).text
        result = parseDOM(result, 'table', attrs={'class': 'lista'})[0]
        result = parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
        link = [page + parseDOM(item, 'a', ret='href')[0] for item in result]
        # Episode label: the text between the <img> cell and the closing </a>.
        tytul = [
            str(parseDOM(item, 'img')[0]).split("</a>")[0] for item in result
        ]
        # Air date from the second centered cell.
        data = [
            parseDOM(item, 'td', attrs={'class': 'center'})[1]
            for item in result
        ]
        for item in zip(link, tytul, data):
            addon.addLink(str(item[1]),
                          str(item[0]),
                          mode='List_Links',
                          thumb=img,
                          fanart=default_background,
                          page=str(page),
                          section='polecane',
                          subdir=subdir,
                          code='[B][COLOR=blue]%s[/COLOR][/B]' % str(item[2]))
        xbmcplugin.addSortMethod(int(sys.argv[1]),
                                 sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                                 label2Mask='%P')
    # Remaining titles with several seasons on one page: cut the slice
    # between this season's header and the end of its table.
    elif section == 'multi':
        result = requests.get(url).text
        # NOTE(review): `slice` shadows the builtin of the same name.
        slice = GetDataBeetwenMarkers(result,
                                      '<h1 class="pod_naglowek">' + page,
                                      '</table>', False)[1]
        results = parseDOM(slice, 'tr', attrs={'class': 'lista_hover'})
        tytul = [
            str(parseDOM(item, 'img')[0]).split('</td>')[0]
            for item in results
        ]
        link = mainLink  # NOTE(review): assigned but never used in this branch
        for item in tytul:
            addon.addLink(str(item),
                          str(url),
                          mode='List_Links',
                          thumb=str(img),
                          fanart=default_background,
                          page=str(page),
                          section='multi',
                          subdir=subdir)
    # Remaining single-season titles.
    else:
        result = requests.get(url).text
        result = parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
        tytul = [
            str(parseDOM(item, 'img')[0]).split('</td>')[0] for item in result
        ]
        for item in tytul:
            addon.addLink(str(item),
                          str(url),
                          mode='List_Links',
                          thumb=str(img),
                          fanart=default_background,
                          page=str(page),
                          section='other',
                          subdir=subdir)
def _ResolvePlayerUrl(player_page_url):
    """Fetch a wbijam player page and return the embedded video URL.

    vk.com players are rebuilt from the odtwarzaj_vk span's rel/id
    attributes; every other player is taken from the iframe src.
    Raises on any fetch/parse failure (callers skip the entry).
    """
    html = requests.get(player_page_url).text
    if 'vk.com' in html:
        owner = parseDOM(html, 'span', attrs={'class': 'odtwarzaj_vk'},
                         ret='rel')[0]
        video = parseDOM(html, 'span', attrs={'class': 'odtwarzaj_vk'},
                         ret='id')[0]
        return 'https://vk.com/video' + owner + '_' + video
    return parseDOM(html, 'iframe', ret='src')[0]


def List_Links():
    """Collect playable links for a wbijam episode and open source selection.

    Sections: 'polecane' (one row per player with translator credit),
    'multi' (rows of the season slice matching the title) and 'other'
    (single-season page rows matching the title).
    """
    url = params['url']
    section = urllib.parse.unquote_plus(params['section'])
    page = urllib.parse.unquote_plus(params['page'])
    title = params['name']
    subdir = urllib.parse.unquote_plus(params['subdir'])
    if section == 'polecane':
        result = requests.get(url).text
        result = parseDOM(result, 'table', attrs={'class': 'lista'})
        result = parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
        player = [
            parseDOM(item, 'td', attrs={'class': 'center'})[2]
            for item in result
        ]
        tlumacz = [
            parseDOM(item, 'td', attrs={'class': 'center'})[3]
            for item in result
        ]
        kodlinku = [
            page + 'odtwarzacz-' + parseDOM(
                item, 'span', attrs={'class': 'odtwarzacz_link'},
                ret='rel')[0] + '.html' for item in result
        ]
        Player = []
        link = []
        for ply, tlum, player_page in zip(player, tlumacz, kodlinku):
            try:
                resolved = _ResolvePlayerUrl(player_page)
            except Exception:
                continue
            link.append(resolved)
            # FIX: append the label only when its link resolved - the
            # original built all labels up front, so one failed fetch
            # misaligned the player/link lists passed to SourceSelect.
            Player.append(ply + ' ' + '[COLOR %s]%s[/COLOR]' %
                          ('green', tlum))
        addon.SourceSelect(Player, link, title, subdir)
    elif section == 'multi':
        result = requests.get(url).text
        season = GetDataBeetwenMarkers(result,
                                       '<h1 class="pod_naglowek">' + page,
                                       '</table>', False)[1]
        results = [
            item
            for item in parseDOM(season, 'tr', attrs={'class': 'lista_hover'})
            if title in item
        ]
        kodlinku = parseDOM(results, 'span',
                            attrs={'class': 'odtwarzacz_link'}, ret='rel')
        link = []
        player = []
        for rel in kodlinku:
            try:
                link.append(
                    _ResolvePlayerUrl(mainLink + 'odtwarzacz-' + rel + '.html'))
                player.append('Oglądaj')
            except Exception:
                continue
        addon.SourceSelect(players=player, links=link, title=title,
                           subdir=subdir)
    elif section == 'other':
        result = requests.get(url).text
        results = [
            item
            for item in parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
            if title in item
        ]
        kodlinku = parseDOM(results, 'span',
                            attrs={'class': 'odtwarzacz_link'}, ret='rel')
        link = []
        player = []
        for rel in kodlinku:
            try:
                link.append(
                    _ResolvePlayerUrl(mainLink + 'odtwarzacz-' + rel + '.html'))
                player.append('Oglądaj')
            except Exception:
                continue
        addon.SourceSelect(players=player, links=link, title=title,
                           subdir=subdir)
def Browse_Titles():
    """Browse wbijam titles listed under the menu header *name*.

    Featured entries are enriched with scraper metadata (and may queue
    background scraping threads); plain entries fetch their poster straight
    from the subpage.
    """
    url = params['url']
    name = params['name']
    html = requests.get(url, timeout=15).text
    if name in html:
        mark1 = '>' + name + '</div>'
        mark2 = '</ul>'
        data = GetDataBeetwenMarkers(html, mark1, mark2, False)[1]
        data = re.findall('<a href="(.+?)"(.+?)">(.+?)</a></li>', data)
        data.sort()
        thread = False
        # Featured titles (anchors with an extra attribute group).
        if len(data) > 0:
            threads = []
            for item in data:
                if 'Anime online' in str(item[2]):
                    continue
                elif 'inne.wbijam' in str(item[0]).lower():
                    continue
                link = item[0]
                tytul = item[2]
                # Scraper exception: site title differs from the DB title.
                if tytul == 'Decadence':
                    tytul = 'Deca-dence'
                title, poster, plot, fanart, genre, year, thread = \
                    scraper.scraper_check(tytul, type='TV')
                if title == '':
                    title = tytul
                if plot == '':
                    plot = ''
                if poster == '':
                    poster, fanart = scraper.Scrap(tytul, type='TV',
                                                   poster=True)
                if fanart == '':
                    fanart = default_background
                if genre == '':
                    genre = ''
                if year == '':
                    year = ''
                if thread:
                    threads.append(thread)
                addon.addDir(title,
                             link,
                             mode='Browse_Seasons',
                             thumb=str(poster),
                             fanart=str(fanart),
                             section='polecane',
                             page=str(url),
                             subdir=title,
                             genre=genre,
                             year=year,
                             plot=plot)
            if thread:
                t = threading.Thread(target=ScrapInfo, args=(threads, ))
                # NOTE(review): a freshly created thread is never alive, so
                # this always takes the start() branch; kept for parity.
                if t.is_alive():
                    t.join()
                else:
                    t.start()
        # Plain titles: pull the poster from each subpage.
        elif len(data) == 0:
            data2 = GetDataBeetwenMarkers(html, mark1, mark2, False)[1]
            data2 = re.findall('<a href="(.+?)">(.+?)</a></li>', data2)
            data2.sort()
            for item in data2:
                link = url + item[0]
                # FIX: local was named `set`, shadowing the builtin.
                subpage = requests.get(link, timeout=15).text
                image = parseDOM(
                    [i for i in parseDOM(subpage, 'center') if 'img' in i][0],
                    'img', ret='src')[0]
                title = item[1]
                addon.addDir(title,
                             link,
                             mode='Browse_Seasons',
                             thumb=url + str(image),
                             fanart=default_background,
                             section='other',
                             page=str(url),
                             subdir=title)
def ListTitles(url=''):
    """List Shinden series for the current page, scraping per-title metadata.

    When *url* is empty it is taken from params. Adds a "next page" entry
    when pagination is present (with the r307 redirect marker stripped).
    """
    if url == '':
        url = params['url']
    section = params['section']
    html = requests.get(url, timeout=15).text
    result = str(
        parseDOM(html, 'section', attrs={'class': 'anime-list box'})[0])
    results = [
        item for item in parseDOM(result, 'ul', attrs={'class': 'div-row'})
        if 'h3' in item
    ]
    for item in results:
        link = mainLink + re.sub('/series/', 'series/',
                                 parseDOM(item, 'a', ret='href')[1])
        obraz = mainLink + re.sub('/res/', 'res/',
                                  parseDOM(item, 'a', ret='href')[0])
        title = parseDOM(item, 'a')[1]
        title = title.replace('<em>', '')
        title = title.replace('</em>', '')
        try:
            datasite = requests.get(link, timeout=10).text
            plotdata = parseDOM(datasite, 'div',
                                attrs={'id': 'description'})[0]
            plot = CleanHTML(parseDOM(plotdata, 'p')[0])
        except Exception:  # FIX: no bare except
            plot = ''
        # NOTE(review): the two blocks below reuse `datasite` from the try
        # above; when that fetch failed, the NameError is caught here and the
        # field defaults to '' (same net effect as the original bare excepts).
        try:
            genredata = [
                item for item in parseDOM(datasite, 'tr')
                if 'Gatunki' in item
            ][0]
            genre = ', '.join(parseDOM(genredata, 'a'))
        except Exception:
            genre = ''
        try:
            yeardata = parseDOM(datasite, 'section',
                                attrs={'class': 'title-small-info'})[0]
            year = re.findall(r'[1-2][0-9]{3}', yeardata)[0]
        except Exception:
            year = ''
        addon.addDir(str(title),
                     link,
                     mode='SHListEpisodes',
                     section='episodes',
                     thumb=str(obraz),
                     fanart=custom_background,
                     subdir=str(title),
                     plot=str(plot),
                     genre=str(genre),
                     year=str(year))
    try:
        # FIX: local was named `next`, shadowing the builtin.
        next_href = parseDOM(html, 'a', attrs={'rel': 'next'}, ret='href')[0]
        if len(next_href) > 0:
            nextpage = mainLink + re.sub('/', '', next_href)
            nextpage = CleanHTML(nextpage)
            if '&r307=1' in nextpage:
                nextpage = str(nextpage).replace('&r307=1', '')
            elif 'r307=1' in nextpage:
                nextpage = str(nextpage).replace('r307=1', '')
            addon.addDir(
                '[I]następna strona[/I]',
                str(nextpage),
                mode='SHListTitles',
                section='nextpage',
                thumb=str(nexticon),
                fanart=custom_background,
            )
    except Exception:
        pass
def Szukaj():
    """DramaQueen search: prompt for a query and list matching dramas/films."""
    url = params['url']
    keyb = xbmc.Keyboard('', "Wyszukiwarka")
    keyb.doModal()
    if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
        search = keyb.getText()
        url = url + '%s' % search.replace(" ", "+")
    else:
        # Dialog cancelled / empty query: back to the category view.
        CATEGORIES(False)
        return  # FIX: original fell through and queried the bare search URL
    html = requests.get(url, timeout=15).text
    result = str(parseDOM(html, 'main', attrs={'role': 'main'})[0])
    results = [CleanHTML(item) for item in parseDOM(result, 'h2')]
    # Category headers to skip; only title links are listed.
    excludelist = ['Japońsk', 'Koreańsk', 'Pozostałe']
    includelist = ['/drama/', '/film/']
    for item in results:
        if any(exclude in item for exclude in excludelist):
            continue
        elif any(include in item for include in includelist):
            Title = parseDOM(item, 'a')[0]
            link = parseDOM(item, 'a', ret='href')[0]
            data = requests.get(link, timeout=10).text
            title, poster, plot, banner, fanart, genre, year, thread = \
                dqscraper.scraper_check(Title, link, poster='')
            # Fill any fields the scraper cache could not provide.
            if fanart == '':
                fanart = re.findall(r'background-image: url\((.+?)\);',
                                    data)[1]
            if poster == '':
                poster = parseDOM(data, 'img',
                                  attrs={'itemprop': 'thumbnailUrl'},
                                  ret='src')[0]
            if plot == '':
                plot = parseDOM(data, 'em')[0]
                plot = CleanHTML(plot)
            if title == '':
                title = Title
            if '/drama/' in item:
                addon.addDir(str(title), str(link), mode=2,
                             fanart=str(fanart), thumb=str(poster),
                             poster=str(poster), plot=str(plot),
                             code='[B][COLOR=green]drama[/COLOR][/B]',
                             genre=str(genre), year=str(year))
            else:
                addon.addLink(str(title), str(link), mode=3,
                              fanart=str(fanart), thumb=str(poster),
                              poster=str(poster), plot=str(plot),
                              code='[B][COLOR=green]film[/COLOR][/B]',
                              genre=str(genre), year=str(year))
        else:
            continue
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def ListEpisodes():
    """List Shinden episodes of a series, marking unavailable ones offline."""
    section = params['section']
    url = params['url'] + '/all-episodes'
    thumb = params['img']
    subdir = params['subdir']
    Logowanie()
    cookie = cache.cache_get('shinden_cookie')['value']
    headersget.update({'Cookie': cookie})
    headers = headersget
    html = requests.get(url, headers=headers, timeout=15).text
    result = parseDOM(html, 'tbody',
                      attrs={'class': 'list-episode-checkboxes'})[0]
    results = parseDOM(result, 'tr')
    epNo = [parseDOM(item, 'td')[0] for item in results]
    epTitle = [
        parseDOM(item, 'td', attrs={'class': 'ep-title'})[0]
        for item in results
    ]
    # FontAwesome icon name encodes availability ('check' = online).
    epstatus = [
        re.findall('<i class="fa fa-fw fa-(.+?)"></i>', item)[0]
        for item in results
    ]
    epDate = [
        parseDOM(item, 'td', attrs={'class': 'ep-date'})[0]
        for item in results
    ]
    link = [
        mainLink + re.sub('^/', '', parseDOM(item, 'a', ret='href')[0])
        for item in results
    ]
    for ep in zip(epNo, epTitle, epDate, link, epstatus):
        code = '[B][COLOR=blue]%s[/COLOR][/B]' % (str(ep[2]))
        if str(ep[4]) == 'check':
            title = str(ep[0]) + ' : ' + str(ep[1])
            section = 'online'
        else:
            # FIX: the 'times' elif and the fallback else were identical;
            # any status other than 'check' means the episode is offline.
            title = str(ep[0]) + ' ' + '[COLOR=red] offline [/COLOR]'
            section = 'offline'
        addon.addLink(title,
                      str(ep[3]),
                      mode='SHListLinks',
                      fanart=str(thumb),
                      thumb=str(thumb),
                      section=section,
                      subdir=subdir,
                      code=code)
    xbmcplugin.addSortMethod(int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE,
                             label2Mask='%P')
def scraper_add(name, type):
    """Look *name* up on Shinden and insert its metadata into AnimeOtaku.

    `type` narrows the Shinden search by series type.
    NOTE(review): the parameter shadows the `type` builtin; kept since
    callers may pass it by keyword.
    """
    url = 'https://shinden.pl/series?search='
    url = url + '%s' % name.replace(" ", "+") + '&series_type[0]={}'
    url = url.format(type)
    mainLink = 'https://shinden.pl/'
    db = database.connect(scraperFile)
    cur = db.cursor()
    try:
        html = requests.get(url, timeout=15).text
        result = str(
            parseDOM(html, 'section', attrs={'class': 'anime-list box'})[0])
        # Only the first search hit is used ([0]).
        results = [
            item for item in parseDOM(result, 'ul', attrs={'class': 'div-row'})
            if 'h3' in item
        ][0]
        link = mainLink + re.sub('/series/', 'series/',
                                 parseDOM(results, 'a', ret='href')[1])
        obraz = mainLink + re.sub('/res/', 'res/',
                                  parseDOM(results, 'a', ret='href')[0])
        # Placeholder artwork means Shinden has no real poster; fall back
        # to the external scraper for both images.
        empty = ['placeholders', 'javascript:void']
        if any(i in obraz for i in empty):
            poster, fanart = Scrap(name, type, poster=True)
        else:
            fanart = Scrap(name, type, poster=False)
            poster = obraz
        try:
            datasite = requests.get(link, timeout=10).text
            plotdata = parseDOM(datasite, 'div',
                                attrs={'id': 'description'})[0]
            plot = CleanHTML(parseDOM(plotdata, 'p')[0])
        except:
            plot = ''
        try:
            genredata = [
                item for item in parseDOM(datasite, 'tr')
                if 'Gatunki' in item
            ][0]
            genre = ', '.join(parseDOM(genredata, 'a'))
        except:
            genre = ''
        try:
            yeardata = parseDOM(datasite, 'section',
                                attrs={'class': 'title-small-info'})[0]
            year = re.findall(r'[1-2][0-9]{3}', yeardata)[0]
        except:
            year = ''
        # Insert unless a row with this name exists; retried while sqlite
        # reports the database as locked/busy.
        while True:
            time.sleep(1)
            try:
                cur.execute("SELECT count(*) FROM AnimeOtaku WHERE name = ?",
                            (name, ))
                data = cur.fetchone()[0]
                if not data:
                    print('There is no component named %s' % name)
                    cur.execute(
                        "INSERT INTO AnimeOtaku (name, poster, plot, fanart, genre, year) VALUES(?,?,?,?,?,?)",
                        (name, poster, plot, fanart, genre, year))
                    db.commit()
                    break
                else:
                    print('Component %s found in rows' % (name))
                    break
            except sqlite3.OperationalError as e:
                print(e)
                continue
    except Exception as e:
        raise e
    finally:
        db.close()
def ListTitles():
    """List DramaQueen titles from the front-page grid.

    Metadata comes from the dqscraper cache when available, otherwise from
    the page itself; missing entries may queue background scraping threads.
    Dramas become folders (mode=2), films become playable links (mode=3).
    """
    cookie = cache.cache_get('dramaqueen_cookie')['value']
    headersget.update({'Cookie': cookie})
    url = params['url']
    html = requests.get(url, headers=headersget, timeout=15).text
    html = CleanHTML(html)
    result = parseDOM(html, 'div', attrs={'id': 'av_section_1'})[0]
    # One match per grid column.
    results = re.findall(
        'flex_column ' + r'.+?' + 'av_one_fourth(.+?)</div></div></div>',
        result)
    titles = re.findall('><p>(.+?)</p>', result)
    linki = [item for item in parseDOM(results, 'a', ret='href')]
    Plot = re.findall('/p>[\s,\S,.]<p>(.+?)</p>', result)
    obrazy = parseDOM(results, 'img', ret='src')
    threads = []
    for item in zip(linki, titles, obrazy, Plot):
        title, poster, plot, banner, fanart, genre, year, thread = \
            dqscraper.scraper_check(item[1], item[0], item[2])
        # Fall back to page-scraped values for anything the cache lacked.
        if title == '':
            title = item[1]
        if plot == '':
            plot = item[3]
        if poster == '':
            poster = item[2]
        if banner == '':
            banner = item[2]
        if fanart == '':
            fanart = item[2]
        if genre == '':
            genre = ''
        if year == '':
            year = ''
        if thread:
            threads.append(thread)
        if str(item[0]).__contains__('/drama/'):
            addon.addDir(str(title),
                         str(item[0]),
                         mode=2,
                         plot=(str(plot)),
                         fanart=(str(fanart)),
                         isFolder=True,
                         thumb=(str(poster)),
                         banner=(str(banner)),
                         genre=str(genre),
                         year=str(year),
                         section='')
        elif str(item[0]).__contains__('/film/'):
            addon.addLink(str(title),
                          str(item[0]),
                          mode=3,
                          plot=str(plot),
                          fanart=str(fanart),
                          thumb=str(poster),
                          banner=str(banner))
    # NOTE(review): `thread` here is the value left over from the last loop
    # iteration (NameError if the grid was empty) - presumably intentional
    # "any thread was queued" check; TODO confirm.
    if thread:
        t = threading.Thread(target=ScrapInfo, args=(threads, ))
        # NOTE(review): a freshly created thread is never alive, so this
        # always takes the start() branch.
        if t.is_alive():
            t.join()
        else:
            t.start()