def buildCategories():
    categories = ['All']
    homeurl = 'http://www.washingtonpost.com/posttv'
    homehtml = getUrl(homeurl)
    soup = BeautifulSoup(homehtml)
    results = []
    for div in soup.findAll("li", {"class": re.compile("livenav")}):
        print div
        for links in div.findAll('a'):
            if links.has_key('href'):
                link = links.get('href')
        for titles in div.findAll("span"):
            title = titles.string
            title = title.replace(" ", "")
        results.append((title, link))
        categories.append(title)
    for i in range(0, len(categories)):
        if categories[i] == 'All':
            catName, catSlug = 'All', 'videos'
        else:
            catName, catSlug = categories[i], categories[i].replace(' ', '-').lower()
        callback = 'plugin://plugin.video.posttv?category=' + catSlug
        li = xbmcgui.ListItem(catName)
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=callback, listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def getTVshows(url, order=None):
    progress = xbmcgui.DialogProgress()
    progress.create('Progress', 'Please wait...')
    progress.update(1, "", "Loading list - 1%", "")
    div = BeautifulSoup(http_req(url)).find("div", {"id": "tab1"})
    if not order:
        tvs = div.findAll("a")
    else:
        tvs = [s.parent for s in div.findAll("a", text=re.compile(r"^" + order + ".+?$"))]
    current = 0
    total = len(tvs)
    while current <= total - 1:
        title = htmlFilter(tvs[current].text)
        link = urlFilter(tvs[current]['href'])
        addDir(title, link, 2)
        if progress.iscanceled():
            sys.exit()
        percent = int(((current + 1) * 100) / total)
        message = "Loading list - " + str(percent) + "%"
        progress.update(percent, "", message, "")
        current += 1
    progress.close()
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def getMovieLens(url):
    req = urllib2.Request(url)
    req.add_header('User-Agent', "%s %s" % (sys.modules["__main__"].__plugin__,
                                            sys.modules["__main__"].__version__))
    response = urllib2.urlopen(req)
    soup = BeautifulSoup(response)
    mySpans = soup.findAll('span', attrs={"class": "movieTitle"})
    ##mySpans = mySpans[1], mySpans[2]
    mydirs = list()
    pDialog = xbmcgui.DialogProgress()
    ret = pDialog.create('XBMC', 'Initializing script...')
    i = 0.0
    total = len(mySpans)
    for span in mySpans:
        i += 1
        percent = int((i * 100) / total)
        pDialog.update(percent, 'Searching ' + span.a.string, str(int(i)) + '/' + str(total))
        s = span.a.string
        year = s[len(s) - 7:]
        year = year.replace('(', '').replace(')', '')
        year = year.strip()
        s = s.split('(', 1)[0].strip()
        s = s.replace(', The', '')
        #print s
        xbmc.log('s=%s' % s)
        dirs = getter.searchDirs(s + ' ' + year)
        if dirs:
            for d in dirs:
                id = d.getElementsByTagName('id').item(0).firstChild.data
                name = d.getElementsByTagName('name').item(0).firstChild.data
                date = d.getElementsByTagName('date').item(0).firstChild.data
                thumb = d.getElementsByTagName('thumb').item(0).firstChild.data
                url = sys.argv[0] + '?action=files&did=' + id
                clean = CleanFileName(name, False)
                clean = clean.replace('(', '')
                clean = clean.replace('[', '')
                clean = clean.replace(']', '')
                clean = clean.replace(')', '')
                if s.lower() in clean.lower() and year in name:
                    ##xbmc.log('name=%s' % name)
                    mydirs.append(d)
        if pDialog.iscanceled():
            print 'Canceled search'
            pDialog.close()
            return
    pDialog.close()
    return mydirs
def load_events(sortByStatus):
    response = PluginUtils.do_request(LOLEVENTURL)
    if response is None:
        return None
    events = []
    # Now lets parse results
    decoded_data = json.load(response)
    root = decoded_data['data']
    LoLEvent = namedtuple('LoLEvent', 'title status eventId imageUrl')
    # For Each Item in Children
    for post in root['children']:
        html = post['data']['selftext_html']
        if html is not None:
            soup = BeautifulSoup(PluginUtils.unescape(html))
            imgUrl = ''
            link = soup.find('a', href='#EVENT_PICTURE')
            if link is not None:
                imgUrl = link['title']  # the image URL is carried in the title attribute
            status = 99  # Using numbers for status so we can easily sort by this
            # link_flair_css_class: "ongoing"
            # link_flair_css_class: "finished"
            # link_flair_css_class: "twitchongoing"
            # link_flair_css_class: "featured"
            # link_flair_css_class: null
            flair_css = post['data']['link_flair_css_class']
            if flair_css is not None:
                if flair_css.lower() == FEATURED_STRING:
                    status = 0
                if flair_css.lower() == ACTIVE_STRING:
                    status = 1
                if flair_css.lower() == FINISHED_STRING:
                    status = 2
            childEvent = LoLEvent(title=post['data']['title'],
                                  status=status,
                                  eventId=post['data']['id'],
                                  imageUrl=imgUrl)
            events.append(childEvent)
    if sortByStatus:
        # sort
        return sorted(events, key=attrgetter('status'))
    else:
        return events
def parse_encrypted_url_response(stream_type, response_xml):
    soup = BeautifulSoup(response_xml)
    raw = soup.find('path')
    if stream_type == StreamTypes.RTMP:
        result = raw.getText()
    elif stream_type == StreamTypes.LIVE:
        processed = raw.getText()[11:]  # Strip the 11-character 'adaptive://' scheme from the start
        host = processed[:processed.find('/')]
        url = processed[processed.find('/'):]
        result = 'http://' + host + '/play?url=' + url
    return result
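# A minimal illustration of the LIVE branch above, using a hypothetical host and
# path (assumption: the feed's <path> carries an 11-character 'adaptive://' scheme,
# which is what the [11:] slice strips). Defined for illustration, never called.
def _demo_parse_live_path():
    processed = 'adaptive://cdn.example.com/live/game1'[11:]
    host = processed[:processed.find('/')]
    url = processed[processed.find('/'):]
    assert 'http://' + host + '/play?url=' + url == 'http://cdn.example.com/play?url=/live/game1'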
def get_lcs_standings(teamName):
    # This method loads the latest standings from the Gamepedia server
    url = ''
    if teamName in TEAMS_EU:
        url = PluginUtils.unescape(PluginUtils.get_string(30104))
    if teamName in TEAMS_NA:
        url = PluginUtils.unescape(PluginUtils.get_string(30103))
    if url != '':
        response = PluginUtils.do_request(url)
        if response is not None:
            # Lets process the html
            # decoded_data = json.load(response)
            soup = BeautifulSoup(response)
            tables = soup.findAll('table')
            if tables is not None:
                for table in tables:
                    # We have the table, now lets try and get the right row
                    rows = table.find('tbody').findAll('tr')
                    if rows is not None:
                        for idx, row in enumerate(rows):
                            columns = row.findAll('td')
                            if columns is not None:
                                if columns[2] is not None:
                                    if teamName in TEAMS_EU:
                                        if columns[2].find('a').text.lower() == TEAMS_EU[teamName].lower():
                                            return {'standing': idx + 1,
                                                    'record': columns[3].find('span').text + "W-" +
                                                              columns[4].find('span').text + "L"}
                                    if teamName in TEAMS_NA:
                                        if columns[2].find('a').text.lower() == TEAMS_NA[teamName].lower():
                                            return {'standing': idx + 1,
                                                    'record': columns[3].find('span').text + "W-" +
                                                              columns[4].find('span').text + "L"}
    return None
def recentlyAdded(cat):
    html = http_req(siteUrl)
    soup = BeautifulSoup(html).findAll('div', {'id': 'tab1'})
    if cat == 'tvshows':
        soup = soup[0]
        param = 'seriale'
    elif cat == 'movies':
        soup = soup[1]
        param = 'filme'
    results = soup.findAll('a', href=re.compile(param), limit=20)
    total = len(results)
    for a in results:
        ep_year = a.parent.parent.findAll('div')[1].text.strip()
        title = htmlFilter(a.text)
        url = urlFilter(a['href'])
        if cat == 'tvshows':
            eps = re.search(r'S(\d+)E(\d+-?\d*)', ep_year)
            season = eps.group(1) if eps else ''
            episode = eps.group(2) if eps else ''
            name = '%s %sx%s' % (title, season, episode)
            addDir(name, url, 8, "", title, season, episode, folder=False, totalItems=total)
        elif cat == 'movies':
            year = re.search(r'(\d{4})', ep_year)
            year = year.group(1) if year else 'unknown'
            name = '%s (%s)' % (title, year)
            addDir(name, url, 8, "", name, folder=False, totalItems=total)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
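# A small sketch of what the episode regex above extracts; the sample label is
# hypothetical. The pattern accepts single and double episodes ('S02E13', 'S02E13-14').
def _demo_episode_regex():
    m = re.search(r'S(\d+)E(\d+-?\d*)', 'Some Show S02E13-14')
    assert m.group(1) == '02'
    assert m.group(2) == '13-14'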
def scrapelinks(html):
    soup = BeautifulSoup(html)
    results = []
    for div in soup.findAll("div", {"class": "search-results-slide-item-cont"}):
        for links in div.findAll('a'):
            if links.has_key('href'):
                if '/posttv/c/video/' in links['href']:
                    link = links.get('href')
                    for images in links.findAll('div', {"class": "search-results-slide-image image-lazy-background"}):
                        image = images.get('data-image-src')
                    for titles in div.findAll("div", {"class": "search-results-slide-caption"}):
                        title = titles.string
                        title = title.replace(" ", "")
                    results.append((title, link, image))
    return results
def getFilmes(url):
    link = openURL(url)
    link = unicode(link, 'utf-8', 'ignore')
    soup = BeautifulSoup(link)
    conteudo = soup("div", {"class": "ib-miniaturas lista-miniaturas"})
    filmes = conteudo[0]("div", {"class": "ib-miniatura"})
    totF = len(filmes)
    for filme in filmes:
        titF = filme.img["alt"].encode('utf-8', 'ignore')
        titF = titF.replace('Assistir ', '').replace('Filme ', '')
        urlF = filme.a["href"].encode('utf-8', 'ignore')
        imgF = filme.img["src"].encode('utf-8', 'ignore')
        addDirF(titF, urlF, 100, imgF, False, totF)
    try:
        proxima = re.findall('link rel="next" href="(.*?)"', link)[0]
        addDir('Próxima Página >>', proxima, 20, artfolder + 'proxima.png')
    except:
        pass
    setViewFilmes()
def htmlFilter(htmlstring, trimspaces=False):
    hex_entity_pat = re.compile('&#x([^;]+);')
    hex_entity_fix = lambda x: hex_entity_pat.sub(lambda m: '&#%d;' % int(m.group(1), 16), x)
    htmlstring = str(BeautifulSoup(hex_entity_fix(htmlstring),
                                   convertEntities=BeautifulSoup.ALL_ENTITIES))
    if trimspaces:
        htmlstring = "".join(line.strip() for line in htmlstring.split("\n"))
    return htmlstring
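# Usage sketch for htmlFilter (illustrative inputs, not from any plugin): hex
# entities are first rewritten to decimal so BeautifulSoup resolves them along
# with named entities; trimspaces collapses per-line whitespace.
def _demo_htmlFilter():
    assert htmlFilter('Tom &#x26; Jerry &amp; friends') == 'Tom & Jerry & friends'
    assert htmlFilter('  a  \n  b  ', trimspaces=True) == 'ab'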
def getEpisodios(name, url, iconimage):
    titX = name.split(' - ')[0]
    temp = name[-1:]
    link = openURL(url)
    soup = BeautifulSoup(link)
    cont = soup("div", {"id": "seasons"})
    eps = cont[0]("li")
    totE = len(eps)
    for ep in eps:
        epX = re.findall('<div class="numerando">(.*?) x (.*?)</div>', str(ep))[0]
        if str(epX[0]) == str(temp):
            seaE = epX[0]
            epE = epX[1]
            seaepE = seaE + "x" + epE
            titE = titX + " - " + seaepE + " - " + ep.a.text.encode('utf-8', 'ignore')
            urlE = ep.a["href"]
            imgE = iconimage
            addDirE(titE, urlE, 100, imgE, False, totE)
    if getVis == 'true':
        setViewEps()
def listMovies(url):
    progress = xbmcgui.DialogProgress()
    progress.create(localise(1006), localise(1007))
    progress.update(1, "", progressReport(1), "")
    list = []
    #TODO: caching
    movieList = BeautifulSoup(httpReq(url)).find("div", {"class": "categoryThumbnailList"}).contents
    total = len(movieList)
    current = 0
    for movie in movieList:
        link = movie.next
        url = link['href']
        title = link['title']
        title = title[:title.find("<")]
        img = link.find("img")
        if img:
            img = img['src']
        addDir(title, url, 8, img, folder=False)
        if progress.iscanceled():
            sys.exit()
        current += 1
        percent = int((current * 100) / total)
        progress.update(percent, "", progressReport(percent), "")
    progress.close()
    xbmcplugin.endOfDirectory(addonHandle)
def playMovie(url, name, thumb):
    movies = BeautifulSoup(httpReq(url)).findAll("div", {"class": "btn"})
    for movie in movies:
        if movie.text.lower().find("vezi film") > -1:
            print movie
            url = None
            while url == None:
                movie = movie.next
                try:
                    if movie.name == "a":
                        break
                except:
                    #movie doesn't have field name (string)
                    continue
            break
    url = movie['href']
    url = yt.getYoutubeMovie(url)
    win = xbmcgui.Window(10000)
    win.setProperty('cinepub.playing.title', name.lower())
    item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=thumb)
    item.setInfo(type="Video", infoLabels={"title": name})
    item.setPath(url)
    xbmcplugin.setResolvedUrl(addonHandle, True, item)
    return True
def getSeries(url):
    link = openURL(url)
    soup = BeautifulSoup(link)
    cont = soup("div", {"class": "item_1 items"})
    series = cont[0]("div", {"class": "item"})
    totS = len(series)
    for serie in series:
        titS = serie.img["alt"].replace('–', '-').encode('utf-8', 'ignore')
        urlS = serie.a["href"]
        imgS = serie.img["src"]
        if getPlot == 'true':
            plotS = re.findall('<span class="ttx">(.*?)</span>', str(serie),
                               re.MULTILINE | re.DOTALL)[0]
            plotS = plotS.replace('<div class="degradado"></div>', '').replace(
                'Sinopse e detalhes ' + titS, '').strip()
        else:
            plotS = ''
        addDirS(titS, urlS, 21, imgS, True, totS, plotS)
    try:
        proxima = re.findall("<link rel='next' href='(.*?)' />", link)[0]
        addDir('Próxima Página >>', proxima, 20, artfolder + 'proxima.png')
    except:
        pass
    if getVis == 'true':
        setViewSeries()
def MOVIES(url, order=None):
    if order == 'year':
        div = BeautifulSoup(http_req(url)).findAll("div", {"id": "filtre"})[1].findAll("a", attrs={"class": None})
        for a in div:
            addDir(a.text, moviesUrl + a['href'], 9, MoviesIcon)
    elif order == 'genre':
        div = BeautifulSoup(http_req(url)).find("div", {"id": "filtre"}).findAll("a", attrs={"class": None})
        for a in div:
            addDir(plugin.ro2en(a.text), moviesUrl + a['href'], 9, MoviesIcon)
    else:
        addDir('Search', url, 14, MoviesIcon)
        addDir('Last Added', url, 6, MoviesIcon)
        addDir('By Year', url, 11, MoviesIcon)
        addDir('By Genre', url, 12, MoviesIcon)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def getMovies(url):
    progress = xbmcgui.DialogProgress()
    progress.create('Progress', 'Please wait...')
    progress.update(1, "", "Loading list - 1%", "")
    soup = BeautifulSoup(http_req(url))
    pages = str(soup.find("div", {"id": "numarpagini"}))
    pages = max(int(x) for x in re.findall(r'([\d]+)</a>', pages))
    page = int(re.search('pagina=(\d+)', url).group(1))
    div = soup.find("div", {"id": "content"})
    links = div.findAll("a", {"class": "link"})
    thumbs = re.findall(r'<img src="../(.+?)"', str(div))
    years = re.findall(r'Aparitie: ?(\d+)', str(div))
    total = len(links)
    current = 0
    while current <= total - 1:
        year = re.search(r'(\d{4})', years[current])
        year = year.group(1) if year else 'unknown'
        name = "%s (%s)" % (htmlFilter(links[current].text), year)
        link = urlFilter(links[current]['href'])
        thumbnail = urlFilter(thumbs[current])
        addDir(name, link, 8, thumbnail, name, folder=False)
        if progress.iscanceled():
            sys.exit()
        percent = int(((current + 1) * 100) / total)
        message = "Loading list - " + str(percent) + "%"
        progress.update(percent, "", message, "")
        current += 1
    if not page == pages:
        url = re.sub('pagina=\d+', 'pagina=' + str(page + 1), url)
        addDir("Next Page >>", url, 9)
    progress.close()
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def lastAdded(cat):
    progress = xbmcgui.DialogProgress()
    progress.create('Progress', 'Please wait...')
    progress.update(1, "", "Loading list - 1%", "")
    div = htmlFilter(str(BeautifulSoup(http_req(siteUrl)).findAll("div", {"id": "tab1"})), True)
    if cat == 'tvshows':
        results = re.findall(r'<a class="link" href="(seriale2)-([0-9]+-[0-9]+)-.+?.html">(.+?)</a>.+?">(.+?)</div></div>', div)
    elif cat == 'movies':
        results = re.findall(r'<a class="link" href="(filme)-(.+?).html">(.+?)</a>.+?">(.+?)</div>', div)
    total = len(results)
    current = 0
    while current <= total - 1:
        type = results[current][0]
        link = results[current][1]
        title = results[current][2]
        ep_year = results[current][3]
        if type == 'seriale2':
            eps = re.findall(r'S(\d+)E(\d+)', ep_year)
            if eps:
                season = eps[0][0]
                episode = eps[0][1]
            else:
                season = ''
                episode = ''
            name = '%s %sx%s' % (title, season, episode)
            url = siteUrl + 'player-serial-' + link + '-sfast.html'
            addDir(name, url, 8, "", title, season, episode, folder=False)
        elif type == 'filme':
            year = re.findall(r'(\d{4})', ep_year)
            name = '%s (%s)' % (title, year[0])
            url = siteUrl + 'filme-' + link + '.html'
            addDir(name, url, 8, "", name, folder=False)
        if progress.iscanceled():
            sys.exit()
        percent = int(((current + 1) * 100) / total)
        message = "Loading list - " + str(percent) + "%"
        progress.update(percent, "", message, "")
        current += 1
    progress.close()
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def streamGetter(url):
    ## Open the PostTV video URL
    response = urllib2.urlopen(url)
    page_source = response.read()
    ## Pull some metadata from the video page source
    soup = BeautifulSoup(page_source)
    for titles in soup.findAll("meta", attrs={"property": "og:title"}):
        title = titles.get("content")
    for descs in soup.findAll("meta", attrs={"property": "og:description"}):
        description = descs.get("content")
    for images in soup.findAll("meta", attrs={"property": "og:image"}):
        image = images.get("content")
    ## Look for the Ooyala player string
    m = re.search('"((http)?://player.ooyala.com/player_v2.swf(?!.*adSetCode).*?)"', page_source)
    url = m.group()
    ## Strip off some extra quotes
    cleanURL = url.replace("\"", "")
    ## Grab the embed code from the cleaned URL
    ec = re.search('((?<=embedCode=)(.*)(?=&autoplay))', cleanURL)
    embedCode = ec.group()
    ## Decrypt the embed code and return a stream path
    smil = CommonUtils().grabEncrypted(embedCode)
    decrypted_smil = ooyalaCrypto().ooyalaDecrypt(smil)
    videoList = MagicNaming().getVideoUrl(decrypted_smil)
    videoArray = ''.join(videoList)
    ## Pull the playpath from the stream path
    Segments = videoArray.rsplit('/', 2)
    playpath = 'mp4:s/' + Segments[1] + '/' + Segments[2]
    ## Return the title, description, thumbnail URL and playpath
    return (title, description, image, playpath)
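# A sketch of the playpath derivation above with a hypothetical stream path:
# rsplit('/', 2) cuts at the two rightmost slashes, so the final two segments
# form the RTMP playpath.
def _demo_playpath():
    segments = 'rtmp://example.net/ondemand/a1b2/video.mp4'.rsplit('/', 2)
    assert 'mp4:s/' + segments[1] + '/' + segments[2] == 'mp4:s/a1b2/video.mp4'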
def getSubtitle(url):
    subtitle = ''
    try:
        if plugin.getSetting("enableSub") == 'true':
            page = str(BeautifulSoup(http_req(url)).findAll("script"))
            page = ''.join(page.split())
            match = re.findall('\'tracks\':\[{\'file\':"http:\/\/superweb\.rol\.ro\/video\/jw6\/(.+?)",', page)
            if match:
                sub_url = 'http://superweb.rol.ro/video/jw6/' + match[0]
                sub_tmp = os.path.join(xbmc.translatePath("special://temp"), match[0])
                with open(sub_tmp, 'w') as f:
                    f.write(http_req(sub_url))
                subtitle = match[0]
    except:
        pass
    return subtitle
def getAnos(url):
    link = openURL(url)
    soup = BeautifulSoup(link)
    cont = soup("div", {"class": "filtro_y"})
    anos = cont[0]("li")
    totA = len(anos)
    for ano in anos:
        titA = ano.text.encode('utf-8', 'ignore')
        urlA = ano.a["href"].encode('utf-8', 'ignore')
        imgA = artfolder + 'calend.png'
        addDir(titA, urlA, 20, imgA)
    if getVis == 'true':
        setViewMenu()
def getFilmes(url):
    link = openURL(url)
    link = unicode(link, 'utf-8', 'ignore')
    soup = BeautifulSoup(link)
    conteudo = soup("div", {"class": "rel-single"})
    filmes = conteudo[0]("li")
    totF = len(filmes)
    for filme in filmes:
        try:
            titF = filme.img["title"].encode('latin1').encode('utf-8', 'ignore')
        except:
            titF = filme.img["title"].encode('utf-8', 'ignore')
        titF = titF.replace('Dublado - 1080p', '').replace('Dublado - 720p', '') \
                   .replace('Legendado - 1080p', '').replace('Legendado - 720p', '')
        urlF = filme.a["href"]
        imgF = filme.img["src"].replace(base + "/wp-content/themes/CineFilmes3/timthumb.php?src=", "")
        pltF = re.findall('<div class="sinopse-box">(.*?)</div>', str(filme))[0]
        try:
            pltF = pltF.split('–')[1]
        except:
            pass
        try:
            pltF = pltF.encode('latin1').decode('utf-8')
        except:
            pltF = pltF.decode('utf8')
        addDirF(titF, urlF, 100, imgF, False, totF, pltF)
    try:
        proxima = re.findall('<a class="next page-numbers" href="(.*?)">', link)[0]
        addDir('Próxima Página >>', proxima, 20, artfolder + 'proxima.png')
    except:
        pass
    setViewFilmes()
def getCategorias(url):
    link = openURL(url)
    soup = BeautifulSoup(link)
    conteudo = soup("div", {"class": "categorias"})
    categorias = conteudo[0]("li")
    totC = len(categorias)
    for categoria in categorias:
        titC = categoria.a.text.replace('&amp;', '&').encode('utf-8', 'ignore')
        urlC = categoria.a["href"]
        imgC = artfolder + 'categorias.png'
        addDir(titC, urlC, 20, imgC)
    if getVis == 'true':
        setViewMenu()
def getAnos(url):
    link = openURL(url)
    soup = BeautifulSoup(link)
    conteudo = soup("div", {"class": "box-seletor"})
    anos = conteudo[1]("option")
    totA = len(anos)
    for ano in anos:
        titA = ano.text.encode('utf-8', 'ignore')
        if not "Ano" in titA:
            urlA = base + '/?cat=' + ano["value"]
            imgA = artfolder + 'categorias.png'
            addDir(titA, urlA, 20, imgA)
    setViewMenu()
def getCategorias(url):
    link = openURL(url)
    soup = BeautifulSoup(link)
    conteudo = soup("ul", {"class": "m-cat"})
    categorias = conteudo[0]("li")
    totC = len(categorias)
    for categoria in categorias:
        titC = categoria.text.encode('latin-1', 'replace')
        if not 'Lançamento' in titC:
            urlC = categoria.a["href"]
            imgC = artfolder + limpa(titC) + '.png'
            addDir(titC, urlC, 20, imgC)
    setViewMenu()
def getCategorias(url):
    link = openURL(url)
    soup = BeautifulSoup(link)
    conteudo = soup("div", {"class": "row aling"})
    categorias = conteudo[0]("a")
    totC = len(categorias)
    for categoria in categorias:
        titC = categoria.img['title']
        titC = titC.replace('Filmes na categoria: ', '').replace(
            'Categoria: ', '').replace('categoria: ', '').encode('utf-8', 'ignore')
        if not ("Todas" in titC or "Filmes" in titC or "Seriados" in titC or
                "Em Breve" in titC or "Lançamentos" in titC or "Novos" in titC):
            urlC = categoria['href']
            imgC = categoria.img['src']
            addDir(titC, urlC, 20, imgC)
    setViewMenu()
def getRecentes(url):
    link = openURL(url)
    soup = BeautifulSoup(link)
    cont = soup("div", {"id": "episodes"})
    eprec = cont[0]("td", {"class": "bb"})
    totEPR = len(eprec)
    for ep in eprec:
        if not 'TV Show' in ep:
            rtit = re.findall('<a href=".*?">(.*?)</a>.*?<span>(.*?)</span>', str(ep))[0]
            rtit2 = re.findall('<a href=".*?"><h2>(.*?)</h2></a>', str(ep))[0]
            rimg = re.findall('<div class="imagen"><a href="(.*?)"><img src=" (.*?)" />', str(ep))[0]
            titEPR = rtit[0] + ' - ' + rtit[1] + " - " + rtit2
            urlEPR = rimg[0]
            imgEPR = rimg[1]
            if getPlot == 'true':
                rplot = re.findall('<p>(.*?)</p>', str(ep))[0]
                plotEPR = rplot
            else:
                plotEPR = ''
            addDirS(titEPR, urlEPR, 100, imgEPR, False, totEPR, plotEPR)
    try:
        proxima = re.findall("<link rel='next' href='(.*?)' />", link)[0]
        addDir('Próxima Página >>', proxima, 30, artfolder + 'proxima.png')
    except:
        pass
    if getVis == 'true':
        setViewEps()
def getVideoUrl(self, smil):
    chunkArray = []
    print "Ooyala: --> Calculating video URL..."
    soup = BeautifulSoup(smil)
    rtmpDomain = soup.find('rtmpdomains').string
    rtmpDomain = rtmpDomain.split(',')[0]
    httpDomain = soup.find('domains').string
    httpDomain = httpDomain.split(',')[0]
    vData = soup.find('vdata')
    if vData:
        streams = vData.findAll('stream', attrs={'f': 'h264'})
        streams = sorted(streams, key=lambda x: x['w'])
        # version = vData['version']
        version = "1:1"  ## Overriding 'version' for PostTV
        embedCode = vData['embedcode']
        ts = str(vData.find('ts').string)
        tsArray = ts.split(",")
        domain = ""
        if len(tsArray) > 2:
            domain = "http://" + httpDomain + "/"
        else:
            domain = rtmpDomain + "/mp4:s/"
        vidLength = len(tsArray) - 1
        for i, startTime in enumerate(tsArray[:vidLength]):
            index = i
            start = int(startTime)
            br = int(streams[-1]['br'])
            width = int(streams[-1]['w'])
            url = self.getChunkUrl(embedCode, version, index, start, br, width)
            url = domain + url
            chunkArray.append(url)
    else:
        print "Ooyala: --> Could not find vData, trying the other."
        promo = soup.find('promo')
        promoVids = re.findall('[A-z0-9]{32}', str(promo))
        chunkArray.append(rtmpDomain + str(promoVids[0]) + str(promoVids[-1]))
    print "Ooyala: --> Formed the following URL: " + str(chunkArray[0])
    print "Ooyala: --> Successfully formed URL."
    return chunkArray
def player(name, url, iconimage):
    OK = True
    mensagemprogresso = xbmcgui.DialogProgress()
    mensagemprogresso.create('HORA DA PIPOCA', 'Obtendo Fontes para ' + name,
                             'Por favor aguarde...')
    mensagemprogresso.update(0)
    titsT = []
    idsT = []
    matriz = []
    link = openURL(url)
    soup = BeautifulSoup(link)
    # Hosts that cannot be resolved are skipped
    blocked = ('Principal', 'DropVideo', 'Vídeo PW', 'YouWatch', 'Youwatch', 'NeoDrive')
    try:
        conteudo = soup("div", {"class": "pn-opcoes pn-dub"})
        srvsdub = conteudo[0]("li")
        tipo = "Dublado"
        for srv in srvsdub:
            titS = srv.text.encode('utf-8', 'ignore') + " (%s)" % tipo
            if not any(b in titS for b in blocked):
                titsT.append(titS)
                idsT.append(srv["data-pid"])
    except:
        pass
    try:
        conteudo = soup("div", {"class": "pn-opcoes pn-leg"})
        srvsleg = conteudo[0]("li")
        tipo = "Legendado"
        for srv in srvsleg:
            titS = srv.text.encode('utf-8', 'ignore') + " (%s)" % tipo
            if not any(b in titS for b in blocked):
                titsT.append(titS)
                idsT.append(srv["data-pid"])
    except:
        pass
    if not titsT:
        return
    index = xbmcgui.Dialog().select('Selecione uma das fontes suportadas :', titsT)
    if index == -1:
        return
    ind = idsT[index]
    conteudo = soup("li", {"class": "pi-item" + ind})
    links = conteudo[0]("iframe")
    if len(links) == 0:
        links = conteudo[0]("embed")
    for link in links:
        urlVideo = link["data-src"]
        print "URLVIDEO " + urlVideo
    mensagemprogresso.update(50, 'Resolvendo fonte para ' + name, 'Por favor aguarde...')
    if 'nowvideo.php' in urlVideo:
        nowID = urlVideo.split("id=")[1]
        urlVideo = 'http://embed.nowvideo.sx/embed.php?v=%s' % nowID
    elif 'video.tt' in urlVideo:
        vttID = urlVideo.split('e/')[1]
        urlVideo = 'http://www.video.tt/watch_video.php?v=%s' % vttID
    elif 'flashx.php' in urlVideo:
        fxID = urlVideo.split('id=')[1]
        urlVideo = 'http://www.flashx.tv/embed-%s.html' % fxID
    elif 'thevid.net' in urlVideo:
        linkTV = openURL(urlVideo)
        js_data = jsunpack.unpack(linkTV)
        url2Play = re.findall('var vurl2="(.*?)"', js_data)[0]
        OK = False
    if OK:
        url2Play = urlresolver.resolve(urlVideo)
    if not url2Play:
        return
    legendas = '-'
    mensagemprogresso.update(75, 'Abrindo Sinal para ' + name, 'Por favor aguarde...')
    playlist = xbmc.PlayList(1)
    playlist.clear()
    listitem = xbmcgui.ListItem(name, thumbnailImage=iconimage)
    listitem.setPath(url2Play)
    listitem.setProperty('mimetype', 'video/mp4')
    listitem.setProperty('IsPlayable', 'true')
    playlist.add(url2Play, listitem)
    xbmcPlayer = xbmc.Player(xbmc.PLAYER_CORE_AUTO)
    xbmcPlayer.play(playlist)
    mensagemprogresso.update(100)
    mensagemprogresso.close()
    if legendas != '-':
        if 'timedtext' in legendas:
            import os.path
            sfile = os.path.join(xbmc.translatePath("special://temp"), 'sub.srt')
            sfile_xml = os.path.join(xbmc.translatePath("special://temp"), 'sub.xml')
            #timedtext: download the XML captions and convert them to SRT
            sub_file_xml = open(sfile_xml, 'w')
            sub_file_xml.write(urllib2.urlopen(legendas).read())
            sub_file_xml.close()
            xmltosrt.main(sfile_xml)
            xbmcPlayer.setSubtitles(sfile)
        else:
            xbmcPlayer.setSubtitles(legendas)
def load_event_content(eventId):
    LoLEventDay = namedtuple('LoLEventDay', 'dayId day matches recommended imageUrl')
    LoLEventMatch = namedtuple('LoLEventMatch', 'gameId team1 team2 videoLinks')
    url = LOLMATCHESURL % eventId
    response = PluginUtils.do_request(url)
    if response is None:
        return None
    # Now lets parse results
    decoded_data = json.load(response)
    selfText = decoded_data[0]['data']['children'][0]['data']['selftext_html']
    eventTitle = ''
    days = []
    soup = BeautifulSoup(PluginUtils.unescape(selfText))
    # Get all the recommended matches; we add those to the events.
    # We do it like this: Game H1_C1_C4
    recommended = ''
    # a href="/spoiler"
    spoilers = soup.findAll("a", href="/spoiler")
    if spoilers is not None:
        for spoiler in spoilers:
            # add them to the list
            games = spoiler.text.replace(',', '_')
            recommended += games + "_"
    imgUrl = ''
    link = soup.find('a', href='#EVENT_PICTURE')
    if link is not None:
        imgUrl = link['title']  # the image URL is carried in the title attribute
    # find all tables
    tables = soup.findAll("table")
    for idx, table in enumerate(tables):
        if table is not None:
            titleLink = table.find("a", href="http://www.table_title.com")
            if titleLink is not None:
                eventTitle = titleLink['title']
            YouTubeColumns = []
            Team1Index = -1
            Team2Index = -1
            # Investigate the right columns for youtube links
            rows = table.find("thead").findAll("tr")
            for row in rows:
                cols = row.findAll("th")
                for i, col in enumerate(cols):
                    if col.text.lower() == "youtube":
                        YouTubeColumns.append(i)
                    if col.text.lower() == "team 1":
                        Team1Index = i
                    if col.text.lower() == "team 2":
                        Team2Index = i
            matches = []
            rows = table.find("tbody").findAll("tr")
            for row in rows:
                videos = []
                cols = row.findAll("td")
                if cols is not None:
                    for yv in YouTubeColumns:
                        if cols[yv] is not None:
                            if cols[yv].a is not None:
                                youTubeData = PluginUtils.parse_youtube_url(cols[yv].a['href'])
                                videos.append({'text': cols[yv].a.text,
                                               'videoId': youTubeData['videoId'],
                                               'time': youTubeData['time']})
                matches.append(LoLEventMatch(cols[0].text, cols[Team1Index].text,
                                             cols[Team2Index].text, videos))
            days.append(LoLEventDay(dayId=idx, day=eventTitle, matches=matches,
                                    recommended=recommended, imageUrl=imgUrl))
    return days
def load_events(sortByStatus, after):
    # The reddit api does things like this:
    # /r/bla.json?limit=pagesize&after=postId
    # Let's build a URL
    urlAppend = '?limit=' + str(PAGE_SIZE)
    if after != 'none':
        urlAppend += '&after=' + after
    response = PluginUtils.do_request(LOLEVENTURL + urlAppend)
    if response is None:
        return None
    events = []
    # Now lets parse results
    decoded_data = json.load(response)
    root = decoded_data['data']
    # after link
    afterPost = root['after']
    LoLEvent = namedtuple('LoLEvent', 'title status eventId createdOn imageUrl')
    # For Each Item in Children
    for post in root['children']:
        html = post['data']['selftext_html']
        if html is not None:
            soup = BeautifulSoup(PluginUtils.unescape(html))
            imgUrl = ''
            isEvent = False
            link = soup.find('a', href='#EVENT_TITLE')
            if link is not None:
                isEvent = True
            link = soup.find('a', href='#EVENT_PICTURE')
            if link is not None:
                imgUrl = link['title']  # the image URL is carried in the title attribute
            status = 99  # Using numbers for status so we can easily sort by this
            # link_flair_css_class: "ongoing"
            # link_flair_css_class: "finished"
            # link_flair_css_class: "twitchongoing"
            # link_flair_css_class: "featured"
            # link_flair_css_class: "finishedfeatured"
            # link_flair_css_class: null
            flair_css = post['data']['link_flair_css_class']
            if flair_css is not None:
                if flair_css.lower() == FEATURED_STRING:
                    status = 0
                if flair_css.lower() == ACTIVE_STRING:
                    status = 1
                if flair_css.lower() == FINISHED_STRING:
                    status = 2
                if flair_css.lower() == FINISHEDFEATURED_STRING:
                    status = 2
            # Some don't have link_flair_css_class but are events
            if status == 99 and isEvent:
                status = 98
            childEvent = LoLEvent(title=post['data']['title'],
                                  status=status,
                                  eventId=post['data']['id'],
                                  createdOn=datetime.datetime.fromtimestamp(int(post['data']['created'])),
                                  imageUrl=imgUrl)
            events.append(childEvent)
    if sortByStatus:
        # sort
        return afterPost, sorted(events, key=attrgetter('status'))
    else:
        return afterPost, events
def find_games(self, url):
    page = urllib2.urlopen(url)
    soup = BeautifulSoup(page)
    games = soup.findAll('div', {'class': 'sbGame'})
    return games
def parse_game_servlet_response(response_xml):
    soup = BeautifulSoup(response_xml)
    streams = Streams()
    home_path = away_path = home_condensed_path = away_condensed_path = None
    # Figure out stream type
    if soup.find('isarchived') and soup.find('isarchived').getText() == 'true':
        streams.stream_type = StreamTypes.RTMP
    else:
        streams.stream_type = StreamTypes.LIVE
    # Home Streams
    if soup.find('hashomeprogram') and soup.find('hashomeprogram').getText() == 'true':
        if streams.stream_type == StreamTypes.RTMP:
            home_path = soup.find('homeprogram').find('publishpoint').getText()[:-2]
            if soup.find('homecondensed'):
                home_condensed_path = soup.find('homecondensed').find('publishpoint').getText()[:-2]
        else:
            home_path = soup.find('homeprogramid').getText()
    # Away Streams
    if soup.find('hasawayprogram') and soup.find('hasawayprogram').getText() == 'true':
        if streams.stream_type == StreamTypes.RTMP:
            away_path = soup.find('awayprogram').find('publishpoint').getText()[:-2]
            if soup.find('awaycondensed'):
                away_condensed_path = soup.find('awaycondensed').find('publishpoint').getText()[:-2]
        else:
            away_path = soup.find('awayprogramid').getText()
    streams.paths = {
        'home': home_path,
        'home_condensed': home_condensed_path,
        'away': away_path,
        'away_condensed': away_condensed_path
    }
    return streams
def getEpisodes(url, season, title, thumbnail):
    progress = xbmcgui.DialogProgress()
    progress.create('Progress', 'Please wait...')
    progress.update(1, "", "Loading list - 1%", "")
    title = re.sub('\(.+?\)', '', title).strip()
    list = []
    cache = False
    if plugin.getSetting("enableCache") == 'true':
        cacheFilename = '%s-s%s-episodes' % (re.sub(' ', '-', title), season)
        cache = plugin.cacheLoad(cacheFilename, int(plugin.getSetting("cacheExpire")))
        if cache:
            list = cache
    if not cache:
        div = htmlFilter(str(BeautifulSoup(http_req(url)).find("div", {"id": "content"})), True)
        episodes = re.findall(r'Sezonul ' + season + ', Episodul (.+?)</div>.+?<a href="seriale2-([\d]+-[\d]+)-.+?.html" class="link">(.+?)</a>', div)
        if episodes:
            total = len(episodes)
        else:
            episodes = re.findall(r'ma;">([\d]+)</div>.+?<a href="seriale2-([0-9]+-[0-9]+)-.+?.html" class="link">(.+?)</a>', div)
            total = len(episodes)
        current = 0
        while current <= total - 1:
            ep_num = episodes[current][0]
            ep_name = episodes[current][2]
            if ep_name == str(re.findall('(Episodul [-0-9]*)', ep_name)).strip('[]').strip('"\''):
                ep_name = ''
            tvshow = {}
            tvshow['url'] = siteUrl + 'player-serial-' + episodes[current][1] + '-sfast.html'
            tvshow['thumbnail'] = thumbnail
            tvshow['title'] = title
            tvshow['season'] = season
            tvshow['ep_num'] = ep_num
            tvshow['ep_name'] = ep_name
            list.append(tvshow)
            if progress.iscanceled():
                sys.exit()
            percent = int(((current + 1) * 100) / total)
            message = "Loading list - " + str(percent) + "%"
            progress.update(percent, "Enabling cache storage will speed up future loads.", message, "")
            current += 1
        if plugin.getSetting("enableCache") == 'true':
            plugin.cacheList(list, cacheFilename)
    for tvshow in list:
        name = 'Episode %s %s' % (tvshow['ep_num'], tvshow['ep_name'])
        addDir(name, tvshow['url'], 8, tvshow['thumbnail'], tvshow['title'], tvshow['season'],
               tvshow['ep_num'], tvshow['ep_name'], folder=False)
    progress.close()
    xbmcplugin.endOfDirectory(int(sys.argv[1]))