def getLink(self, url):
    # Resolve a playable link for an item whose url was packed as
    # "pageUrl|playerId|playerType"; a plain url is returned unchanged.
    printDBG("getLink url[%s]" % url)
    urlItem = url.split('|')
    if 3 == len(urlItem):
        url = urlItem[0]
        # AJAX request asking the page for the selected player
        post_data = { 'action': 'getPlayer', 'id': urlItem[1], 'playerType': urlItem[2] }
        HEADER = dict(self.AJAX_HEADER)
        HEADER['Referer'] = url
        if 'free' == urlItem[2]:
            http_params = {'header': HEADER}
        else:
            # non-free players need the stored session cookie
            http_params = {'header': HEADER, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIE_FILE}
        sts, data = self.cm.getPage( url, http_params, post_data)
        if not sts: return ''
        data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="player">', '<div class="playerTypes">', False)[1]
        if 'free' == urlItem[2]:
            # free player: follow two levels of iframes, then hand the
            # final url to the hosting resolver
            data = CParsingHelper.getSearchGroups(data, '<iframe [^>]*?src="([^"]+?)"')[0]
            sts, data = self.cm.getPage( data )
            if not sts: return ''
            data = CParsingHelper.getSearchGroups(data, '<iframe [^>]*?src="([^"]+?)"')[0]
            return self.up.getVideoLink( data )
        else:
            # other player types: direct url embedded in the JS config
            return CParsingHelper.getSearchGroups(data, 'url: [\'"](http[^\'"]+?)[\'"]')[0]
        return ''  # NOTE(review): unreachable - both branches above return
    else:
        return url
def getWebCamera(self, cItem):
    # List web cameras: the top-level 'WebCamera PL' item lists category
    # directories; any other item lists the playable cameras themselves.
    printDBG("getWebCamera start")
    sts, data = self.cm.getPage(cItem['url'])
    if sts:
        if cItem['title'] == 'WebCamera PL':
            # first a "Polecane kamery" (recommended) dir pointing at the same url
            params = dict(cItem)
            params.update({'title':'Polecane kamery'})
            self.addDir(params)
            # then one dir per camera category
            data = CParsingHelper.getDataBeetwenMarkers(data, '<h4>Kamery wg kategorii</h4>', '</div>', False)[1]
            data = data.split('</a>')
            del data[-1]
            for item in data:
                url = self.cm.ph.getSearchGroups(item, """href=['"](http[^'^"]+?)['"]""")[0]
                if '' != url:
                    params = dict(cItem)
                    params.update({'title':self._cleanHtmlStr(item), 'url':url})
                    self.addDir(params)
        else:
            # camera listing page: one entry per <div class="inlinecam...">
            data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="inlinecam', '<div id="footerbar">', False)[1]
            data = data.split('<div class="inlinecam')
            for item in data:
                item = CParsingHelper.getDataBeetwenMarkers(item, '<a', '</div>', True)[1]
                url = self.cm.ph.getSearchGroups(item, """href=['"](http[^'^"]+?)['"]""")[0]
                if '' != url:
                    title = self._cleanHtmlStr(CParsingHelper.getDataBeetwenMarkers(item, '<div class="bar">', '</div>', False)[1])
                    icon = self.cm.ph.getSearchGroups(item, """data-src=['"](http[^'^"]+?)['"]""")[0]
                    params = dict(cItem)
                    params.update({'title':title, 'url':url, 'icon':icon})
                    # NOTE(review): playVideo here where sibling listers use
                    # addVideo - confirm this is the intended item-adding call
                    self.playVideo(params)
def getTeamCastList(self, cItem): printDBG('getTeamCastList start') #http://team-cast.pl.cp-21.webhostbox.net/kanalyFlash/ #http://team-cast.pl.cp-21.webhostbox.net/ #src="http://team-cast.pl.cp-21.webhostbox.net/kanalyFlash/film/hbo.html" url = cItem['url'] # list categories if '' == url : self.teamCastTab = {} url = 'http://team-cast.pl.cp-21.webhostbox.net/' sts, data = self.cm.getPage(url) if not sts: return data = CParsingHelper.getDataBeetwenMarkers(data, '<div id="stream-frame">', '<div id="now-watching">', False)[1] # remove commented channels data = re.sub('<!--[^!]+?-->', '', data) data = data.split('<li class="menu_right">') del data[0] for cat in data: catName = CParsingHelper.getDataBeetwenMarkers(cat, '<a href="#" class="drop">', '</a>', False)[1].strip() channels = re.findall('<a href="([^"]+?)">([^<]+?)<img src="http://wrzucaj.net/images/2014/09/12/flash-player-icon.png"', cat) if len(channels): self.teamCastTab[catName] = channels newItem = dict(cItem) newItem.update({'url':catName, 'title':catName + ' (%d)' % len(channels)}) self.addDir(newItem) elif url in self.teamCastTab: # List channels for item in self.teamCastTab[url]: newItem = dict(cItem) newItem.update({'url':item[0], 'title':item[1]}) self.playVideo(newItem) else: printExc()
def fillFilters(self, refresh=False):
    # Scrape the sort / version / category filter definitions from the
    # main page into self.filters; no-op when already filled, unless
    # refresh is True.
    printDBG('getFilters')
    def SetFilters(raw, tab):
        # raw: list of (value, label) regex tuples appended to tab as
        # {'tab': label, 'val': value} dicts (tab mutated in place)
        printDBG("---------------------")
        for it in raw:
            tab.append({'tab': it[1], 'val': it[0]})
            printDBG("filter: %r" % tab[-1])
    if self.filtersFilled and not refresh:
        return False
    sts, data = self.cm.getPage(self.MAINURL)
    if False == sts: return
    rawSortFilters = CParsingHelper.getDataBeetwenMarkers(data, 'Sortuj:', '</div>', False)[1]
    rawVerFilters = CParsingHelper.getDataBeetwenMarkers(data, 'Wyświetl:', '</div>', False)[1]
    rawCatFilters = CParsingHelper.getDataBeetwenMarkers(data, 'Kategorie Filmowe', '<script>', False)[1]
    data = '' # free data ;)
    rawSortFilters = re.compile('href="[^,]+?\,([^,]+?)\,wszystkie,0\.html">([^<]+?)<').findall(rawSortFilters)
    rawVerFilters = re.compile('href="[^,]+?\,[^,]+?\,([^,]+?),0\.html">([^<]+?)<').findall(rawVerFilters)
    rawCatFilters = re.compile('href="([^,]+?\,[^.]+?)\.html">([^<]+?)<').findall(rawCatFilters)
    # fill only when all three groups parsed, so a site layout change
    # does not leave a partially initialized filter set
    if 0 < len(rawSortFilters) and 0 < len(rawVerFilters) and 0 < len(rawCatFilters):
        self.filters['sort'] = []
        self.filters['ver'] = []
        self.filters['cat'] = [{'tab': 'Wszystkie', 'val': 'glowna'}]
        SetFilters(rawSortFilters, self.filters['sort'])
        SetFilters(rawVerFilters, self.filters['ver'])
        SetFilters(rawCatFilters, self.filters['cat'])
        self.filtersFilled = True
def listSeasons(self, cItem, category):
    """Add a directory per season anchor of a series page; when none is
    found, fall back to listing the episodes of 'season1' directly."""
    printDBG("SeansikTV.listSeasons")
    pageUrl = self._getFullUrl(cItem['url'])
    sts, pageData = self.cm.getPage(pageUrl)
    if not sts:
        return
    coverUrl = CParsingHelper.getSearchGroups(pageData, 'href="([^"]+?\.jpg)" rel="image_src"')[0]
    coverUrl = self._getFullUrl(coverUrl)
    seasonsHtml = CParsingHelper.getDataBeetwenMarkers(pageData, '<div class="jump">', '</div>', False)[1]
    for seasonId, seasonName in re.findall('<a href="#([^"]+?)">([^<]+?)</a>', seasonsHtml):
        entry = dict(cItem)
        entry.update({'category': category, 'season': seasonId, 'title': seasonName, 'icon': coverUrl})
        self.addDir(entry)
    if not self.currList:
        cItem.update({'season': 'season1', 'category': category, 'icon': coverUrl})
        self.listEpisodes(cItem)
def __addLastVersion(self, servers):
    # Append, in place, a server entry describing the newest IPTVPlayer
    # sources available on gitlab.com (master HEAD).
    mainUrl = "https://gitlab.com/iptvplayer-for-e2/iptvplayer-for-e2"
    sts, data = self.cm.getPage(mainUrl + '/tree/master')
    if sts:
        # sha1 of the latest commit, taken from the commit link in the tree page
        crcSum = CParsingHelper.getSearchGroups( data, '"/iptvplayer-for-e2/iptvplayer-for-e2/commit/([^"]+?)">')[0]
        if 40 == len(crcSum):
            # read the dotted version number out of version.py at that commit
            finalurl = mainUrl + '/blob/%s/IPTVPlayer/version.py' % crcSum
            sts, data = self.cm.getPage(finalurl)
            if sts:
                newVerNum = CParsingHelper.getSearchGroups( data, '"([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"')[0]
                sourceUrl = mainUrl + "/repository/archive.tar.gz?ref=%s" % crcSum
                server = { 'name': 'gitlab.com', 'version': newVerNum, 'url': sourceUrl, 'subdir': 'iptvplayer-for-e2.git/', 'pyver': 'X.X', 'packagetype': 'sourcecode' }
                printDBG( "UpdateMainAppImpl.__addLastVersion server: [%s]" % str(server))
                servers.append(server)
        else:
            printDBG("Wrong crcSum[%s]" % crcSum)
def listCategories(self, url, cat): printDBG("listCategories for url[%s] cat[%s]" % (url, cat)) # add all item params = {'category': cat, 'title': '--Wszystkie--', 'cat_id': ''} self.addDir(params) sts, data = self.cm.getPage(url) if not sts: return sts, data = CParsingHelper.getDataBeetwenMarkers( data, "<div class='window_title'>", "<div class='footer'>") if not sts: return False data = data.split("<div class='list_title'>") if len(data) > 1: del data[0] for item in data: # cat_id: match.group(2) & title: match.group(1) & img: self.MAINURL + match.group(3) match = re.search( "<b>([^<]+?)</b></a></div><a href='[^']*?category=([0-9]+?)'><img[^>]*?src='([^']+?)'", item) if not match: continue # plot printDBG('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA: [%s]' % match.group(2)) plot = CParsingHelper.removeDoubles( remove_html_markup(item, ' ').replace(match.group(1), ''), ' ') params = { 'category': cat, 'title': match.group(1), 'cat_id': match.group(2), 'icon': self.MAINURL + "/" + match.group(3), 'plot': plot } self.addDir(params)
def getLinksForVideo(self, cItem):
    # Return [{'name','url','need_resolve'}] hosting links for a video
    # page: tries data-rocketsrc, then iframe src, then script src.
    printDBG("MoviesHDCO.getLinksForVideo [%s]" % cItem)
    urlTab = []
    sts, data = self.getPage(cItem['url'])
    if not sts: return urlTab
    #printDBG(data)
    data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="video-embed">', '</div>', False)[1]
    oneLink = CParsingHelper.getDataBeetwenMarkers(data, 'data-rocketsrc="', '"', False)[1]
    if oneLink == '':
        oneLink = self.cm.ph.getSearchGroups(data, '<iframe[^>]+?src="([^"]+?)"')[0]
    if oneLink == '':
        oneLink = self.cm.ph.getSearchGroups(data, '<script[^>]+?src="([^"]+?)"')[0]
    # protocol-relative embeds
    if oneLink.startswith('//'):
        oneLink = 'http:' + oneLink
    oneLink = self._getFullUrl(oneLink)
    if 'videomega.tv/validatehash.php?' in oneLink:
        # videomega: the hash must first be exchanged for a view url
        sts, data = self.cm.getPage(oneLink, {'header':{'Referer':cItem['url'], 'User-Agent':'Mozilla/5.0'}})
        if not sts: return urlTab
        data = self.cm.ph.getSearchGroups(data, 'ref="([^"]+?)"')[0]
        if '' == data: return urlTab
        oneLink = 'http://videomega.tv/view.php?ref={0}&width=700&height=460&val=1'.format(data)
    if '' == oneLink: return urlTab
    name = self.up.getHostName(oneLink)
    urlTab.append({'name':name, 'url':oneLink, 'need_resolve':1})
    return urlTab
def listMovies(self, cItem):
    """List movie items from one listing page and append a 'Next page'
    directory when pagination is present.

    FIX(review): the fallback title taken from the img alt attribute
    was computed but never assigned when the <h5> title was empty; it
    is now stored in `title`.
    """
    printDBG("Filmy3dEU.listMovies")
    page = cItem.get('page', 1)
    url = cItem['url']
    if page > 1 and '?' not in url:
        url += '/page/%s' % page
    post_data = cItem.get('post_data', {})
    if {} == post_data:
        sts, data = self.cm.getPage(url, {}, self._addSortData(cItem.get('sort_type', '')))
    else:
        # POST listings (e.g. search) keep the session cookie; note they
        # intentionally post to the base url, not the paged one
        sts, data = self.cm.getPage(cItem['url'], { 'raw_post_data': True, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIE_FILE }, post_data)
    if not sts: return
    data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="short-film">', '<div class="gf-right">', False)[1]
    data = data.split('<div class="short-film">')
    # presence of pagination controls in the trailing chunk marks a next page
    if len(data) > 0 and '<span class="pnext">Poprzednia</span></a>' in data[-1]:
        nextPage = True
    else:
        nextPage = False
    for item in data:
        tmp = item.split('<h5')[-1]
        url = self.cm.ph.getSearchGroups(tmp, 'href="([^"]+?)"')[0]
        icon = self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"')[0]
        title = CParsingHelper.getDataBeetwenMarkers(tmp, '>', '</h5>', False)[1]
        if '' == title:
            # FIX: previously the result of this lookup was discarded
            title = self.cm.ph.getSearchGroups(item, 'alt="([^"]+?)"')[0]
        desc = _("Rating") + ': {0}/100, '.format(self.cm.ph.getSearchGroups(item, 'width\:([0-9]+?)\%')[0])
        desc += CParsingHelper.getDataBeetwenMarkers(item, '<p class="text">', '</p>', False)[1]
        if '' != url and '' != title:
            params = dict(cItem)
            params.update({ 'title': self.cleanHtmlStr(title), 'url': self._getFullUrl(url), 'desc': self.cleanHtmlStr(desc), 'icon': self._getFullUrl(icon) })
            self.addVideo(params)
    if nextPage:
        params = dict(cItem)
        params.update({'title': _('Next page'), 'page': page + 1})
        self.addDir(params)
def __init__(self, session, params=None):
    """Subtitle search/selection screen for OpenSubtitles.org.

    params may carry: vk_title, movie_title.
    FIX(review): the old `params={}` default shared one mutable dict
    across all instances while this constructor writes the 'login' and
    'password' keys into it; every call now gets its own fresh dict
    (callers passing an explicit dict are unaffected).
    """
    self.session = session
    Screen.__init__(self, session)
    self.params = {} if params is None else params
    self.params['login'] = config.plugins.iptvplayer.opensuborg_login.value
    self.params['password'] = config.plugins.iptvplayer.opensuborg_password.value
    self.searchPattern = CParsingHelper.getNormalizeStr(self.params.get('movie_title', ''))
    # try to guess season and episode number from an sXXeYY tag in the title
    try:
        tmp = CParsingHelper.getSearchGroups(self.searchPattern + ' ', 's([0-9]+?)e([0-9]+?)[^0-9]', 2)
        self.episodeData = {'season': int(tmp[0]), 'episode': int(tmp[1])}
    except:
        # no (or malformed) tag - mark both as unknown
        self.episodeData = {'season': -1, 'episode': -1}
    self.onShown.append(self.onStart)
    self.onClose.append(self.__onClose)
    self.subProvider = OpenSubOrgProvider()
    # static widgets
    self["title"] = Label(" ")
    self["console"] = Label(" ")
    self["label_red"] = Label(_("Cancel"))
    self["label_yellow"] = Label(_("Move group"))
    self["label_green"] = Label(_("Apply"))
    self["icon_red"] = Cover3()
    self["icon_yellow"] = Cover3()
    self["icon_green"] = Cover3()
    self["list"] = IPTVMainNavigatorList()
    self["list"].connectSelChanged(self.onSelectionChanged)
    self["actions"] = ActionMap(["ColorActions", "SetupActions", "WizardActions", "ListboxActions"],
        {
            "cancel": self.keyExit,
            "ok": self.keyOK,
            "red": self.keyRed,
            "yellow": self.keyYellow,
            "green": self.keyGreen,
        }, -2)
    self.iconPixmap = {}
    for icon in ['red', 'yellow', 'green']:
        self.iconPixmap[icon] = LoadPixmap(GetIconDir(icon + '.png'))
    # runtime state
    self.movieTitle = ''
    self.stackList = []
    self.stackItems = []
    self.defaultLanguage = GetDefaultLang()
    self.listMode = False
    self.downloadedSubFilePath = ''
    self.loginPassed = False
    self.tmpItem = None
def getMovieTab(self, url):
    # Fill self.currList with video items parsed from a FightTube listing
    # page; appends a "next page" category item when pagination exists.
    FightTube.printDBG('getMovieTab start')
    query_data = {'url': url, 'return_data': True}
    try:
        data = self.cm.getURLRequestData(query_data)
    except:
        FightTube.printDBG('getMovieTab exception')
        return
    # get next page url
    nexPageUrl = ''
    sts, tmp = CParsingHelper.getDataBeetwenMarkers( data, "<nav class='pagination'>", "</nav>", False)
    if sts:
        match = re.search("<li><a href='([^']+?)'>></a></li>", tmp)
        if match: nexPageUrl = match.group(1)
    # separete vidTab
    sts, data = CParsingHelper.getDataBeetwenMarkers( data, "<ul class='videos-listing'>", "</ul>", False)
    if not sts:
        printDBG('getMovieTab: main markers cannot be found!')
        return
    # separate videos data
    data = data.split('</li>')
    for vidItem in data:
        url = ''
        title = ''
        icon = ''
        # ocena/wyswietlen (rating/views) initialized but not extracted here
        ocena = ''
        wyswietlen = ''
        match = re.search("<a href='([^']+?)'", vidItem)
        if match: url = match.group(1)
        match = re.search("<img src='([^']+?)' alt='([^']+?)'", vidItem)
        if match:
            icon = match.group(1)
            title = match.group(2)
        if '' != url and '' != title:
            params = { 'type': 'video', 'title': title, 'page': url, 'icon': icon }
            self.currList.append(params)
    # only absolute http next-page links are followed
    if nexPageUrl.startswith("http://"):
        params = { 'type': 'category', 'name': 'nextpage', 'title': 'Następna strona', 'page': nexPageUrl, 'icon': '' }
        self.currList.append(params)
    return
def getVideoUrl(self, url):
    # Resolve direct video link(s) for an anyfiles.pl video page url via
    # the mobile/HTML5 player; returns a list of {'name','url'} dicts
    # (possibly delegating to the YouTube resolver).
    #show adult content
    #self.cm.addCookieItem(COOKIEFILE, {'name': 'AnyF18', 'value': 'mam18', 'domain': 'video.anyfiles.pl'}, False)
    if not self.isLogged():
        self.tryTologin()
    # GET VIDEO ID
    u = url.split('/')
    vidID = u[-1]
    match = re.search('([0-9]+?)\,', url)
    if match: vidID = match.group(1)
    # get COOKIE
    url = self.MAINURL + '/videos.jsp?id=' + vidID
    sts, data = self.cm.getPage(url, self.defaultParams)
    if not sts: return []
    # player frame; the Android User-Agent makes the site serve the
    # HTML5 <source> variant - presumably, TODO confirm against the site
    fUrl = self.MAINURL + "/w.jsp?id=%s&width=620&height=349&pos=0&skin=0" % vidID
    COOKIE_JSESSIONID = self.cm.getCookieItem(self.COOKIEFILE, 'JSESSIONID')
    HEADER = { 'Referer': url, 'Cookie': 'JSESSIONID=' + COOKIE_JSESSIONID + ';', 'User-Agent': "Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; androVM for VirtualBox ('Tablet' version with phone caps) Build/JRO03S) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30" }
    request_param = {'header': HEADER}
    sts, data = self.cm.getPage(fUrl, request_param)
    if not sts: return []
    HEADER['Referer'] = fUrl
    config = CParsingHelper.getSearchGroups(data, 'src="/?(pcs\?code=[^"]+?)"', 1)[0]
    if '' != config:
        sts, data = self.cm.getPage(self.MAINURL + '/' + config, {'header': HEADER})
        if sts:
            #var source = "<source src=\"http://50.7.220.66/video/60ExQvchsi4PbqMLr--I7A/1433518629/5e638de7a15c7a8dc7c979044cd2a953_147325.mp4\" type=\"video/mp4\" />";
            #var track = "<track label=\"izombie.112...\" srclang=\"pl\" kind=\"captions\" src=\"http://video.anyfiles.pl/subtit/1433508336949.srt\"></track>\n";
            data = data.replace('\\"', '"')
            #printDBG(data)
            difSourcesSrc = CParsingHelper.getSearchGroups( data, '''difSourcesSrc[^=]*?=[^"']*?["'](http[^'^"]+?)['"]''', 1)[0]
            url = CParsingHelper.getSearchGroups( data, '''<source[^>]+?src=["'](http[^'^"]+?)['"]''', 1)[0]
            # subtitles url is extracted but not used in the return value
            subUrl = CParsingHelper.getSearchGroups( data, '''<track[^>]+?src=["'](http[^'^"]+?)['"]''', 1)[0]
            if 'youtube' in difSourcesSrc:
                return self.getYTVideoUrl(difSourcesSrc)
            else:
                return [{'name': 'AnyFiles.pl', 'url': url}]
    return []
def listVideos(self, baseUrl, cat, cat_id, sort, page, search_pattern=''):
    """Fill the list with video items from one listing page and append a
    "Następna strona" directory when more pages exist.

    FIX(review): this source had passed through an HTML-entity decode
    (see the broken entity literals elsewhere in the file); the
    next-page marker's '&raquo;' had collapsed to a raw '»' and is
    restored here. A duplicated `if not match: continue` check was
    also removed.
    """
    url = baseUrl % (cat_id, sort, page)
    printDBG("listVideos for url[%s]" % url)
    sts, data = self.cm.getPage(url)
    if not sts: return
    nextPage = False
    # pagination renders a '&raquo;' (next) link only when a next page exists
    if -1 < data.find("class='black'>&raquo;</a>"):
        nextPage = True
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, "<div class='window_title'>", "<div class='footer'>")
    if not sts: return False
    data = data.split("<div class='list' style='width: 173px;'>")
    if len(data) > 1:
        del data[0]
        for item in data:
            # vid_hash & img
            match = re.search("href='([0-9a-fA-F]{32})'[^>]*?><img[^>]*?src='([^']+?)'", item)
            if not match: continue
            vid_hash = match.group(1)
            img = self.MAINURL + "/" + match.group(2)
            # title
            match = re.search("<div class='list_title'><a href='%s'>([^<]+?)</a></div>" % vid_hash, item)
            if not match: continue
            title = match.group(1)
            # plot = item text stripped of markup and of the title itself
            plot = CParsingHelper.removeDoubles(remove_html_markup(item, ' ').replace(title, ''), ' ')
            params = { 'title': title, 'url': self.MAINURL + "/" + vid_hash, 'icon': img, 'plot': plot }
            # entries carrying 'Rozmiar:' (file size) are skipped
            if 'Rozmiar:' in item:
                continue
            else:
                self.playVideo(params)
    if nextPage:
        params = { 'title': "Następna strona", 'category': cat, 'cat_id': cat_id, 'sort': sort, 'page': str(int(page) + 1), 'search_pattern': search_pattern }
        self.addDir(params)
def getVideosList(self, url):
    """Parse one videos listing page into self.currList, appending a
    next-page category item when the pager offers one.

    FIX(review): this source had been HTML-entity-decoded, which turned
    `.replace('&amp;', '&')` into a no-op and broke the
    `.replace('&#039;', "'")` literal (a syntax error); both entity
    strings are restored here.
    """
    printDBG("getVideosList url[%s]" % url)
    sts, data = self.cm.getPage(url)
    if not sts:
        printDBG("getVideosList except")
        return
    # get pagination HTML part
    nextPageData = CParsingHelper.getDataBeetwenMarkers(data, 'class="pager"', '</div>', False)[1]
    # get Video HTML part
    data = CParsingHelper.getDataBeetwenMarkers(data, '<!-- ************ end user menu ************ -->', '</ul>', False)[1].split('<li>')
    del data[0]
    for videoItemData in data:
        printDBG(' videoItemData')
        icon = ''
        duration = ''
        gatunek = ''
        plot = ''
        title = ''
        url = ''
        # only items with a play icon are actual videos
        if 'class="playIcon"' in videoItemData:
            # get icon src
            match = re.search('src="(http://[^"]+?)"', videoItemData)
            if match: icon = match.group(1).replace('&amp;', '&')
            # get duration
            match = re.search('class="duration"[^>]*?>([^<]+?)<', videoItemData)
            if match: duration = match.group(1).replace('&#039;', "'")
            # get gatunek (genre)
            match = re.search('"gatunek"[^>]*?>([^<]+?)<', videoItemData)
            if match: gatunek = match.group(1)
            # get plot
            match = re.search('class="text"[^>]*?>([^<]+?)<', videoItemData)
            if match: plot = match.group(1)
            # get title and url
            match = re.search('<a href="([^"]+?)" class="title"[^>]*?>([^<]+?)</a>', videoItemData)
            if match:
                url = self.MAIN_URL + match.group(1)
                title = match.group(2)
            params = {'type': 'video', 'page': url, 'title': title, 'icon': icon, 'duration': duration, 'gatunek': gatunek, 'plot': plot}
            self.currList.append( params )
    # check next page
    nextPageUrl = ''
    match = re.search('href="([^"]+?)" class="nextPage"', nextPageData)
    if match:
        nextPageUrl = match.group(1)
    else:
        match = re.search('href="([^"]+?)" class="lastPage"', nextPageData)
        if match: nextPageUrl = match.group(1)
    if '' != nextPageUrl:
        params = {'type': 'category', 'name': 'sub-category', 'page': self.MAIN_URL + nextPageUrl.replace('&amp;', '&'), 'title': 'Następna strona'}
        self.currList.append( params )
def listsCategoriesMenu(self, url):
    """Create a directory entry for every category option found in the
    page's movie-kat selection list."""
    ok, html = self.cm.getPage(url, {'header': self.HEADER})
    if not ok:
        return
    listHtml = CParsingHelper.getDataBeetwenMarkers(html, 'movie-kat-selection">', '</ul>', False)[1]
    for entry in listHtml.split('</li>'):
        catId = CParsingHelper.getSearchGroups(entry, 'data-value="([^"]+?)"', 1)[0]
        self.addDir({'name': 'category', 'title': self.cleanHtmlStr(entry), 'category': catId})
def getVideosList(self, url):
    """Parse one videos listing page; adds a video entry per item and a
    next-page directory when the pager offers one.

    FIX(review): `.replace('&', '&')` was a no-op left by an HTML-entity
    decode of this source; the intended '&amp;' -> '&' un-escaping is
    restored in both places (icon src and next-page href).
    """
    printDBG("getVideosList url[%s]" % url)
    sts, data = self.cm.getPage(url)
    if not sts:
        printDBG("getVideosList except")
        return
    # get pagination HTML part
    nextPageData = CParsingHelper.getDataBeetwenMarkers(data, 'class="pager"', '</div>', False)[1]
    # get Video HTML part
    data = CParsingHelper.getDataBeetwenMarkers(data, '<!-- ************ end user menu ************ -->', '</ul>', False)[1].split('<li>')
    del data[0]
    for videoItemData in data:
        printDBG('videoItemData')
        icon = ''
        duration = ''
        gatunek = ''
        desc = ''
        title = ''
        url = ''
        # only items with a play icon are actual videos
        if 'class="playIcon"' in videoItemData:
            # get icon src
            match = re.search('src="(http://[^"]+?)"', videoItemData)
            if match: icon = match.group(1).replace('&amp;', '&')
            # get duration
            duration = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(videoItemData, '<span class="duration">', '</span>')[1])
            # get gatunek (genre)
            gatunek = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(videoItemData, '<a class="gatunek" ', '</a>')[1])
            # get desc
            match = re.search('class="text"[^>]*?>([^<]+?)<', videoItemData)
            if match: desc = match.group(1)
            # get title and url
            match = re.search('<a href="([^"]+?)" class="title"[^>]*?>([^<]+?)</a>', videoItemData)
            if match:
                url = self.MAIN_URL + match.group(1)
                title = match.group(2)
            params = {'good_for_fav': True, 'url': url, 'title': title, 'icon': icon, 'desc': ' | '.join([duration, gatunek]) + '[/br]' + desc}
            self.addVideo( params )
    # check next page
    nextPageUrl = ''
    match = re.search('href="([^"]+?)" class="nextPage"', nextPageData)
    if match:
        nextPageUrl = match.group(1)
    else:
        match = re.search('href="([^"]+?)" class="lastPage"', nextPageData)
        if match: nextPageUrl = match.group(1)
    if '' != nextPageUrl:
        params = {'name': 'sub-category', 'page': self.MAIN_URL + nextPageUrl.replace('&amp;', '&'), 'title': 'Następna strona'}
        self.addDir( params )
def listSerialSeasons(self, category, url, icon):
    """Add one directory per season link found on a series page; all
    entries share the series description as plot."""
    printDBG("listSerialSeasons")
    ok, pageData = self.cm.getPage(url)
    if not ok:
        return
    description = CParsingHelper.getDataBeetwenMarkers(pageData, '<p class="serialDescription">', '</p>', False)[1]
    seasonsPart = CParsingHelper.getDataBeetwenMarkers(pageData, '<div class="seasonExpand">', '<script>', False)[1]
    seasonLinks = re.compile('<a href="[/]?(serial,[^,]+?,sezon,[1-9][0-9]*?.html)">([^<]+?)</a>').findall(seasonsPart)
    for seasonHref, seasonTitle in seasonLinks:
        # relative hrefs are resolved against MAINURL; absolute ones
        # leave `url` at its previous value (original behavior kept)
        if not seasonHref.startswith('http'):
            url = self.MAINURL + seasonHref
        self.addDir({'name': 'category', 'title': seasonTitle, 'category': category, 'url': url, 'icon': icon, 'plot': description})
def listSearchResults(self, pattern, searchType):
    """Run a site search and list either movies or series from the
    result page, depending on searchType."""
    printDBG("listFilms pattern[%s], searchType[%s]" % (pattern, searchType))
    searchUrl = self.MAINURL + 'szukaj.html?query=%s&mID=' % pattern
    ok, pageData = self.cm.getPage(searchUrl)
    if not ok:
        return
    # pick the section marker and target category for the requested type
    if 'filmy' == searchType:
        startMarker, category = '<h2 id="movies-res">Filmy:', 'video'
    else:
        startMarker, category = '<h2 id="serials-res">Seriale:', 'Serial_seasons_list'
    ok, pageData = CParsingHelper.getDataBeetwenMarkers(pageData, startMarker, '<a href="#top"', False)
    self.listItems(pageData.split('<li data-url='), category)
def listItems(self, cItem, category):
    # Generic lister for SeansikTV result tables: adds directories (or
    # videos when category == 'video') plus a next-page item.
    printDBG("SeansikTV.listItems")
    page = cItem.get('page', 1)
    url = self._addPage(cItem.get('url'), page)
    sts, data = self.cm.getPage(url)
    if False == sts: return
    # check next page: content after the active-page marker mentioning
    # 'page' means further pages exist
    netxtPage = CParsingHelper.getDataBeetwenMarkers( data, '<b class="active">%d</b>' % page, '</div>', False)[1]
    if 'page' in netxtPage:
        netxtPage = True
        page += 1
    else:
        netxtPage = False
    # NOTE(review): sts from this call is not checked before splitting
    sts, data = CParsingHelper.getDataBeetwenMarkers( data, '<div class="content table-sofi', '<div class="content">', False)
    data = data.split('<div class="content table-sofi')
    for item in data:
        icon = self._getFullUrl( CParsingHelper.getSearchGroups(item, 'src="([^"]+?jpg)"')[0])
        sts, tmp = CParsingHelper.getDataBeetwenReMarkers( item, re.compile('<td colspan="2"[^>]+?>'), re.compile('</td>'), False)
        url = self._getFullUrl( CParsingHelper.getSearchGroups(tmp, 'href="([^"]+?)"')[0])
        tmp = tmp.split('</a>')
        title = self.cleanHtmlStr(tmp[0])
        # NOTE(review): condition is always true - split() yields >= 1 element
        if 0 < len(tmp):
            desc = self.cleanHtmlStr(tmp[-1])
        # validate data
        if '' == url or '' == title: continue
        params = { 'name': 'category', 'category': category, 'title': title, 'url': url, 'icon': icon, 'desc': desc }
        if 'video' != category:
            self.addDir(params)
        else:
            self.addVideo(params)
    if netxtPage:
        params = dict(cItem)
        params.update({'title': 'Następna strona', 'page': page})
        self.addDir(params)
def getArticleContent(self, cItem):
    # Return [{'title','text','images'}] article description for an item,
    # or an empty list on any failure.
    printDBG("Filmy3dEU.getArticleContent [%s]" % cItem)
    retTab = []
    if 'url' not in cItem: return retTab
    sts, data = self.cm.getPage(cItem['url'])
    if not sts: return retTab
    # NOTE(review): sts from this call is not checked before use
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, "<div id='dle-content'>", '<div class="gf-right">', False)
    title = CParsingHelper.getDataBeetwenMarkers(data, '<h1 class="title">', '</h1>', False)[1]
    # NOTE(review): 'srct=' looks like a typo for 'src=', but the icon is
    # not included in the returned structure anyway ('images' stays empty)
    icon = self.cm.ph.getSearchGroups(data, 'srct="([^"]+?)"')[0]
    desc = self.cleanHtmlStr( self.cm.ph.getDataBeetwenMarkers(data, '<div class="comment-box-block" id="comment1">', '</div>', False)[1] )
    return [{'title':title, 'text':desc, 'images':[]}]
def getVideoUrl(self, url):
    # Resolve direct link(s) for an anyfiles.pl video via the desktop
    # flash-player flow (flowplayer config / pcsevlet servlet); returns
    # a list of {'name','url'} dicts, possibly via the YouTube resolver.
    #show adult content
    #self.cm.addCookieItem(COOKIEFILE, {'name': 'AnyF18', 'value': 'mam18', 'domain': 'video.anyfiles.pl'}, False)
    if not self.isLogged():
        self.tryTologin()
    # GET VIDEO ID
    u = url.split('/')
    vidID = u[-1]
    match = re.search('([0-9]+?)\,', url )
    if match: vidID = match.group(1)
    # get COOKIE
    sts, data = self.cm.getPage(self.MAINURL + '/videos.jsp?id=' + vidID, self.defaultParams)
    if not sts: return []
    fUrl = self.MAINURL + "/w.jsp?id=%s&width=620&height=349&pos=&skin=0" % vidID
    COOKIE_JSESSIONID = self.cm.getCookieItem(self.COOKIEFILE,'JSESSIONID')
    HEADER = {'Referer' : url, 'Cookie' : 'JSESSIONID=' + COOKIE_JSESSIONID, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0'}
    request_param = {'header':HEADER}
    sts, data = self.cm.getPage(fUrl, request_param)
    if not sts: return []
    #document.cookie = "__utdc_8a85608c7ff88b4de47cdc08107a8108=f68082abdaab664660b0c60289346552"+expires+"; path=";
    # the player page may set an extra cookie via inline JS; forward it
    match = re.search('document.cookie = "([^"]+?)"',data)
    if match:
        printDBG("========================================================================== B")
        #printDBG(data)
        printDBG("========================================================================== C")
        HEADER['Cookie'] = HEADER['Cookie'] + '; ' + match.group(1)
    HEADER['Referer'] = self.MAINURL + '/flowplaer/flowplayer.commercial-3.2.16.swf'
    # player config url: first from the flashvars, then from the servlet src
    config = CParsingHelper.getSearchGroups(data, 'var flashvars = {[^"]+?config: "([^"]+?)" }', 1)[0]
    if '' == config:
        printDBG("========================================================================== D")
        config = CParsingHelper.getSearchGroups(data, 'src="/?(pcsevlet\?code=[^"]+?)"', 1)[0]
    if '' != config:
        printDBG("========================================================================== E")
        sts,data = self.cm.getPage( self.MAINURL + '/' + config, {'header': HEADER})
        if sts:
            url = CParsingHelper.getSearchGroups(data, "'url':'(http[^']+?mp4)'", 1)[0]
            if '' != url:
                return [{ 'name': 'AnyFiles', 'url': url}]
            # 'api:' urls are YouTube ids
            url = CParsingHelper.getSearchGroups(data, "'url':'api:([^']+?)'", 1)[0]
            if '' != url:
                return self.getYTVideoUrl('http://www.youtube.com/watch?v='+url)
    return []
def getEpisodesTab(self, url, serial, sezon, icon):
    """Add a video entry for every episode listed under the given
    season anchor of a series page."""
    printDBG("getSerialEpisodItems start url=[%s] episode[%s]" % (url, sezon))
    ok, pageData = self.cm.getPage(url)
    if not ok:
        printDBG("getSerialEpisodItems problem")
        return
    ok, pageData = CParsingHelper.getDataBeetwenMarkers(pageData, 'id="%s"' % sezon, '</div>', False)
    if not ok:
        printDBG("getSerialEpisodItems problem no data beetween markers")
        return
    episodeRe = re.compile('<a class="o" href="([^"]+?)/([^"]+?)/([^"]+?)">([^<]+?)</a>')
    for part1, part2, part3, rawTitle in episodeRe.findall(pageData):
        self.addVideo({
            'season': sezon,
            'tvshowtitle': serial,
            'episode': part2,
            'title': self.cm.html_entity_decode(rawTitle),
            'page': self.MAINURL + part1 + '/' + part2 + '/' + part3,
            'icon': icon,
        })
def getF4MLinksWithMeta(manifestUrl, checkExt=True):
    """Build playlist entries for an f4m/HDS manifest url.

    Returns a list of {'name', 'url'} dicts, one per advertised bitrate
    (or a single generic entry when none are listed); live streams get
    'iptv_livestream' set in the url's metadata.

    FIX(review): the <streamType> lookup was called without the manifest
    data argument and without taking the [1] payload of the returned
    (sts, data) tuple - matching every other call site in this file -
    so live streams were never detected.
    """
    if checkExt and not manifestUrl.split('?')[0].endswith('.f4m'):
        return []
    cm = common()
    headerParams, postData = cm.getParamsFromUrlWithMeta(manifestUrl)
    retPlaylists = []
    sts, data = cm.getPage(manifestUrl, headerParams, postData)
    if sts:
        liveStreamDetected = False
        if 'live' == CParsingHelper.getDataBeetwenMarkers(data, '<streamType>', '</streamType>', False)[1]:
            liveStreamDetected = True
        bitrates = re.compile('bitrate="([0-9]+?)"').findall(data)
        for item in bitrates:
            link = strwithmeta(manifestUrl, {'iptv_proto': 'f4m', 'iptv_bitrate': item})
            if liveStreamDetected:
                link.meta['iptv_livestream'] = True
            retPlaylists.append({'name': '[f4m/hds] bitrate[%s]' % item, 'url': link})
        if 0 == len(retPlaylists):
            # no bitrates advertised - expose one generic entry
            link = strwithmeta(manifestUrl, {'iptv_proto': 'f4m'})
            if liveStreamDetected:
                link.meta['iptv_livestream'] = True
            retPlaylists.append({'name': '[f4m/hds]', 'url': link})
    return retPlaylists
def getArticleContent(self, cItem):
    """Build article data (title, plot text, poster, extra info) from
    the imdbinfo table of a movie page.

    FIX(review): the empty-title fallback assigned the alt-attribute
    text to `icon` (dead - it was immediately overwritten by the src
    lookup) instead of to `title`; it now fills `title` as intended,
    mirroring the alt-fallback pattern used elsewhere in this file.
    """
    printDBG("MoviesHDCO.getArticleContent [%s]" % cItem)
    retTab = []
    sts, data = self.getPage(cItem['url'])
    if not sts: return retTab
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, '<table id="imdbinfo">', '</table>', False)
    if not sts: return retTab
    tmp = data.split('</tr>')
    if len(tmp) < 2: return retTab
    title = self.cleanHtmlStr(tmp[0])
    if '' == title:
        # FIX: fall back to the poster's alt text when the header row is empty
        title = self.cm.ph.getSearchGroups(tmp[1], 'alt="([^"]+?)"')[0]
    icon = self.cm.ph.getSearchGroups(tmp[1], 'src="([^"]+?)"')[0]
    desc = self.cm.ph.getDataBeetwenMarkers(tmp[1], '<b>Plot:</b>', '</td>', False)[1]
    otherInfo = {}
    tmpTab = [{'mark':'<b>Rating:</b>', 'key':'rating'},
              {'mark':'<b>Director:</b>', 'key':'director'},
              {'mark':'<b>Writer:</b>', 'key':'writer'},
              {'mark':'<b>Stars:</b>', 'key':'stars'},
              {'mark':'<b>Runtime:</b>', 'key':'duration'},
              {'mark':'<b>Rated:</b>', 'key':'rated'},
              {'mark':'<b>Genre:</b>', 'key':'genre'},
              {'mark':'<b>Released:</b>', 'key':'released'},
             ]
    for item in tmpTab:
        val = self.cm.ph.getDataBeetwenMarkers(tmp[1], item['mark'], '</td>', False)[1]
        if '' != val: otherInfo[item['key']] = self.cleanHtmlStr(val)
    return [{'title':self.cleanHtmlStr( title ), 'text': self.cleanHtmlStr( desc ), 'images':[{'title':'', 'url':self._getFullUrl(icon)}], 'other_info':otherInfo}]
def getArticleContent(self, cItem):
    """Return article data built from the page's OpenGraph meta tags,
    or an empty list for unsupported item kinds / failed fetches."""
    printDBG("MoviesHDCO.getArticleContent [%s]" % cItem)
    retTab = []
    # only movie pages and explored items carry og: meta data
    if 'movie' != cItem.get('mode') and 'explore_item' != cItem.get('category'):
        return retTab
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return retTab
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, '<meta property', '<script')
    if not sts:
        return retTab
    icon = self.cm.ph.getSearchGroups(data, '<meta[^>]*?property="og:image"[^>]*?content="(http[^"]+?)"')[0]
    title = self.cm.ph.getSearchGroups(data, '<meta[^>]*?property="og:title"[^>]*?content="([^"]+?)"')[0]
    desc = self.cm.ph.getSearchGroups(data, '<meta[^>]*?property="og:description"[^>]*?content="([^"]+?)"')[0]
    article = {'title': self.cleanHtmlStr(title), 'text': self.cleanHtmlStr(desc), 'other_info': {}}
    article['images'] = [{'title': '', 'url': self._getFullUrl(icon)}]
    return [article]
def getF4MLinksWithMeta(manifestUrl, checkExt=True):
    """Build playlist entries for an f4m/HDS manifest url.

    Returns a list of {'name', 'url'} dicts, one per advertised bitrate
    (or a single generic entry when none are listed); live streams get
    'iptv_livestream' set in the url's metadata.

    FIX(review): the <streamType> lookup was called without the manifest
    data argument and without taking the [1] payload of the returned
    (sts, data) tuple, so live streams were never detected.
    """
    if checkExt and not manifestUrl.split('?')[0].endswith('.f4m'):
        return []
    cm = common()
    headerParams, postData = cm.getParamsFromUrlWithMeta(manifestUrl)
    retPlaylists = []
    sts, data = cm.getPage(manifestUrl, headerParams, postData)
    if sts:
        liveStreamDetected = False
        if 'live' == CParsingHelper.getDataBeetwenMarkers(data, '<streamType>', '</streamType>', False)[1]:
            liveStreamDetected = True
        bitrates = re.compile('bitrate="([0-9]+?)"').findall(data)
        for item in bitrates:
            link = strwithmeta(manifestUrl, {'iptv_proto':'f4m', 'iptv_bitrate':item})
            if liveStreamDetected:
                link.meta['iptv_livestream'] = True
            retPlaylists.append({'name':'[f4m/hds] bitrate[%s]' % item, 'url':link})
        if 0 == len(retPlaylists):
            # no bitrates advertised - expose one generic entry
            link = strwithmeta(manifestUrl, {'iptv_proto':'f4m'})
            if liveStreamDetected:
                link.meta['iptv_livestream'] = True
            retPlaylists.append({'name':'[f4m/hds]', 'url':link})
    return retPlaylists
def getVideosFromChannelList(self, url, category, page, cItem):
    """Parse one page of a YouTube channel feed into a list of items,
    appending a 'Next page' entry when more results are available.
    Returns [] on any exception.

    FIX(review): `.replace('&', '&')` was a no-op left by an HTML-entity
    decode of this source; the intended '&amp;' -> '&' un-escaping of
    the next-page href is restored.
    """
    printDBG('YouTubeParser.getVideosFromChannelList page[%s]' % (page))
    currList = []
    try:
        sts, data = self.cm.getPage(url, {'host': self.HOST})
        if sts:
            if '1' == page:
                # first page is plain HTML; later pages arrive JSON-escaped
                sts, data = CParsingHelper.getDataBeetwenMarkers(data, 'feed-item-container', 'footer-container', False)
            else:
                data = unescapeHTML(data.decode('unicode-escape')).encode('utf-8').replace('\/', '/')
            # nextPage
            match = re.search('data-uix-load-more-href="([^"]+?)"', data)
            if not match:
                nextPage = ""
            else:
                nextPage = match.group(1).replace('&amp;', '&')
            data = data.split('feed-item-container')
            currList = self.parseListBase(data)
            if '' != nextPage:
                item = dict(cItem)
                item.update({'title': _("Next page"), 'page': str(int(page) + 1), 'url': 'http://www.youtube.com' + nextPage})
                currList.append(item)
    except:
        printExc()
        return []
    return currList
def listCategories(self, cItem, category, filter='categories'):
    """Add an '--Wszystkie--' entry plus one directory per option of the
    given filter select-box found on the page (nothing when the box has
    no options)."""
    printDBG("SeansikTV.listCategories")
    baseUrl = self._getFullUrl(cItem['url'])
    ok, pageData = self.cm.getPage(baseUrl)
    if not ok:
        return
    ok, pageData = CParsingHelper.getDataBeetwenMarkers(pageData, '<li onclick="selectBox(this)" name="%s">' % filter, '</ul>', True)
    options = re.compile('<input[^>]+?value="([0-9]+?)"[^>]+?/>([^<]+?)<').findall(pageData)
    if not options:
        return
    self.addDir({'name': 'category', 'category': category, 'title': '--Wszystkie--', 'url': baseUrl})
    for optValue, optLabel in options:
        filterUrl = baseUrl + ('&%s=%s' % (filter, optValue))
        self.addDir({'name': 'category', 'category': category, 'title': optLabel.strip(), 'url': filterUrl})
def listsMainMenu(self):
    """Fill currList with the video categories scraped from the main page,
    plus a search ('Wyszukaj') entry."""
    FightTube.printDBG('listsMainMenu start')
    query_data = {'url': self.MAINURL, 'return_data': True}
    try:
        data = self.cm.getURLRequestData(query_data)
    except:
        FightTube.printDBG('listsMainMenu exception')
        return
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, 'Kategorie video', '</ul>', False)
    if not sts:
        printDBG('listsMainMenu: menu marker cannot be found!')
        return
    entries = re.findall("<a href='([^']+?)' class='level0'[^>]+?>([^<]+?)</a>", data)
    if entries:
        for page, title in entries:
            self.currList.append({'type': 'category', 'title': title, 'page': page, 'icon': ''})
        self.currList.append({'type': 'category', 'title': 'Wyszukaj', 'page': self.SEARCHURL, 'icon': ''})
    return
def __addLastVersion(self, servers):
    """Resolve the newest gitlab master commit and append a matching
    source-code update-server descriptor to servers."""
    mainUrl = "https://gitlab.com/iptvplayer-for-e2/iptvplayer-for-e2"
    sts, data = self.cm.getPage(mainUrl + '/tree/master')
    if not sts:
        return
    crcSum = CParsingHelper.getSearchGroups(data, '"/iptvplayer-for-e2/iptvplayer-for-e2/commit/([^"]+?)">')[0]
    if 40 != len(crcSum):
        # a full SHA-1 commit hash (40 hex chars) is expected
        printDBG("Wrong crcSum[%s]" % crcSum)
        return
    finalurl = mainUrl + '/blob/%s/IPTVPlayer/version.py' % crcSum
    sts, data = self.cm.getPage(finalurl)
    if not sts:
        return
    newVerNum = CParsingHelper.getSearchGroups(data, '"([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"')[0]
    sourceUrl = mainUrl + "/repository/archive.tar.gz?ref=%s" % crcSum
    server = {'name': 'gitlab.com',
              'version': newVerNum,
              'url': sourceUrl,
              'subdir': 'iptvplayer-for-e2.git/',
              'pyver': 'X.X',
              'packagetype': 'sourcecode'}
    printDBG("UpdateMainAppImpl.__addLastVersion server: [%s]" % str(server))
    servers.append(server)
def getSerialsListByLetter(self, baseUrl, cat, letter):
    """Add one directory per serial listed in the given letter's section."""
    letter = letter.replace(' ', '')
    printDBG("getSerialsListByLetter start letter=%s" % letter)
    sts, data = self.cm.getPage(baseUrl)
    if not sts:
        printDBG("getSerialsListByLetter problem with getPage[%s]" % baseUrl)
        return
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, 'id="letter_%s">' % letter, '</ul>', False)
    if not sts:
        printDBG("getSerialsListByLetter problem no data beetween markers")
        return
    for href, rawTitle in re.findall('<a href="([^"]+?)" class="pl-corners">(.+?)</a>', data):
        self.addDir({'name': 'category',
                     'category': cat,
                     'title': remove_html_markup(rawTitle),
                     'url': self.MAINURL + href.strip()})
def getHostingTable(self, urlItem):
    """Return the list of hosting links for urlItem.

    Uses a one-slot cache keyed by url + requested language version; tries
    premium links first (when logged in), falling back to free ones.
    """
    printDBG("getHostingTable url[%s]" % urlItem['url'])
    cacheKey = urlItem['url'] + urlItem.get('ver', '')
    # serve from the one-element cache when the key matches
    if len(self.linksCacheCache.get('tab', [])) > 0 and cacheKey == self.linksCacheCache.get('marker', None):
        return self.linksCacheCache['tab']
    hostingTab = []
    sts, data = self.cm.getPage(urlItem['url'])
    if not sts:
        return hostingTab
    # collect available language versions; when the requested one is found,
    # keep only that single entry
    data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="langs">', '</div>', False)[1]
    langTab = []
    for val, rawTitle in re.compile('data-id="([^"]+?)"[^>]*?>(.+?)</a>', re.DOTALL).findall(data):
        entry = {'val': val, 'title': self.cleanHtmlStr(rawTitle)}
        if entry['val'] == urlItem.get('ver', ''):
            langTab = [entry]
            break
        langTab.append(entry)
    for lang in langTab:
        linksTab = []
        if self.loggedIn:
            linksTab = self.getLinks(urlItem['url'], lang, {'val': 'premium', 'title': 'Premium'})
        if 0 == len(linksTab):
            linksTab = self.getLinks(urlItem['url'], lang, {'val': 'free', 'title': 'Free'})
        hostingTab.extend(linksTab)
    self.linksCacheCache = {'marker': cacheKey, 'tab': hostingTab}
    return hostingTab
def getF4MLinksWithMeta(manifestUrl, checkExt=True):
    """Parse an F4M/HDS manifest and return playlist entries.

    manifestUrl -- URL of the .f4m manifest (may carry meta via strwithmeta)
    checkExt    -- when True, bail out unless the URL path ends with '.f4m'
    Returns a list of {'name': ..., 'url': strwithmeta} dicts ([] on failure).
    """
    if checkExt and not manifestUrl.split("?")[0].endswith(".f4m"):
        return []
    cm = common()
    headerParams, postData = cm.getParamsFromUrlWithMeta(manifestUrl)
    retPlaylists = []
    sts, data = cm.getPage(manifestUrl, headerParams, postData)
    if sts:
        liveStreamDetected = False
        # BUGFIX: the original called getDataBeetwenMarkers without the data
        # argument, so the live-stream check could never match.
        if "live" == CParsingHelper.getDataBeetwenMarkers(data, "<streamType>", "</streamType>", False)[1]:
            liveStreamDetected = True
        bitrates = re.compile('bitrate="([0-9]+?)"').findall(data)
        for item in bitrates:
            link = strwithmeta(manifestUrl, {"iptv_proto": "f4m", "iptv_bitrate": item})
            if liveStreamDetected:
                link.meta["iptv_livestream"] = True
            retPlaylists.append({"name": "[f4m/hds] bitrate[%s]" % item, "url": link})
        if 0 == len(retPlaylists):
            link = strwithmeta(manifestUrl, {"iptv_proto": "f4m"})
            if liveStreamDetected:
                link.meta["iptv_livestream"] = True
            retPlaylists.append({"name": "[f4m/hds]", "url": link})
    return retPlaylists
def getArticleContent(self, cItem):
    """Build the article description (title, icon, plot, extra info) for a
    movie item from its imdbinfo table.

    Returns a single-element list with the article dict, or [] when the
    page or its markers are unavailable.
    """
    printDBG("MoviesHDCO.getArticleContent [%s]" % cItem)
    retTab = []
    if 'url' not in cItem:
        return []
    sts, data = self.getPage(cItem['url'])
    if not sts:
        return retTab
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, '<table id="imdbinfo">', '</table>', False)
    if not sts:
        return retTab
    tmp = data.split('</tr>')
    if len(tmp) < 2:
        return retTab
    title = self.cleanHtmlStr(tmp[0])
    if '' == title:
        # BUGFIX: the alt-attribute fallback is the title; the original
        # mistakenly assigned it to icon (which was overwritten right after)
        title = self.cm.ph.getSearchGroups(tmp[1], 'alt="([^"]+?)"')[0]
    icon = self.cm.ph.getSearchGroups(tmp[1], 'src="([^"]+?)"')[0]
    desc = self.cm.ph.getDataBeetwenMarkers(tmp[1], '<b>Plot:</b>', '</td>', False)[1]
    otherInfo = {}
    tmpTab = [{'mark': '<b>Rating:</b>', 'key': 'rating'},
              {'mark': '<b>Director:</b>', 'key': 'director'},
              {'mark': '<b>Writer:</b>', 'key': 'writer'},
              {'mark': '<b>Stars:</b>', 'key': 'stars'},
              {'mark': '<b>Runtime:</b>', 'key': 'duration'},
              {'mark': '<b>Rated:</b>', 'key': 'rated'},
              {'mark': '<b>Genre:</b>', 'key': 'genre'},
              {'mark': '<b>Released:</b>', 'key': 'released'}]
    for item in tmpTab:
        val = self.cm.ph.getDataBeetwenMarkers(tmp[1], item['mark'], '</td>', False)[1]
        if '' != val:
            otherInfo[item['key']] = self.cleanHtmlStr(val)
    return [{'title': self.cleanHtmlStr(title),
             'text': self.cleanHtmlStr(desc),
             'images': [{'title': '', 'url': self._getFullUrl(icon)}],
             'other_info': otherInfo}]
def getVideosFromChannelList(self, url, category, page, cItem):
    """List videos from a channel.

    Pages after the first arrive as JSON carrying 'load_more_widget_html'
    and 'content_html' fragments. Returns a list of item dicts ([] on
    exception).
    """
    printDBG('YouTubeParser.getVideosFromChannelList page[%s]' % (page))
    currList = []
    try:
        sts, data = self.cm.getPage(url, {'host': self.HOST})
        if sts:
            if '1' == page:
                sts, data = CParsingHelper.getDataBeetwenMarkers(
                    data, 'feed-item-container', 'footer-container', False)
            else:
                data = json_loads(data)
                data = data['load_more_widget_html'] + '\n' + data['content_html']
            # nextPage
            match = re.search('data-uix-load-more-href="([^"]+?)"', data)
            if not match:
                nextPage = ""
            else:
                # BUGFIX: decode the HTML-escaped ampersands; the original
                # replace('&', '&') was a no-op
                nextPage = match.group(1).replace('&amp;', '&')
            data = data.split('feed-item-container')
            currList = self.parseListBase(data)
            if '' != nextPage:
                item = dict(cItem)
                item.update({'title': _("Next page"),
                             'page': str(int(page) + 1),
                             'url': 'http://www.youtube.com' + nextPage})
                currList.append(item)
    except Exception:
        printExc()
        return []
    return currList
def fillSeriesCache(self, url):
    """Populate self.seriesCache (letter -> series list) and
    self.seriesLetters (letter entries, labelled with counts) from the
    A-Z listing page."""
    printDBG("AlltubeTV.fillSeriesCache")
    self.seriesCache = {}
    self.seriesLetters = []
    sts, data = self.getPage(url)
    if not sts:
        return
    data = CParsingHelper.getDataBeetwenMarkers(data, 'term-list clearfix">', '</ul>', False)[1]
    pattern = re.compile('<li[^>]*?data-letter="([^"]+)"[^>]*?>[^<]*?<a[^>]*?href="([^"]+?)"[^>]*?>([^<]+?)<')
    for letter, href, title in pattern.findall(data):
        if letter not in self.seriesCache:
            self.seriesCache[letter] = []
            self.seriesLetters.append({'title': letter, 'letter': letter})
        self.seriesCache[letter].append({'good_for_fav': True,
                                         'title': self.cleanHtmlStr(title),
                                         'url': self.getFullUrl(href)})
    # append the number of cached series to each letter's label
    for entry in self.seriesLetters:
        entry['title'] = entry['letter'] + ' [%d]' % len(self.seriesCache[entry['letter']])
def listRanking(self, cItem):
    """Add one video entry for every row of the ranking table."""
    printDBG("NocnySeansPL.listRanking")
    sts, data = self.cm.getPage(cItem['url'])
    if not sts:
        return
    data = CParsingHelper.getDataBeetwenMarkers(data, '<tbody>', '</tbody>', False)[1]
    rows = data.split('</tr>')
    if len(rows):
        del rows[-1]  # drop the trailing fragment after the last row
    for row in rows:
        url = self.cm.ph.getSearchGroups(row, 'href="([^"]+?)"')[0]
        icon = self.cm.ph.getSearchGroups(row, 'src="([^"]+?)"')[0]
        title = self.cm.ph.getDataBeetwenMarkers(row, '<strong>', '</strong>', False)[1]
        if '' == title:
            # fall back to the image alt attribute
            title = self.cm.ph.getSearchGroups(row, 'alt="([^"]+?)"')[0]
        desc = self.cm.ph.getDataBeetwenMarkers(row, '<p>', '</p>', False)[1]
        rank = self.cm.ph.getSearchGroups(row, '>([0-9.]+?)<')[0]
        params = dict(cItem)
        params.update({'title': self.cleanHtmlStr(title),
                       'url': self._getFullUrl(url),
                       'desc': rank + ', ' + self.cleanHtmlStr(desc),
                       'icon': self._getFullUrl(icon)})
        self.addVideo(params)
def getVideosFromPlaylist(self, url, category, page, cItem):
    """List videos from a YouTube playlist page.

    Appends a next-page entry when the load-more link is present. Returns
    whatever was collected before an exception, if one occurs.
    """
    printDBG('YouTubeParser.getVideosFromPlaylist')
    currList = []
    try:
        sts, data = self.cm.getPage(url, {'host': self.HOST})
        if sts:
            if '1' == page:
                sts, data = CParsingHelper.getDataBeetwenMarkers(
                    data, 'id="pl-video-list"', 'footer-container', False)
            else:
                # subsequent pages arrive JS/HTML-escaped
                data = unescapeHTML(data.decode('unicode-escape')).encode('utf-8').replace('\\/', '/')
            # nextPage
            match = re.search('data-uix-load-more-href="([^"]+?)"', data)
            if not match:
                nextPage = ""
            else:
                # BUGFIX: unescape '&amp;' in the next-page URL (the original
                # replace('&', '&') did nothing)
                nextPage = match.group(1).replace('&amp;', '&')
            itemsTab = data.split('<tr class="pl-video')
            currList = self.parseListBase(itemsTab)
            if '' != nextPage:
                item = dict(cItem)
                item.update({'title': 'Następna strona',
                             'page': str(int(page) + 1),
                             'url': 'http://www.youtube.com' + nextPage})
                currList.append(item)
    except Exception:
        printExc()
    return currList
def checkVersionFile(self, newVerPath):
    """Verify that the downloaded version file matches the version requested
    from the current server.

    Returns (code, msg); code 0 means the version is correct, -1 otherwise.
    """
    code = 0
    msg = 'Wersja poprawna.'
    newVerFile = os_path.join(newVerPath, 'version.py')
    if os_path.isfile(newVerFile):
        verPattern = self.VERSION_PATTERN
    else:
        # fall back to the compiled file; the version string is located
        # with a raw dotted-quad pattern
        newVerFile = os_path.join(newVerPath, 'version.pyo')
        verPattern = '([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)'
    try:
        # read the downloaded version file and extract the version number
        with open(newVerFile, "r") as verFile:
            data = verFile.read()
        newVerNum = CParsingHelper.getSearchGroups(data, verPattern)[0]
        requested = self.serversList[self.currServIdx]['version']
        if newVerNum != requested:
            code = -1
            msg = _("Wrong version. \n downloaded version [%s] is different from the requested [%s].") % (newVerNum, requested)
    except:
        printExc()
        code = -1
        msg = _("File [%s] reading failed.") % newVerFile
    return code, msg
def getTop100(self, baseUrl, mode):
    """Parse the TOP-100 listing (category sent via POST) and add a
    numbered video entry for every matched item."""
    printDBG("getTop100 for url[%s]" % baseUrl)
    post_data = {'kategoria': mode}
    sts, data = self.cm.getPage(baseUrl, {}, post_data)
    if not sts:
        printDBG("getTop100 problem")
        return
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="ew-top-100"', '</section></div>', True)
    if not sts:
        printDBG("getTop100 problem no data beetween markers")
        return
    items = data.split('<div class="ew-top-100"')
    if len(items) > 1:
        del items[0]  # drop everything before the first entry
    place = 1
    for item in items:
        # url & title (english and polish title parts joined)
        match = re.search('<a href="([^"]+?)" class="en">([^<]+?)</a>[^<]*?<a href="[^"]+?" class="pl">([^<]*?)</a>', item)
        if not match:
            continue
        url = self.MAINURL + match.group(1)
        title = str(place) + '. ' + match.group(2) + ' / ' + match.group(3)
        place += 1
        # img
        match = re.search('<img src="([^"]+?)"', item)
        img = match.group(1) if match else ''
        # plot
        match = re.search('<p[^>]*?>([^<]+?)</p>', item)
        plot = match.group(1).strip() if match else ''
        self.addVideo({'title': title, 'url': url, 'icon': img, 'plot': plot})
def __addLastVersion(self, servers):
    """Follow the gitorious redirect to the newest commit and append a
    matching source-code update-server descriptor to servers."""
    mainUrl = "https://gitorious.org/iptvplayer-for-e2/iptvplayer-for-e2"
    sts, response = self.cm.getPage(mainUrl, {'return_data': False})
    if not sts:
        return
    finalurl = response.geturl()
    printDBG("UpdateMainAppImpl.__addLastVersion finalurl[%s]" % finalurl)
    response.close()
    # the redirect target's last path component is the commit hash
    crcSum = finalurl.split('/')[-1].replace(':', '')
    if 40 != len(crcSum):
        printDBG("Wrong crcSum[%s]" % crcSum)
        return
    sts, data = self.cm.getPage(finalurl + "IPTVPlayer/version.py")
    if not sts:
        return
    newVerNum = CParsingHelper.getSearchGroups(data, '"([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"')[0]
    server = {'name': 'gitorious.org',
              'version': newVerNum,
              'url': mainUrl + "/archive/%s.tar.gz" % crcSum,
              'subdir': 'iptvplayer-for-e2-iptvplayer-for-e2/',
              'pyver': 'X.X',
              'packagetype': 'sourcecode'}
    printDBG("UpdateMainAppImpl.__addLastVersion server: [%s]" % str(server))
    servers.append(server)
def cleanHtmlStr(str):
    """Strip HTML markup from str and collapse all whitespace to single
    spaces; returns the cleaned plain text."""
    # keep tag boundaries from gluing adjacent words together
    str = str.replace('<', ' <')
    # BUGFIX: restore the non-breaking-space entity replacements that were
    # collapsed to literal spaces (the original replace(' ', ' ') calls were
    # no-ops)
    str = str.replace('&nbsp;', ' ')
    str = str.replace('&nbsp', ' ')
    str = clean_html(str)
    str = str.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    return CParsingHelper.removeDoubles(str, ' ').strip()
def loadSubtitles(self, filePath, encoding='utf-8', fps=0): printDBG("OpenSubOrg.loadSubtitles filePath[%s]" % filePath) # try load subtitles using C-library try: if IsSubtitlesParserExtensionCanBeUsed(): try: if fps <= 0: filename, file_extension = os_path.splitext(filePath) tmp = CParsingHelper.getSearchGroups( filename.upper() + '_', '_FPS([0-9.]+)_')[0] if '' != tmp: fps = float(tmp) except Exception: printExc() from Plugins.Extensions.IPTVPlayer.libs.iptvsubparser import _subparser as subparser with codecs.open(filePath, 'r', encoding, 'replace') as fp: subText = fp.read().encode('utf-8') # if in subtitles will be line {1}{1}f_fps # for example {1}{1}23.976 and we set microsecperframe = 0 # then microsecperframe will be calculated as follow: llroundf(1000000.f / f_fps) if fps > 0: microsecperframe = int(1000000.0 / fps) else: microsecperframe = 0 # calc end time if needed - optional, default True setEndTime = True # characters per second - optional, default 12, can not be set to 0 CPS = 12 # words per minute - optional, default 138, can not be set to 0 WPM = 138 # remove format tags, like <i> - optional, default True removeTags = True subsObj = subparser.parse(subText, microsecperframe, removeTags, setEndTime, CPS, WPM) if 'type' in subsObj: self.subAtoms = subsObj['list'] # Workaround start try: printDBG( 'Workaround for subtitles from Das Erste: %s' % self.subAtoms[0]['start']) if len(self.subAtoms ) and self.subAtoms[0]['start'] >= 36000000: for idx in range(len(self.subAtoms)): for key in ['start', 'end']: if key not in self.subAtoms[idx]: continue if self.subAtoms[idx][key] >= 36000000: self.subAtoms[idx][key] -= 36000000 except Exception: printExc() # workaround end self._fillPailsOfAtoms() return True else: return False except Exception: printExc() return self._loadSubtitles(filePath, encoding)
def getLastAdded(self, baseUrl, cat, sub_cat, mode, page):
    """List the 'last added' items for the given mode/page.

    Adds video entries (or sub-category directories when sub_cat is set)
    and a next-page directory when the response links rel="next".
    """
    printDBG("getLastAdded for url[%s] page[%s]" % (baseUrl, page))
    HTTP_HEADER = {'Host': 'alekino.tv',
                   'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0',
                   'Referer': self.MAINURL + '/',
                   'X-Requested-With': 'XMLHttpRequest'}
    # the first page is requested without the ?page= suffix
    if page == '1':
        strPage = ''
        postPage = '0'
    else:
        strPage = '?page=' + page
        postPage = page
    post_data = {'dostep': 'true', 'mode': mode, 'days': '0', 'page': postPage}
    sts, data = self.cm.getPage(baseUrl + strPage, {'header': HTTP_HEADER}, post_data)
    if False == sts:
        printDBG("getLastAdded problem")
        return
    # next page?
    nextPage = False
    if -1 != data.find('rel="next"'):
        nextPage = True
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, '<div style="padding-bottom:0px;">', '<div class="pagination-recent">', True)
    if False == sts:
        printDBG("getLastAdded problem no data beetween markers")
        return
    data = data.split('<div style="padding-bottom:0px;">')
    if len(data) > 1:
        # drop the fragment before the first item
        del data[0]
    for item in data:
        item = item.replace('<br/>', '')
        # url & title
        match = re.search('<a class="movie-title-hover" href="([^"]+?)"[^>]+?>([^<]+?)</a>', item)
        if match:
            url = self.MAINURL + match.group(1)
            title = match.group(2).replace('\n', '').replace('\r', '').strip()
        else:
            continue
        # img
        match = re.search('<img src="([^"]+?)"', item)
        if match:
            img = match.group(1)
        else:
            img = ''
        # plot
        match = re.search('<div class="clearfix"></div>([^<]+?)</div>', item)
        if match:
            plot = match.group(1)
        else:
            plot = ''
        params = {'title': title, 'url': url, 'icon': img, 'plot': plot}
        if sub_cat == '':
            self.addVideo(params)
        else:
            params['name'] = 'category'
            params['category'] = sub_cat
            self.addDir(params)
    # pagination
    if nextPage:
        params = {'name': 'category', 'category': cat, 'sub_cat': sub_cat, 'title': 'Następna strona', 'url': baseUrl, 'mode': mode, 'page': str(int(page) + 1)}
        self.addDir(params)
def getF4MLinksWithMeta(manifestUrl, checkExt=True, cookieParams={}, sortWithMaxBitrate=-1):
    """Parse an F4M/HDS manifest into playlist entries.

    manifestUrl        -- manifest URL (may carry meta via strwithmeta)
    checkExt           -- when True, require the URL path to end with '.f4m'
    cookieParams       -- extra request params merged into the header params
    sortWithMaxBitrate -- when > -1, sort entries via CSelOneLink using this
                          bitrate cap
    Returns a list of {'name', 'bitrate', 'url'} dicts ([] on failure).
    """
    if checkExt and not manifestUrl.split('?')[0].endswith('.f4m'):
        return []
    cm = common()
    headerParams, postData = cm.getParamsFromUrlWithMeta(manifestUrl)
    headerParams.update(cookieParams)
    retPlaylists = []
    sts, data = cm.getPage(manifestUrl, headerParams, postData)
    if sts:
        liveStreamDetected = False
        # BUGFIX: pass the manifest data to the marker search (the original
        # omitted it, so live streams were never detected)
        if 'live' == CParsingHelper.getDataBeetwenMarkers(data, '<streamType>', '</streamType>', False)[1]:
            liveStreamDetected = True
        # prefer explicit <media href=...> entries, resolved against <baseURL>
        tmp = cm.ph.getDataBeetwenMarkers(data, '<manifest', '</manifest>')[1]
        baseUrl = cm.ph.getDataBeetwenReMarkers(tmp, re.compile('<baseURL[^>]*?>'), re.compile('</baseURL>'), False)[1].strip()
        printDBG("|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| " + baseUrl)
        if baseUrl == '':
            baseUrl = manifestUrl
        tmp = cm.ph.getAllItemsBeetwenMarkers(tmp, '<media', '>')
        for item in tmp:
            link = cm.ph.getSearchGroups(item, '''href=['"]([^'^"]+)['"]''')[0]
            if link != '':
                link = urljoin(baseUrl, link)
                if cm.isValidUrl(link):
                    try:
                        bitrate = int(cm.ph.getSearchGroups(item, '''bitrate=['"]([^'^"]+)['"]''')[0])
                    except Exception:
                        bitrate = 0
                    retPlaylists.append({'name': '[f4m/hds] bitrate[%s]' % bitrate, 'bitrate': bitrate, 'url': link})
        # fall back to bare bitrate attributes from the raw manifest
        if 0 == len(retPlaylists):
            bitrates = re.compile('bitrate="([0-9]+?)"').findall(data)
            for item in bitrates:
                link = strwithmeta(manifestUrl, {'iptv_proto': 'f4m', 'iptv_bitrate': item})
                if liveStreamDetected:
                    link.meta['iptv_livestream'] = True
                try:
                    bitrate = int(item)
                except Exception:
                    bitrate = 0
                retPlaylists.append({'name': '[f4m/hds] bitrate[%s]' % item, 'bitrate': bitrate, 'url': link})
        # last resort: single entry pointing at the manifest itself
        if 0 == len(retPlaylists):
            link = strwithmeta(manifestUrl, {'iptv_proto': 'f4m'})
            if liveStreamDetected:
                link.meta['iptv_livestream'] = True
            retPlaylists.append({'name': '[f4m/hds]', 'bitrate': 0, 'url': link})
    if sortWithMaxBitrate > -1:
        def __getLinkQuality(itemLink):
            # bitrate is already an int, but guard against bad values
            try:
                return int(itemLink['bitrate'])
            except Exception:
                printExc()
                return 0
        retPlaylists = CSelOneLink(retPlaylists, __getLinkQuality, sortWithMaxBitrate).getSortedLinks()
    return retPlaylists
def listSerialsByLetter(self, category, url):
    """List serials from the by-letter section of the page."""
    printDBG("listSerialsByLetter")
    sts, data = self.cm.getPage(url)
    if not sts:
        return
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, 'Seriale na liter', '<div class="right">', False)
    # each serial sits in its own <li> element
    self.listItems(data.split('</li>'), category)
def getMenuHTML(self):
    """Return the cached navigation HTML, refetching it when a refresh was
    requested or the cache is empty."""
    printDBG("getMenuHTML start")
    needFetch = (True == self.refresh) or ('' == self.menuHTML)
    if needFetch:
        self.menuHTML = ''
        sts, pageData = self.cm.getPage(self.MAIN_URL)
        if sts:
            self.menuHTML = CParsingHelper.getDataBeetwenMarkers(pageData, '<div class="nav-collapse collapse">', '<!--/.nav-collapse -->', False)[1]
    return self.menuHTML
def getMovieTab(self, url):
    """Fill currList with the videos found on a listing page and, when an
    absolute pagination link exists, append a next-page entry."""
    FightTube.printDBG('getMovieTab start')
    try:
        data = self.cm.getURLRequestData({'url': url, 'return_data': True})
    except:
        FightTube.printDBG('getMovieTab exception')
        return
    # locate the pagination link, if any
    # NOTE(review): the literal '>' inside the pattern below may originally
    # have been '&gt;' — verify against the live site markup
    nexPageUrl = ''
    sts, tmp = CParsingHelper.getDataBeetwenMarkers(data, "<nav class='pagination'>", "</nav>", False)
    if sts:
        match = re.search("<li><a href='([^']+?)'>></a></li>", tmp)
        if match:
            nexPageUrl = match.group(1)
    # cut out the video listing
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, "<ul class='videos-listing'>", "</ul>", False)
    if not sts:
        printDBG('getMovieTab: main markers cannot be found!')
        return
    # one video per </li>-separated chunk
    for vidItem in data.split('</li>'):
        url = ''
        title = ''
        icon = ''
        match = re.search("<a href='([^']+?)'", vidItem)
        if match:
            url = match.group(1)
        match = re.search("<img src='([^']+?)' alt='([^']+?)'", vidItem)
        if match:
            icon = match.group(1)
            title = match.group(2)
        if '' != url and '' != title:
            self.currList.append({'type': 'video', 'title': title, 'page': url, 'icon': icon})
    if nexPageUrl.startswith("http://"):
        self.currList.append({'type': 'category', 'name': 'nextpage', 'title': 'Następna strona', 'page': nexPageUrl, 'icon': ''})
    return
def listSerialsLastUpdated(self, category):
    """List recently updated serials; each raw item chunk doubles as its
    own plot text."""
    printDBG("listSerialsLastUpdated")
    sts, data = self.cm.getPage(self.SERIALS_URL)
    if not sts:
        return
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, 'Ostatnio zaktualizowane seriale', '<div class="right">', False)
    items = data.split('</li>')
    # identity plot extractor: the item text itself is the plot
    self.listItems(items, category, None, lambda item: item, False)
def listSerialsBack(self, category, marker1, marker2):
    """List serials from the section delimited by marker1/marker2; the plot
    is pulled from each item's tooltip table cell."""
    sts, data = self.cm.getPage(self.SERIALS_URL)
    if not sts:
        return
    # unescape the inline-JS markup before splitting on the tooltip anchor
    data = CParsingHelper.getDataBeetwenMarkers(data, marker1, marker2, False)[1].replace("\\'", '"').replace('\\', '')
    items = data.split('<a onmouseover="toolTip(')

    def getPlot(item):
        # plot text sits between the poster cell and the next div
        return CParsingHelper.getDataBeetwenMarkers(item, 'width="100"></td><td>', '<div', False)[1]

    self.listItems(items, category, None, getPlot)
def getVideoLinks(self, url):
    """Return [{'name', 'url'}] entries parsed from the page's playlist
    (None when the page cannot be fetched)."""
    printDBG('getVideoLink url[%s]' % url)
    urlItems = []
    sts, data = self.cm.getPage(url)
    if not sts:
        return
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, 'playlist: [', ']', True)
    for channel, fileName in re.findall("'http://tvproart.pl/tvonline/([^/]+?)/([^']+?\.mp4)'", data):
        urlItems.append({'name': channel, 'url': self.MAI_URL + channel + '/' + fileName})
    return urlItems
def listsMainMenu(self):
    """Build the main menu from the page's menulist section."""
    printDBG("listsMainMenu")
    sts, data = self.cm.getPage(self.MAI_URL)
    if not sts:
        return
    sts, data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="menulist"', '<a href="tvonline">', True)
    for href, title in re.findall('<a class="menuitemtv" href="([^"]+?)">([^<]+?)</a>', data):
        self.addDir({'name': 'category', 'title': title, 'url': self.MAI_URL + href})
def getVideoUrl(self, url):
    """Resolve the direct video URL(s) for an AnyFiles page.

    Returns a list of {'name', 'url'} dicts ([] on any failure); delegates
    YouTube-hosted sources to getYTVideoUrl.
    """
    # show adult content
    # self.cm.addCookieItem(COOKIEFILE, {'name': 'AnyF18', 'value': 'mam18', 'domain': 'video.anyfiles.pl'}, False)
    if not self.isLogged():
        self.tryTologin()
    # GET VIDEO ID: last path component, or the leading digits before a comma
    u = url.split('/')
    vidID = u[-1]
    match = re.search('([0-9]+?)\,', url)
    if match:
        vidID = match.group(1)
    # get COOKIE: fetch the video page first to obtain the session cookie
    url = self.MAINURL + '/videos.jsp?id=' + vidID
    sts, data = self.cm.getPage(url, self.defaultParams)
    if not sts:
        return []
    fUrl = self.MAINURL + "/w.jsp?id=%s&width=620&height=349&pos=0&skin=0" % vidID
    COOKIE_JSESSIONID = self.cm.getCookieItem(self.COOKIEFILE, 'JSESSIONID')
    HEADER = {'Referer': url,
              'Cookie': 'JSESSIONID=' + COOKIE_JSESSIONID + ';',
              'User-Agent': "Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; androVM for VirtualBox ('Tablet' version with phone caps) Build/JRO03S) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30"}
    request_param = {'header': HEADER}
    sts, data = self.cm.getPage(fUrl, request_param)
    if not sts:
        return []
    HEADER['Referer'] = fUrl
    config = CParsingHelper.getSearchGroups(data, 'src="/?(pcs\?code=[^"]+?)"', 1)[0]
    if '' != config:
        sts, data = self.cm.getPage(self.MAINURL + '/' + config, {'header': HEADER})
        if sts:
            # example payload (escaped quotes in JS vars):
            # var source = "<source src=\"http://50.7.220.66/video/.../..._147325.mp4\" type=\"video/mp4\" />";
            # var track = "<track label=\"izombie.112...\" srclang=\"pl\" kind=\"captions\" src=\"http://video.anyfiles.pl/subtit/1433508336949.srt\"></track>\n";
            data = data.replace('\\"', '"')
            # printDBG(data)
            difSourcesSrc = CParsingHelper.getSearchGroups(data, '''difSourcesSrc[^=]*?=[^"']*?["'](http[^'^"]+?)['"]''', 1)[0]
            url = CParsingHelper.getSearchGroups(data, '''<source[^>]+?src=["'](http[^'^"]+?)['"]''', 1)[0]
            # subtitles track URL is extracted but currently unused
            subUrl = CParsingHelper.getSearchGroups(data, '''<track[^>]+?src=["'](http[^'^"]+?)['"]''', 1)[0]
            if 'youtube' in difSourcesSrc:
                return self.getYTVideoUrl(difSourcesSrc)
            else:
                return [{'name': 'AnyFiles.pl', 'url': url}]
    return []
def listMovies(self, cItem):
    """List movies (optionally paginated/sorted); adds video entries and a
    next-page directory when more results are available."""
    printDBG("Filmy3dEU.listMovies")
    page = cItem.get('page', 1)
    url = cItem['url']
    if page > 1 and '?' not in url:
        if url.endswith('/'):
            url = url[:-1]
        url += '/page/%s/' % page
    post_data = cItem.get('post_data', {})
    if {} == post_data:
        sts, data = self.cm.getPage(url, {}, self._addSortData(cItem.get('sort_type', '')))
    else:
        # NOTE(review): this branch requests cItem['url'] (not the paginated
        # url above) — confirm whether POST listings paginate via post_data
        sts, data = self.cm.getPage(cItem['url'], {'raw_post_data': True, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIE_FILE}, post_data)
    if not sts:
        return
    data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="short-film">', '<div class="gf-right">', False)[1]
    data = data.split('<div class="short-film">')
    if len(data) > 0 and '<span class="pnext">Poprzednia</span></a>' in data[-1]:
        nextPage = True
    else:
        nextPage = False
    for item in data:
        tmp = item.split('<h5')[-1]
        url = self.cm.ph.getSearchGroups(tmp, 'href="([^"]+?)"')[0]
        icon = self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"')[0]
        title = CParsingHelper.getDataBeetwenMarkers(tmp, '>', '</h5>', False)[1]
        if '' == title:
            # BUGFIX: the original computed this alt-attribute fallback but
            # discarded the result instead of assigning it to title
            title = self.cm.ph.getSearchGroups(item, 'alt="([^"]+?)"')[0]
        desc = _("Rating") + ': {0}/100, '.format(self.cm.ph.getSearchGroups(item, 'width\:([0-9]+?)\%')[0])
        desc += CParsingHelper.getDataBeetwenMarkers(item, '<p class="text">', '</p>', False)[1]
        if '' != url and '' != title:
            params = dict(cItem)
            params.update({'title': self.cleanHtmlStr(title),
                           'url': self._getFullUrl(url),
                           'desc': self.cleanHtmlStr(desc),
                           'icon': self._getFullUrl(icon)})
            self.addVideo(params)
    if nextPage:
        params = dict(cItem)
        params.update({'title': _('Next page'), 'page': page + 1})
        self.addDir(params)
def getFilmTab(self, url, category, pager):
    """Add one video per movie item on the listing page, plus a next-page
    entry when a rel="next" link is present."""
    sts, data = self.cm.getPage(url, {'header': self.HEADER})
    if not sts:
        return
    # NOTE(review): the '»' below looks like a mis-decoded '»'/'&raquo;',
    # and the '?' in '/filmy?' is a regex metacharacter — verify both
    # against the live markup before changing this pattern
    nextPage = re.search('<li><a href="/filmy?.+?" rel="next">»</a></li>', data)
    data = CParsingHelper.getDataBeetwenMarkers(data, '<div class="row-fluid movie-item">', '<div class="container">', False)[1]
    items = data.split('<div class="row-fluid movie-item">')
    titleA = re.compile('<a class="title"[^>]+?>')
    titleB = re.compile('</small>')
    plotA = re.compile('<p class="desc">')
    plotB = re.compile('</div>')
    for item in items:
        title = CParsingHelper.getDataBeetwenReMarkers(item, titleA, titleB, False)[1]
        page = self.MAINURL + CParsingHelper.getSearchGroups(item, 'class="title" href="([^"]+?)"', 1)[0]
        plot = CParsingHelper.getDataBeetwenReMarkers(item, plotA, plotB, False)[1]
        img = CParsingHelper.getSearchGroups(item, 'src="([^"]+?)"', 1)[0]
        if '' != title and '' != page:
            self.addVideo({'title': title, 'page': page, 'icon': img, 'plot': plot})
    if nextPage:
        self.addDir({'name': 'nextpage', 'category': category, 'title': 'Następna strona', 'page': str(int(pager) + 1)})