def getProgrammes(self, channelName, epgDate):
    """Fetch the EPG programme list for *channelName* on *epgDate*.

    NOTE(review): a second ``getProgrammes`` is defined later in this
    class and shadows this one, so this definition is effectively dead
    code — confirm before relying on it.

    :param channelName: channel display name (may contain spaces/accents)
    :param epgDate: date string in ``dd-mm-yyyy`` format
    :return: list of programme dicts for that channel and date
    :raises KeyError: if the response lacks the expected structure
    """
    # The API tag is the name without spaces and without accents.
    channelTag = channelName.replace(" ", "")
    # BUGFIX: encode() returns bytes on Python 3, which made the
    # str.replace() below raise TypeError — decode back to str.
    # (On Python 2 this yields unicode, which replace() also accepts.)
    channelTag = unicodedata.normalize('NFD', channelTag).encode(
        'ascii', 'ignore').decode('ascii')
    url = self.palinsestoUrl
    url = url.replace("[nomeCanale]", channelTag)
    url = url.replace("[dd-mm-yyyy]", epgDate)
    response = json.loads(utils.checkStr(urllib2.urlopen(url).read()))
    return response[channelName][0]["palinsesto"][0]["programmi"]
def getVideoMetadata(self, pathId):
    """Fetch the video metadata JSON for a content path.

    :param pathId: content path id, possibly ending in ``.html``
    :return: the ``"video"`` dict from the JSON document, or ``[]`` on
             any fetch/parse failure (best-effort; callers treat the
             empty list as "no metadata")
    """
    url = self.getUrl(pathId)
    # The HTML page has a JSON twin at the same path.
    if url.endswith(".html"):
        url = url.replace(".html", ".json")
    try:
        response = json.loads(utils.checkStr(urllib2.urlopen(url).read()))
        return response["video"]
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are no longer swallowed; network/JSON/key errors still fall
        # back to the original empty-list sentinel.
        return []
def getProgrammesHtml(self, channelName, epgDate):
    """Return the raw HTML schedule page for a channel and date.

    :param channelName: channel display name (spaces become dashes)
    :param epgDate: date string in ``dd-mm-yyyy`` format
    :return: page body as a string, or ``''`` on HTTP error
    """
    slug = channelName.replace(" ", "-").lower()
    url = self.palinsestoUrlHtml \
        .replace("[idCanale]", slug) \
        .replace("[dd-mm-yyyy]", epgDate)
    try:
        return utils.checkStr(urllib2.urlopen(url).read())
    except urllib2.HTTPError:
        return ''
def getProgrammes(self, channelName, epgDate):
    """Fetch the EPG programme list for *channelName* on *epgDate*.

    :param channelName: channel display name (spaces are stripped for the URL)
    :param epgDate: date string in ``dd-mm-yyyy`` format
    :return: list of programme dicts, or ``None`` when the response does
             not contain the expected structure (original failure value)
    """
    channelTag = channelName.replace(" ", "")
    url = self.palinsestoUrl
    url = url.replace("[nomeCanale]", channelTag)
    url = url.replace("[dd-mm-yyyy]", epgDate)
    response = json.loads(utils.checkStr(urllib2.urlopen(url).read()))
    try:
        # The original looked up an '' key when channelName was missing
        # and relied on a bare except; index directly and catch only the
        # lookup errors instead.
        return response[channelName][0]["palinsesto"][0]["programmi"]
    except (KeyError, IndexError, TypeError):
        return None
def getMostVisited(self, tags, days=7, numContents=16):
    """Return the most-visited contents for the given tags.

    :param tags: tag string to filter by (URL-quoted here)
    :param days: look-back window in days
    :param numContents: maximum number of records to request
    :return: the ``"list"`` entry of the statistics JSON response
    """
    try:
        tags = urllib.quote(tags)          # Python 2
    except:
        tags = urllib.parse.quote(tags)    # Python 3
    query = (
        "action=mostVisited&days=%s&state=1&records=%s&tags=%s&domain=%s&xsl=%s"
        % (str(days), str(numContents), tags,
           "RaiTv", "rai_tv-statistiche-raiplay-json")
    )
    url = "%s/StatisticheProxy/proxyPost.jsp?%s" % (self.baseUrl, query)
    body = utils.checkStr(urllib2.urlopen(url).read())
    return json.loads(body)["list"]
def getLastContentByTag(self, tags="", numContents=16):
    """Return the latest contents published under the given tags.

    :param tags: tag string to filter by (URL-quoted here)
    :param numContents: maximum number of records to request
    :return: the ``"list"`` entry of the statistics JSON response
    """
    try:
        tags = urllib.quote(tags)          # Python 2
    except:
        tags = urllib.parse.quote(tags)    # Python 3
    query = (
        "action=getLastContentByTag&numContents=%s&tags=%s&domain=%s&xsl=%s"
        % (str(numContents), tags,
           "RaiTv", "rai_tv-statistiche-raiplay-json")
    )
    url = "%s/StatisticheProxy/proxyPost.jsp?%s" % (self.baseUrl, query)
    body = utils.checkStr(urllib2.urlopen(url).read())
    return json.loads(body)["list"]
def getIndexFromJSON(self, pathId):
    """Return the sorted index keys that have at least one entry.

    :param pathId: content path id resolved via :meth:`getUrl`
    :return: sorted list of keys under ``"contents"`` whose value is
             non-empty, or ``[]`` on any fetch/parse failure
    """
    url = self.getUrl(pathId)
    try:
        response = json.loads(utils.checkStr(urllib2.urlopen(url).read()))
        contents = response["contents"]
        # Keep only index letters/sections that actually have entries;
        # sorted() replaces the original append-then-sort loop.
        return sorted(k for k in contents if contents[k])
    except Exception:
        # Narrowed from a bare except; best-effort empty index preserved.
        return []
def get_raisport_items(params):
    """Build the Rai Sport submenu from the serialized key list in *params*.

    :param params: dict of plugin URL parameters; expects ``'dominio'``
                   and ``'sub_keys'`` (a Python list literal serialized
                   to a string, e.g. ``"['Calcio|...']"``)
    :raises IndexError: if the decoded key list is empty (as original)
    """
    import ast

    dominio = params.get('dominio', '')
    # SECURITY FIX: the original called eval() on a plugin parameter,
    # which executes arbitrary code; literal_eval only parses Python
    # literals and is safe on untrusted input.
    sub_keys = ast.literal_eval(params.get("sub_keys", "[]"))
    xbmc.log("Build Rai Sport menu of item %s " % sub_keys[0])
    for i, key in enumerate(sub_keys):
        title = key.split("|")[0]
        title = utils.checkStr(HTMLParser.HTMLParser().unescape(title))
        if i == 0:
            # First entry is the "everything about <topic>" item.
            title = "Tutto su " + title
        liStyle = xbmcgui.ListItem(title)
        addDirectoryItem(
            {"mode": "raisport_subitem", 'dominio': dominio, 'key': key},
            liStyle)
    xbmcplugin.addSortMethod(handle, xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=handle, succeeded=True)
def getHomePage(self):
    """Return the ``"contents"`` section of the RaiPlay home index."""
    body = utils.checkStr(
        urllib2.urlopen(self.baseUrl + 'index.json').read())
    return json.loads(body)["contents"]
def getOnAir(self):
    """Return the ``"on_air"`` section of the on-air feed."""
    body = utils.checkStr(urllib2.urlopen(self.onAirUrl).read())
    return json.loads(body)["on_air"]
def getChannels(self):
    """Return the ``"dirette"`` (live channels) section of the channels feed."""
    body = utils.checkStr(urllib2.urlopen(self.channelsUrl).read())
    return json.loads(body)["dirette"]
def getCountry(self):
    """Return the geo-localization response body.

    Best-effort: any failure yields the ``"ERROR"`` sentinel string,
    matching what callers expect (bare except kept deliberately).
    """
    try:
        return utils.checkStr(urllib2.urlopen(self.localizeUrl).read())
    except:
        return "ERROR"
def getMainMenu(self):
    """Return the ``"menu"`` section of the main-menu feed."""
    body = utils.checkStr(urllib2.urlopen(self.menuUrl).read())
    return json.loads(body)["menu"]
def getRaiSportVideos(self, key, domain, page):
    """Query the Rai Sport archive search endpoint and list video items.

    :param key: value for the ``tematica`` (topic) filter
    :param domain: value for the ``dominio`` filter
    :param page: result offset (numeric string or int)
    :return: list of plugin-param dicts (one per video, plus an optional
             "next page" entry), or ``[]`` on a non-200 HTTP response
    """
    videos = []
    # Headers mimic the site's own XHR search call.
    header = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Content-Type': 'application/json; charset=UTF-8',
        'Origin': 'https://www.raisport.rai.it',
        'Referer': 'https://www.raisport.rai.it/archivio.html',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
    }
    page = int(page)
    pageSize = 50
    payload = {
        "page": page,
        "pageSize": pageSize,
        "filters": {
            "tematica": [key],
            "dominio": domain
        }
    }
    postData = json.dumps(payload)
    try:
        # Python 2 accepts a str body directly...
        req = urllib2.Request(self.RaiSportSearchUrl, postData, header)
        response = urllib2.urlopen(req)
    except TypeError:
        # ...Python 3 requires bytes, so retry with an encoded body.
        req = urllib2.Request(self.RaiSportSearchUrl,
                              postData.encode('utf-8'), header)
        response = urllib2.urlopen(req)
    if response.code != 200:
        return []
    data = utils.checkStr(response.read())
    j = json.loads(data)
    # Elasticsearch-style envelope: j['hits']['hits'] holds the records.
    if 'hits' in j:
        h = j['hits']
        if 'hits' in h:
            for hh in h['hits']:
                if '_source' in hh:
                    news_type = hh['_source']['tipo']
                    # Only 'Video' records with media info become entries.
                    if news_type == 'Video' and 'media' in hh['_source']:
                        relinker_url = hh['_source']['media']['mediapolis']
                        if 'durata' in hh['_source']['media']:
                            # Duration comes as "HH:MM:SS"; fold to seconds.
                            d = hh['_source']['media']['durata'].split(":")
                            duration = int(d[0])*3600 + int(d[1])*60 + int(d[2])
                        else:
                            duration = 0
                        icon = self.RaiSportMainUrl + hh['_source']['immagini']['default']
                        title = hh['_source']['titolo']
                        creation_date = hh['_source']['data_creazione']
                        if 'sommario' in hh['_source']:
                            desc = creation_date + '\n' + hh['_source']['sommario']
                        else:
                            desc = creation_date
                        params = {'mode': 'raisport_video',
                                  'title': title,
                                  'url': relinker_url,
                                  'icon': icon,
                                  'duration': duration,
                                  'aired': creation_date,
                                  'desc': desc}
                        videos.append(params)
            # NOTE(review): 'page' appears to act as a record offset that
            # advances by pageSize, so this compares total hits against
            # the next offset — confirm against the endpoint's paging
            # semantics before changing.
            if h['total'] > (page + pageSize):
                page += pageSize
                # Localized "next page" entry re-enters this listing.
                params = {'mode': 'raisport_subitem',
                          'title': xbmc.getLocalizedString(33078),
                          'page': page}
                videos.append(params)
    return videos
def getURL(self, url):
    """Resolve a Rai relinker URL into a playable stream description.

    Rewrites the relinker query to ``output=56`` (XML stream data),
    fetches it, and scrapes the stream URL, content type and Widevine
    license key out of the XML.

    :param url: relinker URL to resolve
    :return: dict with keys ``'url'``, ``'ct'``, ``'key'`` (empty
             strings when a piece is missing); on HTTPError a dict with
             keys ``'url'``, ``'type'``, ``'key'`` — NOTE(review): the
             error branch uses 'type' where the success branches use
             'ct'; looks like an inconsistency, confirm with callers.
    """
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
    qs = urlparse.parse_qs(query)
    # Relinker 'output' values observed:
    # output=20 url in body
    # output=23 HTTP 302 redirect
    # output=25 url and other parameters in body, space separated
    # output=44 XML (not well formatted) in body
    # output=47 json in body
    # pl=native,flash,silverlight
    # A stream will be returned depending on the UA (and pl parameter?)
    if "output" in qs:
        del (qs['output'])
    #qs['output'] = "20"  # only url
    qs['output'] = "56"  # xml stream data
    query = urlencode(qs, True)
    url = urlparse.urlunparse(
        (scheme, netloc, path, params, query, fragment))
    try:
        response = utils.checkStr(urllib2.urlopen(url).read())
        #mediaUrl = response.strip()
        #xbmc.log(response)
        # The stream URL is wrapped in <url type="content">...</url>.
        content = re.findall("<url type=\"content\">(.*?)</url>", response)
        if content:
            # ...and inside that, in a CDATA section.
            url = re.findall("<!\[CDATA\[(.*?)\]\]>", content[0])
            if url:
                # <ct> carries the content/stream type.
                ct = re.findall("<ct>(.*?)</ct>", response)
                if ct:
                    # <license_url> holds a CDATA-wrapped JSON document
                    # listing per-DRM license URLs (WIDEVINE, PLAYREADY).
                    licenseUrl = re.findall(
                        "<license_url>(.*?)</license_url>", response)
                    if licenseUrl:
                        #xbmc.log(licenseUrl[0])
                        licenseJson = re.findall("<!\[CDATA\[(.*?)\]\]>",
                                                 licenseUrl[0])
                        if licenseJson:
                            #xbmc.log(licenseJson[0])
                            try:
                                licenseJson = json.loads(licenseJson[0])
                                xbmc.log(str(licenseJson))
                                licenseData = licenseJson.get(
                                    'drmLicenseUrlValues', [])
                                key = ''
                                # Prefer the WIDEVINE license URL.
                                for l in licenseData:
                                    if "WIDEVINE" in l.get("drm", ""):
                                        key = l.get("licenceUrl", '')
                                return {
                                    'url': url[0],
                                    'ct': ct[0],
                                    'key': key
                                }
                            except:
                                # Malformed license JSON: play without key.
                                return {
                                    'url': url[0],
                                    'ct': ct[0],
                                    'key': ''
                                }
                    else:
                        # No DRM block: clear stream.
                        # NOTE(review): the source's indentation was lost;
                        # this else was reconstructed as pairing with
                        # 'if licenseUrl' — confirm against upstream.
                        return {'url': url[0], 'ct': ct[0], 'key': ''}
                else:
                    return {'url': url[0], 'ct': '', 'key': ''}
            else:
                return {'url': '', 'ct': '', 'key': ''}
        else:
            return {'url': '', 'ct': '', 'key': ''}
        # Workaround to normalize URL if the relinker doesn't
        # NOTE(review): unreachable — every branch above returns — and
        # 'mediaUrl' is never assigned (its assignment is commented out),
        # so this would raise NameError if ever reached.
        try:
            mediaUrl = urllib.quote(mediaUrl, safe="%/:=&?~#+!$,;'@()*[]")
        except:
            mediaUrl = urllib.parse.quote(mediaUrl, safe="%/:=&?~#+!$,;'@()*[]")
        return mediaUrl
    except HTTPError:
        return {'url': '', 'type': '', 'key': ''}
def getAudioMetadata(self, pathId):
    """Return the ``"audio"`` section of the JSON document for *pathId*."""
    url = self.getUrl(pathId)
    body = utils.checkStr(urllib2.urlopen(url).read())
    return json.loads(body)["audio"]