def getFlashVars():
    """Collect the request values needed to start a stream.

    Logs in, reads customer data from the notifier-resources endpoint and
    fetches a player token via the JSONP token endpoint.

    Returns:
        dict of request values on success, False on any failure (a
        user-visible notification is shown before returning False).
    """
    cookie = common.mechanizeLogin()
    if not cookie:
        return False
    url = common.BASE_URL + '/gp/deal/ajax/getNotifierResources.html'
    showpage = json.loads(common.getURL(url, useCookie=cookie))
    if not showpage:
        Dialog.notification(common.__plugin__,
                            Error({'errorCode': 'invalidrequest', 'message': 'getFlashVars'}),
                            xbmcgui.NOTIFICATION_ERROR)
        return False
    values = {'asin': common.args.asin,
              'deviceTypeID': 'AOAGZA014O5RE',
              'userAgent': common.UserAgent}
    values.update(showpage['resourceData']['GBCustomerData'])
    if 'customerId' not in values:
        Dialog.notification(common.getString(30200), common.getString(30210), xbmcgui.NOTIFICATION_ERROR)
        return False
    values['deviceID'] = common.gen_id()
    # The token endpoint is JSONP; randomise the callback name like the web
    # player does.
    rand = 'onWebToken_' + str(random.randint(0, 484))
    pltoken = common.getURL(common.BASE_URL + "/gp/video/streaming/player-token.json?callback=" + rand,
                            useCookie=cookie)
    try:
        values['token'] = re.compile('"([^"]*).*"([^"]*)"').findall(pltoken)[0][1]
    except (IndexError, TypeError):
        # was a bare 'except:'; narrowed so programming errors are not swallowed
        Dialog.notification(common.getString(30200), common.getString(30201), xbmcgui.NOTIFICATION_ERROR)
        return False
    return values
def getTVDBImages(title, imdb=None, id=None, seasons=False):
    """Resolve TVDB artwork for a series.

    Searches TheTVDB by title (progressively shortening it at ' - ', ': '
    or ', ' when nothing is found) unless an id is supplied.

    Returns:
        seasons=True  -> ({season_number: banner_url}, posterurl, fanarturl)
        seasons=False -> (id, posterurl, fanarturl)
        (None, None, None) when no series id could be resolved.
    'imdb' is accepted but unused here.
    """
    posterurl = fanarturl = None
    splitter = [" - ", ": ", ", "]
    langcodes = ["de", "en"]
    TVDB_URL = "http://www.thetvdb.com/banners/"
    while not id:
        tv = urllib.quote_plus(title)
        result = common.getURL("http://www.thetvdb.com/api/GetSeries.php?seriesname=%s&language=de" % (tv), silent=True)
        soup = BeautifulSoup(result)
        id = soup.find("seriesid")
        if id:
            id = id.string
        else:
            # retry with the title shortened at the first matching separator
            oldtitle = title
            for splitchar in splitter:
                if title.count(splitchar):
                    title = title.split(splitchar)[0]
                    break
            if title == oldtitle:
                break
    if not id:
        return None, None, None
    if seasons:
        soup = BeautifulSoup(
            common.getURL("http://www.thetvdb.com/api/%s/series/%s/banners.xml" % (common.tvdb, id), silent=True)
        )
        seasons = {}
        for lang in langcodes:
            for datalang in soup.findAll("language"):
                if datalang.string == lang:
                    data = datalang.parent
                    if data.bannertype.string == "fanart" and not fanarturl:
                        fanarturl = TVDB_URL + data.bannerpath.string
                    if data.bannertype.string == "poster" and not posterurl:
                        posterurl = TVDB_URL + data.bannerpath.string
                    if data.bannertype.string == data.bannertype2.string == "season":
                        snr = data.season.string
                        # FIX: dict.has_key() is deprecated; use 'in'
                        if snr not in seasons:
                            seasons[snr] = TVDB_URL + data.bannerpath.string
        return seasons, posterurl, fanarturl
    else:
        for lang in langcodes:
            result = common.getURL(
                "http://www.thetvdb.com/api/%s/series/%s/%s.xml" % (common.tvdb, id, lang), silent=True
            )
            soup = BeautifulSoup(result)
            fanart = soup.find("fanart")
            poster = soup.find("poster")
            # FIX: find() returns None on a miss; len(None) raised TypeError.
            # Truthiness covers both the None and the empty-tag cases.
            if fanart and not fanarturl:
                fanarturl = TVDB_URL + fanart.string
            if poster and not posterurl:
                posterurl = TVDB_URL + poster.string
            if posterurl and fanarturl:
                return id, posterurl, fanarturl
    return id, posterurl, fanarturl
def getTVDBImages(title, imdb=None, tvdb_id=None, seasons=False):
    """Resolve TVDB artwork for a series.

    Searches TheTVDB by title (progressively shortening it at ' - ', ': '
    or ', ' when nothing is found) unless tvdb_id is supplied.

    Returns:
        seasons=True  -> ({season_number: banner_url}, posterurl, fanarturl)
        seasons=False -> (tvdb_id, posterurl, fanarturl)
        (None, None, None) when no series id could be resolved.
    'imdb' is accepted but unused here.
    """
    posterurl = fanarturl = None
    # title separators tried in order when a search yields no hit
    splitter = [' - ', ': ', ', ']
    # preferred artwork languages; first match wins
    langcodes = ['de', 'en']
    TVDB_URL = 'http://www.thetvdb.com/banners/'
    while not tvdb_id:
        tv = urllib.quote_plus(title)
        result = common.getURL('http://www.thetvdb.com/api/GetSeries.php?seriesname=%s&language=de' % (tv), silent=True)
        soup = BeautifulSoup(result)
        tvdb_id = soup.find('seriesid')
        if tvdb_id:
            tvdb_id = tvdb_id.string
        else:
            # retry with the title shortened at the first matching separator
            oldtitle = title
            for splitchar in splitter:
                if title.count(splitchar):
                    title = title.split(splitchar)[0]
                    break
            if title == oldtitle:
                # nothing left to strip; give up
                break
    if not tvdb_id:
        return None, None, None
    if seasons:
        soup = BeautifulSoup(common.getURL('http://www.thetvdb.com/api/%s/series/%s/banners.xml' % (common.tvdb, tvdb_id), silent=True))
        # the 'seasons' flag is reused as the result dict from here on
        seasons = {}
        for lang in langcodes:
            for datalang in soup.findAll('language'):
                if datalang.string == lang:
                    data = datalang.parent
                    if data.bannertype.string == 'fanart' and not fanarturl:
                        fanarturl = TVDB_URL + data.bannerpath.string
                    if data.bannertype.string == 'poster' and not posterurl:
                        posterurl = TVDB_URL + data.bannerpath.string
                    if data.bannertype.string == data.bannertype2.string == 'season':
                        snr = data.season.string
                        # first (preferred-language) banner per season wins
                        if snr not in seasons:
                            seasons[snr] = TVDB_URL + data.bannerpath.string
        return seasons, posterurl, fanarturl
    else:
        for lang in langcodes:
            result = common.getURL('http://www.thetvdb.com/api/%s/series/%s/%s.xml' % (common.tvdb, tvdb_id, lang), silent=True)
            soup = BeautifulSoup(result)
            fanart = soup.find('fanart')
            poster = soup.find('poster')
            if fanart and not fanarturl:
                fanarturl = TVDB_URL + fanart.string
            if poster and not posterurl:
                posterurl = TVDB_URL + poster.string
            if posterurl and fanarturl:
                return tvdb_id, posterurl, fanarturl
    return tvdb_id, posterurl, fanarturl
def getTMDBImages(title, imdb=None, content='movie', year=None):
    """Look up backdrop art for a title on TheMovieDB.

    Retries by first dropping the year filter, then progressively
    shortening the title at ' - ', ': ' or ', '.  Sleeps 5 seconds and
    retries the same query when the HTTP request itself fails.

    Returns the backdrop URL, common.na for a matched movie without a
    backdrop, or None.  'imdb' is accepted but unused; poster and id are
    collected but never returned.
    """
    fanart = poster = id = None
    splitter = [' - ', ': ', ', ']
    TMDB_URL = 'http://image.tmdb.org/t/p/original'
    # remember the original year so it can be restored per title variant
    yearorg = year
    while not id:
        str_year = ''
        if year:
            str_year = '&year=' + str(year)
        movie = urllib.quote_plus(title)
        result = common.getURL('http://api.themoviedb.org/3/search/%s?api_key=%s&language=de&query=%s%s' % (content, common.tmdb, movie, str_year), silent=True)
        if not result:
            # transient HTTP failure: back off and retry the same query
            common.Log('Fanart: Pause 5 sec...')
            xbmc.sleep(5000)
            continue
        data = json.loads(result)
        if data['total_results'] > 0:
            result = data['results'][0]
            if result['backdrop_path']:
                fanart = TMDB_URL + result['backdrop_path']
            if result['poster_path']:
                poster = TMDB_URL + result['poster_path']
            id = result['id']
        elif year:
            # no hit with the year filter: retry without it
            year = 0
        else:
            # restore the year and shorten the title at the first separator
            year = yearorg
            oldtitle = title
            for splitchar in splitter:
                if title.count(splitchar):
                    title = title.split(splitchar)[0]
                    break
            if title == oldtitle:
                break
    if content == 'movie' and id and not fanart:
        fanart = common.na
    return fanart
def getUrldata(mode, values, opt='', extra=False, retURL=False, vMT='Feature', dRes='AudioVideoUrls,CatalogMetadata,SubtitleUrls'):
    """Query the ATV /cdp/ endpoint.

    Builds the request URL from 'values' (device/customer/token data),
    optionally appending the streaming parameters when extra is set.

    Returns the URL itself when retURL is set, otherwise
    (True, parsed_json) on success or (False, error) on failure.
    """
    base_params = {"asin": values['asin'],
                   "consumptionType": "Streaming",
                   "deviceID": values['deviceID'],
                   "deviceTypeID": values['deviceTypeID'],
                   "firmware": 1,
                   "version": 1,
                   "format": "json",
                   "marketplaceID": values['marketplace'],
                   "customerID": values['customer'],
                   "token": values['token']}
    query = urllib.urlencode(base_params) + opt
    if extra:
        stream_params = {"consumptionType": "Streaming",
                         "desiredResources": dRes,
                         "resourceUsage": "ImmediateConsumption",
                         "videoMaterialType": vMT,
                         "deviceDrmOverride": "CENC",
                         "deviceStreamingTechnologyOverride": "DASH",
                         "deviceProtocolOverride": "Http",
                         "audioTrackId": "all"}
        query = query + "&" + urllib.urlencode(stream_params)
    url = common.ATV_URL + '/cdp/' + mode + "?" + query
    if retURL:
        return url
    response = common.getURL(url, common.ATV_URL.split('//')[1], useCookie=False)
    if not response:
        return False, 'HTTP Fehler'
    payload = json.loads(response)
    if "error" in payload:
        return False, Error(payload['error'])
    return True, payload
def getStreams(suc, data, retmpd=False):
    """Pick a stream host and return either the MPD URL or the frame rate.

    Args:
        suc: success flag from getUrldata(); on False, data is passed through.
        data: getUrldata() payload containing audioVideoUrls.
        retmpd: when True return (mpd_url, subtitle_files) instead of fps.

    Returns:
        (mpd_url, subs) when retmpd, otherwise (fps_string, True);
        (False, data) when suc is False, ('', False) when nothing matched.
    """
    prefHost = addon.getSetting("pref_host")
    if not suc:
        return False, data
    if retmpd:
        subUrls = parseSubs(data)
    if prefHost not in str(data) or prefHost == 'Auto':
        prefHost = False
    for cdn in data['audioVideoUrls']['avCdnUrlSets']:
        if prefHost and prefHost not in cdn['cdn']:
            continue
        common.Log('Using Host: ' + cdn['cdn'])
        for urlset in cdn['avUrlInfoList']:
            if retmpd:
                return urlset['url'], subUrls
            data = common.getURL(urlset['url'])
            fps_string = re.compile('frameRate="([^"]*)').findall(data)[0]
            # SECURITY FIX: the MPD is remote content, so never eval() it.
            # frameRate is either an integer ("25") or a fraction
            # ("30000/1001"); parse both explicitly.  This also handles
            # decimal rates ("23.976"), which the old eval() crashed on.
            if '/' in fps_string:
                num, den = fps_string.split('/', 1)
                fr = round(float(num) / float(den), 3)
            else:
                fr = round(float(fps_string), 3)
            return str(fr).replace('.0', ''), True
    return '', False
def getIMDbID(asins, title):
    """Resolve the IMDb id for a series.

    First tries the local seasons table via any of the given ASINs; on a
    miss falls back to an OMDb title search, progressively shortening the
    title.  Returns the id string, or common.na when nothing matched.
    """
    url = imdbid = None
    c = tvDB.cursor()
    for asin in asins.split(','):
        asin = '%' + asin + '%'
        url = c.execute('select imdburl from seasons where seriesasin like (?) and imdburl is not null',
                        (asin,)).fetchone()
        if url:
            url = url[0]
            break
    if not url:
        while not imdbid:
            response = common.getURL('http://www.omdbapi.com/?type=series&t=' + urllib.quote_plus(title))
            data = json.loads(response)
            if data['Response'] == 'True':
                imdbid = data['imdbID']
            else:
                # shorten the title one separator at a time and retry
                oldtitle = title
                if title.count(' - '):
                    title = title.split(' - ')[0]
                elif title.count(': '):
                    title = title.split(': ')[0]
                elif title.count('?'):
                    title = title.replace('?', '')
                if title == oldtitle:
                    imdbid = common.na
    else:
        # BUG FIX: findall() returns a list; the original stored the list
        # itself, which crashed the string concatenation in the Log call
        # below and returned a list (not an id string) to callers.
        match = re.compile('/title/(.+?)/', re.DOTALL).findall(url)
        imdbid = match[0] if match else common.na
    common.Log(imdbid + asins.split(',')[0])
    return imdbid
def getUrldata(mode, values, devicetypeid=False, version=1, firmware='1', opt='', extra=False, useCookie=False, retURL=False, vMT='Feature', dRes='AudioVideoUrls%2CSubtitleUrls'):
    """Query the ATV /cdp/ endpoint.

    Builds the request URL from 'values' (device/customer/token data).
    Returns the URL itself when retURL is set, otherwise
    (True, parsed_json) on success or (False, error) on failure.
    An AudioVideoUrls entry in errorsByResource is treated as fatal.
    """
    if not devicetypeid:
        devicetypeid = values['deviceTypeID']
    pieces = [common.ATV_URL, '/cdp/', mode,
              '?asin=', values['asin'],
              '&deviceTypeID=', devicetypeid,
              '&firmware=', firmware,
              '&customerID=', values['customerId'],
              '&deviceID=', values['deviceID'],
              '&marketplaceID=', values['marketplaceId'],
              '&token=', values['token'],
              '&format=json',
              '&version=', str(version),
              opt]
    if extra:
        pieces.append('&resourceUsage=ImmediateConsumption&consumptionType=Streaming&deviceDrmOverride=CENC'
                      '&deviceStreamingTechnologyOverride=DASH&deviceProtocolOverride=Http&audioTrackId=all')
        pieces.append('&videoMaterialType=' + vMT)
        pieces.append('&desiredResources=' + dRes)
    url = ''.join(pieces)
    if retURL:
        return url
    response = common.getURL(url, common.ATV_URL.split('//')[1], useCookie=useCookie)
    if not response:
        return False, 'HTTP Fehler'
    jsondata = json.loads(response)
    if 'errorsByResource' in jsondata:
        for resource in jsondata['errorsByResource']:
            if 'AudioVideoUrls' in resource:
                return False, Error(jsondata['errorsByResource'][resource])
    return True, jsondata
def getTMDBImages(title, imdb=None, content='movie', year=None):
    """Look up backdrop art for a title on TheMovieDB.

    Retries by first dropping the year filter, then progressively
    shortening the title at ' - ', ': ' or ', '.  Sleeps 5 seconds and
    retries the same query when the HTTP request itself fails.

    Returns the backdrop URL, common.na for a matched movie without a
    backdrop, or None.  'imdb' is accepted but unused; poster and id are
    collected but never returned.
    """
    fanart = poster = id = None
    splitter = [' - ', ': ', ', ']
    TMDB_URL = 'http://image.tmdb.org/t/p/original'
    # remember the original year so it can be restored per title variant
    yearorg = year
    while not id:
        str_year = ''
        if year:
            str_year = '&year=' + str(year)
        movie = urllib.quote_plus(title)
        result = common.getURL('http://api.themoviedb.org/3/search/%s?api_key=%s&language=de&query=%s%s' % (content, common.tmdb, movie, str_year), silent=True)
        if not result:
            # transient HTTP failure: back off and retry the same query
            common.Log('Fanart: Pause 5 sec...')
            xbmc.sleep(5000)
            continue
        # CONSISTENCY FIX: the sibling implementation uses stdlib
        # json.loads for the same TMDb response; demjson is unnecessary here.
        data = json.loads(result)
        if data['total_results'] > 0:
            result = data['results'][0]
            if result['backdrop_path']:
                fanart = TMDB_URL + result['backdrop_path']
            if result['poster_path']:
                poster = TMDB_URL + result['poster_path']
            id = result['id']
        elif year:
            # no hit with the year filter: retry without it
            year = 0
        else:
            # restore the year and shorten the title at the first separator
            year = yearorg
            oldtitle = title
            for splitchar in splitter:
                if title.count(splitchar):
                    title = title.split(splitchar)[0]
                    break
            if title == oldtitle:
                break
    if content == 'movie' and id and not fanart:
        fanart = common.na
    return fanart
def testCRUD(): db_uri = common.getURL("root", "", "localhost", 3306, "mydb") engine = common.create_engine(db_uri) # Create engine.execute('INSERT INTO EX1 ' '(name) ' 'VALUES ("raw1")') # Read result = engine.execute('SELECT * FROM ' 'EX1') for _r in result: print _r # Update engine.execute('UPDATE EX1 set name="raw" ' 'WHERE name="raw1"') result = engine.execute('SELECT * FROM ' 'EX1') print result.fetchall() # Delete engine.execute('DELETE from EX1 where name="raw"') result = engine.execute('SELECT * FROM EX1') print result.fetchall()
def getIMDbID(asins, title):
    """Resolve the IMDb id for a series.

    First tries the local seasons table via any of the given ASINs; on a
    miss falls back to an OMDb title search, progressively shortening the
    title.  Returns the id string, or common.na when nothing matched.
    """
    url = None
    imdb_id = None
    c = tvDB.cursor()
    for asin in asins.split(','):
        asin = '%' + asin + '%'
        url = c.execute('select imdburl from seasons where seriesasin like (?) and imdburl is not null',
                        (asin,)).fetchone()
        if url:
            url = url[0]
            break
    if not url:
        while not imdb_id:
            response = common.getURL('http://www.omdbapi.com/?type=series&t=' + urllib.quote_plus(title))
            data = json.loads(response)
            if data['Response'] == 'True':
                imdb_id = data['imdbID']
            else:
                # strip all separators/punctuation in one pass and retry
                oldtitle = title
                title = title.split(' - ')[0]
                title = title.split(': ')[0]
                title = title.replace('?', '')
                if title == oldtitle:
                    imdb_id = common.na
    else:
        # BUG FIX: findall() returns a list; the original stored the list
        # itself, which crashed the string concatenation in the Log call
        # below and returned a list (not an id string) to callers.
        match = re.compile('/title/(.+?)/', re.DOTALL).findall(url)
        imdb_id = match[0] if match else common.na
    common.Log(imdb_id + asins.split(',')[0])
    return imdb_id
def getTVDBImages(title, imdb=None, id=None, seasons=False):
    """Resolve TVDB artwork for a series.

    Searches TheTVDB by title (progressively shortening it at ' - ', ': '
    or ', ' when nothing is found) unless an id is supplied.

    Returns:
        seasons=True  -> ({season_number: banner_url}, posterurl, fanarturl)
        seasons=False -> (id, posterurl, fanarturl)
        (None, None, None) when no series id could be resolved.
    'imdb' is accepted but unused here.
    """
    posterurl = fanarturl = None
    splitter = [' - ', ': ', ', ']
    langcodes = ['de', 'en']
    TVDB_URL = 'http://www.thetvdb.com/banners/'
    while not id:
        tv = urllib.quote_plus(title)
        result = common.getURL('http://www.thetvdb.com/api/GetSeries.php?seriesname=%s&language=de' % (tv), silent=True)
        soup = BeautifulSoup(result)
        id = soup.find('seriesid')
        if id:
            id = id.string
        else:
            # retry with the title shortened at the first matching separator
            oldtitle = title
            for splitchar in splitter:
                if title.count(splitchar):
                    title = title.split(splitchar)[0]
                    break
            if title == oldtitle:
                break
    if not id:
        return None, None, None
    if seasons:
        soup = BeautifulSoup(common.getURL('http://www.thetvdb.com/api/%s/series/%s/banners.xml' % (common.tvdb, id), silent=True))
        seasons = {}
        for lang in langcodes:
            for datalang in soup.findAll('language'):
                if datalang.string == lang:
                    data = datalang.parent
                    if data.bannertype.string == 'fanart' and not fanarturl:
                        fanarturl = TVDB_URL + data.bannerpath.string
                    if data.bannertype.string == 'poster' and not posterurl:
                        posterurl = TVDB_URL + data.bannerpath.string
                    if data.bannertype.string == data.bannertype2.string == 'season':
                        snr = data.season.string
                        # FIX: dict.has_key() is deprecated; use 'in'
                        if snr not in seasons:
                            seasons[snr] = TVDB_URL + data.bannerpath.string
        return seasons, posterurl, fanarturl
    else:
        for lang in langcodes:
            result = common.getURL('http://www.thetvdb.com/api/%s/series/%s/%s.xml' % (common.tvdb, id, lang), silent=True)
            soup = BeautifulSoup(result)
            fanart = soup.find('fanart')
            poster = soup.find('poster')
            # FIX: find() returns None on a miss; len(None) raised TypeError.
            # Truthiness covers both the None and the empty-tag cases.
            if fanart and not fanarturl:
                fanarturl = TVDB_URL + fanart.string
            if poster and not posterurl:
                posterurl = TVDB_URL + poster.string
            if posterurl and fanarturl:
                return id, posterurl, fanarturl
    return id, posterurl, fanarturl
def testCreateTable(table_name):
    """Create `table_name` (id PK autoincrement, name VARCHAR(255) NOT NULL)
    in mydb if it does not already exist."""
    db_uri = common.getURL("root", "", "localhost", 3306, "mydb")
    engine = common.create_engine(db_uri)
    metadata = MetaData(engine)
    if not engine.dialect.has_table(engine, table_name):
        # BUG FIX: the original hard-coded 'EX1' here while checking
        # existence of table_name, so any other requested table was
        # never actually created.
        table = Table(table_name, metadata,
                      Column('id', Integer, primary_key=True, autoincrement=True),
                      Column('name', String(255), nullable=False))
        table.create()
def GETFLASHVARS(pageurl):
    """Scrape the Flash player configuration from a video page.

    Returns a dict with asin/sessionID/marketplace/customer/swfUrl plus
    device and token fields, or False on failure (a notification is shown
    before returning False).
    """
    cookie = common.mechanizeLogin()
    showpage = common.getURL(pageurl, useCookie=cookie)
    common.WriteLog(showpage, 'flashvars', 'w')
    if not showpage:
        Dialog.notification(common.__plugin__, Error('CDP.InvalidRequest'), xbmcgui.NOTIFICATION_ERROR)
        return False
    values = {}
    search = {'asin':        '"pageAsin":"(.*?)"',
              'sessionID':   "ue_sid='(.*?)'",
              'marketplace': "ue_mid='(.*?)'",
              'customer':    '"customerID":"(.*?)"'}
    if 'var config' in showpage:
        flashVars = re.compile('var config = (.*?);', re.DOTALL).findall(showpage)
        flashVars = demjson.decode(unicode(flashVars[0], errors='ignore'))
        values = flashVars['player']['fl_config']['initParams']
        swfUrl = flashVars['player']['fl_config']['playerSwf']
    else:
        for key, pattern in search.items():
            result = re.compile(pattern, re.DOTALL).findall(showpage)
            if result:
                values[key] = result[0]
        values['swfUrl'] = 'http://ecx.images-amazon.com/images/G/01/digital/video/webplayer/1.0.379.0/swf/UnboxScreeningRoomClient.swf'
    # BUG FIX: the original looped over values.keys() and asked
    # values.has_key(key), which can never be False; validate the
    # *required* keys instead so missing data is actually reported.
    for key in search:
        if key not in values:
            Dialog.notification(common.getString(30200), common.getString(30210), xbmcgui.NOTIFICATION_ERROR)
            return False
    values['deviceTypeID'] = 'A324MFXUEZFF7B'   # Sony GoogleTV, unencrypted Flash
    # values['deviceTypeID'] = 'A13Q6A55DBZB7M'  # encrypted Flash
    # values['deviceTypeID'] = 'A35LWR0L7KC0TJ'  # Logitech GoogleTV, unencrypted Flash
    # values['deviceTypeID'] = 'A63V4FRV3YUP9'   # encrypted Silverlight
    values['userAgent'] = "GoogleTV 162671"
    values['deviceID'] = common.hmac.new(common.UserAgent, common.gen_id(), hashlib.sha224).hexdigest()
    rand = 'onWebToken_' + str(random.randint(0, 484))
    pltoken = common.getURL(common.BASE_URL + "/gp/video/streaming/player-token.json?callback=" + rand, useCookie=cookie)
    try:
        values['token'] = re.compile('"([^"]*).*"([^"]*)"').findall(pltoken)[0][1]
    except (IndexError, TypeError):
        # was a bare 'except:'; narrowed so programming errors are not swallowed
        Dialog.notification(common.getString(30200), common.getString(30201), xbmcgui.NOTIFICATION_ERROR)
        return False
    return values
def getStreams(suc, data):
    """Return the frame rate (as a string) of the first available stream.

    Returns '' when suc is False or no CDN URL set is present.
    """
    if not suc:
        return ''
    for cdn in data['audioVideoUrls']['avCdnUrlSets']:
        for urlset in cdn['avUrlInfoList']:
            data = common.getURL(urlset['url'])
            fps_string = re.compile('frameRate="([^"]*)').findall(data)[0]
            # SECURITY FIX: the MPD is remote content, so never eval() it.
            # frameRate is either an integer ("25") or a fraction
            # ("30000/1001"); parse both explicitly.  This also handles
            # decimal rates ("23.976"), which the old eval() crashed on.
            if '/' in fps_string:
                num, den = fps_string.split('/', 1)
                fr = round(float(num) / float(den), 3)
            else:
                fr = round(float(fps_string), 3)
            return str(fr).replace('.0', '')
    return ''
def getFlashVars(url):
    """Scrape the player configuration from a video page.

    Returns a dict with asin/sessionID/marketplace/customer plus device
    and token fields, or False on failure (a notification is shown before
    returning False).
    """
    cookie = common.mechanizeLogin()
    showpage = common.getURL(url, useCookie=cookie)
    #common.WriteLog(showpage, 'flashvars', 'w')
    if not showpage:
        Dialog.notification(common.__plugin__, Error('CDP.InvalidRequest'), xbmcgui.NOTIFICATION_ERROR)
        return False
    values = {}
    search = {'asin':        '"pageAsin":"(.*?)"',
              'sessionID':   "ue_sid='(.*?)'",
              'marketplace': "ue_mid='(.*?)'",
              'customer':    '"customerID":"(.*?)"'}
    if 'var config' in showpage:
        flashVars = re.compile('var config = (.*?);', re.DOTALL).findall(showpage)
        flashVars = json.loads(unicode(flashVars[0], errors='ignore'))
        values = flashVars['player']['fl_config']['initParams']
    else:
        for key, pattern in search.items():
            result = re.compile(pattern, re.DOTALL).findall(showpage)
            if result:
                values[key] = result[0]
    # BUG FIX: the original looped over values.keys() and asked
    # values.has_key(key), which can never be False; validate the
    # *required* keys instead so missing data is actually reported.
    for key in search:
        if key not in values:
            Dialog.notification(common.getString(30200), common.getString(30210), xbmcgui.NOTIFICATION_ERROR)
            return False
    values['deviceTypeID'] = 'AOAGZA014O5RE'
    values['userAgent'] = common.UserAgent
    values['deviceID'] = common.hmac.new(common.UserAgent, common.gen_id(), hashlib.sha224).hexdigest()
    rand = 'onWebToken_' + str(random.randint(0, 484))
    pltoken = common.getURL(common.BASE_URL + "/gp/video/streaming/player-token.json?callback=" + rand, useCookie=cookie)
    try:
        values['token'] = re.compile('"([^"]*).*"([^"]*)"').findall(pltoken)[0][1]
    except (IndexError, TypeError):
        # was a bare 'except:'; narrowed so programming errors are not swallowed
        Dialog.notification(common.getString(30200), common.getString(30201), xbmcgui.NOTIFICATION_ERROR)
        return False
    return values
def getUrldata(mode, values, format='json', asinlist=False, devicetypeid=False, version=1, firmware='WIN%2017,0,0,134%20PlugIn', opt='', extra=False, useCookie=False):
    """Call the ATV /cdp/ Flash endpoint.

    Builds the request URL from 'values' (as gathered by the flash-vars
    scraper) and decodes the response with demjson.

    Returns (True, message_body) on SUCCESS, (False, Error(code)) on a
    reported failure, or (False, 'HTTP Fehler') when nothing came back.
    """
    if not devicetypeid:
        devicetypeid = values['deviceTypeID']
    url = common.ATV_URL + '/cdp/' + mode
    url += '?asin=' + values['asin']
    url += '&deviceTypeID=' + devicetypeid
    url += '&firmware=' + firmware
    url += '&customerID=' + values['customer']
    url += '&deviceID=' + values['deviceID']
    url += '&marketplaceID=' + values['marketplace']
    url += '&token=' + values['token']
    url += '&format=' + format
    url += '&version=' + str(version)
    url += '&xws-fa-ov=false'
    url += opt
    if asinlist:
        # some endpoints expect 'asinlist=' instead of 'asin='
        url = url.replace('?asin=', '?asinlist=')
    if extra:
        # playback-event parameters; event_timestamp is in milliseconds
        url += '&to_timecode=0'
        url += '&start_state=Video'
        url += '&offer_type=SUBSCRIPTION'
        url += '&cdn=' + values['cdn']
        url += '&streaming_session_id=' + values['streamSessionID']
        url += '&download_bandwidth=99999'
        url += '&source_system=' + common.BASE_URL
        url += '&is_timed_text_available=' + values['subtitle']
        url += '&http_referer=ecx.images-amazon.com'
        url += '&from_mode=purchased'
        url += '&event_timestamp=' + str(int(float(time.time()*1000)))
        url += '&url=' + urllib.quote_plus(values['streamurl'])
        url += '&encrypted_customer_id=' + values['customer']
        url += '&device_type_id=' + values['deviceTypeID']
        url += '&bitrate=' + values['bitrate']
        url += '&from_timecode=0'
        url += '&browser=' + urllib.quote_plus(values['userAgent'])
        # presumably the third-from-last swf path segment is its version -- verify
        url += '&client_version=' + values['swfUrl'].split('/')[-3]
        url += '&flash_version=' + firmware
    data = common.getURL(url, common.ATV_URL.split('//')[1], useCookie=useCookie)
    if data:
        jsondata = demjson.decode(data)
        del data
        if jsondata['message']['statusCode'] != "SUCCESS":
            return False, Error(jsondata['message']['body']['code'])
        return True, jsondata['message']['body']
    return False, 'HTTP Fehler'
def parseSubs(data):
    """Convert each subtitle's tt:p captions to an .srt file in special://temp.

    Args:
        data: iterable of dicts with 'displayName' and 'url' keys.

    Returns:
        list of written .srt file paths (empty when subtitles are disabled).
    """
    subs = []
    if addon.getSetting('subtitles') == 'false':
        return subs
    for sub in data:
        lang = sub['displayName'].split('(')[0].strip()
        common.Log('Convert %s Subtitle' % lang)
        # FIX: renamed from 'file', which shadowed the builtin
        srtfile = xbmc.translatePath('special://temp/%s.srt' % lang).decode('utf-8')
        soup = BeautifulSoup(common.getURL(sub['url']))
        enc = soup.originalEncoding
        num = 0
        with codecs.open(srtfile, 'w', encoding='utf-8') as srt:
            for caption in soup.findAll('tt:p'):
                num += 1
                subtext = caption.renderContents().decode(enc).replace('<tt:br>', '\n').replace('</tt:br>', '')
                srt.write(u'%s\n%s --> %s\n%s\n\n' % (num, caption['begin'], caption['end'], subtext))
        subs.append(srtfile)
    return subs
def getStreams(suc, data, retmpd=False):
    """Extract stream info from a getUrldata() payload.

    Returns (title, plot, mpd_url, subtitle_files) when retmpd is True,
    otherwise the frame rate as a string; '' when suc is False or no
    stream URL is present.
    """
    if not suc:
        return ''
    subUrls = parseSubs(data['subtitleUrls'])
    title = plot = False
    common.prettyprint(data)
    if "catalogMetadata" in data:
        title = data['catalogMetadata']['catalog']['title']
        plot = data['catalogMetadata']['catalog']['synopsis']
    for cdn in data['audioVideoUrls']['avCdnUrlSets']:
        for urlset in cdn['avUrlInfoList']:
            if retmpd:
                return title, plot, urlset['url'], subUrls
            data = common.getURL(urlset['url'])
            fps_string = re.compile('frameRate="([^"]*)').findall(data)[0]
            # SECURITY FIX: the MPD is remote content, so never eval() it.
            # frameRate is either an integer ("25") or a fraction
            # ("30000/1001"); parse both explicitly.  This also handles
            # decimal rates ("23.976"), which the old eval() crashed on.
            if '/' in fps_string:
                num, den = fps_string.split('/', 1)
                fr = round(float(num) / float(den), 3)
            else:
                fr = round(float(fps_string), 3)
            return str(fr).replace('.0', '')
    return ''
def getTMDBImages(title, imdb=None, content="movie", year=None): fanart = poster = id = None splitter = [" - ", ": ", ", "] TMDB_URL = "http://image.tmdb.org/t/p/original" yearorg = year while not id: str_year = "" if year: str_year = "&year=" + str(year) movie = urllib.quote_plus(title) result = common.getURL( "http://api.themoviedb.org/3/search/%s?api_key=%s&language=de&query=%s%s" % (content, common.tmdb, movie, str_year), silent=True, ) if not result: common.Log("Fanart: Pause 5 sec...") xbmc.sleep(5000) continue data = json.loads(result) if data["total_results"] > 0: result = data["results"][0] if result["backdrop_path"]: fanart = TMDB_URL + result["backdrop_path"] if result["poster_path"]: poster = TMDB_URL + result["poster_path"] id = result["id"] elif year: year = 0 else: year = yearorg oldtitle = title for splitchar in splitter: if title.count(splitchar): title = title.split(splitchar)[0] break if title == oldtitle: break if content == "movie" and id and not fanart: fanart = common.na return fanart
def parseSubs(data):
    """Convert each subtitle's tt:p captions to an .srt file in special://temp.

    Args:
        data: getUrldata() payload; its 'subtitleUrls' entries carry
              'displayName' and 'url'.

    Returns:
        list of written .srt file paths (empty when subtitles are disabled
        or no subtitle URLs are present).
    """
    subs = []
    if addon.getSetting('subtitles') == 'false' or 'subtitleUrls' not in data:
        return subs
    for sub in data['subtitleUrls']:
        lang = sub['displayName'].split('(')[0].strip()
        common.Log('Convert %s Subtitle' % lang)
        srtfile = xbmc.translatePath('special://temp/%s.srt' % lang).decode('utf-8')
        soup = BeautifulStoneSoup(common.getURL(sub['url']),
                                  convertEntities=BeautifulStoneSoup.XML_ENTITIES)
        enc = soup.originalEncoding
        num = 0
        # FIX: 'with' guarantees the handle is closed even if a caption
        # raises; the original leaked the open file on error.
        with codecs.open(srtfile, 'w', encoding='utf-8') as srt:
            for caption in soup.findAll('tt:p'):
                num += 1
                subtext = caption.renderContents().decode(enc).replace('<tt:br>', '\n').replace('</tt:br>', '')
                srt.write(u'%s\n%s --> %s\n%s\n\n' % (num, caption['begin'], caption['end'], subtext))
        subs.append(srtfile)
    return subs
def CONVERTSUBTITLES(url):
    """Fetch a TTML (tt:) subtitle document and convert it to SRT.

    Returns the SRT document as a UTF-8 encoded byte string.
    """
    xml = common.getURL(url)
    tree = BeautifulStoneSoup(xml, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
    lines = tree.find('tt:body').findAll('tt:p')
    stripTags = re.compile(r'<.*?>', re.DOTALL)
    spaces = re.compile(r'\s\s\s+')
    srt_output = ''
    count = 1
    displaycount = 1
    for line in lines:
        # flatten the caption markup to plain text
        sub = line.renderContents()
        sub = stripTags.sub(' ', sub)
        sub = spaces.sub(' ', sub)
        sub = sub.decode('utf-8')
        # SRT uses ',' as the millisecond separator
        start = line['begin'].replace('.', ',')
        if count < len(lines):
            end = line['end'].replace('.', ',')
            line = str(displaycount) + "\n" + start + " --> " + end + "\n" + sub + "\n\n"
            srt_output += line
            count += 1
            displaycount += 1
    # NOTE(review): the 'count < len(lines)' guard means the final caption
    # is never emitted (and a single-caption document yields empty output);
    # possibly deliberate for a trailing dummy cue -- verify against a real
    # feed before changing.  'count' and 'displaycount' always move in
    # lockstep here.
    return srt_output.encode('utf-8')
def getUrldata(mode, values, format='json', devicetypeid=False, version=1, firmware='1', opt='', extra=False, useCookie=False):
    """Call the ATV /cdp/ endpoint.

    Builds the request URL from 'values' (device/customer/token data).
    Returns (True, parsed_json) on success, (False, Error(...)) when the
    response reports an error, or (False, 'HTTP Fehler') when nothing
    came back.
    """
    if not devicetypeid:
        devicetypeid = values['deviceTypeID']
    url = common.ATV_URL + '/cdp/' + mode
    url += '?asin=' + values['asin']
    url += '&deviceTypeID=' + devicetypeid
    url += '&firmware=' + firmware
    url += '&customerID=' + values['customer']
    url += '&deviceID=' + values['deviceID']
    url += '&marketplaceID=' + values['marketplace']
    url += '&token=' + values['token']
    url += '&format=' + format
    url += '&version=' + str(version)
    url += opt
    if extra:
        url += '&resourceUsage=ImmediateConsumption&videoMaterialType=Feature&consumptionType=Streaming&desiredResources=AudioVideoUrls&deviceDrmOverride=CENC&deviceStreamingTechnologyOverride=DASH&deviceProtocolOverride=Http&deviceBitrateAdaptationsOverride=CVBR%2CCBR&audioTrackId=all'
    data = common.getURL(url, common.ATV_URL.split('//')[1], useCookie=useCookie)
    if data:
        jsondata = json.loads(data)
        del data
        # FIX: dict.has_key() is deprecated (removed in Python 3) and the
        # sibling implementation already uses 'in'
        if 'error' in jsondata:
            return False, Error(jsondata['error'])
        return True, jsondata
    return False, 'HTTP Fehler'
def get_meta():
    """Connect to mydb and return (reflected MetaData, engine)."""
    engine = create_engine(common.getURL("root", "", "localhost", 3306, "mydb"))
    meta = MetaData(engine, reflect=True)
    return meta, engine
def getIP(url):
    """Query http://<url>/fcs/ident and return the <ip> value the server reports."""
    ident = common.getURL('http://' + url + '/fcs/ident')
    matches = re.compile('<fcs><ip>(.+?)</ip></fcs>').findall(ident)
    return matches[0]