def run(self):
    def _start_servers():
        self.proxy.server_activate()
        self.proxy.timeout = 1
        self.proxy_thread.start()
        Log('Service: Proxy server started')

    def _stop_servers():
        # Stop serve_forever() first, then release the listening socket
        self.proxy.shutdown()
        self.proxy.server_close()
        self.proxy_thread.join()
        Log('Service: Proxy server stopped')

    from time import time
    monitor = xbmc.Monitor()
    _start_servers()
    Log('Service started')

    while not monitor.abortRequested():
        ct = time()
        if (ct >= (self.lastCheck + self.freqCheck)) and self._s.wl_export:
            self.lastCheck = ct
            self.export_watchlist(ct)
        if monitor.waitForAbort(1):
            break

    _stop_servers()
    Log('Service stopped')

def _ForwardRequest(self, method, endpoint, headers, data, stream=False):
    """Forwards the request to the proper target"""
    from resources.lib.network import MechanizeLogin
    import re
    import requests

    # Create one session per host for keep-alives and connection pooling
    host = re.search('://([^/]+)/', endpoint)  # Try to extract the host from the URL
    if None is not host:
        host = host.group(1)
        if host not in self.sessions:
            self.sessions[host] = requests.Session()
        session = self.sessions[host]
    else:
        session = requests.Session()

    cookie = MechanizeLogin()
    if not cookie:
        Log('[PS] Not logged in', Log.DEBUG)
        self.send_error(440)
        return (None, None, None)

    Log('[PS] Forwarding the {} request towards {}'.format(method, endpoint), Log.DEBUG)
    r = session.request(method, endpoint, data=data, headers=headers, cookies=cookie, stream=stream,
                        verify=self.server._s.verifySsl)
    # When streaming (e.g. MPD rewriting) return the Response object itself, otherwise the decoded body
    return (r.status_code, r.headers, r if stream else r.content.decode('utf-8'))

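# Illustrative sketch only (not part of the add-on, the URL is a made-up placeholder): how a
# handler method is expected to consume _ForwardRequest(). In the buffered case the third
# element of the tuple is the decoded body; in the streaming case it is the requests Response
# object, so the caller can iterate over it the way _AlterMPD does.
#
#   status, hdrs, body = self._ForwardRequest('get', 'https://example.com/manifest.mpd', headers, None)
#   status, hdrs, resp = self._ForwardRequest('get', 'https://example.com/manifest.mpd', headers, None, True)
#   for chunk in resp.iter_content(chunk_size=8192, decode_unicode=True):
#       ...
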
def export_watchlist(self, cur_time=0, override=False):
    """Export the watchlist every self.freqExport seconds or when triggered by override"""
    if override or (cur_time >= (self.freqExport + self.lastExport)):
        Log('Service: Exporting the Watchlist')
        self.lastExport = cur_time
        writeConfig('last_wl_export', cur_time)
        xbmc.executebuiltin('XBMC.RunPlugin(plugin://plugin.video.amazon-test/?mode=getListMenu&url=watchlist&export=2)')

def __init__(self):
    import threading
    from resources.lib.common import Settings
    from resources.lib.proxy import ProxyTCPD
    from resources.lib.configs import getConfig, writeConfig

    self._s = Settings()
    # Watchlist export scheduling used by run()/export_watchlist(); the 60 s check interval and
    # 24 h export interval mirror the standalone service loop below
    self.freqCheck = 60
    self.freqExport = 24 * 60 * 60
    self.lastCheck = 0
    self.lastExport = float(getConfig('last_wl_export', '0'))

    self.proxy = ProxyTCPD(self._s)
    writeConfig('proxyaddress', '127.0.0.1:{}'.format(self.proxy.port))
    Log('Service: Proxy bound to {}'.format(self._s.proxyaddress))
    self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)

def _EndChunkedTransfer(self, gzstream):
    """Terminate the transfer"""
    Log('[PS] Chunked transfer: last chunks', Log.DEBUG)
    gzstream[0].flush()
    gzstream[0].close()
    self._SendChunk(gzstream)
    gzstream[1].close()
    self.wfile.write(b'0\r\n\r\n')

def do_POST(self):
    """Respond to POST requests"""
    try:
        from urllib.parse import unquote
    except ImportError:
        from urlparse import unquote

    path, headers, data = self._ParseBaseRequest('POST')
    if None is path:
        return

    if ('gpr' == path[0]) and (2 == len(path)):
        self._AlterGPR(unquote(path[1]), headers, data)
    else:
        Log('[PS] Invalid request received', Log.DEBUG)
        self.send_error(501, 'Invalid request')

def _ParseBaseRequest(self, method):
    """Return path, headers and post data commonly required by all methods"""
    try:
        from urllib.parse import unquote, urlparse, parse_qsl
    except ImportError:
        from urlparse import unquote, urlparse, parse_qsl

    path = py2_decode(urlparse(self.path).path[1:])  # Get the URI without the leading slash
    path = path.split('/')  # license/<asin>/<ATV endpoint>
    Log('[PS] Requested {} path {}'.format(method, path), Log.DEBUG)

    # Retrieve headers and data
    headers = {k: self.headers[k] for k in self.headers if k.lower() not in ['host', 'content-length']}
    data_length = self.headers.get('content-length')
    data = {k: v for k, v in parse_qsl(self.rfile.read(int(data_length)))} if data_length else None

    return (path, headers, data)

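# Illustrative sketch only (the URL is a made-up placeholder): how a proxied request path is
# expected to split. Percent-encoded slashes (%2F) survive the '/' split, so the full target
# URL stays in a single path element until the caller unquotes it.
#
#   urlparse('/mpd/https%3A%2F%2Fexample.com%2Fmanifest.mpd').path[1:].split('/')
#       -> ['mpd', 'https%3A%2F%2Fexample.com%2Fmanifest.mpd']
#   unquote(path[1]) -> 'https://example.com/manifest.mpd'
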
def do_GET(self):
    """Respond to GET requests"""
    try:
        from urllib.parse import unquote
    except ImportError:
        from urlparse import unquote

    path, headers, data = self._ParseBaseRequest('GET')
    if None is path:
        return

    if ('mpd' == path[0]) and (2 == len(path)):
        self._AlterMPD(unquote(path[1]), headers, data)
    elif ('subtitles' == path[0]) and (3 == len(path)):
        self._TranscodeSubtitle(unquote(path[1]), headers, data, path[2])
    else:
        Log('[PS] Invalid request received', Log.DEBUG)
        self.send_error(501, 'Invalid request')

from contextlib import contextmanager


@contextmanager  # Required so _AlterMPD can use this generator in a `with` block
def _PrepareChunkedResponse(self, code, headers):
    """Prep the stream for gzipped chunked transfers"""
    Log('[PS] Chunked transfer: prepping', Log.DEBUG)
    headers = {k: headers[k] for k in headers if k not in self._purgeHeaders}
    headers['Connection'] = 'Keep-Alive'
    headers['Transfer-Encoding'] = 'chunked'
    headers['Content-Encoding'] = 'gzip'
    self._SendHeaders(code, headers)

    gzstream = self._gzip(stream=True)
    try:
        yield gzstream
    finally:
        gzstream[0].close()
        gzstream[1].close()

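# Note on the wire format (context for _SendChunk and _EndChunkedTransfer, both defined
# elsewhere in this handler): HTTP/1.1 chunked transfer encoding frames every chunk as
#
#   <chunk size in hex>\r\n<chunk bytes>\r\n
#
# and the transfer is terminated by a zero-length chunk, b'0\r\n\r\n' (see _EndChunkedTransfer).
# _SendChunk is assumed to push text through the gzip stream pair returned by
# self._gzip(stream=True) and write the newly available compressed bytes using that framing.
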
def _AlterMPD(self, endpoint, headers, data):
    """MPD alteration for better language parsing"""
    try:
        from urllib.parse import urlparse
    except ImportError:
        from urlparse import urlparse
    import re

    # Extrapolate the base CDN url to avoid proxying data we don't need to
    url_parts = urlparse(endpoint)
    baseurl = url_parts.scheme + '://' + url_parts.netloc + re.sub(r'[^/]+$', '', url_parts.path)

    def _rebase(data):
        data = data.replace('<BaseURL>', '<BaseURL>' + baseurl)
        data = re.sub(r'(<SegmentTemplate\s+[^>]*?\s*media=")', r'\1' + baseurl, data)
        data = re.sub(r'(<SegmentTemplate\s+[^>]*?\s*initialization=")', r'\1' + baseurl, data)
        return data

    # Start the chunked reception
    status_code, headers, r = self._ForwardRequest('get', endpoint, headers, data, True)
    with self._PrepareChunkedResponse(status_code, headers) as gzstream:
        if r.encoding is None:
            r.encoding = 'utf-8'
        buffer = ''
        bPeriod = False
        Log('[PS] Loading MPD and rebasing as {}'.format(baseurl), Log.DEBUG)
        for chunk in r.iter_content(chunk_size=1048576, decode_unicode=True):
            buffer += py2_decode(chunk)

            # Flush everything up to the audio AdaptationSets as fast as possible
            pos = re.search(
                r'(<AdaptationSet[^>]*contentType="video"[^>]*>.*?</AdaptationSet>\s*)' if bPeriod else r'(<Period[^>]*>\s*)',
                buffer, flags=re.DOTALL)
            if pos:
                if 0 < pos.start(1):
                    self._SendChunk(gzstream, buffer[0:pos.start(1)])
                if not bPeriod:
                    bPeriod = True
                    self._SendChunk(gzstream, buffer[pos.start(1):pos.end(1)])
                else:
                    self._SendChunk(gzstream, _rebase(buffer[pos.start(1):pos.end(1)]))
                buffer = buffer[pos.end(1):]

        # Count the number of duplicates with the same ISO 639-1 codes
        Log('[PS] Parsing languages', Log.DEBUG)
        languages = []
        langCount = {}
        for lang in re.findall(r'<AdaptationSet[^>]*audioTrackId="([^"]+)"[^>]*>', buffer):
            if lang not in languages:
                languages.append(lang)
        for lang in languages:
            lang = lang[0:2]
            if lang not in langCount:
                langCount[lang] = 0
            langCount[lang] += 1

        # Send corrected AdaptationSets, one at a time through chunked transfer
        Log('[PS] Altering <AdaptationSet>s', Log.DEBUG)
        while True:
            pos = re.search(r'(<AdaptationSet[^>]*>)(.*?</AdaptationSet>)', buffer, flags=re.DOTALL)
            if None is pos:
                break
            # Log('[PS] AdaptationSet position: ([{}:{}], [{}:{}])'.format(pos.start(1), pos.end(1), pos.start(2), pos.end(2)))
            setTag = buffer[pos.start(1):pos.end(1)]
            try:
                trackId = re.search(r'\s+audioTrackId="([a-z]{2})(-[a-z0-9]{2,})_(dialog|descriptive)', setTag).groups()
                lang = re.search(r'\s+lang="([a-z]{2})"', setTag).group(1)
                newLocale = self._AdjustLocale(trackId[0] + trackId[1], langCount[trackId[0]])
                if 'descriptive' == trackId[2]:
                    newLocale += (' ' if '-' in newLocale else '-') + '[Audio Description]'
                setTag = setTag.replace('lang="{}"'.format(lang), 'lang="{}"'.format(newLocale))
            except:
                pass
            self._SendChunk(gzstream, setTag)
            self._SendChunk(gzstream, _rebase(buffer[pos.start(2):pos.end(2)]))
            buffer = buffer[pos.end(2):]

        # Send the rest and signal EOT
        if 0 < len(buffer):
            self._SendChunk(gzstream, buffer)
        self._EndChunkedTransfer(gzstream)

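# Illustrative sketch only (the track IDs are made up): why langCount matters. If a manifest
# carries two audio tracks whose audioTrackId values are 'pt-BR_dialog_...' and
# 'pt-PT_dialog_...', both share the ISO 639-1 code 'pt', so langCount['pt'] == 2 and
# _AdjustLocale (defined elsewhere in this handler) is assumed to return a locale that keeps
# the region, e.g.:
#
#   lang="pt"  ->  lang="pt-BR"  /  lang="pt-PT"
#
# whereas a language with a single variant can be collapsed to its bare ISO 639-1 code.
# Descriptive tracks additionally get an '[Audio Description]' suffix in the lang attribute.
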
def export_watchlist(self, cur_time=0):
    """Export the watchlist every self.freqExport seconds"""
    if cur_time >= (self.freqExport + self.lastExport):
        Log('Service: Exporting the Watchlist')
        self.lastExport = cur_time
        xbmc.executebuiltin('RunPlugin(plugin://plugin.video.amazon-test/?mode=getListMenu&url=watchlist&export=2)')

from __future__ import unicode_literals
import time
import xbmc
from resources.lib.common import Settings
from resources.lib.logging import Log
from resources.lib.configs import *


if __name__ == '__main__':
    _s = Settings()
    monitor = xbmc.Monitor()
    Log('Service: Start')

    check_freq = 60
    export_freq = 24 * 60 * 60

    if _s.wl_export:
        while not monitor.abortRequested():
            last_export = float(getConfig('last_wl_export', '0'))
            cur_time = time.time()
            if cur_time >= last_export + export_freq:
                Log('Service: Starting Export of Watchlist')
                writeConfig('last_wl_export', cur_time)
                xbmc.executebuiltin('XBMC.RunPlugin(plugin://plugin.video.amazon-test/?mode=getListMenu&url=watchlist&export=2)')
            if monitor.waitForAbort(check_freq):
                break

    Log('Service: End')

def _TranscodeSubtitle(self, endpoint, headers, data, filename):
    """On-the-fly subtitle transcoding (TTMLv2 => SRT)"""
    import re

    status_code, headers, content = self._ForwardRequest('get', endpoint, headers, data)
    if 0 < len(content):
        # Apply a bunch of regex to the whole content instead of line-by-line to save computation time
        content = re.sub(r'<(|/)span[^>]*>', r'<\1i>', content)  # Using (|<search>) instead of ()? to avoid py2.7 empty matching error
        content = re.sub(r'([0-9]{2}:[0-9]{2}:[0-9]{2})\.', r'\1,', content)  # SRT-like timestamps
        content = re.sub(r'\s*<(?:tt:)?br\s*/>\s*', '\n', content)  # Replace <br/> with actual new lines

        # Subtitle timing stretch
        if ('[–1]' in filename):
            def _stretch(f):
                millis = int(f.group('h')) * 3600000 + int(f.group('m')) * 60000 + int(f.group('s')) * 1000 + int(f.group('ms'))
                h, m = divmod(millis * _stretch.factor, 3600000)
                m, s = divmod(m, 60000)
                s, ms = divmod(s, 1000)
                # Truncate to the decimal of a ms (for laziness)
                return '%02d:%02d:%02d,%03d' % (h, m, s, int(ms))

            _stretch.factor = self.server._s.subtitleStretchFactor
            Log('Stretch factor: %f' % _stretch.factor)
            content = re.sub(r'(?P<h>\d+):(?P<m>\d+):(?P<s>\d+),(?P<ms>\d+)', _stretch, content)

        # Convert dfxp or ttml2 to srt
        num = 0
        srt = ''
        for tt in re.compile(r'<(?:tt:)?p begin="([^"]+)"[^>]*end="([^"]+)"[^>]*>\s*(.*?)\s*</(?:tt:)?p>', re.DOTALL).findall(content):
            text = tt[2]

            # Embed RTL and change the punctuation where needed
            if filename.startswith('ar'):
                from unicodedata import lookup
                text = re.sub(r'^(?!{}|{})'.format(lookup('RIGHT-TO-LEFT MARK'), lookup('RIGHT-TO-LEFT EMBEDDING')),
                              lookup('RIGHT-TO-LEFT EMBEDDING'), text, flags=re.MULTILINE)
                text = text.replace('?', '؟').replace(',', '،')

            # Unescape the XML entities left in the cue text
            for ec in [('&amp;', '&'), ('&quot;', '"'), ('&lt;', '<'), ('&gt;', '>'), ('&apos;', "'")]:
                text = text.replace(ec[0], ec[1])

            num += 1
            srt += '%s\n%s --> %s\n%s\n\n' % (num, tt[0], tt[1], text)
        content = srt

    self._SendResponse(status_code, headers, content)  # Kodi doesn't quite like gzip'd subtitles

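# Illustrative sketch only (a made-up cue): the TTML -> SRT rewrite above turns
#
#   <tt:p begin="00:00:01.000" end="00:00:02.500">Hello<tt:br/>world</tt:p>
#
# into the SRT block
#
#   1
#   00:00:01,000 --> 00:00:02,500
#   Hello
#   world
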