def Search(item):
    """Look up subtitles for *item* and publish each hit as a Kodi directory entry.

    Calls read_sub() for the search, then adds one ListItem per result via
    xbmcplugin.addDirectoryItem(); notifies 'ok'/'error' on the outcome.
    """
    sub_data = read_sub(item)
    if sub_data is None:
        Notify('Server', 'error')
        return
    log_my(sub_data)
    for entry in sub_data:
        # label: language name, label2: file name, icon: rating "0".."5",
        # thumbnail: language flag (ISO 639-1 code + .gif) — "bg" here.
        listitem = xbmcgui.ListItem(
            label=entry['id'],
            label2=get_info(entry),
            iconImage=str(int(round(float(entry['rating'])))),
            thumbnailImage="bg")
        # "sync" would be "true" for hash-matched (100% compatible) subtitles;
        # "hearing_imp" would be "true" for hearing-impaired subtitles.
        listitem.setProperty("sync", '{0}'.format("false").lower())
        listitem.setProperty("hearing_imp", '{0}'.format("false").lower())
        # Everything after "action=download&" is handed back to the addon once
        # the user clicks the listed subtitle to download it.
        url = "plugin://%s/?action=download&link=%s&ID=%s&filename=%s" % (
            __scriptid__, entry['url'], entry['id'], "filename of the subtitle")
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url,
                                    listitem=listitem, isFolder=False)
    Notify('Server', 'ok')
def read_sub(mov, year):
    """POST *mov*/*year* to <url>/search.php and return the parsed subtitle list.

    Uses the module-level ``values``/``headers``/``url`` globals. The service is
    expected to answer with a gzip-compressed body; anything else is treated as
    an error and None is returned. On success returns the list populated by
    get_id_url_n().
    """
    results = []  # fix: was named 'list', shadowing the builtin
    log_my(mov, year)
    values['m'] = mov
    values['y'] = year
    enc_values = urllib.urlencode(values)
    log_my('Url: ', (url), 'Headers: ', (headers), 'Values: ', (enc_values))
    request = urllib2.Request(url + '/search.php', enc_values, headers)
    response = urllib2.urlopen(request)
    log_my(response.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[response.code][0])
    try:
        if response.info().get('Content-Encoding') == 'gzip':
            buf = StringIO(response.read())
            f = gzip.GzipFile(fileobj=buf)
            data = f.read()
            f.close()
            buf.close()
        else:
            # This service always gzips its replies; anything else is an error.
            log_my('Error: ', response.info().get('Content-Encoding'))
            return None
    finally:
        response.close()  # fix: the HTTP response was previously never closed
    get_id_url_n(data, results)
    if run_from_xbmc == False:
        # Debug dump when running outside of Kodi.
        for k in list_key:
            d = get_data(results, k)
            log_my(d)
    return results
def read_sub(mov, year):
    """POST the movie title/year form to /index.php and return the parsed list.

    Returns None unless the server answers 200 with a text/html body; otherwise
    returns the list populated by get_id_url_n().
    """
    found = []
    log_my(mov, year)
    values['movie'] = mov
    values['yr'] = year
    enc_values = urllib.urlencode(values)
    log_my('Url: ', (url), 'Headers: ', (head), 'Values: ', (enc_values))
    connection = HTTPConnection(url)
    connection.request("POST", "/index.php?", headers=head, body=enc_values)
    response = connection.getresponse()
    status = response.status
    log_my(status, BaseHTTPServer.BaseHTTPRequestHandler.responses[status][0])
    # Guard clause: bail out on anything that is not a 200 text/html reply.
    if not (status == 200 and response.getheader('content-type').split(';')[0] == 'text/html'):
        connection.close()
        return None
    log_my(response.getheaders())
    data = response.read()
    connection.close()
    get_id_url_n(data, found)
    if run_from_xbmc == False:
        # Debug dump when running outside of Kodi.
        for key in list_key:
            log_my(get_data(found, key))
    return found
def read_sub(mov, year):
    """POST the movie title/year form to /index.php and return the parsed list.

    Near-duplicate of the other HTTPConnection read_sub() variant in this file.
    Uses module-level ``values``/``head``/``url``. Returns None unless the
    server answers 200 with text/html; otherwise the list built by
    get_id_url_n().
    """
    # NOTE(review): 'list' shadows the builtin; kept as-is in this doc pass.
    list = []
    log_my(mov, year)
    values['movie'] = mov
    values['yr'] = year
    enc_values = urllib.urlencode(values)
    log_my('Url: ', (url), 'Headers: ', (head), 'Values: ', (enc_values))
    connection = HTTPConnection(url)
    connection.request("POST", "/index.php?", headers=head, body=enc_values)
    response = connection.getresponse()
    log_my(response.status, BaseHTTPServer.BaseHTTPRequestHandler.responses[response.status][0])
    # Only a 200 text/html reply counts as a successful search page.
    if response.status == 200 and response.getheader('content-type').split(';')[0] == 'text/html':
        log_my(response.getheaders())
        data = response.read()
    else:
        connection.close()
        return None
    connection.close()
    get_id_url_n(data, list)
    if run_from_xbmc == False:
        # Debug dump when running outside of Kodi.
        for k in list_key:
            d = get_data(list, k)
            log_my(d)
    return list
def get_sub(id, sub_url, filename):
    """POST the module-level form values to url+sub_url and fetch the subtitle.

    Returns a dict with 'data' (raw body) and 'fname' (file name taken from the
    Content-Disposition response header).
    """
    enc_values = urllib.urlencode(values)
    request = urllib2.Request(url + sub_url, enc_values, headers)
    response = urllib2.urlopen(request)
    log_my(response.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[response.code][0])
    payload = response.read()
    disposition = response.info()['Content-Disposition']
    return {
        'data': payload,
        'fname': disposition.split('filename=')[1].strip('"'),
    }
def get_sub(id, sub_url, filename):
    """Download a subtitle archive from bukvi.mmcenter.bg.

    Builds the download URL from the last path segment of *sub_url* and returns
    a dict with 'data' (raw body) and 'fname' (last segment of the final,
    possibly redirected, URL).
    """
    s = {}
    # NOTE: mutates the module-level headers dict for this and later requests.
    headers['Referer'] = url
    # The numeric id is the last path segment of sub_url; the '-20' suffix is
    # presumably the site's "download" action code — TODO confirm.
    request = urllib2.Request(
        'http://bukvi.mmcenter.bg/load/0-0-0-' + sub_url.split("/")[-1] + '-20',
        None, headers)
    response = urllib2.urlopen(request)
    log_my(response.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[response.code][0])
    s['data'] = response.read()
    # The server redirects to the actual file; use its name as the local name.
    s['fname'] = response.geturl().split("/")[-1]
    return s
def get_sub(id, sub_url, filename):
    """Download a subtitle archive from easternspirit.org.

    Fetches the file page at *sub_url*, follows the full-width download button
    link (re-sending the session cookie), and returns a dict with 'data' and
    'fname'. When the first download link serves an HTML page instead of a rar
    (TV-series listings), a second pass parses the per-episode links and
    downloads the one chosen by getResult().
    """
    request = urllib2.Request(sub_url, None, headers)
    response = urllib2.urlopen(request)
    mycook = response.info().get('Set-Cookie')
    if response.info().get('Content-Encoding') == 'gzip':
        buf = StringIO(response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
        f.close()
        buf.close()
    else:
        data = response.read()
        log_my('Error: ', response.info().get('Content-Encoding'))
    match = re.findall("<a href='(.+?)' class='ipsButton ipsButton_fullWidth", data)
    log_my(response.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[response.code][0])
    # fix: .replace('&', '&') was a no-op — the page HTML-escapes ampersands,
    # so decode '&amp;' back to '&' before reusing the link.
    nexturl = match[0].replace('&amp;', '&')
    # fix: the dict literal previously listed 'Referer' twice; only the last
    # value survived, so keep the effective one (the download page URL).
    dheaders = {
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-US,en;q=0.8',
        'Cookie': mycook,
        'Connection': 'keep-alive',
        'Referer': nexturl,
        'Host': 'www.easternspirit.org'
    }
    request = urllib2.Request(nexturl, None, dheaders)
    request.add_header('Cookie', mycook)
    log_my(response.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[response.code][0])
    response = urllib2.urlopen(request)
    s = {}
    if response.info().get('Content-Type') == 'application/x-rar-compressed':
        # Single-archive case: we already have the rar.
        s['data'] = response.read()
        s['fname'] = response.info()['Content-Disposition'].split('filename=')[1].strip('"')
        return s
    else:
        # TV SERIES FIX: the link served an HTML page listing per-episode files.
        if response.info().get('Content-Encoding') == 'gzip':
            buf = StringIO(response.read())
            f = gzip.GzipFile(fileobj=buf)
            data2 = f.read()
            f.close()
            buf.close()
        else:
            data2 = response.read()
        data2 = re.sub('[\r\n]+', '', data2)
        # fix: re.sub('&', '&', ...) was a no-op; unescape '&amp;' instead.
        data2 = re.sub('&amp;', '&', data2)
        match = re.findall("ipsType_break ipsContained'>([^<>]+)<.+?a href='([^']+)'", data2)
        sub_url = getResult(match)
        request = urllib2.Request(sub_url, None, dheaders)
        request.add_header('Cookie', mycook)
        response = urllib2.urlopen(request)
        log_my(response.code, BaseHTTPServer.BaseHTTPRequestHandler.responses[response.code][0])
        s['data'] = response.read()
        s['fname'] = response.info()['Content-Disposition'].split('filename=')[1].strip('"')
        return s
def get_sub(id, sub_url, filename):
    """Fetch forum attachment *sub_url* over the module-level host connection.

    Returns {'data': raw body, 'fname': Content-Disposition file name} on
    HTTP 200, otherwise None.
    """
    connection = HTTPConnection(url)
    connection.request("GET", "/index.php?act=download&attach_id=" + sub_url, headers=head)
    response = connection.getresponse()
    status = response.status
    log_my(status, BaseHTTPServer.BaseHTTPRequestHandler.responses[status][0])
    log_my(response.getheaders())
    if status != 200:
        connection.close()
        return None
    body = response.read()
    disposition = response.getheader('Content-Disposition')
    connection.close()
    return {
        'data': body,
        'fname': disposition.split('filename=')[1].strip('"'),
    }
def Search(item):
    """Variant of Search() that passes both the raw item and a cleaned copy.

    Builds a two-element argument list — the original *item* plus a copy whose
    title/year were split by xbmc.getCleanMovieTitle() — and hands both to
    read_sub(). Each returned entry is listed via xbmcplugin.addDirectoryItem().
    """
    it = []
    _item = dict(item)
    it.append(item)
    # Split "Title (Year)"-style names into separate title and year fields.
    _item['title'], _item['year'] = xbmc.getCleanMovieTitle(item['title'])
    it.append(_item)
    # NOTE(review): read_sub() is called with two positional dict arguments;
    # confirm the matching read_sub() overload expects (item, cleaned_item).
    sub_data = read_sub(*it)
    if sub_data != None:
        log_my(sub_data)
        # 'it' is rebound from the argument list to each result row here.
        for it in sub_data:
            listitem = xbmcgui.ListItem(
                label=it['id'],  # language name for the found subtitle
                label2=get_info(it),  # file name for the found subtitle
                iconImage=str(int(round(float(
                    it['rating'])))),  # rating for the subtitle, string 0-5
                thumbnailImage=
                "bg"  # language flag: ISO 639-1 code + .gif extension
            )
            # "true" would mean hash-matched, i.e. 100% compatible subtitle.
            listitem.setProperty("sync", '{0}'.format("false").lower())
            # "true" would mean a subtitle for the hearing impaired.
            listitem.setProperty("hearing_imp", '{0}'.format("false").lower())
            # Anything after "action=download&" is handed back to the addon
            # when the user clicks this entry to download it.
            url = "plugin://%s/?action=download&link=%s&ID=%s&filename=%s" % (
                __scriptid__, it['url'], it['id'], "filename of the subtitle")
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                        url=url,
                                        listitem=listitem,
                                        isFolder=False)
        Notify('Server', 'ok')
    else:
        Notify('Server', 'error')
def get_sub(id, sub_url, filename):
    """Fetch forum attachment *sub_url* via HTTPConnection.

    Duplicate of the other attach_id get_sub() variant in this file. Returns
    {'data', 'fname'} on HTTP 200, otherwise None.
    """
    s = {}
    connection = HTTPConnection(url)
    connection.request("GET", "/index.php?act=download&attach_id=" + sub_url, headers=head)
    response = connection.getresponse()
    log_my(response.status, BaseHTTPServer.BaseHTTPRequestHandler.responses[response.status][0])
    log_my(response.getheaders())
    if response.status != 200:
        connection.close()
        return None
    s['data'] = response.read()
    # Server-suggested file name from the Content-Disposition header.
    s['fname'] = response.getheader('Content-Disposition').split('filename=')[1].strip('"')
    connection.close()
    return s
def Download(id, url, filename, stack=False):
    """Download subtitle archive *id*/*url* into __temp__, extract it with
    Kodi's builtin extractor, and return the discovered subtitle files.

    Returns a list of extracted subtitle paths (filtered by extension), or
    falls through with no explicit return when nothing usable was produced.
    """
    subtitle_list = []
    exts = [".srt", ".sub", ".txt", ".smi", ".ssa", ".ass"]
    ## Cleanup temp dir; subs are downloaded/unzipped into the temp folder and
    ## handed to XBMC to copy and activate.
    if xbmcvfs.exists(__temp__):
        try:
            Notify('Cleanup', 'ok')
            shutil.rmtree(__temp__)
        except:
            Notify('Error cleanup', 'error')
    xbmcvfs.mkdirs(__temp__)
    log_my('Download from unacs id', url)
    sub = get_sub(id, url, filename)
    # fix: guard against get_sub() returning None (the HTTP-error paths in the
    # other variants do) — has_key on None raised AttributeError before.
    if sub is not None and sub.has_key('data') and sub.has_key('fname'):
        log_my('{0}'.format(sub['fname']), 'saving')
        ff = os.path.join(__temp__, sub['fname'])
        with open(ff, 'wb') as subFile:
            subFile.write(sub['data'])
        xbmc.sleep(500)
        # Let Kodi unpack the archive into the temp dir.
        xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (
            ff,
            __temp__,
        )).encode('utf-8'), True)
        Notify('{0}'.format(sub['fname']), 'load')
        # NOTE(review): this lists the archive path 'ff' rather than __temp__;
        # kept as-is — confirm against xbmcvfs.listdir() semantics.
        for entry in xbmcvfs.listdir(ff)[1]:
            entry = os.path.join(__temp__, (entry.decode('utf-8')))
            if os.path.splitext(entry)[1] in exts:
                subtitle_list.append(entry)
    else:
        Notify('Error while downlod')
    if len(subtitle_list) >= 3:
        # Ask the user to pick when several candidates were extracted.
        subtitle_list = select_1(subtitle_list)
    # fix: indexing subtitle_list[0] on an empty list raised IndexError.
    if subtitle_list and xbmcvfs.exists(subtitle_list[0]):
        return subtitle_list
def Download(id, url, filename, stack=False):
    """Download subtitle archive *id*/*url* into __temp__, extract it, and
    return the discovered subtitle files via appendsubfiles().

    Returns a list of subtitle paths, or [] (with a notification) on failure.
    """
    subtitle_list = []
    ## Cleanup temp dir; subs are downloaded/unzipped into the temp folder and
    ## handed to XBMC to copy and activate.
    if xbmcvfs.exists(__temp__):
        try:
            rmtree(__temp__)
        except:
            Notify('Error cleanup', 'error')
    xbmcvfs.mkdirs(__temp__)
    log_my('Download from id', url)
    sub = get_sub(id, url, filename)
    # fix: get_sub() returns None on non-200 responses; calling has_key on
    # None raised AttributeError before.
    if sub is not None and sub.has_key('data') and sub.has_key('fname'):
        log_my('{0}'.format(sub['fname']), 'saving')
        ff = os.path.join(__temp__, sub['fname'])
        subFile = xbmcvfs.File(ff, 'wb')
        subFile.write(sub['data'])
        subFile.close()
        xbmc.sleep(500)
        # Let Kodi unpack the archive into the temp dir.
        xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (
            ff,
            __temp__,
        )).encode('utf-8'), True)
        Notify('{0}'.format(sub['fname']), 'load')
        dirs, files = xbmcvfs.listdir(__temp__)
        files.extend(dirs)
        appendsubfiles(subtitle_list, __temp__, files)
    if len(subtitle_list) >= 2:
        # Ask the user to pick when more than one candidate was extracted.
        subtitle_list = select_1(subtitle_list)
    # fix: subtitle_list[0] on an empty list raised IndexError before the
    # error branch could run.
    if subtitle_list and xbmcvfs.exists(subtitle_list[0]):
        return subtitle_list
    else:
        Notify('Error', 'downlod subtitles')
        return []
def read_sub(mov):
    """GET the site's search page for *mov* and return the parsed subtitle list.

    Returns None when the reply is not gzip-compressed (treated as an error);
    otherwise the list populated by get_id_url_n().
    """
    found = []
    values['q'] = mov
    enc_values = urllib.urlencode(values)
    # The site expects %20 rather than '+' for spaces in the query string.
    search_url = url + '/index.php?/search/&' + enc_values.replace('+', '%20')
    response = urllib2.urlopen(urllib2.Request(search_url, None, headers))
    log_my(response.code)
    # Guard clause: this service always gzips its replies.
    if response.info().get('Content-Encoding') != 'gzip':
        log_my('Error: ', response.info().get('Content-Encoding'))
        return None
    buf = StringIO(response.read())
    unzipped = gzip.GzipFile(fileobj=buf)
    data = unzipped.read()
    unzipped.close()
    buf.close()
    get_id_url_n(data, found)
    # Debug dump of the parsed entries (the run_from_xbmc guard is commented
    # out in the original, so this always runs).
    for key in list_key:
        log_my(get_data(found, key))
    return found
def Download(id,url,filename, stack=False): subtitle_list = [] ## Cleanup temp dir, we recomend you download/unzip your subs in temp folder and ## pass that to XBMC to copy and activate if xbmcvfs.exists(__temp__): try: rmtree(__temp__) except: Notify('Error cleanup', 'error') pass xbmcvfs.mkdirs(__temp__) log_my('Download from id', url) sub=get_sub(id, url, filename) if (sub.has_key('data') and sub.has_key('fname')): log_my('{0}'.format(sub['fname']),'saving') ff = os.path.join(__temp__, sub['fname']) subFile = xbmcvfs.File(ff, 'wb') subFile.write(sub['data']) subFile.close() xbmc.sleep(500) xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (ff,__temp__,)).encode('utf-8'), True) Notify('{0}'.format(sub['fname']),'load') dirs, files = xbmcvfs.listdir(__temp__) files.extend(dirs) appendsubfiles(subtitle_list, __temp__, files) if len(subtitle_list) >= 2: subtitle_list = select_1(subtitle_list) if xbmcvfs.exists(subtitle_list[0]): return subtitle_list else: Notify('Error','downlod subtitles') return []
def Download(id,url,filename, stack=False):
    """Download subtitle archive *id*/*url* into __temp__, extract it, and
    return the subtitle files whose extension is in *exts*.

    Duplicate of the other extension-filtering Download() variant in this file.
    Returns the list on success; no explicit return otherwise.
    """
    subtitle_list = []
    exts = [".srt", ".sub", ".txt", ".smi", ".ssa", ".ass" ]
    ## Cleanup temp dir, we recomend you download/unzip your subs in temp folder and
    ## pass that to XBMC to copy and activate
    if xbmcvfs.exists(__temp__):
        try:
            Notify('Cleanup', 'ok')
            shutil.rmtree(__temp__)
        except:
            Notify('Error cleanup', 'error')
            pass
    xbmcvfs.mkdirs(__temp__)
    log_my('Download from unacs id', url)
    sub=get_sub(id, url, filename)
    # NOTE(review): get_sub() can return None on HTTP errors; has_key would
    # then raise AttributeError — confirm callers guarantee a dict here.
    if (sub.has_key('data') and sub.has_key('fname')):
        log_my('{0}'.format(sub['fname']),'saving')
        ff = os.path.join(__temp__, sub['fname'])
        with open(ff, 'wb') as subFile:
            subFile.write(sub['data'])
        xbmc.sleep(500)
        # Let Kodi unpack the archive into the temp dir.
        xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (ff,__temp__,)).encode('utf-8'), True)
        Notify('{0}'.format(sub['fname']),'load')
        # NOTE(review): this lists the archive path 'ff' rather than __temp__
        # — presumably the temp dir was intended; confirm against
        # xbmcvfs.listdir() semantics. 'file' also shadows a builtin name.
        for file in xbmcvfs.listdir(ff)[1]:
            file = os.path.join(__temp__, (file.decode('utf-8')))
            if (os.path.splitext( file )[1] in exts):
                subtitle_list.append(file)
    else:
        Notify('Error while downlod')
    if len(subtitle_list) >= 3:
        # Ask the user to pick when several candidates were extracted.
        subtitle_list = select_1(subtitle_list)
    # NOTE(review): subtitle_list[0] raises IndexError when the list is empty.
    if xbmcvfs.exists(subtitle_list[0]):
        return subtitle_list
def Download(id, url, filename, stack=False):
    """Download subtitle archive *id*/*url*, extract it into __temp__ with the
    extractor selected in the addon settings, and return the found subtitles.

    Extraction strategies (mutually exclusive, by addon setting):
      * id == 'unacs'        — fetch individual subtitle files from subsunacs.net
      * 'xbmc_extractor'     — Kodi builtin Extract + vfs archive:// for rars
      * 'rarlab'             — python rarfile module
      * 'extract_me'         — extract.me web service (repack to zip)
      * 'online-convert-com' — online-convert.com API (convert to zip)
      * 'android_rar'        — hand the rar to the RAR Android app via intent

    NOTE(review): this body was reconstructed from a collapsed source line; the
    nesting of the trailing blocks follows the most literal reading — verify.
    """
    subtitle_list = []
    ## Cleanup temp dir, we recomend you download/unzip your subs in temp folder and
    ## pass that to XBMC to copy and activate
    if xbmcvfs.exists(__temp__):
        try:
            rmtree(__temp__)
        except:
            Notify('Error cleanup', 'error')
            pass
    xbmcvfs.mkdirs(__temp__)
    log_my('Download from id', url)
    sub = get_sub(id, url, filename)
    # NOTE(review): get_sub() may return None on HTTP errors; has_key would
    # then raise AttributeError — confirm callers guarantee a dict here.
    if (sub.has_key('data') and sub.has_key('fname')):
        log_my('{0}'.format(sub['fname']), 'saving')
        ff = os.path.join(__temp__, sub['fname'])
        subFile = xbmcvfs.File(ff, 'wb')
        subFile.write(sub['data'])
        subFile.close()
        xbmc.sleep(500)
        Notify('{0}'.format(sub['fname']), 'load')
        if id == 'unacs':
            # subsunacs.net: discard the fetched page and download each subtitle
            # file linked from the detail page individually.
            xbmcvfs.delete(ff)
            headers = {
                "Host": "subsunacs.net",
                "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                "Accept-Language": "en-US,en;q=0.5",
                "Accept-Encoding": "gzip, deflate, br",
                "DNT": "1",
                "Connection": "keep-alive",
                "Upgrade-Insecure-Requests": "1",
                "Cache-Control": "max-age=0",
            }
            url = 'https://subsunacs.net' + url + '!'
            req = requests.get(url, headers=headers)
            match = re.compile('<a href="(.+?)">(.+?)</a></label>').findall(
                req.text)
            for suburl, subname in match:
                # Round-trip through cp1251 to drop characters the site's
                # encoding cannot represent, then use dots instead of spaces.
                subname = subname.encode('cp1251', 'ignore').decode(
                    'cp1251', 'ignore').encode('utf-8', 'ignore').replace(' ', '.')
                subtitri = __temp__ + subname
                try:
                    url2 = 'https://subsunacs.net' + suburl
                    req2 = requests.get(url2, headers=headers)
                    f = open(subtitri, 'wb')
                    f.write(req2.content)
                    f.close()
                    xbmc.sleep(1000)
                except:
                    # Best-effort: skip files that fail to download.
                    pass
        else:
            if __addon__.getSetting('xbmc_extractor') == 'true':
                # Kodi builtin Extract for zips; vfs archive:// mount for rars.
                if '.zip' in ff:
                    xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (
                        ff,
                        __temp__,
                    )).encode('utf-8'), True)
                    xbmcvfs.delete(ff)
                    #check for rars after zip extraction
                    unextracted_rars = xbmcvfs.listdir(__temp__)
                    for rars in unextracted_rars[1]:
                        if rars.endswith('.rar'):
                            src = 'archive' + '://' + urllib.quote_plus(
                                __temp__ + rars) + '/'
                            (cdirs, cfiles) = xbmcvfs.listdir(src)
                            for cfile in cfiles:
                                fsrc = '%s%s' % (src, cfile)
                                xbmcvfs.copy(fsrc, __temp__ + cfile)
                else:
                    src = 'archive' + '://' + urllib.quote_plus(ff) + '/'
                    (cdirs, cfiles) = xbmcvfs.listdir(src)
                    for cfile in cfiles:
                        fsrc = '%s%s' % (src, cfile)
                        xbmcvfs.copy(fsrc, __temp__ + cfile)
            elif __addon__.getSetting('rarlab') == 'true':
                # Extract with the python rarfile module.
                import rarfile
                if '.rar' in ff:
                    archive = rarfile.RarFile(ff)
                    archive.extract(__temp__)
                    xbmcvfs.delete(ff)
                else:
                    xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (
                        ff,
                        __temp__,
                    )).encode('utf-8'), True)
                    xbmcvfs.delete(ff)
                    #check for rars after zip extraction
                    unextracted_rars = xbmcvfs.listdir(__temp__)
                    for rars in unextracted_rars[1]:
                        if rars.endswith('.rar'):
                            archive = rarfile.RarFile(__temp__ + rars)
                            archive.extract(__temp__)
            elif __addon__.getSetting('extract_me') == 'true':
                # Upload the archive to extract.me, have it repacked as a zip,
                # download the zip and unpack it locally.
                if '.zip' in ff:
                    xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (
                        ff,
                        __temp__,
                    )).encode('utf-8'), True)
                    xbmcvfs.delete(ff)
                    #check for rars after zip extraction
                    unextracted_rars = xbmcvfs.listdir(__temp__)
                    for rars in unextracted_rars[1]:
                        if rars.endswith('.rar'):
                            s = requests.Session()
                            r = s.get('https://extract.me/upload/')
                            mycook = re.search(
                                'uid=(.+?);', r.headers['Set-Cookie']).group(1)
                            fname = rars
                            files = {
                                'files': (fname, open(__temp__ + rars, 'rb'),
                                          "application/octet-stream")
                            }
                            # NOTE(review): payload uses the *filename* argument,
                            # not fname — confirm this is intended.
                            payload = {'uid': mycook, 'files': filename}
                            r = s.post('https://extract.me/upload/',
                                       files=files,
                                       data=payload)
                            tmp_filename = r.json()['files'][0]['tmp_filename']
                            name = r.json()['files'][0]['name']
                            nexpayload = {
                                'tmp_filename': tmp_filename,
                                'archive_filename': name,
                                'password': ''
                            }
                            r = s.post('https://extract.me/unpack/',
                                       data=nexpayload)
                            compres_to_zip = s.post(
                                'https://extract.me/compress/zip/' + mycook +
                                '/' + tmp_filename)
                            zipped = compres_to_zip.json()['download_url']
                            nexturl = 'https://extract.me/' + mycook + zipped
                            ziper = s.get(nexturl)
                            zf = re.search('.*\/(.+?\.zip)', zipped).group(1)
                            zname = __temp__ + zf
                            f = open(zname, 'wb+')
                            f.write(ziper.content)
                            f.close()
                            # XBMC.Extract does not handle these zips, so
                            # unpack with the zipfile module instead.
                            import zipfile
                            with zipfile.ZipFile(zname, 'r') as zip_ref:
                                zip_ref.extractall(__temp__)
                else:
                    s = requests.Session()
                    r = s.get('https://extract.me/upload/')
                    mycook = re.search('uid=(.+?);',
                                       r.headers['Set-Cookie']).group(1)
                    fname = sub['fname']
                    files = {
                        'files': (fname, open(ff, 'rb'),
                                  "application/octet-stream")
                    }
                    payload = {'uid': mycook, 'files': filename}
                    r = s.post('https://extract.me/upload/',
                               files=files,
                               data=payload)
                    tmp_filename = r.json()['files'][0]['tmp_filename']
                    name = r.json()['files'][0]['name']
                    nexpayload = {
                        'tmp_filename': tmp_filename,
                        'archive_filename': name,
                        'password': ''
                    }
                    r = s.post('https://extract.me/unpack/', data=nexpayload)
                    compres_to_zip = s.post(
                        'https://extract.me/compress/zip/' + mycook + '/' +
                        tmp_filename)
                    zipped = compres_to_zip.json()['download_url']
                    nexturl = 'https://extract.me/' + mycook + zipped
                    ziper = s.get(nexturl)
                    zf = re.search('.*\/(.+?\.zip)', zipped).group(1)
                    f = open(__temp__ + zf, 'wb+')
                    f.write(ziper.content)
                    f.close()
                    xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (
                        __temp__ + zf,
                        __temp__,
                    )).encode('utf-8'), True)
            elif __addon__.getSetting('online-convert-com') == 'true':
                # Convert the archive to a zip via the online-convert.com API.
                if '.zip' in ff:
                    xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (
                        ff,
                        __temp__,
                    )).encode('utf-8'), True)
                    xbmcvfs.delete(ff)
                    # Check for rars after zip extraction; try Kodi first so we
                    # do not waste minutes in online-convert.com.
                    unextracted_rars = xbmcvfs.listdir(__temp__)
                    for rars in unextracted_rars[1]:
                        if rars.endswith('.rar'):
                            src = 'archive' + '://' + urllib.quote_plus(
                                __temp__ + rars) + '/'
                            (cdirs, cfiles) = xbmcvfs.listdir(src)
                            for cfile in cfiles:
                                fsrc = '%s%s' % (src, cfile)
                                xbmcvfs.copy(fsrc, __temp__ + cfile)
                else:
                    api_key = __addon__.getSetting('ocapi')
                    newendpoint = 'http://api2.online-convert.com/jobs'
                    data = {
                        "conversion": [{
                            "category": "archive",
                            "target": "zip"
                        }]
                    }
                    head = {
                        'x-oc-api-key': api_key,
                        'Content-Type': 'application/json',
                        'Cache-Control': 'no-cache'
                    }
                    res = requests.post(newendpoint,
                                        data=json.dumps(data),
                                        headers=head)
                    match = re.compile(
                        'id":"(.+?)".+?server":"(.+?)"').findall(res.text)
                    for idj, servurl in match:
                        servurl = servurl.replace('\/', '/')
                        nextendpont = servurl + '/upload-file/' + idj
                        file = {'file': open(ff, 'rb')}
                        head = {'x-oc-api-key': api_key}
                        res = requests.post(nextendpont,
                                            files=file,
                                            headers=head)
                        xbmc.sleep(2000)
                        res = requests.get(newendpoint, headers=head)
                        match2 = re.compile('"uri":"(http.+?zip)"').findall(
                            res.text)
                        for dlzip in match2:
                            # NOTE(review): 'zipfile' shadows the stdlib module
                            # name imported in the extract_me branch above.
                            zipfile = dlzip.replace('\/', '/')
                            subfile = zipfile.split("/")[-1]
                            r = requests.get(zipfile)
                            with open((__temp__ + subfile), 'wb') as f:
                                f.write(r.content)
                                xbmc.sleep(500)
                            f.close()
                            xbmc.sleep(1000)
                            # Delete the remote job once the zip is fetched.
                            delurl = 'http://api2.online-convert.com/jobs/' + idj
                            head = {
                                'x-oc-api-key': api_key,
                                'Content-Type': 'application/json',
                                'Cache-Control': 'no-cache'
                            }
                            res = requests.delete(delurl, headers=head)
                            xbmc.sleep(500)
                            jj = __temp__ + subfile
                            xbmc.executebuiltin(
                                ('XBMC.Extract("%s","%s")' % (jj, __temp__)),
                                True)
            elif __addon__.getSetting('android_rar') == 'true':
                # Hand the rar to the RAR Android app via an Android intent.
                if 'zip' in ff:
                    xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (
                        ff,
                        __temp__,
                    )).encode('utf-8'), True)
                else:
                    app = 'com.rarlab.rar'
                    intent = 'android.intent.action.VIEW'
                    dataType = 'application/rar'
                    dataURI = ff
                    arch = 'StartAndroidActivity("%s", "%s", "%s", "%s")' % (
                        app, intent, dataType, dataURI)
                    xbmc.executebuiltin(arch)
    # The Android RAR app extracts asynchronously; wait the configured time
    # before scanning the temp dir.
    if __addon__.getSetting('android_rar') == 'true':
        timer = __addon__.getSetting('ar_wait_time')
        xbmc.sleep(int(timer) * 1000)
    dirs, files = xbmcvfs.listdir(__temp__)
    files.extend(dirs)
    appendsubfiles(subtitle_list, __temp__, files)
    if len(subtitle_list) >= 2:
        # Ask the user to pick when more than one candidate was extracted.
        subtitle_list = select_1(subtitle_list)
    # NOTE(review): subtitle_list[0] raises IndexError when nothing was found.
    if xbmcvfs.exists(subtitle_list[0]):
        return subtitle_list
    else:
        Notify('Error', 'Bad format or ....')
        return []