def observation():
    """Fetch the latest hourly observation for the configured location from the
    Met Office DataPoint feed and publish it to the Kodi window properties."""
    utilities.log( "Fetching Hourly Observation for '%s (%s)' from the Met Office..." % (OBSERVATION_LOCATION, OBSERVATION_LOCATION_ID))
    with urlcache.URLCache(ADDON_DATA_PATH) as cache:
        # Cached download of the observation JSON; 'observation_expiry' decides staleness.
        filename = cache.get(HOURLY_LOCATION_OBSERVATION_URL, observation_expiry)
        data=json.load(open(filename))
    try:
        dv = data['SiteRep']['DV']
        # 'dataDate' is UTC with a trailing 'Z'; strip it and attach tzinfo explicitly.
        dataDate = utilities.strptime(dv.get('dataDate').rstrip('Z'), DATAPOINT_DATETIME_FORMAT).replace(tzinfo=pytz.utc)
        WINDOW.setProperty('HourlyObservation.IssuedAt', dataDate.astimezone(TZ).strftime(ISSUEDAT_FORMAT))#@UndefinedVariable
        # 'Period' and 'Rep' may be a list or a single dict depending on the feed;
        # indexing a dict with -1 raises KeyError, which selects the dict itself.
        try:
            latest_period = dv['Location']['Period'][-1]
        except KeyError:
            latest_period = dv['Location']['Period']
        try:
            latest_obs = latest_period['Rep'][-1]
        except KeyError:
            latest_obs = latest_period['Rep']
        WINDOW.setProperty('Current.Condition', WEATHER_CODES[latest_obs.get('W', 'na')][1])#@UndefinedVariable
        WINDOW.setProperty('Current.Visibility', latest_obs.get('V', 'n/a'))#@UndefinedVariable
        WINDOW.setProperty('Current.Pressure', latest_obs.get('P', 'n/a'))#@UndefinedVariable
        # NOTE(review): float('n/a') raises ValueError (not KeyError) when 'T', 'Dp'
        # or 'H' is missing, and that is not caught below — confirm the feed always
        # provides these fields.
        WINDOW.setProperty('Current.Temperature', str(round(float(latest_obs.get('T', 'n/a')))).split('.')[0])#@UndefinedVariable
        WINDOW.setProperty('Current.Wind', latest_obs.get('S', 'n/a'))#@UndefinedVariable
        WINDOW.setProperty('Current.WindDirection', latest_obs.get('D', 'n/a'))#@UndefinedVariable
        WINDOW.setProperty('Current.WindGust', latest_obs.get('G', 'n/a'))#@UndefinedVariable
        WINDOW.setProperty('Current.OutlookIcon', '%s.png' % WEATHER_CODES[latest_obs.get('W', 'na')][0])#@UndefinedVariable
        WINDOW.setProperty('Current.FanartCode', '%s.png' % WEATHER_CODES[latest_obs.get('W','na')][0])#@UndefinedVariable
        WINDOW.setProperty('Current.DewPoint', str(round(float(latest_obs.get('Dp', 'n/a')))).split('.')[0])#@UndefinedVariable
        WINDOW.setProperty('Current.Humidity', str(round(float(latest_obs.get('H', 'n/a')))).split('.')[0])#@UndefinedVariable
        WINDOW.setProperty('HourlyObservation.IsFetched', 'true')#@UndefinedVariable
    except KeyError as e:
        # Re-raise with context about which key and which source URL failed.
        e.args = ("Key Error in JSON File", "Key '{0}' not found while processing file from url:".format(e.args[0]), HOURLY_LOCATION_OBSERVATION_URL)
        raise
def search_subtitles( file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack ): #standard input
    """Build a search string from the video path/title and search LegendasDivx
    for Portuguese subtitles.

    Returns (subtitles_list, "", msg); msg is non-empty when no Portuguese
    language slot is selected.
    """
    subtitles_list = []
    msg = ""
    searchstring_notclean = ""
    searchstring = ""
    global israr
    # Derive the extension of the directory holding the video, to detect
    # videos that live inside a '.rar'-named directory (archive rip).
    israr = os.path.abspath(file_original_path)
    israr = os.path.split(israr)
    israr = israr[0].split(os.sep)
    israr = string.split(israr[-1], '.')
    israr = string.lower(israr[-1])
    if len(tvshow) == 0:
        if 'rar' in israr and searchstring is not None:
            if 'cd1' in string.lower(title) or 'cd2' in string.lower(title) or 'cd3' in string.lower(title):
                # Multi-disc rip inside a rar dir: use the grandparent directory name.
                dirsearch = os.path.abspath(file_original_path)
                dirsearch = os.path.split(dirsearch)
                dirsearch = dirsearch[0].split(os.sep)
                if len(dirsearch) > 1:
                    searchstring_notclean = dirsearch[-3]
                    searchstring = xbmc.getCleanMovieTitle(dirsearch[-3])
                    searchstring = searchstring[0]
                else:
                    searchstring = title
            else:
                searchstring = title
        elif 'cd1' in string.lower(title) or 'cd2' in string.lower(title) or 'cd3' in string.lower(title):
            # Multi-disc rip: the parent directory carries the movie name.
            dirsearch = os.path.abspath(file_original_path)
            dirsearch = os.path.split(dirsearch)
            dirsearch = dirsearch[0].split(os.sep)
            if len(dirsearch) > 1:
                searchstring_notclean = dirsearch[-2]
                searchstring = xbmc.getCleanMovieTitle(dirsearch[-2])
                searchstring = searchstring[0]
            else:
                # We are at the root of the drive: no directory to look up, only the file name.
                title = os.path.split(file_original_path)
                searchstring = title[-1]
        else:
            if title == "":
                # No title given: fall back to the file name.
                title = os.path.split(file_original_path)
                searchstring = title[-1]
            else:
                searchstring = title
    if len(tvshow) > 0:
        # TV episodes search as 'Show S01E02'.
        searchstring = "%s S%#02dE%#02d" % (tvshow, int(season), int(episode))
    log( __name__ ,"%s Search string = %s" % (debug_pretext, searchstring))
    # Record which language slot (1-3) is Portuguese, if any.
    portuguese = 0
    if string.lower(lang1) == "portuguese": portuguese = 1
    elif string.lower(lang2) == "portuguese": portuguese = 2
    elif string.lower(lang3) == "portuguese": portuguese = 3
    getallsubs(searchstring, "pt", "Portuguese", file_original_path, subtitles_list, searchstring_notclean)
    if portuguese == 0:
        msg = "Won't work, LegendasDivx is only for Portuguese subtitles!"
    return subtitles_list, "", msg #standard output
def getrating(subsfile, videofile):
    """Heuristically score (0-10) how well a subtitle release name matches the
    video file name, based on shared release tags and the release-group token."""
    log(__name__, "# Comparing Releases:\n %s [subtitle-rls] \n %s [filename-rls]" % (subsfile, videofile))
    # Normalise both names: drop the video extension, lowercase, remove dots.
    video_name = "".join(videofile.split(".")[:-1]).lower().replace(".", "")
    subs_name = subsfile.lower().replace(".", "")
    tags_in_video = 0
    tags_shared = 0
    for tag in releases_types:
        if tag in video_name:
            tags_in_video += 1
            if tag in subs_name:
                tags_shared += 1
    # Fraction of the video's release tags that the subtitle shares, scaled to 0-4.
    score = (tags_shared / float(tags_in_video)) * 4 if tags_in_video else 0
    # Release group is conventionally the token after the last dash.
    if video_name.split("-")[-1] == subs_name.split("-")[-1]:
        score += 1
    elif video_name.split("-")[0] == subs_name.split("-")[-1]:
        # Group may instead lead the file name; weigh that match less.
        score += 0.5
    if score > 0:
        score = score * 2
    log(__name__, "# Result is: %f" % score)
    return round(score)
def getallsubs(searchstring, languageshort, languagelong, subtitles_list):
    """Search the site for subtitles in the given language ('sv' or 'en') and
    append one dict per hit to subtitles_list (mutated in place)."""
    if languageshort == "sv":
        url = ( main_url + "?group1=on&p=soek&add=arkiv&submit=S%F6k&select2=&select3=&select=&str=" + urllib.quote_plus(searchstring) )
        subtitle_pattern = sv_subtitle_pattern
    elif languageshort == "en":
        url = ( main_url + "?group1=on&p=eng_search&add=arkiv&submit=S%F6k&select2=&select3=&select=&str=" + urllib.quote_plus(searchstring) )
        subtitle_pattern = en_subtitle_pattern
    else:
        # BUGFIX: any other language previously fell through with 'url' and
        # 'subtitle_pattern' unbound, raising NameError below.
        return
    content = geturl(url)
    if content is not None:
        log(__name__, "%s Getting '%s' subs ..." % (debug_pretext, languageshort))
        for matches in re.finditer(subtitle_pattern, content, re.IGNORECASE | re.DOTALL):
            id = matches.group(1)
            filename = string.strip(matches.group(2))
            log(__name__, "%s Subtitles found: %s (id = %s)" % (debug_pretext, filename, id))
            subtitles_list.append(
                {
                    "rating": "0",
                    "no_files": 1,
                    "filename": filename,
                    "sync": False,
                    "id": id,
                    "language_flag": "flags/" + languageshort + ".gif",
                    "language_name": languagelong,
                }
            )
def getAllSubtitles(fname, subtitlePageID, languageList):
    """Scrape the subtitles page for a movie and return the entries whose
    language was selected by the user, sorted best-rating first."""
    subs = []
    # Retrieve the subtitles page (html).
    subtitlePage = getURL(BASE_URL + "getajax.php?moviedetailssubtitles=" + subtitlePageID[1:])
    # Each match carries two alternative field sets; the second (sendspace) wins when present.
    for (fid, language, title, fid2, language2, title2) in re.findall(COMBINED, subtitlePage):
        log(__name__, "%s Is sendspace?: %s" % (debug_pretext, bool(fid2 and len(fid2) > 0)))
        if fid2 and len(fid2) > 0:
            fid, language, title = fid2, language2, title2
        # Keep only subtitles in a language the user selected.
        if sratimToScript(language) not in languageList:
            continue
        rating = getrating(title, fname)
        subs.append(
            {
                "rating": str(rating),
                "sync": rating >= 8,
                "filename": title,
                "subtitle_id": fid,
                "language_flag": "flags/" + languageTranslate(sratimToScript(language), 0, 2) + ".gif",
                "language_name": sratimToScript(language),
                "sendspace": (fid2 and len(fid2) > 0),
            }
        )
    return sorted(subs, key=lambda entry: int(float(entry["rating"])), reverse=True)
def daily():
    """Fetch the daily (day/night) forecast and publish per-day Kodi window
    properties (Day0..DayN)."""
    utilities.log( "Fetching Daily Forecast for '%s (%s)' from the Met Office..." % (FORECAST_LOCATION, FORECAST_LOCATION_ID))
    with urlcache.URLCache(ADDON_DATA_PATH) as cache:
        filename = cache.get(DAILY_LOCATION_FORECAST_URL, daily_expiry)
        data=json.load(open(filename))
    try:
        dv = data['SiteRep']['DV']
        # 'dataDate' is UTC with a trailing 'Z'.
        dataDate = utilities.strptime(dv.get('dataDate').rstrip('Z'), DATAPOINT_DATETIME_FORMAT).replace(tzinfo=pytz.utc)
        WINDOW.setProperty('DailyForecast.IssuedAt', dataDate.astimezone(TZ).strftime(ISSUEDAT_FORMAT))#@UndefinedVariable
        for p, period in enumerate(dv['Location']['Period']):
            # Short day name ('Mon', ...) derived from the period's date value.
            WINDOW.setProperty('Day%d.Title' %p, time.strftime(SHORT_DAY_FORMAT, time.strptime(period.get('value'), DATAPOINT_DATE_FORMAT)))#@UndefinedVariable
            for rep in period['Rep']:
                weather_type = rep.get('W', 'na')
                # '$' distinguishes the 'Day' and 'Night' report of each period.
                if rep.get('$') == 'Day':
                    WINDOW.setProperty('Day%d.HighTemp' %p, rep.get('Dm', 'na'))#@UndefinedVariable
                    WINDOW.setProperty('Day%d.HighTempIcon' %p, rep.get('Dm'))#@UndefinedVariable
                    # NOTE(review): WEATHER_CODES.get(weather_type) returns None for an
                    # unknown code, making '[1]' raise TypeError; and the 'na' default on
                    # the next line is a plain string, so '[0]' would yield 'n' rather
                    # than the not-available icon code — confirm all feed codes exist in
                    # WEATHER_CODES.
                    WINDOW.setProperty('Day%d.Outlook' %p, WEATHER_CODES.get(weather_type)[1])#@UndefinedVariable
                    WINDOW.setProperty('Day%d.OutlookIcon' % p, WEATHER_ICON_PATH % WEATHER_CODES.get(weather_type, 'na')[0])#@UndefinedVariable
                    WINDOW.setProperty('Day%d.WindSpeed' % p, rep.get('S', 'na'))#@UndefinedVariable
                    WINDOW.setProperty('Day%d.WindDirection' % p, rep.get('D', 'na').lower())#@UndefinedVariable
                elif rep.get('$') == 'Night':
                    WINDOW.setProperty('Day%d.LowTemp' %p, rep.get('Nm', 'na'))#@UndefinedVariable
                    WINDOW.setProperty('Day%d.LowTempIcon' %p, rep.get('Nm'))#@UndefinedVariable
    except KeyError as e:
        # Re-raise with context about which key and which source URL failed.
        e.args = ("Key Error in JSON File", "Key '{0}' not found while processing file from url:".format(e.args[0]), DAILY_LOCATION_FORECAST_URL)
        raise
    WINDOW.setProperty('DailyForecast.IsFetched', 'true')#@UndefinedVariable
def download(self, link):
    """Download a subtitle archive from 'link' into the add-on temp directory
    and return the final path (extension taken from the server's filename)."""
    dest_dir = os.path.join(xbmc.translatePath(self.addon.getAddonInfo('profile').decode("utf-8")), 'temp')
    tmp_path = os.path.join(dest_dir, "download.tmp")
    log(__name__, 'Downloading subtitles from %s' % link)
    res = urllib.urlopen(link)
    # The real file name (and thus the archive format) comes from the
    # Content-Disposition response header.
    subtitles_filename = re.search("Content\-Disposition: attachment; filename=\"(.+?)\"", str(res.info())).group(1)
    log(__name__, 'Filename: %s' % subtitles_filename)
    subtitles_format = re.search("\.(\w+?)$", subtitles_filename, re.IGNORECASE).group(1)
    log(__name__, "Subs in %s" % subtitles_format)
    payload = res.read()
    log(__name__, 'Saving to file %s' % tmp_path)
    with open(tmp_path, 'wb') as out:
        out.write(payload)
    # Rename so the extension reflects the actual archive format.
    final_dest = os.path.join(dest_dir, "download." + subtitles_format)
    log(__name__, 'Changing filename to %s' % final_dest)
    os.rename(tmp_path, final_dest)
    return final_dest
def search_subtitles(file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack): #standard input
    """Search subtitles.gr for Greek subtitles.

    Returns (subtitles_list, "", msg); msg explains why nothing was searched
    when Greek is not among the selected languages.
    """
    subtitles_list = []
    msg = ""
    # BUGFIX: the original test `not (lower(lang1) or lower(lang2) or lower(lang3)) == "greek"`
    # short-circuited on the first non-empty language, so Greek selected as the
    # 2nd/3rd language was wrongly rejected. Check all three slots.
    if "greek" not in (lang1.lower(), lang2.lower(), lang3.lower()):
        msg = "Won't work, subtitles.gr is only for Greek subtitles."
        return subtitles_list, "", msg #standard output
    try:
        log( __name__ ,"%s Clean title = %s" % (debug_pretext, title))
        premiered = year
        title, year = xbmc.getCleanMovieTitle( title )
    except:
        pass
    if len(tvshow) == 0: # Movie
        searchstring = "%s (%s)" % (title, premiered)
    elif len(tvshow) > 0 and title == tvshow: # Movie not in Library
        searchstring = "%s (%#02d%#02d)" % (tvshow, int(season), int(episode))
    elif len(tvshow) > 0: # TVShow
        searchstring = "%s S%#02dE%#02d" % (tvshow, int(season), int(episode))
    else:
        searchstring = title
    log( __name__ ,"%s Search string = %s" % (debug_pretext, searchstring))
    get_subtitles_list(searchstring, "el", "Greek", subtitles_list)
    return subtitles_list, "", msg #standard output
def search_subtitles( file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack ): #standard input
    """Search the OSDB server, first by file hash, then by name if the hash
    search returned nothing.

    Returns (subtitles_list, "", "").
    """
    ok = False
    msg = ""
    osdb_server = OSDBServer()
    osdb_server.create()
    subtitles_list = []
    file_size = ""
    hashTry = ""
    # Map the GUI language names to the server's language codes.
    language1 = languageTranslate(lang1,0,1)
    language2 = languageTranslate(lang2,0,1)
    language3 = languageTranslate(lang3,0,1)
    if set_temp :
        # Temporary file: skip hashing and use placeholder values.
        hash_search = False
        file_size = "000000000"
        SubHash = "000000000000"
    else:
        try:
            # xbmc's built-in helper computes the file size and sub hash.
            file_size, SubHash = xbmc.subHashAndFileSize(file_original_path)
            log( __name__ ,"xbmc module hash and size")
            hash_search = True
        except:
            # Hashing failed (e.g. unreadable/streamed file): fall back to name search.
            file_size = ""
            SubHash = ""
            hash_search = False
    if file_size != "" and SubHash != "":
        log( __name__ ,"File Size [%s]" % file_size )
        log( __name__ ,"File Hash [%s]" % SubHash)
    if hash_search :
        log( __name__ ,"Search for [%s] by hash" % (os.path.basename( file_original_path ),))
        subtitles_list, session_id = osdb_server.searchsubtitles_pod( SubHash ,language1, language2, language3, stack)
    if not subtitles_list:
        log( __name__ ,"Search for [%s] by name" % (os.path.basename( file_original_path ),))
        subtitles_list = osdb_server.searchsubtitlesbyname_pod( title, tvshow, season, episode, language1, language2, language3, year, stack )
    return subtitles_list, "", "" #standard output
def search_subtitles( file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack ): #standard input
    """Build the site search URL (episode query for TV shows, original title
    for movies), fetch it and collect the subtitles found."""
    subtitles_list = []
    msg = ""
    if len(tvshow) > 0:
        # Drop a trailing ' (YYYY)' year tag from the show title, if present.
        for year_tag in re.finditer(' \(\d\d\d\d\)', tvshow):
            tvshow = tvshow.replace(year_tag.group(), "")
        show_query = tvshow.replace(" ", "+")
        # Zero-pad season/episode to two digits (they arrive as strings).
        padded_season = season if len(season) >= 2 else '0%s' % (season)
        padded_episode = episode if len(episode) >= 2 else '0%s' % (episode)
        url = '%s%s+%sx%s' % (main_url, show_query, padded_season, padded_episode)
    else:
        # Movies search by the player's original title.
        original_title = xbmc.getInfoLabel("VideoPlayer.OriginalTitle")
        log( __name__ ,"Original title: [%s]" % (original_title))
        url = '%s%s' % (main_url, original_title.replace(" ", "+"))
    log( __name__ , "Pobieram z [ %s ]" % (url))
    content = urllib2.urlopen(url).read()
    getallsubs(content, title, subtitles_list, file_original_path)
    return subtitles_list, "", "" #standard output
def getallsubs(content, title, subtitles_list, file_original_path):
    """Parse the search-result HTML and append one entry per subtitle found.

    A subtitle is flagged 'sync' when its release token matches the video
    file path (note: only the last listed release token decides the flag,
    matching the original behaviour).
    """
    path_upper = file_original_path.upper()
    for found in re.finditer(subtitle_pattern, content):
        lang_code, sub_id, release, sub_title = found.groups()
        # Skip entries whose language we do not map.
        if 'other' in lang_code:
            continue
        link = "%s%s/" % (down_url, sub_id)
        log( __name__ ,"Subtitles found: %s %s (link=%s)" % (sub_title, release, link))
        flag_image = "flags/%s.gif" % (lang_code)
        lang = twotofull(lang_code)
        full_title = '%s %s' % (sub_title, release)
        # Normalise the release list to 'tok1;tok2;...;' and test each token
        # against the file path; the last token's result wins.
        release_tokens = '%s;' % (release.replace(" ", "").replace(",", ";"))
        for token in re.finditer('(.+?);', release_tokens):
            sync_value = token.group().replace(";", "").upper() in path_upper
        subtitles_list.append({'filename': full_title, 'sync': sync_value, 'link': link, 'language_flag': flag_image, 'language_name': lang, 'rating': ""})
def search_subtitles( file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack ): # standard input
    """Search Swesub.nu for Swedish subtitles; other languages are rejected."""
    subtitles_list = []
    msg = ""
    # Episodes search as 'Show S01E02', movies by plain title.
    if len(tvshow) > 0:
        searchstring = "%s S%#02dE%#02d" % (tvshow, int(season), int(episode))
    else:
        searchstring = title
    log(__name__, "Search string = %s" % (searchstring))
    # Which of the three language slots (if any) is Swedish?
    swedish = 0
    for slot, lang in enumerate((lang1, lang2, lang3), 1):
        if lang.lower() == "swedish":
            swedish = slot
            break
    if swedish > 0:
        getallsubs(searchstring, "sv", "Swedish", subtitles_list)
    if swedish == 0:
        msg = "Won't work, Swesub.nu is only for Swedish subtitles."
    return subtitles_list, "", msg # standard output
def download_subtitles (subtitles_list, pos, zip_subs, tmp_sub_dir, sub_folder, session_id): #standard input
    """Log in to napisy24.pl with the add-on's stored credentials, download the
    selected subtitle archive into 'zip_subs' and log out.

    Returns (True, language, "").
    """
    cj = CookieJar()
    # Browser-like headers, including the Referer the site expects.
    headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'UTF-8,*;q=0.5', 'Accept-Encoding': 'gzip,deflate,sdch', 'Accept-Language': 'pl,pl-PL;q=0.8,en-US;q=0.6,en;q=0.4', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.83 Safari/537.1', 'Referer': 'http://napisy24.pl/' }
    # Login form fields; credentials come from the add-on settings.
    values = { 'form_logowanieMail' : __addon__.getSetting( "n24user" ), 'form_logowanieHaslo' : __addon__.getSetting( "n24pass" ), 'postAction' : 'sendLogowanie' }
    data = urlencode(values)
    # Cookie-aware opener keeps the login session for the download request.
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    request = urllib2.Request("http://napisy24.pl/logowanie/", data, headers)
    response = opener.open(request)
    # Fetch the archive for the chosen list entry.
    request = urllib2.Request(subtitles_list[pos][ "link" ], "", headers)
    f = opener.open(request)
    local_tmp_file = os.path.join(tmp_sub_dir, "zipsubs.zip")
    # NOTE(review): the log line reports 'local_tmp_file' but the payload is
    # actually written to 'zip_subs' below — confirm which path is intended.
    log( __name__ ,"Saving subtitles to '%s'" % (local_tmp_file))
    local_file = open(zip_subs, "w" + "b")
    local_file.write(f.read())
    local_file.close()
    # Log out to release the session.
    opener.open("http://napisy24.pl/index.php?sendAction=Wyloguj")
    language = subtitles_list[pos][ "language_name" ]
    return True, language, "" #standard output
def search_subtitles( file_original_path, title, tvshow, year, season, episode, set_temp, rar, language1, language2, language3, stack ): #standard input
    """Search GomTV by MD5 hash of the currently playing file plus a title query."""
    subtitles_list = []
    msg = ""
    log(__name__, "Search GomTV with a file name, "+file_original_path)
    movieFullPath = xbmc.Player().getPlayingFile()
    # Hash the playing file in 1 MiB chunks; None means it could not be read.
    video_hash = hashFileMD5( movieFullPath, buff_size=1024*1024 )
    if video_hash is None:
        msg = _(755)
        return subtitles_list, "", msg #standard output
    webService = GomTvWebService()
    if len(tvshow) > 0:
        # TvShow: query as 'Show S01E02' with '+' separators.
        OS_search_string = ("%s S%.2dE%.2d" % (tvshow, int(season), int(episode))).replace(" ", "+")
    else:
        # Movie. Without a year it is not in the library, so clean the raw title.
        if str(year) == "":
            title, year = xbmc.getCleanMovieTitle( title )
        OS_search_string = title.replace(" ", "+")
    subtitles_list = webService.SearchSubtitlesFromTitle( OS_search_string ,video_hash)
    log(__name__, "Found %d subtitles in GomTV" %len(subtitles_list))
    return subtitles_list, "", msg #standard output
def __init__(self, name, entity_list, spawn_list, theme_dict, filename):
    """Build a level: copy the entity/spawner lists, run the spawners, apply a
    theme to every entity and locate the player actor.

    Raises KeyError if theme_dict has no "default" theme.
    """
    self.name = name
    self.num = -1
    self.entity_list = entity_list[:]
    self._entity_list_dirty = True # List needs sorting
    self.spawn_list = spawn_list[:]
    self.theme_lookup = dict(theme_dict)
    self.background_color = self.theme_lookup["default"].values["background_color"]
    self.filename = filename
    # Spawners contribute their actor to the entity list before theming.
    for spawner in spawn_list:
        self.entity_list.append(spawner.get_actor())
        spawner.do_spawn()
    for entity in self.entity_list:
        theme_id = entity.get_theme_id()
        if theme_id in self.theme_lookup:
            self.theme_lookup[theme_id].apply(entity)
        else:
            # Unknown theme ids fall back to the default theme.
            utilities.log("WARN: Unrecognized theme id: "+str(theme_id))
            self.theme_lookup["default"].apply(entity)
    self.actor = self._find_player()
    # BUGFIX: compare with None by identity ('is None'), not '=='.
    if self.actor is None:
        utilities.log("levels.Level: Warning: No actor found in loaded level!")
    self.sort_if_dirty()
def dump_build_information(args):
    """Log the packages provided & depended on by this build

    This function logs all packages that will be built if this build
    succeeds, including 'virtual' packages (e.g. 'sh' provided by 'bash'),
    so that a dependent build that later fails to install its dependencies
    knows which build to blame. It also logs what this build depends on.
    """
    pkgbuild = os.path.join(args.abs_dir, "PKGBUILD")

    def names(array):
        # Read one bash array from the PKGBUILD and strip version constraints.
        return [strip_version_info(n) for n in interpret_bash_array(pkgbuild, array)]

    provides = names("pkgname") + names("provides")
    log("provide_info", None, output=provides)
    depends = names("depends") + names("makedepends")
    log("dep_info", "This build depends on the following packages", output=depends)
def path_to_vanilla_pkg(pkg_name, args):
    """Returns the path to a vanilla package in the local mirror.

    This method tries to find a package with name exactly matching
    'pkg_name' in one of the repositories in the local mirror. It aborts
    the stage if such a package isn't found.
    """
    log("info", "Trying to find vanilla package '%s'..." % pkg_name)
    for root, dirs, files in os.walk(args.mirror_directory):
        for f in files:
            # Skip 32-bit packages.
            if re.search("i686", f):
                continue
            # BUGFIX: the original regex ".pkg.tar.xz$" left the dots
            # unescaped, so any character matched; test the literal suffix.
            if not f.endswith(".pkg.tar.xz"):
                continue
            if re.search(re.escape(pkg_name), f):
                path = os.path.join(root, f)
                # Ask pacman for the candidate's canonical package name.
                cmd = "pacman --query --file " + path
                cp = subprocess.run(
                    cmd.split(),
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    universal_newlines=True
                )
                log("command", cmd, cp.stdout.splitlines())
                if cp.returncode:
                    exit(1)
                candidate_name = cp.stdout.splitlines()[0].split()[0]
                if pkg_name == candidate_name:
                    return path
    die(Status.failure, "No matching packages found.")
def mark_episode_in_range_from_start(self, tvdb_show_id, last_season=None, last_episode=None, watched=True):
    """Mark (or unmark) all episodes of a show up to last_season/last_episode.

    Returns True when the API replied with result "OK".
    """
    # BUGFIX/idiom: test truthiness directly instead of '== True' (which
    # misclassified truthy non-bool values such as non-empty strings).
    action = "show_progress" if watched else "delete_show_progress"
    log( "Mark episodes in range: " + action + " - " + str(tvdb_show_id) + " / " + str(last_season) + " / " + str(last_episode) )
    try:
        values = {"show_id": int(tvdb_show_id)}
        # Season/episode bounds are optional; omit them to mark the whole show.
        if last_season:
            values["season"] = int(last_season)
        if last_episode:
            values["episode"] = int(last_episode)
        res = urllib2.urlopen(self.base_api_url + action + "?access_token=" + self.token, urllib.urlencode(values))
        data = json.loads(res.read())
    except urllib2.HTTPError as res:
        data = {"result": "KO", "error": res}
    # Track the API rate-limit headers from the (possibly error) response.
    self.store_api_rate(res.headers)
    log(data)
    return data["result"] == "OK"
def is_authorized(self):
    """Return whether the stored token is authorized, caching the result of
    the first (remote) authorization check."""
    if self.is_token_empty():
        return False
    # BUGFIX: compare with None by identity ('is None'), not '=='.
    if self.authorized is None:
        self.authorized = self.check_authorization() != False
    log("Is authorized ~ " + str(self.authorized))
    return self.authorized
def run(self):
    ''' execute the main thread loop '''
    if not self.port:
        # do not act as a server when there is no port number specified
        return
    # set-up TCP server on specified port and address
    log(self.identity, 'listening on port %d', self.port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((config.network['ip'], self.port))
    s.listen(config.network['connections'])
    # main server loop; self.serve is the shutdown flag
    while self.serve:
        # call hart-beat callback periodically
        self.hartbeat()
        # wait for a new connection; accept() is expected to time out so the
        # loop re-checks self.serve regularly
        try:
            conn, addr = s.accept()
        except:
            continue
        # receive one TCP message (single recv of up to buffer_size bytes)
        data = conn.recv(config.network['buffer_size'])
        # decode message instance
        # SECURITY NOTE(review): pickle.loads on data received from the network
        # executes arbitrary code if a peer is untrusted — confirm all peers
        # are trusted before exposing this port.
        message = pickle.loads(data)
        # call handle method
        self.handle(conn, message)
        conn.close()
    # close the port when server terminates
    s.close()
def search_subtitles( file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack ): #standard input
    """Build a search string from the video path/title and search LegendasDivx
    in each selected language (PT, PT-BR, ES or EN).

    Returns (subtitles_list, "", msg); msg is non-empty when none of the
    selected languages is supported.
    """
    subtitles_list = []
    msg = ""
    searchstring_notclean = ""
    searchstring = ""
    global israr
    # Derive the extension of the directory holding the video, to detect
    # videos that live inside a '.rar'-named directory (archive rip).
    israr = os.path.abspath(file_original_path)
    israr = os.path.split(israr)
    israr = israr[0].split(os.sep)
    israr = string.split(israr[-1], '.')
    israr = string.lower(israr[-1])
    if len(tvshow) == 0:
        if 'rar' in israr and searchstring is not None:
            if 'cd1' in string.lower(title) or 'cd2' in string.lower(title) or 'cd3' in string.lower(title):
                # Multi-disc rip inside a rar dir: use the grandparent directory name.
                dirsearch = os.path.abspath(file_original_path)
                dirsearch = os.path.split(dirsearch)
                dirsearch = dirsearch[0].split(os.sep)
                if len(dirsearch) > 1:
                    searchstring_notclean = dirsearch[-3]
                    searchstring = xbmc.getCleanMovieTitle(dirsearch[-3])
                    searchstring = searchstring[0]
                else:
                    searchstring = title
            else:
                searchstring = title
        elif 'cd1' in string.lower(title) or 'cd2' in string.lower(title) or 'cd3' in string.lower(title):
            # Multi-disc rip: the parent directory carries the movie name.
            dirsearch = os.path.abspath(file_original_path)
            dirsearch = os.path.split(dirsearch)
            dirsearch = dirsearch[0].split(os.sep)
            if len(dirsearch) > 1:
                searchstring_notclean = dirsearch[-2]
                searchstring = xbmc.getCleanMovieTitle(dirsearch[-2])
                searchstring = searchstring[0]
            else:
                # We are at the root of the drive: no directory to look up, only the file name.
                title = os.path.split(file_original_path)
                searchstring = title[-1]
        else:
            if title == "":
                # No title given: fall back to the file name.
                title = os.path.split(file_original_path)
                searchstring = title[-1]
            else:
                searchstring = title
    if len(tvshow) > 0:
        # TV episodes search as 'Show S01E02'.
        searchstring = "%s S%#02dE%#02d" % (tvshow, int(season), int(episode))
    log( __name__ ,"%s Search string = %s" % (debug_pretext, searchstring))
    # Collect the 2-letter codes of the selected languages and search each
    # supported one.
    hasLang = languageTranslate(lang1,0,2) + " " + languageTranslate(lang2,0,2) + " " + languageTranslate(lang3,0,2)
    if re.search('pt', hasLang) or re.search('en', hasLang) or re.search('es', hasLang) or re.search('pb', hasLang):
        msgnote(debug_pretext,__language__(30153), 6000)
        getallsubs(searchstring, languageTranslate(lang1,0,2), lang1, file_original_path, subtitles_list, searchstring_notclean)
        getallsubs(searchstring, languageTranslate(lang2,0,2), lang2, file_original_path, subtitles_list, searchstring_notclean)
        getallsubs(searchstring, languageTranslate(lang3,0,2), lang3, file_original_path, subtitles_list, searchstring_notclean)
    else:
        msg = "Won't work, LegendasDivx.com is only for PT, PTBR, ES or EN subtitles."
    return subtitles_list, "", msg #standard output
def search_show_url(self, title, show_list):
    """Fuzzy-match 'title' against show_list and return the chosen show's URL
    (user dialog on multiple matches), or None when nothing is selected."""
    log(__name__,"Starting search by TV Show: %s" % title)
    if not title:
        return None
    # Loosen the match threshold step by step (100% down to 55%) until
    # at least one show matches. BUGFIX: removed the dead
    # 'if threshold_ratio == None' branch — range() never yields None.
    for threshold_ratio in range(100, 50, -5):
        tv_show_list = []
        for tv_show in show_list:
            # Ignore a leading 'The ' on either side when comparing.
            matcher = SequenceMatcher(None, re.sub(r'(?i)^The ',"", tv_show["title"]), re.sub(r'(?i)^The ',"", title)).ratio() * 100
            if matcher >= threshold_ratio:
                tv_show_list.append(tv_show)
        if tv_show_list:
            break
    if not tv_show_list:
        # No fuzzy match at all: offer the full list.
        tv_show_list = show_list
    if (len(tv_show_list) == 0):
        log(__name__,"No TV Show found")
        return None
    elif (len(tv_show_list) == 1):
        log(__name__,"One TV Show found, autoselecting")
        tvshow_url = tv_show_list[0]['url']
    else:
        log(__name__,"More TV Shows found, user dialog for select")
        menu_dialog = []
        for tv_show in tv_show_list:
            menu_dialog.append(tv_show['title'])
        dialog = xbmcgui.Dialog()
        found_tv_show_id = dialog.select(self._t(32003), menu_dialog)
        if (found_tv_show_id == -1):
            return None # cancel dialog
        tvshow_url = tv_show_list[found_tv_show_id]['url']
    tvshow_url = "/titulky/serial-" + tvshow_url
    log(__name__,"Selected show URL: " + tvshow_url)
    return tvshow_url
def getallsubs(searchstring, languageshort, languagelong, subtitles_list):
    """Query the site's search page and append one dict per subtitle hit to
    subtitles_list (mutated in place)."""
    quoted = urllib.quote_plus(searchstring)
    url = ( main_url + "search.php?q=&r=0&s=S%F6k&in=" + quoted + "&ex=&ep=&be=" + quoted + "&adv=0" )
    content = geturl(url)
    if content is None:
        return
    log(__name__, "Getting '%s' subs ..." % (languageshort))
    for found in re.finditer(subtitle_pattern, content, re.IGNORECASE | re.DOTALL):
        sub_id = found.group(1)
        filename = found.group(2).strip()
        log(__name__, "Subtitles found: %s (id = %s)" % (filename, sub_id))
        subtitles_list.append(
            {
                "rating": "0",
                "no_files": 1,
                "filename": filename,
                "sync": False,
                "id": sub_id,
                "language_flag": "flags/" + languageshort + ".gif",
                "language_name": languagelong,
            }
        )
def getAllSubtitles(subtitlePageID,languageList,subtitlesList):
    """Scrape one subtitles page and append every subtitle in a user-selected
    language to subtitlesList (mutated in place)."""
    # Retrieve the subtitles page (html)
    try:
        subtitlePage = getURL(BASE_URL + subtitlePageID)
    except:
        # Didn't find the page - no such episode?
        return
    # Didn't find the page - no such episode?
    if (not subtitlePage):
        return
    # Find subtitles dictionary declaration on page
    tempStart = subtitlePage.index("subtitles_groups = ")
    # Look for the following line break
    tempEnd = subtitlePage.index("\n",subtitlePage.index("subtitles_groups = "))
    toExec = "foundSubtitles = "+subtitlePage[tempStart+len("subtitles_groups = "):tempEnd]
    # Remove junk at the end of the line
    toExec = toExec[:toExec.rfind("}")+1]
    # SECURITY NOTE(review): exec() of text scraped from a remote page runs
    # arbitrary code if the site is compromised — consider json/ast parsing.
    exec(toExec)
    log( __name__ ,"Built webpage dictionary")
    # Nested dict: language -> translator -> quality -> rating -> subtitle info.
    for language in foundSubtitles.keys():
        if (twotofull(language) in languageList):
            for translator in foundSubtitles[language]:
                for quality in foundSubtitles[language][translator]:
                    for rating in foundSubtitles[language][translator][quality]:
                        subtitlesList.append({'rating': rating, 'sync': False, 'filename': foundSubtitles[language][translator][quality][rating]["subtitle_version"], 'subtitle_id': foundSubtitles[language][translator][quality][rating]["id"], 'language_flag': 'flags/' + language + '.gif', 'language_name': twotofull(language), 'key': foundSubtitles[language][translator][quality][rating]["key"]})
def release(self, datasetID, size=0):
    """Return a dataset slot to whichever queue its size routed it to:
    the fast queue for sizes up to the threshold, the slow queue otherwise."""
    if size <= self.__threshold:
        queue, label = self.__fast_queue, "__fast_queue"
    else:
        queue, label = self.__slow_queue, "__slow_queue"
    queue.release(datasetID)
    log("DatasetProcessorManager.release(dataset="+str(datasetID)+",size=" + repr(size) + ") from the " + label)
def threehourly():
    """Fetch the 3-hourly forecast and publish one numbered Kodi window
    property set (3Hourly0..N) per report."""
    utilities.log( "Fetching 3 Hourly Forecast for '%s (%s)' from the Met Office..." % (FORECAST_LOCATION, FORECAST_LOCATION_ID))
    with urlcache.URLCache(ADDON_DATA_PATH) as cache:
        filename = cache.get(THREEHOURLY_LOCATION_FORECAST_URL, threehourly_expiry)
        data=json.load(open(filename))
    try:
        dv = data['SiteRep']['DV']
        # 'dataDate' is UTC with a trailing 'Z'.
        dataDate = utilities.strptime(dv.get('dataDate').rstrip('Z'), DATAPOINT_DATETIME_FORMAT).replace(tzinfo=pytz.utc)
        WINDOW.setProperty('3HourlyForecast.IssuedAt', dataDate.astimezone(TZ).strftime(ISSUEDAT_FORMAT))#@UndefinedVariable
        count = 0
        for period in dv['Location']['Period']:
            for rep in period['Rep']:
                #extra xbmc targeted info:
                weather_type = rep.get('W', 'na')
                # NOTE(review): WEATHER_CODES.get(weather_type) returns None for an
                # unknown code, making '[1]' raise TypeError; the 'na' default used
                # for the icon below is a plain string so '[0]' yields 'n' — confirm
                # all feed codes exist in WEATHER_CODES.
                WINDOW.setProperty('3Hourly%d.Outlook' % count, WEATHER_CODES.get(weather_type)[1])#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.WindSpeed' % count, rep.get('S', 'n/a'))#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.WindDirection' % count, rep.get('D', 'na').lower())#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.GustSpeed' % count, rep.get('G', 'n/a'))#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.UVIndex' % count, rep.get('U', 'n/a'))#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.Precipitation' % count, rep.get('Pp'))#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.OutlookIcon' % count, WEATHER_ICON_PATH % WEATHER_CODES.get(weather_type, 'na')[0])#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.Day' % count, time.strftime(SHORT_DAY_FORMAT, time.strptime(period.get('value'), DATAPOINT_DATE_FORMAT)))#@UndefinedVariable
                # '$' carries the report's start time as minutes after midnight.
                WINDOW.setProperty('3Hourly%d.Time' % count, utilities.minutes_as_time(int(rep.get('$'))))#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.ActualTemp' % count, utilities.rownd(utilities.localised_temperature(rep.get('T', 'na'))))#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.ActualTempIcon' % count, rep.get('T', 'na'))#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.FeelsLikeTemp' % count, utilities.rownd(utilities.localised_temperature(rep.get('F', 'na'))))#@UndefinedVariable
                WINDOW.setProperty('3Hourly%d.FeelsLikeTempIcon' % count, rep.get('F', 'na'))#@UndefinedVariable
                count +=1
    except KeyError as e:
        # Re-raise with context about which key and which source URL failed.
        e.args = ("Key Error in JSON File", "Key '{0}' not found while processing file from url:".format(e.args[0]), THREEHOURLY_LOCATION_FORECAST_URL)
        raise
    WINDOW.setProperty('3HourlyForecast.IsFetched', 'true')#@UndefinedVariable
def getAllSubtitles(subtitlePageID,languageList,fname):
    """Scrape a subtitles page, rate each subtitle against 'fname' and return
    the entries (wanted languages only) sorted best-rating first."""
    # Retrieve the subtitles page (html)
    subs= []
    try:
        subtitlePage = getURL(BASE_URL + subtitlePageID)
    except:
        # Didn't find the page - no such episode?
        return
    # Didn't find the page - no such episode?
    if (not subtitlePage):
        return
    # Find subtitles dictionary declaration on page
    toExec = "foundSubtitles = " + subtitlePage
    # Remove junk at the end of the line
    toExec = toExec[:toExec.rfind("}")+1]
    # Replace "null" with "None"
    toExec = toExec.replace("null","None")
    # SECURITY NOTE(review): exec() of text fetched from a remote server runs
    # arbitrary code if the site is compromised — consider json/ast parsing.
    # (Python 2 'exec ... in globals(), locals()' statement form.)
    exec(toExec) in globals(), locals()
    log( __name__ ,"Built webpage dictionary")
    # Nested dict: language -> translator -> quality -> rating -> subtitle info.
    for language in foundSubtitles.keys():
        if (languageTranslate(language, 2, 0) in languageList):
            for translator in foundSubtitles[language]:
                for quality in foundSubtitles[language][translator]:
                    for rating in foundSubtitles[language][translator][quality]:
                        title=foundSubtitles[language][translator][quality][rating]["subtitle_version"]
                        Srating=getrating(title,fname)
                        # NOTE(review): 'notes' is stored as a re match object (or None),
                        # not a string — confirm downstream consumers expect that.
                        subs.append({'rating': str(Srating), 'sync': Srating>=8, 'filename': title, 'subtitle_id': foundSubtitles[language][translator][quality][rating]["id"], 'language_flag': 'flags/' + language + '.gif', 'language_name': languageTranslate(language, 2, 0), 'key': foundSubtitles[language][translator][quality][rating]["key"], 'notes': re.search('http://www\.sendspace\.com/file/\w+$',foundSubtitles[language][translator][quality][rating]["notes"])})
    # sort, to put syncs on top
    return sorted(subs,key=lambda x: int(float(x['rating'])),reverse=True)
def login(self, username, password):
    """Log in to Titulky.com; return True when the credentials were accepted."""
    log(__name__,'Logging in to Titulky.com')
    form = {'Login': username, 'Password': password, 'foreverlog': '1','Detail2':''}
    login_postdata = urllib.urlencode(form)
    request = urllib2.Request(self.server_url + '/index.php', login_postdata)
    response = urllib2.urlopen(request).read()
    log(__name__,'Got response')
    # Success when the page does not contain the 'BadLogin' marker.
    return 'BadLogin' not in response
def _strip_outlook_prefix(title):
    """Normalise a forecast paragraph title.

    Removes a trailing ':' and then a literal 'UK Outlook for' prefix
    (plus any whitespace that followed it).

    Bug fix: the original used ``lstrip('UK Outlook for')``, which strips
    any leading *characters* from that set rather than the prefix string,
    so it could eat the first letters of an unrelated title.
    """
    cleaned = title.rstrip(':')
    prefix = 'UK Outlook for'
    if cleaned.startswith(prefix):
        cleaned = cleaned[len(prefix):].lstrip()
    return cleaned

def text():
    """Fetch the Met Office regional text forecast and publish each paragraph
    as 'Text.ParagraphN.Title'/'Text.ParagraphN.Content' WINDOW properties.

    Raises KeyError (re-raised with the source URL appended) when the JSON
    lacks an expected key.
    """
    utilities.log( "Fetching Text Forecast for '%s (%s)' from the Met Office..." % (REGIONAL_LOCATION, REGIONAL_LOCATION_ID))
    with urlcache.URLCache(ADDON_DATA_PATH) as cache:
        filename = cache.get(TEXT_FORECAST_URL, text_expiry)
        data = json.load(open(filename))
        try:
            rf = data['RegionalFcst']
            issuedat = utilities.strptime(rf['issuedAt'].rstrip('Z'), DATAPOINT_DATETIME_FORMAT).replace(tzinfo=pytz.utc)
            WINDOW.setProperty('TextForecast.IssuedAt', issuedat.astimezone(TZ).strftime(ISSUEDAT_FORMAT))#@UndefinedVariable
            count = 0
            for period in rf['FcstPeriods']['Period']:
                # have to check type because json can return list or dict here
                raw = period['Paragraph']
                paragraphs = raw if isinstance(raw, list) else [raw]
                for paragraph in paragraphs:
                    WINDOW.setProperty('Text.Paragraph%d.Title' % count, _strip_outlook_prefix(paragraph['title']))#@UndefinedVariable
                    WINDOW.setProperty('Text.Paragraph%d.Content' % count, paragraph['$'])#@UndefinedVariable
                    count += 1
        except KeyError as e:
            e.args = ("Key Error in JSON File", "Key '{0}' not found while processing file from url:".format(e.args[0]), TEXT_FORECAST_URL)
            raise
        WINDOW.setProperty('TextForecast.IsFetched', 'true')#@UndefinedVariable
def login(username, password):
    """Log in to the site; returns 1 on success, 0 on failure.

    Returns None (implicitly) when the index page cannot be fetched,
    matching the original's fall-through behavior.
    """
    log( __name__ , " Logging in with username '%s' ..." % (username))
    content= geturl(main_url + 'index.php')
    if content is None:
        return  # no page at all -> None, as before
    flags = re.IGNORECASE | re.DOTALL
    # A visible logout icon means we are already authenticated
    if re.search('logouticon.png', content, flags):
        return 1
    form = re.search(unique_pattern, content, flags)
    if not form:
        return 0
    # The login form carries a per-session hidden field (name/value pair)
    login_postdata = urllib.urlencode({
        'username': username,
        'passwd': password,
        'remember': 'yes',
        'Submit': 'Login',
        'option': 'com_user',
        'task': 'login',
        'silent': 'true',
        'return': form.group(1),
        form.group(2): form.group(3),
    })
    jar = cookielib.CookieJar()
    urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor(jar)))
    request = urllib2.Request(main_url + 'index.php', login_postdata)
    reply = urllib2.urlopen(request).read()
    return 1 if re.search('logouticon.png', reply, flags) else 0
def mergeOverlappingMax(data, f=max):
    """Collapse runs of mutually-overlapping rows into single rows.

    A run [run_start..i] is closed when some row in it does not intersect
    row i+1 (or at the last row). The merged row keeps columns 0 and 1 of
    the run's first row, the max of column 2, and f(...) of each remaining
    column. Behavior is identical to the original implementation.
    """
    merged = []
    n_rows = len(data)
    n_cols = len(data[0])
    run_start = 0
    for i in range(n_rows):
        # Does the current run stay open into row i+1?
        closes = True
        if i != n_rows - 1:
            for j in range(run_start, i + 1):
                if intersect(data[j], data[i + 1]) == 0:
                    closes = False
        if closes:
            run = [data[k] for k in range(run_start, i + 1)]
            row = [data[run_start][0], data[run_start][1], max([r[2] for r in run])]
            for col in range(3, n_cols):
                row.append(f([r[col] for r in run]))
            merged.append(row)
            run_start = i + 1
    utilities.log(str(n_rows - len(merged))+": after merging overlaping the dataset was reduced from "+str(n_rows)+" lines to "+ str(len(merged)))
    return merged
def search_subtitles(file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack):  #standard input
    """Search Undertexter.se for Swedish and/or English subtitles.

    Queries in the order the user ranked the two languages; returns the
    standard (subtitles_list, session_id, message) triple.
    """
    subtitles_list = []
    msg = ""
    if len(tvshow) == 0:
        searchstring = title
    if len(tvshow) > 0:
        searchstring = "%s S%#02dE%#02d" % (tvshow, int(season), int(episode))
    log(__name__, "%s Search string = %s" % (debug_pretext, searchstring))
    # Rank = 1-based position of the language among the user's choices, 0 if absent
    ranked = [string.lower(lang1), string.lower(lang2), string.lower(lang3)]
    swedish = ranked.index("swedish") + 1 if "swedish" in ranked else 0
    english = ranked.index("english") + 1 if "english" in ranked else 0
    if swedish and not english:
        getallsubs(searchstring, "sv", "Swedish", subtitles_list)
    elif english and not swedish:
        getallsubs(searchstring, "en", "English", subtitles_list)
    elif swedish and english:
        # Both requested: query in preference order
        if swedish < english:
            getallsubs(searchstring, "sv", "Swedish", subtitles_list)
            getallsubs(searchstring, "en", "English", subtitles_list)
        else:
            getallsubs(searchstring, "en", "English", subtitles_list)
            getallsubs(searchstring, "sv", "Swedish", subtitles_list)
    else:
        msg = "Won't work, Undertexter.se is only for Swedish and English subtitles."
    return subtitles_list, "", msg  #standard output
def forecastlayer():
    """Fetch the selected Met Office forecast map layer (plus base map and
    marker images) and publish them as 'ForecastMap.*' WINDOW properties.

    The slider index is clamped to [0, len(timesteps)-1]; KeyErrors from the
    capabilities JSON are re-raised with the source URL appended.
    """
    utilities.log( "Fetching '{0}' Forecast Map with index '{1}'...".format(FORECASTMAP_LAYER_SELECTION, FORECASTMAP_SLIDER))
    with urlcache.URLCache(ADDON_DATA_PATH) as cache:
        # Static images are effectively immutable; cache them for 30 days
        surface = cache.get(GOOGLE_SURFACE, lambda x: datetime.utcnow() + timedelta(days=30))
        marker = cache.get(GOOGLE_MARKER, lambda x: datetime.utcnow() + timedelta(days=30))
        filename = cache.get(FORECAST_LAYER_CAPABILITIES_URL, forecastlayer_capabilities_expiry)
        data = json.load(open(filename))
        # pull parameters out of capabilities file - TODO: consider using jsonpath here
        try:
            for thislayer in data['Layers']['Layer']:
                if thislayer['@displayName'] == FORECASTMAP_LAYER_SELECTION:
                    layer_name = thislayer['Service']['LayerName']
                    image_format = thislayer['Service']['ImageFormat']
                    default_time = thislayer['Service']['Timesteps']['@defaultTime']
                    timesteps = thislayer['Service']['Timesteps']['Timestep']
                    break
            else:
                # for/else: no layer matched the configured display name
                raise Exception('Error', "Couldn't find layer '%s'" % FORECASTMAP_LAYER_SELECTION)
        except KeyError as e:
            e.args = ("Key Error in JSON File", "Key '{0}' not found while processing file from url:".format(e.args[0]), FORECAST_LAYER_CAPABILITIES_URL)
            raise
        issuedat = utilities.strptime(default_time, DATAPOINT_DATETIME_FORMAT).replace(tzinfo=pytz.utc)
        index = FORECASTMAP_SLIDER
        # Clamp the user-controlled slider into the valid timestep range
        if int(index) < 0:
            utilities.log('Slider is negative. Fetching with index 0')
            WINDOW.setProperty('ForecastMap.Slider', '0') #@UndefinedVariable
            index = '0'
        elif int(index) > len(timesteps)-1:
            utilities.log('Slider exceeds available index range. Fetching with index {0}'.format(str(len(timesteps)-1)))
            WINDOW.setProperty('ForecastMap.Slider', str(len(timesteps)-1)) #@UndefinedVariable
            index = str(len(timesteps)-1)
        timestep = timesteps[int(index)]
        delta = timedelta(hours=timestep)
        maptime = TZUK.normalize(issuedat + delta)
        # get overlay using parameters from gui settings
        try:
            LayerURL = data['Layers']['BaseUrl']['$']
        except KeyError as e:
            e.args = ("Key Error in JSON File", "Key '{0}' not found while processing file from url:".format(e.args[0]), FORECAST_LAYER_CAPABILITIES_URL)
            raise
        url = LayerURL.format(LayerName=layer_name, ImageFormat=image_format, DefaultTime=default_time, Timestep=timestep, key=API_KEY)
        # Overlay tiles change daily; resize on fetch via image_resize
        layer = cache.get(url, lambda x: datetime.utcnow() + timedelta(days=1), image_resize)
        WINDOW.setProperty('ForecastMap.Surface', surface)#@UndefinedVariable
        WINDOW.setProperty('ForecastMap.Marker', marker)#@UndefinedVariable
        WINDOW.setProperty('ForecastMap.IssuedAt', issuedat.astimezone(TZ).strftime(ISSUEDAT_FORMAT))#@UndefinedVariable
        WINDOW.setProperty('ForecastMap.MapTime', maptime.strftime(MAPTIME_FORMAT))#@UndefinedVariable
        WINDOW.setProperty('ForecastMap.Layer', layer)#@UndefinedVariable
        WINDOW.setProperty('ForecastMap.IsFetched', 'true')#@UndefinedVariable
def acquire(self, datasetID, size=0):
    """Route a dataset to the fast or slow processing queue by size.

    Datasets at or below the threshold go to the fast queue, the rest to
    the slow queue; the chosen queue is recorded in self.datasetQueue.
    """
    use_fast = size <= self.__threshold
    if use_fast:
        log("DatasetProcessorManager.acquire(dataset=" + str(datasetID) + ",size=" + repr(size) + "): put in the __fast_queue")
        queue = self.__fast_queue
    else:
        log("DatasetProcessorManager.acquire(dataset=" + str(datasetID) + ",size=" + repr(size) + "): put in the __slow_queue")
        queue = self.__slow_queue
    self.datasetQueue[datasetID] = queue
    queue.acquire(datasetID)
    if use_fast:
        # NOTE(review): message text ("running from the from the", missing
        # space) kept verbatim -- behavior-preserving rewrite
        log("DatasetProcessorManager dataset=" + str(datasetID) + "running from the from the __fast_queue")
    else:
        log("DatasetProcessorManager dataset=" + str(datasetID) + " running from the __slow_queue")
async def check_if_should_change_servers(
    original_current_server_id: str = "N/A",
) -> Tuple[bool, str]:
    """Poll the Roblox public-server list and decide whether to switch servers.

    Returns (should_switch, message). Switching is suggested when the bot's
    server cannot be found, or when another server has sufficiently more
    players (per CFG.player_switch_cap / CFG.player_difference_to_switch).
    """
    # "N/A" is the sentinel for "unknown server"
    current_server_id = ("" if original_current_server_id == "N/A" else original_current_server_id)
    current_server_playing = 0
    highest_player_server_playing = 0
    log("Querying Roblox API for server list")
    url = f"https://games.roblox.com/v1/games/{CFG.game_id}/servers/Public"
    try:
        response = get(url, timeout=10)
    except Exception:
        return False, "[WARN] Could not poll Roblox servers. Is Roblox down?"
    if response.status_code == 200:
        log("Finding best server and comparing to current...")
        response_result = response.json()
        servers = response_result["data"]
        if current_server_id == "N/A":
            current_server_id = ""
        for server in servers:
            server_id = server.get("id", "undefined")
            # Missing playerTokens -> player count unknown (-1)
            if server.get("playerTokens") is None:
                server_playing = -1
            else:
                server_playing = len(server["playerTokens"])
            if server_id == "undefined" or server_playing == -1:
                # Malformed entry: report it and keep scanning
                notify_admin(
                    f"Handled Error in `check_if_should_change_servers`\nServers:\n`{servers}`\nProblem:\n`{server}`"
                )
                continue
            if current_server_id == server_id:
                current_server_id = server_id
                current_server_playing = server_playing
            elif ("playerTokens" in server and server_playing > highest_player_server_playing):
                highest_player_server_playing = server_playing
        log("")
        if current_server_id == "" or current_server_id == "undefined":
            if highest_player_server_playing == 0:
                # No usable data at all
                return False, "[WARN] Could not poll Roblox servers. Is Roblox down?"
            return_message = (
                f"[WARN] Could not find FumoCam. Are we in a server?\n"
                f"Original Server ID: {original_current_server_id}\n"
                f"Detected Server ID: {current_server_id}")
            return True, return_message
        elif (current_server_playing < CFG.player_switch_cap and (current_server_playing + CFG.player_difference_to_switch) < highest_player_server_playing):
            difference = highest_player_server_playing - current_server_playing
            return (
                True,
                f"[WARN] There is a server with {difference} more players online.",
            )
        else:
            return False, ""
    return False, "[WARN] Could not poll Roblox servers. Is Roblox down?"
def getallsubs(searchstring, languageshort, languagelong, file_original_path, subtitles_list):
    """Page through the provider's search results, appending every subtitle
    hit to subtitles_list, then reorder the list so 'sync' entries come first.

    NOTE(review): `url` is only assigned when languageshort == "es"; any other
    value raises NameError at the first geturl() call -- TODO confirm this is
    only ever called with "es".
    """
    page = 1
    if languageshort == "es":
        url = main_url + "index.php?accion=5&masdesc=&oxdown=1&pg=" + str(page) + "&buscar=" + urllib.quote_plus(searchstring)
    content = geturl(url)
    log( __name__ ,"%s Getting '%s' subs ..." % (debug_pretext, languageshort))
    # Keep fetching result pages while they still contain subtitle entries
    while re.search(subtitle_pattern, content, re.IGNORECASE | re.DOTALL | re.MULTILINE | re.UNICODE):
        for matches in re.finditer(subtitle_pattern, content, re.IGNORECASE | re.DOTALL | re.MULTILINE | re.UNICODE):
            id = matches.group(4)
            no_files = matches.group(3)
            server = matches.group(5)
            # Scale the download count into a 0-10 "rating"
            downloads = int(matches.group(2)) / 1000
            if (downloads > 10):
                downloads=10
            filename = string.strip(matches.group(1))
            #Remove new lines on the commentaries
            filename = re.sub('\n',' ',filename)
            #Remove HTML tags on the commentaries
            filename = re.sub(r'<[^<]+?>','', filename)
            #Find filename on the comentaries to show sync label
            filesearch = os.path.split(file_original_path)
            sync = False
            # Compare against the local file name minus its 4-char extension
            if re.search(filesearch[1][:len(filesearch[1])-4], filename):
                sync = True
            log( __name__ ,"%s Subtitles found: %s (id = %s)" % (debug_pretext, filename, id))
            subtitles_list.append({'rating': str(downloads), 'no_files': no_files, 'filename': filename, 'sync': sync, 'id' : id, 'server' : server, 'language_flag': 'flags/' + languageshort + '.gif', 'language_name': languagelong})
        page = page + 1
        url = main_url + "index.php?accion=5&masdesc=&oxdown=1&pg=" + str(page) + "&buscar=" + urllib.quote_plus(searchstring)
        content = geturl(url)
    # Bubble sort, to put syncs on top
    for n in range(0,len(subtitles_list)):
        for i in range(1, len(subtitles_list)):
            temp = subtitles_list[i]
            if subtitles_list[i]["sync"] > subtitles_list[i-1]["sync"]:
                subtitles_list[i] = subtitles_list[i-1]
                subtitles_list[i-1] = temp
def buy_item(self, item, merchant_type):
    """Move to a merchant, open its window, and buy one of *item*.

    Quits the game if the merchant window cannot be opened.
    """
    # The merchant window must be open before anything can be bought
    buy_or_sell = self.open_merchant_window(merchant_type)
    if not buy_or_sell:
        utils.log(
            "SEVERE",
            F"Failed to click on {merchant_type} and open 'Buy or Sell' after 10 attempts"
        )
        utils.quit_game()
    wait_for = self.user_interface.wait_for_ui_element
    # Enter the buy menu
    pos = wait_for('buy')
    pyautogui.moveTo(pos[0] + 10, pos[1] + 10, 0.15)
    pyautogui.click()
    wait_for('buyMenu')
    # Double-click the item to purchase it
    pos = wait_for(item)
    pyautogui.moveTo(pos[0] + 6, pos[1] + 6, 0.15)
    pyautogui.doubleClick()
    # Accept via the check mark
    pos = wait_for('checkMark')
    pyautogui.moveTo(pos[0] + 5, pos[1] + 5, 0.15)
    pyautogui.click()
    # Dismiss the merchant window
    pos = wait_for('cancel')
    pyautogui.moveTo(pos[0] + 5, pos[1] + 5, 0.15)
    pyautogui.click()
    pyautogui.moveTo(400, 400)
    utils.log("INFO", F"Bought a {item}")
def validate(self):
    """Validate every option query held by this object.

    Each query's value must be retrievable (no ValueError from getValue)
    and its own validate() must pass. Short-circuits: returns False on
    the first failure, True when every query passes.
    """
    for opt in self.option_queries:
        try:
            opt.getValue()
            if not opt.validate():
                log('%s FAILED validation' % (opt.name))
                return False
        except ValueError:
            # e.g. attempting to convert 'bogus' to a numerical type
            return False
    return True
def getallsubs(searchstring, languageshort, languagelong, file_original_path, subtitles_list, tvshow, season, episode):
    """Scrape asia-team search results and append each hit to subtitles_list
    (mutated in place).
    """
    query_url = main_url.replace("%s", searchstring).replace(' ', '%20')  # encode spaces
    content = geturl(query_url)
    # NOTE(review): this logs the page body, not the URL -- kept verbatim
    # (behavior-preserving rewrite)
    log(__name__, "%s Getting url: %s" % (debug_pretext, content))
    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE | re.UNICODE
    for hit in re.finditer(subtitle_pattern, content, flags):
        subtitles_list.append({
            'rating': '0',
            'no_files': 1,
            'filename': hit.group(2),
            'server': "http://www.asia-team.net",
            'sync': False,
            'id': hit.group(1),
            'language_flag': 'flags/' + languageshort + '.gif',
            'language_name': languagelong,
        })
def search_subtitles(file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack):  #standard input
    """Search Subdivx for Spanish subtitles.

    Returns the standard (subtitles_list, session_id, message) triple; the
    message warns when Spanish was not among the requested languages.
    """
    subtitles_list = []
    msg = ""
    if len(tvshow) == 0:
        searchstring = title
    if len(tvshow) > 0:
        searchstring = "%s S%#02dE%#02d" % (tvshow, int(season), int(episode))
    log(__name__, u"%s Search string = %s" % (debug_pretext, searchstring))
    chosen = [string.lower(lang1), string.lower(lang2), string.lower(lang3)]
    spanish = chosen.index("spanish") + 1 if "spanish" in chosen else 0
    # NOTE(review): the search runs even when Spanish was not requested --
    # kept verbatim (behavior-preserving rewrite)
    getallsubs(searchstring, "es", "Spanish", file_original_path, subtitles_list)
    if spanish == 0:
        msg = "Won't work, Subdivx is only for Spanish subtitles!"
    return subtitles_list, "", msg  #standard output
def scholar():
    """Handle a scholar search request: run the query and render the results.

    Redirects home on an empty query; renders the error page when the
    upstream query returns nothing.
    """
    q = quote(unicode(request.args.get('q', '')).encode('utf8'))
    if q == "":
        return redirect("/")
    # Default pagination offset is "0"
    start = request.args.get('start', '') or str(0)
    utilities.log(str(request.remote_addr), str(request.headers.get('User-Agent')), request.args.get('q', ''))
    html = utilities.scholarQuery(q, start)
    if html is None:
        return render_template('error.html')
    parser = GoogleScholarSearchResultParser(html)
    jsonData = parser.getJson()
    jsonData['q'] = request.args.get('q', '')
    jsonData['start'] = start
    return render_template('scholar-search-result.html', jsonData=jsonData)
def main():
    """Demo driver: print the datasets, then run each sorting algorithm on
    copies so the originals stay untouched."""
    os.system('clear')
    log('Python Sorting Algorithms : ' + str(time.time()))
    print(
        "The following examples will show implementation and efficiency of various sorting algorithms."
    )
    log('Unsorted Arrays')
    for dataset in (words, numbers_simple, numbers_complex):
        print(dataset)
    log("Bubble Sort")
    result = bubble_sort(numbers_simple.copy())  # copy keeps the source list intact
    print('Before: ', numbers_simple)
    print('After: ', result)
    # Merge sort over numbers_simple stays disabled, as in the original:
    # result = merge_sort(numbers_simple.copy())
    # print('Before: ', numbers_simple)
    # print('After: ', result)
    log("Merge Sort Steps")
    sample = [3, 2, 4, 1, 4]
    result = merge_sort(list(sample))
    log('Result')
    print('Before: ', sample)
    print('After: ', result)
def getallsubs(response_url, content, language, title, subtitles_list, search_string):
    """Collect subtitle entries for *language* from a results page.

    When search_string is non-empty, only filenames containing it
    (case-insensitively) are kept. Appends to subtitles_list in place.
    """
    for found in re.finditer(subtitle_pattern, content, re.IGNORECASE | re.DOTALL):
        languagefound = found.group(3)
        if languagefound != to_subscene_lang(language):
            continue
        link = main_url + found.group(1)
        languageshort = toOpenSubtitles_two(language)
        filename = found.group(4)
        if search_string != "":
            log( __name__ , "string.lower(filename) = >" + string.lower(filename) + "<" )
            log( __name__ , "string.lower(search_string) = >" + string.lower(search_string) + "<" )
            # Skip entries whose filename does not contain the search string
            if string.find(string.lower(filename), string.lower(search_string)) == -1:
                continue
        log( __name__ ,"%s Subtitles found: %s, %s" % (debug_pretext, languagefound, filename))
        subtitles_list.append({
            'rating': '0',
            'movie': title,
            'filename': filename,
            'sync': False,
            'link': link,
            'language_flag': 'flags/' + languageshort + '.gif',
            'language_name': language,
        })
def getallsubs(searchstring, languageshort, languagelong, file_original_path, subtitles_list, searchstring_notclean):
    """Search the provider by name and append every hit to subtitles_list.

    Side effect: rebinds the module-global `filesearch` to the split path of
    file_original_path -- presumably consumed by a sibling function; TODO
    confirm.
    """
    url = main_url + "jutud.php?tp=nimi&otsing=" + urllib.quote_plus(searchstring)
    content = geturl(url)
    # NOTE(review): replace() runs BEFORE the None check below; a None
    # response would raise AttributeError here -- TODO confirm intended
    content=content.replace('\r\n','')
    if content is not None:
        log( __name__ ,"%s Getting '%s' subs ..." % (debug_pretext, languageshort))
        for id,filename in re.compile(subtitle_pattern).findall(content):
            log( __name__ ,"%s Subtitles found: %s (id = %s)" % (debug_pretext, filename, id))
            global filesearch
            filesearch = os.path.abspath(file_original_path)
            #For DEBUG only uncomment next line
            #log( __name__ ,"%s abspath: '%s'" % (debug_pretext, filesearch))
            filesearch = os.path.split(filesearch)
            #For DEBUG only uncomment next line
            #log( __name__ ,"%s path.split: '%s'" % (debug_pretext, filesearch))
            # NOTE(review): dirsearch / dirsearch_check are computed but never
            # used in this function
            dirsearch = filesearch[0].split(os.sep)
            #For DEBUG only uncomment next line
            #log( __name__ ,"%s dirsearch: '%s'" % (debug_pretext, dirsearch))
            dirsearch_check = string.split(dirsearch[-1], '.')
            #For DEBUG only uncomment next line
            #log( __name__ ,"%s dirsearch_check: '%s'" % (debug_pretext, dirsearch_check))
            subtitles_list.append({'rating': '0', 'no_files': 1, 'filename': filename, 'sync': False, 'id' : id, 'language_flag': 'flags/' + languageshort + '.gif', 'language_name': languagelong})
def main():
    """Build {image_id: feature_vector} maps (via extract_img_features) for
    the val, test and train COCO image folders and pickle each to disk.

    Python 2 code (print statements, cPickle). NOTE(review): the train split
    has no "done" print/log after its dump, unlike val and test.
    """
    # define where all val imgs are located:
    val_img_dir = "coco/images/val/"
    # create a list of the paths to all val imgs:
    val_img_paths = [val_img_dir + file_name for file_name in\
                os.listdir(val_img_dir) if ".jpg" in file_name]
    # get the feature vectors for all val imgs:
    val_img_id_2_feature_vector = extract_img_features(val_img_paths)
    # save on disk:
    cPickle.dump(val_img_id_2_feature_vector,
                open("coco/data/val_img_id_2_feature_vector", "wb"))
    print "val done!"
    log("val done!")
    # define where all test imgs are located:
    test_img_dir = "coco/images/test/"
    # create a list of the paths to all test imgs:
    test_img_paths = [test_img_dir + file_name for file_name in\
                os.listdir(test_img_dir) if ".jpg" in file_name]
    # get the feature vectors for all test imgs:
    test_img_id_2_feature_vector = extract_img_features(test_img_paths)
    # save on disk:
    cPickle.dump(test_img_id_2_feature_vector,
                open("coco/data/test_img_id_2_feature_vector", "wb"))
    print "test done!"
    log("test done!")
    # define where all train imgs are located:
    train_img_dir = "coco/images/train/"
    # create a list of the paths to all train imgs:
    train_img_paths = [train_img_dir + file_name for file_name in\
                os.listdir(train_img_dir) if ".jpg" in file_name]
    # get the feature vectors for all train imgs:
    train_img_id_2_feature_vector = extract_img_features(train_img_paths)
    # save on disk:
    cPickle.dump(train_img_id_2_feature_vector,
                open("coco/data/train_img_id_2_feature_vector", "wb"))
def cancel(self, event=None):
    """Invoke the registered cancel callback (if any), then close the widget.

    AttributeError / TypeError are treated as "no callback configured".
    NOTE(review): this also swallows AttributeError/TypeError raised *inside*
    the callback itself -- kept verbatim (behavior-preserving rewrite).
    """
    try:
        self.cancel_callback()
    except AttributeError as err:
        # no cancel_callback attribute on this object
        log('no CANCEL function defined - AttributeError')
        log(err)
    except TypeError:
        # attribute exists but is not callable (e.g. None)
        log('no CANCEL function defined - TypeError')
    self.close()
def filter_subtitles_by_language(self, set_languages, subtitles_list):
    """Return a deep copy of subtitles_list restricted to set_languages.

    With no language filter the input is returned unchanged. When nothing
    matches, optionally prompts the user (unless Czech/Slovak were wanted)
    and returns None.
    """
    if not set_languages:
        return subtitles_list
    log(__name__, ['Filter by languages', set_languages])
    matching = [
        version for version in subtitles_list['versions']
        if xbmc.convertLanguage(version['lang'], xbmc.ISO_639_2) in set_languages
    ]
    if matching:
        filtered = copy.deepcopy(subtitles_list)
        filtered['versions'] = matching
        return filtered
    # Nothing matched: offer to open player settings (skip for cze/slo users)
    if "cze" not in set_languages and "slo" not in set_languages:
        dialog = xbmcgui.Dialog()
        if dialog.yesno(self.addon.getAddonInfo('name'), self._t(32100), self._t(32101)):
            xbmc.executebuiltin("Dialog.Close(subtitlesearch)")
            xbmc.executebuiltin("PlayerControl(Stop)")
            xbmc.executebuiltin(
                "ActivateWindowAndFocus(playersettings,-96,0,-67,0)")
    return None
def getallsubs(content, title, moviefile, subtitles_list):
    """Parse Dutch subtitle hits out of a results page.

    Exact filename matches are flagged sync with rating 10; otherwise the
    rating comes from getrating(). Appends to subtitles_list in place.
    """
    for hit in re.finditer(subtitle_pattern, content, re.IGNORECASE | re.DOTALL):
        link = hit.group(1)
        filename = hit.group(2)
        log( __name__, "%s Subtitles found: %s (link=%s)" % (debug_pretext, filename, link))
        sync = True if isexactmatch(filename, moviefile) else False
        rating = 10 if sync else getrating(filename, moviefile)
        subtitles_list.append({
            'rating': str(rating),
            'no_files': 1,
            'movie': title,
            'filename': filename,
            'sync': sync,
            'link': link,
            'language_flag': 'flags/nl.gif',
            'language_name': 'Dutch',
        })
def observation():
    """Fetch the latest Met Office hourly observation and publish the
    'Current.*' weather WINDOW properties.

    KeyErrors from the JSON are re-raised with the source URL appended.
    """
    utilities.log( "Fetching Hourly Observation for '%s (%s)' from the Met Office..." % (OBSERVATION_LOCATION, OBSERVATION_LOCATION_ID))
    with urlcache.URLCache(ADDON_DATA_PATH) as cache:
        filename = cache.get(HOURLY_LOCATION_OBSERVATION_URL, observation_expiry)
        data=json.load(open(filename))
        try:
            dv = data['SiteRep']['DV']
            dataDate = utilities.strptime(dv.get('dataDate').rstrip('Z'), DATAPOINT_DATETIME_FORMAT).replace(tzinfo=pytz.utc)
            WINDOW.setProperty('HourlyObservation.IssuedAt', dataDate.astimezone(TZ).strftime(ISSUEDAT_FORMAT))#@UndefinedVariable
            # 'Period' and 'Rep' may be a list or a single dict in the JSON;
            # take the most recent entry when it is a list.
            try:
                latest_period = dv['Location']['Period'][-1]
            except KeyError:
                latest_period = dv['Location']['Period']
            try:
                latest_obs = latest_period['Rep'][-1]
            except KeyError:
                latest_obs = latest_period['Rep']
            WINDOW.setProperty('Current.Condition', WEATHER_CODES[latest_obs.get('W', 'na')][1])#@UndefinedVariable
            WINDOW.setProperty('Current.Visibility', latest_obs.get('V', 'n/a'))#@UndefinedVariable
            WINDOW.setProperty('Current.Pressure', latest_obs.get('P', 'n/a'))#@UndefinedVariable
            # NOTE(review): if 'T' is missing, float('n/a') raises ValueError,
            # which the KeyError handler below does NOT catch -- TODO confirm
            WINDOW.setProperty('Current.Temperature', str(round(float(latest_obs.get('T', 'n/a')))).split('.')[0])#@UndefinedVariable
            WINDOW.setProperty('Current.FeelsLike', 'n/a')#@UndefinedVariable
            #if we get Wind, then convert it to kmph.
            WINDOW.setProperty('Current.Wind', utilities.mph_to_kmph(latest_obs, 'S'))#@UndefinedVariable
            WINDOW.setProperty('Current.WindDirection', latest_obs.get('D', 'n/a'))#@UndefinedVariable
            WINDOW.setProperty('Current.WindGust', latest_obs.get('G', 'n/a'))#@UndefinedVariable
            WINDOW.setProperty('Current.OutlookIcon', '%s.png' % WEATHER_CODES[latest_obs.get('W', 'na')][0])#@UndefinedVariable
            WINDOW.setProperty('Current.FanartCode', '%s.png' % WEATHER_CODES[latest_obs.get('W','na')][0])#@UndefinedVariable
            WINDOW.setProperty('Current.DewPoint', str(round(float(latest_obs.get('Dp', 'n/a')))).split('.')[0])#@UndefinedVariable
            WINDOW.setProperty('Current.Humidity', str(round(float(latest_obs.get('H', 'n/a')))).split('.')[0])#@UndefinedVariable
            WINDOW.setProperty('HourlyObservation.IsFetched', 'true')#@UndefinedVariable
        except KeyError as e:
            e.args = ("Key Error in JSON File", "Key '{0}' not found while processing file from url:".format(e.args[0]), HOURLY_LOCATION_OBSERVATION_URL)
            raise
def get_item(self, item, exit_on_failure=False):
    """Locate *item* inside the player's backpack via template matching.

    Returns the item's pixel coordinates; returns False on a low-confidence
    match unless exit_on_failure is set, in which case the game is quit.
    """
    backpack, backpack_loc = self.get_backpack()
    # Template-match the item image against the backpack screenshot
    scores = cv2.matchTemplate(backpack, self.item_templates[item],
                               cv2.TM_CCORR_NORMED)
    _, best_score, _, best_loc = cv2.minMaxLoc(scores)
    if best_score < 0.9:  # below confidence threshold -> "not found"
        if not exit_on_failure:
            return False
        utils.log(
            "SEVERE",
            F"Unable to find {item} in backpack. max_val: {best_score:3.2f}"
        )
        utils.quit_game()
    # Translate from backpack-relative to screen coordinates
    return (backpack_loc[0] + best_loc[0], backpack_loc[1] + best_loc[1])
def geturl(url):
    """Fetch *url* and return the response body, or None on any failure.

    Bug fix: the original had no return statement, so callers always
    received None even on success; `content` is now returned. The py2-only
    `except E, x` syntax is replaced with `except E as x` (valid 2.6+).
    """
    log(__name__, " Getting url: %s" % (url))
    try:
        import urllib
        response = urllib.urlopen(url)
        content = response.read()
    except Exception as inst:
        log(__name__, " Failed to get url: %s" % (url))
        log(__name__, " Error: %s" % (inst))
        content = None
    return content
def okFunction(self):
    """Confirm edited option values on self.item, then notify it of the update.

    Composed items additionally spawn a child entry before the notification.
    """
    queries = self.item.getOptionQueries()
    log('confirming new value')
    for query in queries:
        log(query.var.get())
    if hasattr(self.item, 'is_composed'):
        log('ListItem row_num')
        self.item.addChildByClass()
    self.item.didUpdateQueries()
async def mute_toggle(set_mute: Union[bool, None] = None):
    """Toggle (or force) in-game audio mute and start/stop background music.

    Args:
        set_mute: None toggles the current state; True/False forces it.

    Side effects: sets the game's volume via the sound-control executable,
    kills/starts VLC, updates OBS overlay files and CFG.audio_muted.
    """
    log_process("In-game Mute")
    desired_mute_state = not CFG.audio_muted
    if set_mute is not None:  # If specified, force to state
        desired_mute_state = set_mute
    desired_volume = 0 if desired_mute_state else 100
    log_msg = "Muting" if desired_mute_state else "Un-muting"
    log(log_msg)
    sc_exe_path = str(CFG.resources_path / CFG.sound_control_executable_name)
    os.system(  # nosec
        f'{sc_exe_path} /SetVolume "{CFG.game_executable_name}" {desired_volume}'
    )
    # Kill the process no matter what, race condition for this is two songs playing (bad)
    kill_process(executable=CFG.vlc_executable_name, force=True)
    if desired_mute_state:  # Start playing music
        # Show the muted icon on the OBS overlay
        copyfile(
            CFG.resources_path / OBS.muted_icon_name,
            OBS.output_folder / OBS.muted_icon_name,
        )
        vlc_exe_path = str(CFG.vlc_path / CFG.vlc_executable_name)
        music_folder = str(CFG.resources_path / "soundtracks" / "overworld")
        Popen(
            f'"{vlc_exe_path}" --playlist-autostart --loop --playlist-tree {music_folder}'
        )
        output_log("muted_status", "In-game audio muted!\nRun !mute to unmute")
        sleep(5)  # Give it time to load VLC
    else:  # Stop playing music
        try:
            if os.path.exists(OBS.output_folder / OBS.muted_icon_name):
                os.remove(OBS.output_folder / OBS.muted_icon_name)
        except OSError:
            log("Error, could not remove icon!\nNotifying admin...")
            # NOTE(review): async_sleep(2) is not awaited -- the coroutine is
            # created but never runs; TODO confirm `await` was intended
            async_sleep(2)
            notify_admin("Mute icon could not be removed")
        log(log_msg)
        output_log("muted_status", "")
    CFG.audio_muted = desired_mute_state
    await check_active()
    log_process("")
    log("")
def sell_item(self, item, merchant_type):
    """Move to a merchant and sell up to 12 of *item*.

    Returns the number of items actually offered; quits the game if the
    merchant window cannot be opened.
    """
    buy_or_sell = self.open_merchant_window(merchant_type)
    if not buy_or_sell:
        utils.log(
            "SEVERE",
            F"Failed to click on {merchant_type} and open 'Buy or Sell' after 10 attempts"
        )
        utils.quit_game()
    # Enter the sell menu
    pos = self.user_interface.wait_for_ui_element('sell')
    pyautogui.moveTo(pos[0] + 10, pos[1] + 10, 0.15)
    pyautogui.click()
    self.user_interface.wait_for_ui_element('sellMenu')
    # Offer at most 12 items, stopping early when none remain
    items_sold = 0
    for _ in range(12):
        # Park the cursor so returning to the item fires a fresh hover event
        pyautogui.moveTo(330, 206)
        slot = self.user_interface.get_ui_element(item, exit_on_fail=False)
        if not slot:
            utils.log("INFO", F"No {item} left to offer shopkeeper")
            break
        items_sold += 1
        pyautogui.moveTo(slot[0] + 6, slot[1] + 12, 0.15)
        pyautogui.doubleClick()
        sleep(0.5)
    # Confirm the sale
    pos = self.user_interface.wait_for_ui_element('checkMark')
    pyautogui.moveTo(pos[0] + 5, pos[1] + 5, 0.15)
    pyautogui.click()
    # Leave the merchant window
    pos = self.user_interface.wait_for_ui_element('cancel')
    pyautogui.moveTo(pos[0] + 5, pos[1] + 5, 0.15)
    pyautogui.click()
    utils.log("INFO", F"Sold {items_sold} {item}(s)")
    return items_sold
def search_subtitles( file_original_path, title, tvshow, year, season, episode, set_temp, rar, lang1, lang2, lang3, stack ): #standard input
    """Search Ondertitel for Dutch movie subtitles.

    TV shows and non-Dutch language selections are rejected with an
    explanatory message; returns the standard triple.
    """
    subtitles_list = []
    msg = ""
    log( __name__ ,"%s Title = %s" % (debug_pretext, title))
    if len(tvshow) != 0:
        # Site only covers movies
        log( __name__ ,"%s Tv show detected: %s" % (debug_pretext, tvshow))
        msg = "Won't work, Ondertitel is only for movies."
        return subtitles_list, "", msg #standard output
    url = main_url + "?type=1+CD&p=zoek&trefwoord=" + urllib.quote_plus(title)
    wants_dutch = "dutch" in (string.lower(lang1), string.lower(lang2), string.lower(lang3))
    if not wants_dutch:
        log( __name__ ,"%s Dutch language is not selected" % (debug_pretext))
        msg = "Won't work, Ondertitel is only for Dutch subtitles."
        return subtitles_list, "", msg #standard output
    content, response_url = geturl(url)
    if content is not None:
        log( __name__ ,"%s Getting subs ..." % debug_pretext)
        getallsubs(content, title, subtitles_list)
    return subtitles_list, "", msg #standard output
def distributeChildFeature(self):
    """Emit G-code for the child feature repeated along a linear offset.

    The first copy is emitted at the current position, then NumRepeat-1
    incremental (delta_X, delta_Y) shifts each followed by the child's code.
    """
    log('LinearDistribution distributeChildFeature: %s' % (self.__repr__()))
    log('LinearDistribution feature: %s' % (self.features[0].__repr__()))
    child = self.features[0]
    gcode = child.getGCode()  # first copy, no offset
    step_x = self.option_queries[DeltaXQuery].getValue()
    step_y = self.option_queries[DeltaYQuery].getValue()
    repeats = self.option_queries[NumRepeatQuery].getValue() - 1
    for _ in xrange(repeats):
        log('LinearDistribution REPEAT: %s' % (self.__repr__()))
        # switch to incremental mode, shift, then re-emit the child feature
        gcode += self.machine.setMode('INCR')
        gcode += G.G0_XY((step_x, step_y))
        gcode += child.getGCode()
    return gcode
def extractAndFindSub(tempSubDir, tempZipFile):
    """Unpack a downloaded subtitle archive into tempSubDir and return the
    path of the freshly extracted .srt/.sub file, or '' on timeout.

    Detects completion by watching for either a new file appearing or a
    subtitle file with a newer mtime than before extraction.
    """
    # Remember the files currently in the folder and their number
    files = os.listdir(tempSubDir)
    init_filecount = len(files)
    filecount = init_filecount
    max_mtime = 0
    # Determine which is the newest subtitles file in tempSubDir
    for file in files:
        if (string.split(file, '.')[-1] in ['srt', 'sub']):
            mtime = os.stat(os.path.join(tempSubDir, file)).st_mtime
            if mtime > max_mtime:
                max_mtime = mtime
    init_max_mtime = max_mtime
    # Wait 2 seconds so that the unpacked files are at least 1 second newer
    time.sleep(2)
    # Use XBMC's built-in extractor
    #---
    #- xbmc.executebuiltin("XBMC.Extract(" + tempZipFile + "," + tempSubDir +")")
    #+++
    print "XBMC.Extract(" + tempZipFile + "," + tempSubDir + ")"
    print "unzip " + str(tempZipFile) + " -d " + str(tempSubDir)
    # NOTE(review): shell command built from raw paths -- breaks (or allows
    # injection) on spaces/metacharacters; TODO confirm paths are controlled
    os.system("unzip " + str(tempZipFile) + " -d " + str(tempSubDir))
    #+++
    waittime = 0
    while ((filecount == init_filecount) and (waittime < 20) and (init_max_mtime == max_mtime)):
        # Nothing extracted yet
        # Wait 1 second to let the builtin function 'XBMC.extract' unpack
        time.sleep(1)
        files = os.listdir(tempSubDir)
        filecount = len(files)
        # Determine if there is a newer file created in tempSubDir
        # (indicates that the extraction had completed)
        for file in files:
            if (string.split(file, '.')[-1] in ['srt', 'sub']):
                mtime = os.stat(os.path.join(tempSubDir, file)).st_mtime
                if (mtime > max_mtime):
                    max_mtime = mtime
        waittime = waittime + 1
    if waittime == 20:
        log(__name__, "Failed to unpack subtitles in '%s'" % (tempSubDir))
        return ""
    else:
        log(__name__, "Unpacked files in '%s'" % (tempSubDir))
        for file in files:
            # There could be more subtitle files in tempSubDir, so make sure we
            # get the newest subtitle file
            if ((string.split(file, '.')[-1] in ['srt', 'sub']) and (os.stat(os.path.join(tempSubDir, file)).st_mtime > init_max_mtime)):
                log(__name__, "Unpacked subtitles file '%s'" % (file))
                return os.path.join(tempSubDir, file)
def checkstatus(ctx, selected_hosts=None):
    """Check the status of hosts by running ``uname -a`` over SSH.

    Bug fix: the default ``selected_hosts=None`` made ``len(selected_hosts)``
    raise TypeError; ``not selected_hosts`` treats None and empty alike.

    Args:
        ctx: CLI context (unused here, kept for interface compatibility).
        selected_hosts: host names to check; prompts interactively when
            empty/None.
    """
    if not selected_hosts:
        selected_hosts = hostselector("Which host(s) do you want to check?")
    if not selected_hosts:
        log("No host has been selected.", "red")
        return
    all_hosts = hosts()
    for host in selected_hosts:
        if host not in all_hosts:
            log("Host " + host + " could not be found.", "red")
            continue
        host_data = hostdata(host)
        user = host_data["user"]
        ip = host_data["ip"]
        ssh_key = host_data["ssh_key"]
        ssh_port = host_data["ssh_port"]
        # Argument-list form (shell=False) avoids shell quoting issues
        stdout, stderr = subprocess.Popen(
            [
                "ssh", "-i", ssh_key, (user + "@" + ip), "-p", ssh_port,
                "uname -a"
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).communicate()
        if stderr:
            log(
                "Error while connecting to " + user + "@" + ip
                + " using SSH key " + ssh_key + ": " + stderr.decode(),
                "red",
            )
        # stdout is reported even when stderr was non-empty (as before)
        log(stdout.decode(), "green")
def geturl(url):
    """Fetch *url* with an empty User-Agent and return the body, or None.

    Works around http://bugs.python.org/issue1153027: when a redirect target
    contains spaces, they are percent-encoded and the page is re-fetched.
    """
    class MyOpener(urllib.FancyURLopener):
        version = ''  # blank UA string
    opener = MyOpener()
    log( __name__ ,"%s Getting url: %s" % (debug_pretext, url))
    try:
        response = opener.open(url)
        content = response.read()
        final_url = response.geturl()
        if url != final_url:
            log( __name__ ,"%s Getting redirected url: %s" % (debug_pretext, final_url))
            if (' ' in final_url):
                log( __name__ ,"%s Redirected url contains space (workaround a bug in python redirection: 'http://bugs.python.org/issue1153027', should be solved, but isn't)" % (debug_pretext))
                final_url = final_url.replace(' ','%20')
                response = opener.open(final_url)
                content = response.read()
                final_url = response.geturl()
    except:
        # Best-effort fetch: any failure yields None
        log( __name__ ,"%s Failed to get url:%s" % (debug_pretext, url))
        content = None
    return content
def download_subtitles (subtitles_list, pos, zip_subs, tmp_sub_dir, sub_folder, session_id): #standard input
    """Download the selected subtitle archive to zip_subs.

    Returns (True, language_name, "") -- the plugin's standard output triple.
    NOTE(review): success is logged and returned even when saving failed (the
    bare except only logs), and when no download link is found the function
    implicitly returns None -- TODO confirm callers tolerate both.
    """
    url = main_url + subtitles_list[pos][ "link" ]
    local_tmp_file = zip_subs
    # First request fetches the page (and session cookie) with the real link
    content, cookie = geturl(url, FETCH_COOKIE)
    downloadlink = getdownloadlink(content)
    if downloadlink is not None:
        try:
            url = main_url + downloadlink
            url = string.replace(url," ","+")
            log( __name__ ,"%s Fetching subtitles using url %s - and cookie: %s" % (debug_pretext, url, cookie.cookies))
            content = geturl(url, FETCH_SUBTITLE, cookie.cookies)
            if content is not None:
                log( __name__ ,"%s Saving subtitles to '%s'" % (debug_pretext, local_tmp_file))
                local_file_handle = open(local_tmp_file, "w" + "b")
                local_file_handle.write(content)
                local_file_handle.close()
        except:
            log( __name__ ,"%s Failed to save subtitles to '%s'" % (debug_pretext, local_tmp_file))
        log( __name__ ,"%s Subtitles saved to '%s'" % (debug_pretext, local_tmp_file))
        language = subtitles_list[pos][ "language_name" ]
        return True, language, "" #standard output