def loadAPIkeys():
    global clients
    tsv.create(datadir("clients/authenticated_machines.tsv"))
    #createTSV("clients/authenticated_machines.tsv")
    clients = tsv.parse(datadir("clients/authenticated_machines.tsv"), "string", "string")
    #clients = parseTSV("clients/authenticated_machines.tsv","string","string")
    log("Authenticated Machines: " + ", ".join([m[1] for m in clients]))

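# A minimal sketch of how the parsed client list is presumably consumed; the
# helper name and the file contents are illustrative, not real data. Each TSV
# line is expected to hold an API key and a machine name:
#
#   s3cr3tapikey	my_laptop
#
def checkAPIkey_sketch(apikey):
    # hypothetical counterpart of checkAPIkey: return the machine name for a
    # known key (which may be the empty string), False for an unknown key
    for key, machine in clients:
        if key == apikey:
            return machine
    return False
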
def reduce_caches_if_low_ram():
    ramprct = psutil.virtual_memory().percent
    if ramprct > cmp:
        log("{prct}% RAM usage, reducing caches!".format(prct=ramprct), module="debug")
        ratio = (cmp / ramprct) ** 3
        reduce_caches(to=ratio)

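# Worked example of the cubic falloff above, assuming cmp is the configured
# RAM threshold in percent (75 here is an illustrative value, not a real default):
def _cache_ratio_example():
    # hypothetical demo, not called anywhere in the codebase
    threshold = 75
    return {usage: round((threshold / usage) ** 3, 2) for usage in (80, 90, 99)}
    # -> {80: 0.82, 90: 0.58, 99: 0.43}
    # the cube keeps reductions gentle near the threshold and aggressive far above it
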
def static_html(name):
    if name in aliases:
        redirect(aliases[name])
    linkheaders = ["</style.css>; rel=preload; as=style"]
    keys = remove_identical(FormsDict.decode(request.query))

    adminmode = request.cookies.get("adminmode") == "true" and auth.check(request)

    clock = Clock()
    clock.start()

    LOCAL_CONTEXT = {
        "adminmode": adminmode,
        "apikey": request.cookies.get("apikey") if adminmode else None,
        "_urikeys": keys,  # temporary!
    }
    lc = LOCAL_CONTEXT
    lc["filterkeys"], lc["limitkeys"], lc["delimitkeys"], lc["amountkeys"], lc["specialkeys"] = uri_to_internal(keys)

    template = jinja_environment.get_template(name + '.jinja')
    try:
        res = template.render(**LOCAL_CONTEXT)
    except ValueError:
        abort(404, "Entity does not exist")

    if settings.get_settings("DEV_MODE"):
        jinja_environment.cache.clear()

    log("Generated page {name} in {time:.5f}s".format(name=name, time=clock.stop()), module="debug_performance")
    return clean_html(res)

def sync():
    # collect all new entries by file
    # so we don't open the same file for every entry
    entries = {}

    for idx in range(len(SCROBBLES)):
        if not SCROBBLES[idx][2]:
            t = get_scrobble_dict(SCROBBLES[idx])

            artistlist = list(t["artists"])
            # we want the order of artists to be deterministic so when we update
            # files with new rules a diff can see what has actually been changed
            artistlist.sort()
            artistss = "␟".join(artistlist)
            timestamp = datetime.date.fromtimestamp(t["time"])

            entry = [str(t["time"]), artistss, t["title"]]

            monthcode = str(timestamp.year) + "_" + str(timestamp.month)
            entries.setdefault(monthcode, []).append(entry)  # i feckin love the setdefault function

            SCROBBLES[idx] = (SCROBBLES[idx][0], SCROBBLES[idx][1], True)

    for e in entries:
        tsv.add_entries("scrobbles/" + e + ".tsv", entries[e], comments=False)
        #addEntries("scrobbles/" + e + ".tsv",entries[e],escape=False)
        combineChecksums("scrobbles/" + e + ".tsv", cla.checksums)

    global lastsync
    lastsync = int(datetime.datetime.now(tz=datetime.timezone.utc).timestamp())
    log("Database saved to disk.")

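# Illustrative example of the row format written above (values made up):
# a scrobble at Unix time 1585000000 by two artists becomes, after sorting,
#
#   ["1585000000", "Artist A␟Artist B", "Some Title"]
#
# and lands in scrobbles/2020_3.tsv, since that timestamp falls in March 2020;
# the third tuple field marks the in-memory scrobble as already saved.
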
def post_scrobble(artist: Multi, **keys):
    artists = "/".join(artist)
    title = keys.get("title")
    apikey = keys.get("key")
    client = checkAPIkey(apikey)
    if client is False:  # check identity, not truthiness: the empty string is a valid client name!
        response.status = 403
        return ""

    try:
        time = int(keys.get("time"))
    except (TypeError, ValueError):
        # no timestamp supplied or an unparsable one: use the current time
        time = int(datetime.datetime.now(tz=datetime.timezone.utc).timestamp())

    log("Incoming scrobble (native API): Client " + client + ", ARTISTS: " + str(artists) + ", TRACK: " + title, module="debug")
    (artists, title) = cla.fullclean(artists, title)

    ## this is necessary for localhost testing
    #response.set_header("Access-Control-Allow-Origin","*")

    trackdict = createScrobble(artists, title, time)
    sync()  # always sync, one filesystem access every three minutes shouldn't matter
    return {"status": "success", "track": trackdict}

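# A hedged usage sketch from a client's point of view. The route and port are
# assumptions about how this endpoint is mounted, and the key value is made up:
#
#   import urllib.parse, urllib.request
#   params = urllib.parse.urlencode({
#       "artist": "Artist A",
#       "title": "Some Title",
#       "key": "s3cr3tapikey",
#   }).encode("utf-8")
#   urllib.request.urlopen("http://localhost:42010/api/newscrobble", data=params)
#
# Omitting "time" is fine; the server falls back to the current timestamp.
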
def set_image(b64, **keys):
    track = "title" in keys
    log("Trying to set image, b64 string: " + str(b64[:30] + "..."), module="debug")

    regex = r"data:image/(\w+);base64,(.+)"
    imgtype, b64 = re.fullmatch(regex, b64).groups()
    b64 = base64.b64decode(b64)
    filename = "webupload" + str(int(datetime.datetime.now().timestamp())) + "." + imgtype

    for folder in get_all_possible_filenames(**keys):
        if os.path.exists(data_dir['images'](folder)):
            with open(data_dir['images'](folder, filename), "wb") as f:
                f.write(b64)
            break
    else:
        # no existing folder: create the first candidate and write there
        folder = get_all_possible_filenames(**keys)[0]
        os.makedirs(data_dir['images'](folder))
        with open(data_dir['images'](folder, filename), "wb") as f:
            f.write(b64)

    log("Saved image as " + data_dir['images'](folder, filename), module="debug")

    # set as current picture in rotation
    if track:
        local_track_cache.add((frozenset(keys["artists"]), keys["title"]), os.path.join("/images", folder, filename))
    else:
        local_artist_cache.add(keys["artist"], os.path.join("/images", folder, filename))

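# Quick self-contained check of the data-URI regex used above; the payload is
# a truncated GIF header, illustrative only:
#
#   import re, base64
#   sample = "data:image/gif;base64,R0lGODlhAQABAAAAACw="
#   imgtype, payload = re.fullmatch(r"data:image/(\w+);base64,(.+)", sample).groups()
#   assert imgtype == "gif"
#   raw = base64.b64decode(payload)  # -> the raw image bytes
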
def build_db():
    log("Building database...")

    global SCROBBLES, ARTISTS, TRACKS
    global TRACKS_NORMALIZED_SET, TRACKS_NORMALIZED, ARTISTS_NORMALIZED_SET, ARTISTS_NORMALIZED
    global SCROBBLESDICT, STAMPS

    SCROBBLES = []
    ARTISTS = []
    TRACKS = []
    STAMPS = []
    SCROBBLESDICT = {}

    TRACKS_NORMALIZED = []
    ARTISTS_NORMALIZED = []
    ARTISTS_NORMALIZED_SET = set()
    TRACKS_NORMALIZED_SET = set()

    # parse files
    db = tsv.parse_all(datadir("scrobbles"), "int", "string", "string", comments=False)
    #db = parseAllTSV("scrobbles","int","string","string",escape=False)
    for sc in db:
        artists = sc[1].split("␟")
        title = sc[2]
        time = sc[0]
        readScrobble(artists, title, time)

    # optimize database
    SCROBBLES.sort(key=lambda tup: tup[1])
    #SCROBBLESDICT = {obj[1]:obj for obj in SCROBBLES}
    STAMPS = [t for t in SCROBBLESDICT]
    STAMPS.sort()

    # inform malojatime module about earliest scrobble
    if len(STAMPS) > 0:
        register_scrobbletime(STAMPS[0])

    # NOT NEEDED BECAUSE WE DO THAT ON ADDING EVERY ARTIST ANYWAY
    # get extra artists with no real scrobbles from countas rules
    #for artist in coa.getAllArtists():
    #for artist in coa.getCreditedList(ARTISTS):
    #    if artist not in ARTISTS:
    #        log(artist + " is added to database because of countas rules",module="debug")
    #        ARTISTS.append(artist)
    #coa.updateIDs(ARTISTS)

    # start regular tasks
    utilities.update_medals()
    utilities.update_weekly()
    utilities.send_stats()

    global ISSUES
    ISSUES = check_issues()

    log("Database fully built!")

def send_stats():
    if settings.get_settings("SEND_STATS"):
        log("Sending daily stats report...")

        from .database import ARTISTS, TRACKS, SCROBBLES

        keys = {
            "url": "https://myrcella.krateng.ch/malojastats",
            "method": "POST",
            "headers": {"Content-Type": "application/json"},
            "data": json.dumps({
                "name": settings.get_settings("NAME"),
                "url": settings.get_settings("PUBLIC_URL"),
                "version": ".".join(str(d) for d in version),
                "artists": len(ARTISTS),
                "tracks": len(TRACKS),
                "scrobbles": len(SCROBBLES)
            }).encode("utf-8")
        }
        req = urllib.request.Request(**keys)
        response = urllib.request.urlopen(req)
        log("Sent daily report!")

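# The report body above serializes to JSON along these lines (values made up):
#
#   {"name": "My Maloja", "url": "https://maloja.example.org", "version": "2.7.1",
#    "artists": 412, "tracks": 1893, "scrobbles": 24310}
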
def start_db():
    log("Starting database...")
    global lastsync
    lastsync = int(datetime.datetime.now(tz=datetime.timezone.utc).timestamp())
    build_db()
    loadAPIkeys()
    #run(dbserver, host='::', port=PORT, server='waitress')
    log("Database reachable!")

def start():
    try:
        p = subprocess.Popen(
            ["python3", "-m", "maloja.server"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        return p
    except Exception as e:
        log("Error starting Maloja: " + str(e), module="supervisor")

def scrobbletrack(artiststr, titlestr, timestamp):
    try:
        log("Incoming scrobble (compliant API): ARTISTS: " + artiststr + ", TRACK: " + titlestr, module="debug")
        (artists, title) = cla.fullclean(artiststr, titlestr)
        database.createScrobble(artists, title, timestamp)
        database.sync()
    except:
        raise ScrobblingException()

def get_image_track_all(track):
    for service in services["metadata"]:
        res = service.get_image_track(track)
        if res is not None:
            log("Got track image for " + str(track) + " from " + service.name)
            return res
        else:
            log("Could not get track image for " + str(track) + " from " + service.name)

def invalidate_caches():
    global cache_query, cache_aggregate
    cache_query = {}
    cache_aggregate = {}

    now = datetime.datetime.utcnow()
    global cacheday
    cacheday = (now.year, now.month, now.day)

    log("Database caches invalidated.")

def scrobble(self, artiststr, titlestr, time=None, duration=None, album=None):
    logmsg = "Incoming scrobble (API: {api}): ARTISTS: {artiststr}, TRACK: {titlestr}"
    log(logmsg.format(api=self.__apiname__, artiststr=artiststr, titlestr=titlestr))

    if time is None:
        time = int(datetime.datetime.now(tz=datetime.timezone.utc).timestamp())

    try:
        (artists, title) = cla.fullclean(artiststr, titlestr)
        database.createScrobble(artists, title, time)
        database.sync()
    except:
        raise ScrobblingException()

def get_image_artist_all(artist):
    for service in services["metadata"]:
        res = service.get_image_artist(artist)
        if res is not None:
            log("Got artist image for " + str(artist) + " from " + service.name)
            return res
        else:
            log("Could not get artist image for " + str(artist) + " from " + service.name)

def log_stats():
    logstr = "{name}: {hitsperm} Perm Hits, {hitstmp} Tmp Hits, {misses} Misses; Current Size: {sizeperm}/{sizetmp}"
    for s in (cachestats["cache_query"], cachestats["cache_aggregate"]):
        log(logstr.format(
            name=s["name"],
            hitsperm=s["hits_perm"],
            hitstmp=s["hits_tmp"],
            misses=s["misses"],
            sizeperm=len(s["objperm"]),
            sizetmp=len(s["objtmp"])
        ), module="debug")

def handle(self, path, keys):
    methodname = None
    try:
        methodname = self.get_method(path, keys)
        method = self.methods[methodname]
    except:
        # methodname stays None if get_method itself failed
        log("Could not find a handler for method " + str(methodname) + " in API " + self.__apiname__, module="debug")
        log("Keys: " + str(keys), module="debug")
        raise InvalidMethodException()
    return method(path, keys)

def incoming_scrobble(artists, title, album=None, duration=None, time=None, fix=True):
    if time is None:
        time = int(datetime.datetime.now(tz=datetime.timezone.utc).timestamp())

    log("Incoming scrobble: ARTISTS: " + str(artists) + ", TRACK: " + title, module="debug")
    if fix:
        (artists, title) = cla.fullclean("/".join(artists), title)
    trackdict = createScrobble(artists, title, time, album, duration)

    sync()

    return {"status": "success", "track": trackdict}

def api_request_track(track):
    artists, title = track
    for api in apis_tracks:
        if api["check"]:
            log("API: " + api["name"] + "; Image request: " + "/".join(artists) + " - " + title, module="external")
            try:
                artiststring = urllib.parse.quote(", ".join(artists))
                titlestring = urllib.parse.quote(title)
                var = artiststring + titlestring
                for step in api["steps"]:
                    if step[0] == "get":
                        response = urllib.request.urlopen(step[1].format(artiststring=artiststring, titlestring=titlestring, var=var))
                        var = json.loads(response.read())
                    elif step[0] == "post":
                        keys = {
                            "url": step[1].format(artiststring=artiststring, titlestring=titlestring, var=var),
                            "method": "POST",
                            "headers": step[2],
                            "data": bytes(urllib.parse.urlencode(step[3]), encoding="utf-8")
                        }
                        req = urllib.request.Request(**keys)
                        response = urllib.request.urlopen(req)
                        var = json.loads(response.read())
                    elif step[0] == "parse":
                        for node in step[1]:
                            var = var[node]
                assert isinstance(var, str) and var != ""
            except:
                if len(artists) != 1:
                    # try the same track with every single artist
                    for a in artists:
                        result = api_request_track(([a], title))
                        if result is not None:
                            return result
                continue
            return var
    return None

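# A hypothetical descriptor illustrating the step format consumed above.
# None of these URLs or JSON field names are real; they only show the shape:
apis_tracks_example = [{
    "name": "ExampleAPI",  # label used in log lines
    "check": True,         # gate, e.g. "is an API key configured?"
    "steps": [
        # "get"/"post" steps fetch JSON into var; "parse" walks var down the
        # given key path, so the final var must end up as a non-empty URL string
        ("get", "https://api.example.org/track?artist={artiststring}&title={titlestring}"),
        ("parse", ["results", 0, "image_url"]),
    ],
}]
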
def wrapper(self, path: Multi = [], **keys):
    log("API request: " + str(path))  # + " | Keys: " + str({k:keys.get(k) for k in keys})

    try:
        response.status, result = self.handle(path, keys)
    except:
        exceptiontype = sys.exc_info()[0]
        if exceptiontype in self.errors:
            response.status, result = self.errors[exceptiontype]
        else:
            response.status, result = 500, {"status": "Unknown error", "code": 500}

    return result

def rebuild(**keys):
    apikey = keys.pop("key", None)
    if checkAPIkey(apikey):
        log("Database rebuild initiated!")
        global db_rulestate
        db_rulestate = False
        sync()
        from .fixexisting import fix
        fix()
        global cla, coa
        cla = CleanerAgent()
        coa = CollectorAgent()
        build_db()
        invalidate_caches()

def proxy_scrobble(artists, title, timestamp):
    for api in apis_scrobble:
        response = urllib.request.urlopen(
            api["scrobbleurl"],
            data=utf(api["requestbody"](artists, title, timestamp))
        )
        xml = response.read()
        data = ET.fromstring(xml)
        if data.attrib.get("status") == "ok":
            if data.find("scrobbles").attrib.get("ignored") == "0":
                log(api["name"] + ": Scrobble accepted: " + "/".join(artists) + " - " + title)
            else:
                log(api["name"] + ": Scrobble not accepted: " + "/".join(artists) + " - " + title)

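# Shape of the audioscrobbler-style XML this parses (trimmed, illustrative):
#
#   <lfm status="ok">
#       <scrobbles ignored="0" accepted="1">...</scrobbles>
#   </lfm>
#
# data.attrib["status"] comes from the root element, and the nested
# <scrobbles> node reports how many submissions the remote service ignored.
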
def static_html(name):
    linkheaders = ["</css/maloja.css>; rel=preload; as=style"]
    keys = remove_identical(FormsDict.decode(request.query))

    with open("website/" + name + ".html") as htmlfile:
        html = htmlfile.read()

    # apply global substitutions
    with open("website/common/footer.html") as footerfile:
        footerhtml = footerfile.read()
    with open("website/common/header.html") as headerfile:
        headerhtml = headerfile.read()
    html = html.replace("</body>", footerhtml + "</body>").replace("</head>", headerhtml + "</head>")

    # If a python file exists, it provides the replacement dict for the html file
    if os.path.exists("website/" + name + ".py"):
        #txt_keys = SourceFileLoader(name,"website/" + name + ".py").load_module().replacedict(keys,DATABASE_PORT)
        try:
            content = SourceFileLoader(name, "website/" + name + ".py").load_module().instructions(keys)
            if isinstance(content, str):
                redirect(content)
            txt_keys, resources = content
        except HTTPResponse:
            # redirects raise HTTPResponse; let them through
            raise
        except Exception:
            log("Error in website generation: " + str(sys.exc_info()), module="error")
            raise

        # add headers for server push
        for resource in resources:
            if all(ord(c) < 128 for c in resource["file"]):
                # we can only put ascii stuff in the http header
                linkheaders.append("<" + resource["file"] + ">; rel=preload; as=" + resource["type"])

        # apply key substitutions
        for k in txt_keys:
            if isinstance(txt_keys[k], list):
                # if list, we replace each occurrence with the next item
                for element in txt_keys[k]:
                    html = html.replace(k, element, 1)
            else:
                html = html.replace(k, txt_keys[k])

    response.set_header("Link", ",".join(linkheaders))
    return html

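# The list branch above replaces one occurrence per item, in order. A quick
# standalone illustration with made-up placeholder names:
#
#   html = "<li>KEY_TRACK</li><li>KEY_TRACK</li>"
#   for element in ["Track One", "Track Two"]:
#       html = html.replace("KEY_TRACK", element, 1)
#   # -> "<li>Track One</li><li>Track Two</li>"
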
def rebuild():
    keys = FormsDict.decode(request.forms)
    apikey = keys.pop("key", None)
    if checkAPIkey(apikey):
        log("Database rebuild initiated!")
        global db_rulestate
        db_rulestate = False
        sync()
        os.system("python3 fixexisting.py")
        global cla, coa
        cla = CleanerAgent()
        coa = CollectorAgent()
        build_db()
        invalidate_caches()

def import_rulemodule(**keys):
    apikey = keys.pop("key", None)
    if checkAPIkey(apikey):
        filename = keys.get("filename")
        remove = keys.get("remove") is not None
        validchars = "-_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        filename = "".join(c for c in filename if c in validchars)

        if remove:
            log("Deactivating predefined rulefile " + filename)
            os.remove(datadir("rules/" + filename + ".tsv"))
        else:
            log("Importing predefined rulefile " + filename)
            os.symlink(datadir("rules/predefined/" + filename + ".tsv"), datadir("rules/" + filename + ".tsv"))

def import_rulemodule():
    keys = FormsDict.decode(request.forms)
    apikey = keys.pop("key", None)
    if checkAPIkey(apikey):
        filename = keys.get("filename")
        remove = keys.get("remove") is not None
        validchars = "-_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        filename = "".join(c for c in filename if c in validchars)

        if remove:
            log("Deactivating predefined rulefile " + filename)
            os.remove("rules/" + filename + ".tsv")
        else:
            log("Importing predefined rulefile " + filename)
            os.symlink("predefined/" + filename + ".tsv", "rules/" + filename + ".tsv")

def handle(path, keys):
    log("API request: " + str(path))  # + " | Keys: " + str({k:keys.get(k) for k in keys})

    if len(path) > 1 and (path[0], path[1]) in handlers:
        handler = handlers[(path[0], path[1])]
        path = path[2:]
        try:
            response.status, result = handler.handle(path, keys)
        except:
            exceptiontype = sys.exc_info()[0]
            # fall back to a generic 500 if the handler doesn't map this exception
            response.status, result = handler.errors.get(exceptiontype, (500, {"status": "Unknown error", "code": 500}))
    else:
        result = {"error": "Invalid scrobble protocol"}
        response.status = 500

    log("Response: " + str(result))
    return result

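# Hypothetical registration illustrating the (path[0], path[1]) lookup above;
# the actual module names and mount points may differ:
#
#   handlers = {
#       ("s", "audioscrobbler"): audioscrobbler_handler,
#       ("apis", "listenbrainz"): listenbrainz_handler,
#   }
#
# A request to /s/audioscrobbler/... is thus dispatched to its handler with
# the first two path segments stripped.
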
def customerror(error):
    code = int(str(error).split(",")[0][1:])
    log("HTTP Error: " + str(code), module="error")

    if os.path.exists("website/errors/" + str(code) + ".html"):
        return static_file("website/errors/" + str(code) + ".html", root="")
    else:
        with open("website/errors/generic.html") as htmlfile:
            html = htmlfile.read()

        # apply global substitutions
        with open("website/common/footer.html") as footerfile:
            footerhtml = footerfile.read()
        with open("website/common/header.html") as headerfile:
            headerhtml = headerfile.read()
        html = html.replace("</body>", footerhtml + "</body>").replace("</head>", headerhtml + "</head>")

        html = html.replace("ERROR_CODE", str(code))
        return html

def route(self, fullpath):
    # preprocess all requests
    headers = request.headers
    keys = FormsDict.decode(request.query)

    if self.debug:
        log("Request to " + fullpath)
        for k in keys:
            log("\t" + k + " = " + keys.get(k))

    if request.get_header("Content-Type") is not None and "application/json" in request.get_header("Content-Type"):
        json = request.json if request.json is not None else {}
        keys.update(json)
    else:
        formdict = FormsDict.decode(request.forms)
        for k in formdict:
            for v in formdict.getall(k):
                keys[k] = v
        #keys.update(FormsDict.decode(request.forms))

    #print(keys)
    nodes = fullpath.split("/")
    reqmethod = request.method

    if self.auth(request):
        result = self.handle(nodes, reqmethod, keys, headers)
        if isinstance(result, Response):
            return result
        else:
            result = serialize(result)
            result = format_output[self.type](result, root_node=self.rootnode)
            return result
    else:
        response.status = 403
        return "Access denied"

def api_request_artist(artist):
    for api in apis_artists:
        try:
            artiststring = urllib.parse.quote(artist)
            var = artiststring
            for step in api["steps"]:
                if step[0] == "get":
                    response = urllib.request.urlopen(step[1].format(artiststring=artiststring, var=var))
                    var = json.loads(response.read())
                elif step[0] == "post":
                    keys = {
                        "url": step[1].format(artiststring=artiststring, var=var),
                        "method": "POST",
                        "headers": step[2],
                        "data": bytes(urllib.parse.urlencode(step[3]), encoding="utf-8")
                    }
                    req = urllib.request.Request(**keys)
                    response = urllib.request.urlopen(req)
                    var = json.loads(response.read())
                elif step[0] == "parse":
                    for node in step[1]:
                        var = var[node]
            assert isinstance(var, str) and var != ""
        except Exception as e:
            log("Error while getting artist image from " + api["name"], module="external")
            log(str(e), module="external")
            continue
        return var
    return None