def node_label(self, id, label):
    '''
    Record a node label change in the node-command cache.

    The live graph node is not touched (the log line says so explicitly);
    only the cached node_add command tuple is rewritten, so the new label
    survives into a later graphdef extraction.
    '''
    # TODO: remember node label changes to be able to generate a proper graphdef
    _log("node label update always ignored, (%s, %s)." % (id, label))
    # caching: label lives at index 4 of the cached command tuple
    cached = self.node_cmds_cache[id]
    self.node_cmds_cache[id] = (cached[0], cached[1], cached[2], cached[3], label, cached[5])
def load_session(self, task):
    '''
    Load a stashed session from the database into self.sessions.

    Fallback chain is: load -> revert -> reconstruct; any failure while
    unpickling/restoring delegates to revert_session.

    Raises Exception when the session row is missing or owned by another user.
    '''
    _log("autoloading stashed session (%s)" % task.gs_id)
    # load gs from DB
    obj = None
    try:
        obj = GraphSession.objects.filter(id=task.gs_id)[0]
    except Exception:
        # narrowed from a bare except: still maps "no row" (IndexError) and db
        # errors to the same message, but no longer swallows SystemExit etc.
        raise Exception("requested session id yielded no db object (%s)" % task.gs_id)
    if obj.username != task.username:
        raise Exception("username validation failed for sender: %s (%s)" % (task.username, task.gs_id))
    try:
        if not obj.stashed:
            raise Exception("'stashed' timezone.time flag was null")
        # load python & matlab structures
        session = SoftGraphSession(task.gs_id, obj.username)
        session.graph = from_djangodb_str(obj.stashed_pickle)
        filepath = os.path.join(settings.MATFILES_DIRNAME, obj.stashed_matfile)
        if os.path.isfile(filepath):
            # restore middleware state from the stashed matfile, if present
            session.graph.middleware.get_load_fct()(filepath)
        self.sessions[task.gs_id] = session
    except Exception as e:
        # best-effort: fall back to the next step in the chain (revert)
        _log("autoload failed: %s (%s)" % (str(e), task.gs_id), error=True)
        return self.revert_session(task)
    return self.sessions.get(task.gs_id, None)
def extract_graphdef(self):
    '''
    Extract and return a frontend-readable graph definition, using the x_y
    field to insert these into the gd.

    Returns a dict with "nodes" (cached node_add tuples), "links" (cached
    link tuples), "datas" (b64-encoded json of literal/function node objects)
    and "labels" (currently unpopulated, see TODO).
    '''
    _log("extracting graph def...")
    gdef = {}
    gdef["nodes"] = {}
    gdef["links"] = {}
    gdef["datas"] = {}
    gdef["labels"] = {}
    for key in self.root.subnodes.keys():
        # nodes
        gdef["nodes"][key] = self.node_cmds_cache[key]
        # links: not all nodes have outgoing links, so use .get() instead of
        # the previous bare except, which would also have hidden real errors
        links = self.dslinks_cache.get(key)
        if links is not None:
            gdef["links"][key] = links
        # labels
        # TODO: impl.
        # data: only literal/function-like nodes serialize their object
        n = self.root.subnodes[key]
        obj = n.get_object()
        if obj is not None and type(n) in (ObjLiteralNode, FuncNode, MethodAsFunctionNode, ):
            # str(...)[2:-1] strips the b'...' wrapper from the bytes repr
            gdef["datas"][key] = str(base64.b64encode(json.dumps(obj).encode('utf-8')))[2:-1]
    return gdef
def cleanup_wrk(self):
    ''' cleanup thread retires sessions that are "touched" longer ago than X time'''
    try:
        while not self.terminated:
            # sleep in 1 s slices so a terminate signal is noticed quickly
            mark = timezone.now()
            while not self.terminated and (timezone.now() - mark).seconds < settings.WRK_CLEANUP_INTERVAL_S:
                time.sleep(1)
            _log("cleaning up sessions...")
            for gs_id in list(self.sessions.keys()):
                candidate = self.sessions.get(gs_id, None)  # thread safe way
                if not candidate:
                    continue
                if (timezone.now() - candidate.touched).seconds > settings.WRK_SESSION_RETIRE_TIMEOUT_S:
                    self.shutdown_session(candidate.gs_id)
        # terminated: retire everything still live before exiting
        _log("clean up retiring sessions...")
        for gs_id in list(self.sessions.keys()):
            candidate = self.sessions.get(gs_id, None)  # thread safe way
            if not candidate:
                continue
            self.shutdown_session(candidate.gs_id)
    finally:
        self.termination_events[self.tcln.getName()].set()
def graph_coords(self, coords):
    ''' updates the cached node_add commands x- and y-coordinate entries '''
    for node_id, xy in coords.items():
        old = self.node_cmds_cache[node_id]
        # keep id/name/label/type, replace only the two leading coordinates
        self.node_cmds_cache[node_id] = (xy[0], xy[1], old[2], old[3], old[4], old[5])
    _log('graph coords: %d coordinate sets' % len(coords))
def extract_update(self):
    ''' an "update" is a set of non-literal data representations '''
    _log("extracting data update...")
    update = {}
    for key, node in self.root.subnodes.items():
        # only object-handle nodes contribute to an update
        if type(node) not in (ObjNode, ):
            continue
        obj = node.get_object()
        # hot handles report their repr, empty handles map to None
        update[key] = obj.get_repr() if obj else None
    return update
def monitor_wrk(self):
    '''
    Statistics thread: periodically gathers counters (db users/sessions, live
    sessions, hot object handles, middleware varnames, matlab vars) and logs
    them via _log_sysmon, then sleeps in 1 s slices for
    WRK_MONITOR_INTERVAL_S. Signals its termination event on exit.
    '''
    try:
        while not self.terminated:
            last = timezone.now()
            _log("gathering statistics...")
            # db users / sessions: count() lets the db count rows instead of
            # materializing every object just for len()
            num_users = User.objects.count()
            num_sessions = GraphSession.objects.count()
            # live data
            num_livesessions = len(self.sessions)
            # number of graph handles holding objects
            num_hothandles = 0
            for key in self.sessions:
                ses = self.sessions[key]
                # WARNING: encapsulate this impl. into e.g. enginterface
                # (inner loop variable renamed - it used to shadow 'key')
                for nkey in ses.graph.root.subnodes:
                    n = ses.graph.root.subnodes[nkey]
                    if type(n) in (nodespeak.ObjNode, ) and n.get_object() != None:
                        num_hothandles += 1
            # accumulated middleware varnames
            num_middleware_vars = 0
            for key in self.sessions:
                ses = self.sessions[key]
                num_middleware_vars += len(ses.graph.middleware.varnames)
            # total matlab vars - best effort, any live session can answer
            num_matlab_vars = 0
            try:
                someses = self.sessions[next(iter(self.sessions))]
                who = someses.graph.middleware.totalwho()
                num_matlab_vars = len(who)
            except Exception:
                pass  # no live sessions, or middleware unavailable
            # log monitored values
            _log_sysmon(num_users, num_sessions, num_livesessions, num_hothandles, num_middleware_vars, num_matlab_vars)
            while (not self.terminated) and (timezone.now() - last).seconds < settings.WRK_MONITOR_INTERVAL_S:
                time.sleep(1)
        _log("exiting...")
    finally:
        self.termination_events[self.tmon.getName()].set()
def node_rm(self, id):
    '''
    Remove an unconnected node from the graph and from the command cache.

    Raises Exception if the node still has parents or children; callers must
    remove links first. Missing ids are ignored silently.
    '''
    n = self.root.subnodes.get(id, None)
    if not n:
        return
    # check connectivity BEFORE touching middleware state: previously the
    # node's object was deregistered first, so raising below left the
    # middleware out of sync with a node that was never actually removed
    if n.num_parents() != 0 or n.num_children() != 0:
        raise Exception("node_rm: can not remove node with existing links")
    obj = n.get_object()
    if obj is not None:
        self.middleware.deregister(obj)
    remove_subnode(self.root, n)
    # caching
    del self.node_cmds_cache[id]
    _log("deleted node: %s" % id)
def shutdown_session(self, gs_id, nosave=False):
    ''' shuts down a session the right way '''
    _log("retiring session %s" % gs_id)
    with self.shutdownlock:
        # guard clause: nothing to do for ids that are not live
        session = self.sessions.get(gs_id, None)
        if not session:
            return
        with session.lock:
            try:
                if not nosave:
                    self.autosave(session)
                    self.extract_log(session)
                session.graph.shutdown()
                del self.sessions[gs_id]
            except Exception as e:
                _log("session shutdown error: " + str(e) + " (%s)" % gs_id, error=True)
def link_rm(self, id1, idx1, id2, idx2, order=0):
    ''' remove a link between two nodes and drop it from the link cache '''
    n1 = self.root.subnodes[id1]
    n2 = self.root.subnodes[id2]
    # method links are represented in the frontend as idx==-1, but as an
    # owner/subnode relation in nodespeak
    is_method_link = (idx1 == -1 and idx2 == -1)
    if is_method_link and type(n1) == ObjNode and type(n2) == MethodNode:
        remove_subnode(n1, n2)
    elif is_method_link and type(n1) == MethodNode and type(n2) == ObjNode:
        remove_subnode(n2, n1)
    else:
        # all other connections
        remove_connection(n1, idx1, n2, idx2, order)
    # caching: list.remove raises ValueError for a missing entry, exactly as
    # the previous index()+del pair did
    self.dslinks_cache[id1].remove((id1, idx1, id2, idx2, order))
    _log("removed link from (%s, %d) to (%s, %d)" % (id1, idx1, id2, idx2))
def link_add(self, id1, idx1, id2, idx2, order=0):
    ''' create a link between two nodes and record it in the link cache '''
    n1 = self.root.subnodes[id1]
    n2 = self.root.subnodes[id2]
    # "method links" are represented in the frontend as idx==-1, but as an
    # owner/subnode relation in nodespeak
    if idx1 == -1 and idx2 == -1:
        if type(n1) == ObjNode and type(n2) == MethodNode:
            add_subnode(n1, n2)
        elif type(n1) == MethodNode and type(n2) == ObjNode:
            add_subnode(n2, n1)
    else:
        # all other connections
        add_connection(n1, idx1, n2, idx2, order)
    # caching
    self.dslinks_cache.setdefault(id1, []).append((id1, idx1, id2, idx2, order))
    _log("added link from (%s, %d) to (%s, %d)" % (id1, idx1, id2, idx2))
def reconstruct_session(self, task):
    '''
    Rebuild a session from its stored graphdef (last step of the fallback
    chain: load -> revert -> reconstruct), wiping matfiles and overwriting
    both pickles with the freshly built graph.

    Raises Exception when the session row is missing or owned by another
    user. NOTE(review): on inner failure this still returns the partially
    constructed session object (or None) - callers must handle both.
    '''
    _log("reconstructing session from graphdef (%s)" % task.gs_id)
    try:
        obj = GraphSession.objects.filter(id=task.gs_id)[0]
    except Exception:
        # narrowed from a bare except: same error contract, but SystemExit
        # and friends are no longer swallowed
        raise Exception("requested gs_id yielded no db object (%s)" % task.gs_id)
    if obj.username != task.username:
        raise Exception("username validation failed for session id: %s, sender: %s" % (obj.username, task.username))
    session = None
    try:
        session = SoftGraphSession(task.gs_id, obj.username)
        session.graph.inject_graphdef(json.loads(obj.graphdef))
        # delete the matfile and reference
        if os.path.exists(obj.quicksave_matfile):
            os.remove(obj.quicksave_matfile)
        if os.path.exists(obj.stashed_matfile):
            os.remove(obj.stashed_matfile)
        obj.quicksave_matfile = ""
        obj.stashed_matfile = ""
        # over-write the pickle
        obj.quicksave_pickle = to_djangodb_str(session.graph)
        obj.stashed_pickle = to_djangodb_str(session.graph)
        # reset
        obj.quicksaved = timezone.now()
        obj.stashed = timezone.now()
        obj.save()
        self.sessions[task.gs_id] = session
    except Exception as e:
        _log("reconstruct failed: %s (%s)" % (str(e), task.gs_id), error=True)
    return session
def graph_update(self, redo_lsts):
    '''
    Takes an undo-redo list and sequentially modifies the server-side graph.

    Each redo entry is (cmd, *args) dispatched to the same-named method on
    self. Returns {'error': "<comma-joined messages>"} if any command failed,
    otherwise None.

    BUG FIX: the previous erracc() helper rebound its local parameter instead
    of mutating the accumulator, so errors were always dropped and the method
    always returned None even on failure. Errors are now collected in a list.
    '''
    _log('graph update: %d commands' % len(redo_lsts))
    errors = []
    for redo_molecule in redo_lsts:
        for redo in redo_molecule:
            cmd = redo[0]
            args = redo[1:]
            try:
                getattr(self, cmd)(*args)
            except Exception as e:
                _log('graph update failed, cmd "%s" with: %s' % (redo, str(e)), error=True)
                errors.append(str(e))
    if errors:
        return {'error': ", ".join(errors)}
def inject_graphdef(self, graphdef):
    '''
    Adds nodes, links and datas to the graph.

    graphdef is the dict shape produced by extract_graphdef: "nodes" holds
    node_add command tuples, "links" holds link_add tuples per source node,
    "datas" holds b64-encoded json payloads for assignable nodes.
    '''
    nodes = graphdef['nodes']
    links = graphdef['links']
    datas = graphdef['datas']
    # recreate all nodes first so links can resolve both endpoints
    for key in nodes.keys():
        cmd = nodes[key]
        self.node_add(cmd[0], cmd[1], cmd[2], cmd[3], cmd[4], cmd[5])
    for key in links.keys():
        for cmd in links[key]:
            self.link_add(cmd[0], cmd[1], cmd[2], cmd[3], cmd[4])
    for key in datas.keys():
        n = self.root.subnodes[key]
        if type(n) in (ObjLiteralNode, FuncNode, MethodAsFunctionNode, MethodNode ):
            # inverse of the b64/str-slicing encoding used at extraction time
            obj = json.loads(str(base64.b64decode( datas[key] ))[2:-1])
            try:
                n.assign(obj)
            except Exception:
                # narrowed from a bare except; assignment stays best-effort,
                # the offending payload is printed for debugging
                print(obj)
        else:
            _log("inject: omiting setting data on node of type: %s" % str(type(n)), error=True)
def handle(self, *args, **options):
    '''
    Management-command entry point: purge stale messages, then poll the
    worker mainloop until ctrl-c requests shutdown.
    '''
    _log("purging messages...")
    purgemessages.Command().handle()
    workers = Workers()
    _log("starting workers...")
    _log("looking for tasks...")
    try:
        while True:
            workers.mainwork()
            time.sleep(0.1)
    # ctr-c exits
    except KeyboardInterrupt:
        print("")
        _log("shutdown requested, exiting...")
        workers.terminate()
        print("")
def execute_node(self, id):
    ''' execute a node and return a json representation of the result '''
    # NOTE: the except arms below map every engine failure to a json error
    # object so the worker thread never sees an exception from here.
    _log("execute_node: %s" % id)
    try:
        n = self.root.subnodes[id]
        # deregister and clear any previous object
        old = n.get_object()
        if old != None:
            self.middleware.deregister(old)
        # execute (assigns a new object or None), log and register.
        # _n=n default-binds the node now; the lambda calls the module-level
        # execute_node function, not this method (same name, different scope)
        obj = self.middleware.execute_through_proxy(
            lambda _n=n: execute_node(_n)
        )
        # NOTE: deregistration is handled through the execute proxy call
        _log("exe yields: %s" % str(obj))
        _log("returning json representation...")
        retobj = {'dataupdate': {} }
        # the executed node always gets a data update; method-like nodes also
        # refresh their owning ObjNode
        update_lst = [id]
        if type(n) in (MethodAsFunctionNode, ):
            update_lst.append([o[0].name for o in n.parents if type(o[0])==ObjNode ][0]) # find "owner" id...
        if type(n) in (MethodNode, ):
            update_lst.append([o.name for o in n.owners if type(o) == ObjNode][0]) # find "owner" id...
        for key in update_lst:
            try:
                # default to None so cleared handles are reported explicitly
                retobj['dataupdate'][key] = None
                m = self.root.subnodes[key]
                if m.exemodel().can_assign():
                    objm = m.get_object()
                    if objm:
                        retobj['dataupdate'][key] = objm.get_repr()
            except Exception as e:
                # repr extraction failures are re-raised with a traceback and
                # caught by the ObjectRepresentationException arm below
                s = traceback.format_exc()
                raise ObjectRepresentationException(s)
        return retobj
    except InternalExecutionException as e:
        _log("internal error during exe (%s): %s - %s" % (id, e.name, str(e)), error=True)
        return {'error' : "InternalExecutionException:\n%s" % str(e), 'errorid' : e.name}
    except NodeNotExecutableException as e:
        _log("node is not executable (%s)" % id, error=True)
        return {'error' : "NodeNotExecutableException:\n%s, %s" % (str(e), id)}
    except ObjectRepresentationException as e:
        _log("object representation error... %s" % str(e), error=True)
        return {'error' : "ObjectRepresentationException:\n%s" % str(e)}
    except Exception as e:
        # catch-all boundary: exotic errors are logged and typed in the reply
        _log("Exotic engine error (%s): %s" % (id, str(e)), error=True)
        return {'error' : "%s: %s" % (type(e).__name__, str(e))}
def node_data(self, id, data_str):
    '''
    This method does not simply set data, but:
    - only nodes of type ObjNode are touched
    - None results in data deletion
    - any json (incl. empty string) results in a tried inject_json call on the
      object, if any
    '''
    n = self.root.subnodes[id]
    if not type(n) in [ObjNode, ObjLiteralNode, FuncNode, MethodAsFunctionNode]:
        _log('node_data ignored for node of type "%s"' % type(n))
        return
    if data_str is None:
        n.assign(None)
        _log('node_data assigning None to object of node "%s"' % n.name)
        return
    # deserialise
    obj = None
    try:
        obj = json.loads(data_str)
    except Exception:
        # narrowed from a bare except; undeserialisable payloads are ignored
        # on purpose (best-effort contract)
        _log("node_data input could not be deserialised")
        return
    # assign / set
    if obj is None:
        # clear-functionality enabled by setting even userdata = null
        n.assign(None)
        _log("node_data clearing node: %s" % id)
    elif type(n) in (ObjLiteralNode,):
        n.assign(obj)
        _log('node_data assigning to literal "%s"' % n.name)
    elif type(n) in (FuncNode, MethodAsFunctionNode, ):
        n.assign(obj)
        _log("node_data assigning to FuncNode or MethodAsFunctionNode %s" % n.name)
    else:
        try:
            n.get_object().set_user_data(obj)
            _log('node_data injected into node "%s"' % n.name)
        except Exception as e:
            # set_user_data does not have to be implemented
            _log('node_data failed to set data "%s" on node "%s" (%s)' % (data_str, n.name, str(e)), error=True)
            raise e
def threadwork(self):
    '''
    Worker thread mainloop: pulls tasks off self.taskqueue and dispatches on
    task.cmd, writing a GraphReply row for (almost) every command. Any
    exception from a command handler is caught at the bottom and reported to
    the client as a "fatalerror" reply. Signals its termination event on exit.
    '''
    # check for the self.terminated=True signal every timeout seconds
    task = None
    while not self.terminated:
        try:
            task = self.taskqueue.get(block=True, timeout=0.1)
        except Empty:
            task = None
        if not task:
            continue
        _log("doing task '%s' (%s)" % (task.cmd, task.gs_id))
        try:
            # attach/load-attach
            if task.cmd == "load":
                session = self.get_soft_session(task)
                if not session:
                    session = self.load_session(task)
                gd = None
                update = None
                with session.lock:
                    try:
                        gd = session.graph.extract_graphdef()
                        update = session.graph.extract_update()
                    except:
                        # NOTE(review): bare except - extraction failure of any
                        # kind re-queues the task as a revert
                        _log("autoload failed, requesting fallback cmd='revert' (%s)" % task.gs_id, error=True)
                        task.cmd = "revert"
                        self.taskqueue.put(task)
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"graphdef": gd, "dataupdate": update}))
                graphreply.save()
            # revert - to last active save
            elif task.cmd == "revert":
                # cleanup & remove any active session
                self.shutdown_session(task.gs_id, nosave=True)
                # quickload the session AKA revert
                session = self.revert_session(task)
                gd = None
                update = None
                with session.lock:
                    try:
                        gd = session.graph.extract_graphdef()
                        update = session.graph.extract_update()
                    except:
                        # NOTE(review): session is always truthy here (the
                        # 'with session.lock' above would have failed first),
                        # so this raise looks unreachable - confirm intent
                        if not session:
                            raise Exception("session could not be reverted: %s" % task.gs_id)
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"graphdef": gd, "dataupdate": update}))
                graphreply.save()
            # reset
            elif task.cmd == "reset":
                self.reset_session(task.gs_id)
                gd = None
                update = None
                # reply carries null graphdef/dataupdate by design
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"graphdef": gd, "dataupdate": update}))
                graphreply.save()
            # save
            elif task.cmd == "save":
                session = self.get_soft_session(task)
                if not session:
                    raise Exception("save failed: session was not live (%s)" % task.gs_id)
                with session.lock:
                    anyerrors = session.graph.graph_update(task.sync_obj['sync'])
                    if anyerrors:
                        raise Exception("errors encountered during update: %s" % anyerrors)
                    session.graph.graph_coords(task.sync_obj['coords'])
                    self.quicksave(session)
                graphreply = GraphReply(reqid=task.reqid, reply_json='{"message" : "save success"}')
                graphreply.save()
            # update & run
            elif task.cmd == "update_run":
                session = self.get_soft_session(task)
                if not session:
                    raise Exception("update_run failed: session was not live (%s)" % task.gs_id)
                with session.lock:
                    json_obj = session.update_and_execute(task.sync_obj['run_id'], task.sync_obj['sync'])
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps(json_obj))
                graphreply.save()
            # update
            elif task.cmd == "update":
                session = self.get_soft_session(task)
                if not session:
                    raise Exception("update failed: session was not live (%s)" % task.gs_id)
                with session.lock:
                    error1 = session.graph.graph_update(task.sync_obj['sync'])
                    error2 = session.graph.graph_coords(task.sync_obj['coords'])
                # purge previous graph replies (depending on view setting, these may not be read)
                # TODO: impl
                # NOTE: at this time, update replies are not read, nor is this needed
                #graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps(error1))
                #graphreply.save()
            # clear objects
            elif task.cmd == "clear_data":
                session = self.get_soft_session(task)
                if not session:
                    raise Exception("clear_data failed: session was not live (%s)" % task.gs_id)
                with session.lock:
                    session.graph.reset_all_objs()
                    update = session.graph.extract_update()
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"dataupdate": update}))
                graphreply.save()
            # extract log lines
            elif task.cmd == "extract_log":
                session = self.get_soft_session(task)
                if not session:
                    raise Exception("extract_log failed: session was not live (%s)" % task.gs_id)
                with session.lock:
                    self.extract_log(session)
                graphreply = GraphReply(reqid=task.reqid, reply_json='{"message" : "command log extraction successful"}')
                graphreply.save()
            # save & shutdown
            elif task.cmd == "autosave_shutdown":
                # NOTE(review): loop variable 'session' is unused - every
                # iteration shuts down task.gs_id, not the iterated session;
                # confirm whether per-session shutdown was intended
                for session in self._get_user_softsessions(task):
                    self.shutdown_session(task.gs_id)
                graphreply = GraphReply(reqid=task.reqid, reply_json='{"message" : "save-shutdown successful"}')
                graphreply.save()
            # hard shutdown
            elif task.cmd == "shutdown":
                # no GraphReply is written for this command
                session = self.get_soft_session(task)
                session.graph.shutdown()
            # create / new
            elif task.cmd == "new":
                obj = GraphSession()
                obj.example = False
                obj.username = task.username
                obj.description = ""
                obj.title = ""
                obj.example = False
                obj.excomment = ""
                obj.save()
                session = SoftGraphSession(gs_id=str(obj.id), username=obj.username)
                with session.lock:
                    self.sessions[obj.id] = session
                    self.quicksave(session)
                    self.autosave(session)
                # reply payload is the new session id itself
                graphreply = GraphReply(reqid=task.reqid, reply_json=obj.id)
                graphreply.save()
            # clone
            elif task.cmd == "clone":
                # get graph definition from clonable
                gd = None
                obj = GraphSession.objects.filter(id=task.gs_id)[0]
                if obj.username != task.username and obj.example == False:
                    raise Exception("will not clone non-example session from other users")
                session = self.get_soft_session(task)
                if not session:
                    # get stored graphdef
                    gd = json.loads(obj.graphdef)
                else:
                    # get live session graphdef
                    with session.lock:
                        gd = session.graph.extract_graphdef()
                newobj = GraphSession()
                newobj.example = False
                newobj.graphdef = json.dumps(gd)
                newobj.title = obj.title + " [CLONE]"
                newobj.description = obj.description
                newobj.username = task.username
                newobj.save()
                # this causes loading to fail, resulting in a reconstruct @ load or revert
                self.reset_session(newobj.id)
                graphreply = GraphReply(reqid=task.reqid, reply_json=newobj.id)
                graphreply.save()
            # delete
            elif task.cmd == "delete":
                self.shutdown_session(task.gs_id)
                obj = GraphSession.objects.filter(id=task.gs_id)[0]
                # remove both matfiles before dropping the db row
                if os.path.exists(obj.stashed_matfile):
                    os.remove(obj.stashed_matfile)
                if os.path.exists(obj.quicksave_matfile):
                    os.remove(obj.quicksave_matfile)
                obj.delete()
                graphreply = GraphReply(reqid=task.reqid, reply_json='{"message" : "delete success"}')
                graphreply.save()
            # reset all sessions (admin command)
            elif task.cmd == "admin_resetall":
                sesionobjs = GraphSession.objects.all()
                numreset = 0
                for obj in sesionobjs:
                    # TODO: create a plural shutdown_sessions to improve lock acquisition performance
                    self.shutdown_session(str(obj.id), nosave=False)
                    # only count rows not already in the reset state
                    if obj.stashed_pickle != "reset":
                        numreset = numreset + 1
                    obj.stashed_pickle = "reset"
                    obj.quicksave_pickle = "reset"
                    obj.loglines = ""
                    obj.logheader = ""
                    obj.stashed_matfile = ""
                    obj.quicksave_matfile = ""
                    obj.stashed = timezone.now()
                    obj.quicksaved = timezone.now()
                    obj.save()
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"msg": "sessions activaly reset: %d" % numreset}))
                graphreply.save()
            # shutdown all sessions (admin command)
            elif task.cmd == "admin_shutdownall":
                numshutdown = 0
                keys = list(self.sessions)
                for k in keys:
                    # (k becomes the session id)
                    self.shutdown_session(k, nosave=False)
                    numshutdown = numshutdown + 1
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"msg": "sessions shut down: %d" % numshutdown}))
                graphreply.save()
            # shutdown all sessions (admin command)
            elif task.cmd == "admin_showvars":
                who = []
                try:
                    # best-effort: any live session's middleware can answer
                    someses = self.sessions[next(iter(self.sessions))]
                    who = someses.graph.middleware.totalwho()
                except:
                    # NOTE(review): bare except - empty vars list on any error
                    pass
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"vars": who}))
                graphreply.save()
            # execute live matlab command (admin command)
            elif task.cmd == "admin_matlabcmd":
                mlcmd = task.sync_obj["mlcmd"]
                nargout = task.sync_obj["nargout"]
                # make the repo root importable so ifitlib resolves
                sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
                import ifitlib
                ans = ifitlib._eval(mlcmd, nargout=nargout, dontlog=True)
                graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"ans": json.dumps(ans)}))
                graphreply.save()
            #
            else:
                raise Exception("invalid command: %s" % task.cmd)
            _log("task done")
        except Exception as e:
            # boundary: report any handler failure to the client as fatal
            _log("fatal error: " + str(e), error=True)
            graphreply = GraphReply(reqid=task.reqid, reply_json=json.dumps({"fatalerror": str(e)}))
            graphreply.save()
    _log("exit")
    self.termination_events[threading.current_thread().getName()].set()
def node_add(self, x, y, id, name, label, tpe):
    ''' create a node of the given type, attach it to root, and cache the command '''
    node = self._create_node(id, tpe)
    _log('created node (%s) of type: "%s", content: "%s"' % (id, str(type(node)), str(node.get_object())))
    add_subnode(self.root, node)
    # caching: remember the full add command for later graphdef extraction
    self.node_cmds_cache[id] = (x, y, id, name, label, tpe)