def has_unsaved_changes(self):
    """Return True if this patch differs from its saved file, or was never saved.

    Compares the current JSON serialization against the contents of
    self.file_origin, temporarily stripping per-session gui_params keys
    that are not meaningful for persistence.  A patch with no origin
    file counts as unsaved whenever it contains any objects.

    Fixes: the origin file was opened without being closed, and a
    leftover difflib debug dump was printed to stdout (with Python 2
    print-statement syntax) on every call.
    """
    import copy

    if self.file_origin:
        with open(self.file_origin, 'r') as origin:
            oldjson = origin.read()
        # Shallow copy is enough: we only delete top-level keys below,
        # then restore the saved mapping afterwards.
        saved_gui = copy.copy(self.gui_params)
        for k in ['num_inlets', 'num_outlets', 'obj_id', 'top_level']:
            if k in self.gui_params:
                del self.gui_params[k]
        newjson = self.json_serialize()
        self.gui_params = saved_gui
        if oldjson != newjson:
            log.warning("Unsaved changes in '%s'" % self.name,
                        "(%s)" % self.file_origin)
            return True
    elif len(self.objects):
        # Never saved, but contains objects -- counts as unsaved work.
        log.warning("Unsaved changes in new patch '%s'" % self.name)
        return True
    return False
def show_editor(self, obj_id, show):
    """Show or hide the GUI editor for the patch identified by obj_id.

    obj_id -- id of an object expected to be a Patch
    show -- truthy to create the patch's GUI, falsy to delete it
    """
    from .mfp_app import MFPApp
    patch = MFPApp().objects.get(obj_id)
    if not isinstance(patch, Patch):
        # Wrong target type (or no such object): log and do nothing.
        log.warning("show_editor: error: obj_id=%s, obj=%s is not a patch"
                    % (obj_id, patch))
    elif show:
        patch.create_gui()
    else:
        patch.delete_gui()
def label_edit_finish(self, widget, new_text, aborted=False):
    """Commit the result of an in-place label edit.

    Creates the backing processor object on first edit if needed, then
    pushes the new text to it unless the edit was aborted or the text
    is unchanged.  Always refreshes the display at the end.
    """
    if self.obj_id is None:
        # First edit: try to create the backing processor object.
        self.create(self.proc_type, None)

    if self.obj_id is None:
        log.warning("TextElement: could not create obj")
    else:
        changed = new_text != self.value
        if changed and not aborted:
            self.value = new_text
            self.set_text()
            MFPGUI().mfp.send(self.obj_id, 0, self.value)

    self.update()
def toggle_pause(self):
    """Toggle paused/running state of all patches, logging the result.

    Any error from the RPC call is logged (not raised) so a UI
    keybinding can invoke this safely.

    Fix: the exception handler printed to stdout instead of using the
    application logger like the rest of this module.
    """
    from mfp import log
    try:
        paused = MFPGUI().mfp.toggle_pause()
        if paused:
            log.warning("Execution of all patches paused")
        else:
            log.warning("Execution of all patches resumed")
    except Exception as e:
        # Route through the logger rather than bare print().
        log.error("Caught exception", e)
def toggle_pause(self):
    """Toggle paused/running state of all patches, logging the result.

    Fix: this variant used Python 2 'except Exception, e' and
    print-statement syntax, which is a SyntaxError under Python 3.
    Behavior is otherwise unchanged.
    """
    from mfp import log
    try:
        paused = MFPGUI().mfp.toggle_pause()
        if paused:
            log.warning("Execution of all patches paused")
        else:
            log.warning("Execution of all patches resumed")
    except Exception as e:
        print("Caught exception", e)
def draw_field_cb(self, texture, ctxt, px_min, px_max):
    """Redraw callback for one tile of the scatterplot field.

    texture -- the tile texture being redrawn
    ctxt -- drawing context for the texture
    px_min, px_max -- pixel bounds of the tile in field coordinates

    NOTE(review): indentation reconstructed from collapsed source --
    confirm nesting against upstream before relying on exact control flow.
    """
    def stroke_to(styler, curve, px, ptnum, delta):
        # Draw a stroke from the neighbor point at ptnum+delta to px.
        points = self.points.get(curve)
        dst_ptnum = ptnum + delta
        if dst_ptnum < 0 or dst_ptnum > points[-1][0]:
            return
        # NOTE(review): dst_ptnum is used as a list index here but was
        # range-checked against the stored point number above -- assumes
        # point numbers and list positions stay in sync (no gaps); confirm.
        dst_num, dst_pt = points[dst_ptnum]
        dst_px = self.pt2px(dst_pt)
        # Translate into this tile's local pixel coordinates.
        dst_px[0] -= px_min[0]
        dst_px[1] -= px_min[1]
        styler.stroke(ctxt, dst_px, px)

    # if the viewport is animated (viewport_scroll not 0)
    # the position of the field may have changed.
    field_vp = self.plot.get_viewport_origin()
    field_vp_pos = self.px2pt(field_vp)
    field_w = self.x_max - self.x_min
    field_h = self.y_max - self.y_min

    # Shift the data window to follow the viewport, preserving its size.
    if self.x_min != field_vp_pos[0]:
        self.x_min = field_vp_pos[0]
        self.x_max = self.x_min + field_w
        self._recalc_x_scale()
    if self.y_max != field_vp_pos[1]:
        self.y_max = field_vp_pos[1]
        self.y_min = self.y_max - field_h
        self._recalc_y_scale()

    for curve in self.points:
        curve = int(curve)
        styler = self.style.get(curve)
        if styler is None:
            # No style configured for this curve; fall back to defaults.
            log.warning("[scatterplot]: no style for curve", curve)
            styler = self.style[curve] = MarkStyle()

        tile_id = self.plot.tile_reverse.get(texture)
        if tile_id is None:
            # Texture is not a known tile; nothing to draw.
            return

        points = self.points_by_tile[curve].get(tile_id)
        if points is not None:
            # Mark each point, stroking back to its predecessor.
            for ptnum, p in points:
                pc = self.pt2px(p)
                pc[0] -= px_min[0]
                pc[1] -= px_min[1]
                styler.mark(ctxt, pc)
                if styler.stroke_style:
                    stroke_to(styler, curve, pc, ptnum, -1)

            # Also stroke forward from the tile's last point.
            if styler.stroke_style:
                ptnum, p = points[-1]
                pc = self.pt2px(p)
                pc[0] -= px_min[0]
                pc[1] -= px_min[1]
                stroke_to(styler, curve, pc, ptnum, 1)
def show_editor(self, obj_id, show):
    """Create or destroy the GUI editor for the patch with id obj_id.

    If obj_id does not resolve to a Patch, a warning is logged and
    nothing else happens.
    """
    from .mfp_app import MFPApp
    target = MFPApp().objects.get(obj_id)

    if isinstance(target, Patch):
        if show:
            target.create_gui()
        else:
            target.delete_gui()
    else:
        log.warning(
            "show_editor: error: obj_id=%s, obj=%s is not a patch" % (obj_id, target))
def finish(self):
    """Shut down the remote process, if any.

    Clears self.process first so concurrent callers see the process as
    already gone, then terminates and reaps it.  Errors from
    terminate() (e.g. process already exited) are logged and ignored.

    Fix: Python 2 'except OSError, e' syntax (SyntaxError under
    Python 3); the exception binding was unused and is dropped.
    """
    from mfp import log
    p = self.process
    self.process = None
    if p is not None:
        try:
            p.terminate()
            p.wait()
        except OSError:
            # Process likely exited on its own; nothing more to do.
            log.warning(
                "RPCExecRemote: caught error in terminate(), continuing")
def add_element(self, factory, x=None, y=None):
    """Create a new patch element using factory at (x, y).

    x, y default to the current pointer position from self.input_mgr.
    Creation errors are logged rather than raised.  Returns True.

    Fix: Python 2 'except Exception, e' syntax (SyntaxError under
    Python 3); the bindings for the exception and the created element
    were unused in this variant and are dropped.
    """
    if x is None:
        x = self.input_mgr.pointer_x
    if y is None:
        y = self.input_mgr.pointer_y
    try:
        factory(self, x, y)
    except Exception:
        log.warning("add_element: Error while creating with factory", factory)
    return True
def _connect(self, dest_name, dest_inlet, wait=True):
    """Bind this [send] object to its destination.

    dest_name -- name of the destination object or bus
    dest_inlet -- inlet number on the destination
    wait -- if True and the destination can't be resolved yet, park via
            _wait_connect() and retry later

    Returns True if connected (or already connected), False if still
    waiting for the destination to appear.
    """
    # short-circuit if already connected
    if (self.dest_name == dest_name
            and self.dest_inlet == dest_inlet
            and self.dest_obj is not None):
        return True

    # disconnect existing if needed
    if self.dest_obj is not None:
        self.disconnect(0, self.dest_obj, self.dest_inlet)
        self.dest_obj = None
        self.dest_obj_owned = False

    self.dest_name = dest_name
    self.dest_inlet = dest_inlet

    # find the new endpoint
    obj = MFPApp().resolve(self.dest_name, self, True)
    if obj is None:
        # usually we create a bus if needed. but if it's a reference to
        # another top-level patch, no.
        if ':' in self.dest_name or self.dest_inlet != 0:
            if wait:
                self._wait_connect()
            return False
        else:
            # create a bus (owned by this object) to carry the messages
            self.dest_obj = MFPApp().create(self.bus_type, "", self.patch, self.scope, self.dest_name)
            self.dest_obj_owned = True
    else:
        self.dest_obj = obj
        self.dest_obj_owned = False

    if self.dest_obj:
        # connect unless an identical connection already exists
        if (len(self.dest_obj.connections_in) < self.dest_inlet + 1
                or [self, 0] not in self.dest_obj.connections_in[self.dest_inlet]):
            self.connect(0, self.dest_obj, self.dest_inlet, False)
    else:
        log.warning("[send] can't find dest object and not still looking")

    # refresh saved creation args and the displayed label
    self.init_args = '"%s",%s' % (self.dest_name, self.dest_inlet)
    self.conf(label_text=self._mkdispname())

    # pass along any pending input
    if self.inlets[0] is not Uninit:
        self.trigger()
    return True
def wait(self, req, timeout=None):
    """Block until req receives a response or fails.

    req -- Request to wait on
    timeout -- seconds to wait, or None for no timeout

    Returns False if the host is shutting down (join_req set).  Raises
    on timeout, or RPCHost.RPCError if the request errored.

    Fix: the timeout path raised a bare Exception() with no message --
    the context was only in the log.  The exception now carries the
    timeout details (still an Exception, so existing handlers work).
    """
    import datetime
    endtime = None
    if timeout is not None:
        endtime = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
    with self.lock:
        while req.state not in (Request.RESPONSE_RCVD, Request.RPC_ERROR):
            # Poll so we can notice join_req and the deadline.
            self.condition.wait(0.1)
            if self.join_req:
                return False
            elif timeout is not None and datetime.datetime.now() > endtime:
                log.warning("rpc_host: Request timed out after %s sec -- %s"
                            % (timeout, req))
                raise Exception(
                    "rpc_host: request timed out after %s sec -- %s"
                    % (timeout, req))
    if req.state == Request.RPC_ERROR:
        raise RPCHost.RPCError()
def run(self):
    """Reader thread: pump the child process's stdout into the app log.

    Uses select() with a 0.25 s timeout so the loop notices join_req.
    Lines prefixed "[LOG] " are classified by severity and re-logged;
    JACK messages are logged specially; other output is logged raw only
    if self.log_raw is set.  EOF on the pipe, or a FATAL line, shuts
    the loop down; the child is terminated and reaped on exit.

    NOTE(review): indentation reconstructed from collapsed source --
    severity dispatch is assumed nested under the "[LOG] " prefix
    check; confirm against upstream.
    """
    from mfp import log
    while not self.join_req:
        try:
            if self.process:
                fileobj = self.process.stdout
                # wait up to 250 ms for output so join_req stays responsive
                r, w, e = select.select([fileobj], [], [], 0.25)
                if fileobj in r:
                    ll = fileobj.readline().decode()
                else:
                    continue
            else:
                ll = None
            if not ll:
                # EOF: child closed its stdout
                self.join_req = True
            else:
                ll = ll.strip()
                if ll.startswith("[LOG] "):
                    # structured log line: strip prefix, dispatch by severity
                    ll = ll[6:]
                    if ll.startswith("FATAL:"):
                        log.error(ll[7:], module=self.log_module)
                        self.join_req = True
                    elif ll.startswith("ERROR:"):
                        log.error(ll[7:], module=self.log_module)
                    elif ll.startswith("WARNING:"):
                        log.warning(ll[9:], module=self.log_module)
                    elif ll.startswith("INFO:"):
                        log.info(ll[6:], module=self.log_module)
                    elif ll.startswith("DEBUG:"):
                        log.debug(ll[7:], module=self.log_module)
                elif ll.startswith("JackEngine::XRun"):
                    log.warning("JACK: " + ll, module=self.log_module)
                elif ll.startswith("JackAudioDriver"):
                    if "Process error" in ll:
                        log.error("JACK: " + ll, module=self.log_module)
                elif self.log_raw and len(ll):
                    log.debug("%s " % self.log_module, ll)
        except Exception as e:
            log.debug("RPCExecRemote: exiting with error", e)
            self.join_req = True
    # clean up the child process on the way out
    if self.process:
        self.process.terminate()
        self.process.wait()
def _connect(self, dest_name, dest_inlet, wait=True):
    """Bind this [send] object to its destination.

    dest_name -- name of the destination object or bus
    dest_inlet -- inlet number on the destination
    wait -- if True and the destination can't be resolved yet, park via
            _wait_connect() and retry later

    Returns True if connected (or already connected), False if still
    waiting for the destination to appear.
    """
    # short-circuit if already connected
    if (self.dest_name == dest_name
            and self.dest_inlet == dest_inlet
            and self.dest_obj is not None):
        return True

    # disconnect existing if needed
    if self.dest_obj is not None:
        self.disconnect(0, self.dest_obj, self.dest_inlet)
        self.dest_obj = None
        self.dest_obj_owned = False

    self.dest_name = dest_name
    self.dest_inlet = dest_inlet

    # find the new endpoint
    obj = MFPApp().resolve(self.dest_name, self, True)
    if obj is None:
        # usually we create a bus if needed. but if it's a reference to
        # another top-level patch, no.
        if ':' in self.dest_name or self.dest_inlet != 0:
            if wait:
                self._wait_connect()
            return False
        else:
            # create a bus (owned by this object) to carry the messages
            self.dest_obj = MFPApp().create(self.bus_type, "", self.patch, self.scope, self.dest_name)
            self.dest_obj_owned = True
    else:
        self.dest_obj = obj
        self.dest_obj_owned = False

    if self.dest_obj:
        # connect unless an identical connection already exists
        if (len(self.dest_obj.connections_in) < self.dest_inlet+1
                or [self, 0] not in self.dest_obj.connections_in[self.dest_inlet]):
            self.connect(0, self.dest_obj, self.dest_inlet, False)
    else:
        log.warning("[send] can't find dest object and not still looking")

    # refresh saved creation args and the displayed label
    self.init_args = '"%s",%s' % (self.dest_name, self.dest_inlet)
    self.conf(label_text=self._mkdispname())

    # pass along any pending input
    if self.inlets[0] is not Uninit:
        self.trigger()
    return True
def refresh(self, element):
    """Refresh tree-view entries for element after a change.

    PatchInfo elements update both the object and layer views; other
    elements are re-filed in the object view under a (scope, parent)
    pair derived from their container or layer.  No-op during a bulk
    load.
    """
    from .patch_info import PatchInfo

    if isinstance(element, PatchInfo):
        self.object_view.update(element, None)
        self.layer_view.update(element, None)
        return

    if self.load_in_progress:
        return

    # Work out where this element files in the object tree.
    parent = None
    if isinstance(element.container, PatchElement):
        parent = (element.scope, element.container)
    elif element.layer is not None:
        if element.scope is not None:
            parent = (element.scope, element.layer.patch)
        else:
            parent = (element.layer.scope, element.layer.patch)

    if parent is not None:
        self.object_view.update(element, parent)
    else:
        log.warning("refresh: WARNING: element has no layer,", element)
def wait(self, req, timeout=None):
    """Block until req receives a response or fails.

    req -- Request to wait on
    timeout -- seconds to wait, or None for no timeout

    Returns False if the host is shutting down (join_req set).  Raises
    on timeout, or RPCHost.RPCError if the request errored.

    Fix: the timeout path raised a bare Exception() with no message --
    the context was only in the log.  The exception now carries the
    timeout details (still an Exception, so existing handlers work).
    """
    import datetime
    endtime = None
    if timeout is not None:
        endtime = datetime.datetime.now() + datetime.timedelta(
            seconds=timeout)
    with self.lock:
        while req.state not in (Request.RESPONSE_RCVD, Request.RPC_ERROR):
            # Poll so we can notice join_req and the deadline.
            self.condition.wait(0.1)
            if self.join_req:
                return False
            elif timeout is not None and datetime.datetime.now() > endtime:
                log.warning(
                    "rpc_host: Request timed out after %s sec -- %s"
                    % (timeout, req))
                raise Exception(
                    "rpc_host: request timed out after %s sec -- %s"
                    % (timeout, req))
    if req.state == Request.RPC_ERROR:
        raise RPCHost.RPCError()
def run(self):
    """Worker loop for the task nibbler.

    Each pass: collect newly-queued jobs plus failed jobs whose 250 ms
    back-off has expired, run them, and re-queue (with a decremented
    retry count) any that report not-done and still have retries left.

    Fix: 'done' was read after unit(*data) even when the call raised,
    so a failure on the first work item hit an unbound local (and later
    failures reused a stale value).  It is now initialized to False per
    item, so a raising unit is treated as not-done and retried.
    """
    work = []
    retry = []
    while not self.join_req:
        with self.lock:
            self.cv.wait(0.25)
            work = []
            if self.queue:
                work.extend(self.queue)
                self.queue = []
            if self.failed:
                # Only retry failures older than 250 ms; keep the rest.
                toonew = []
                newest = datetime.utcnow() - timedelta(milliseconds=250)
                for jobs, timestamp in self.failed:
                    if timestamp < newest:
                        work.extend(jobs)
                    else:
                        toonew.append((jobs, timestamp))
                self.failed = toonew
        retry = []
        for unit, retry_count, data in work:
            done = False
            try:
                done = unit(*data)
            except Exception:
                log.debug("Exception while running", unit)
                log.debug_traceback()
            if not done and retry_count:
                if isinstance(retry_count, (int, float)):
                    if retry_count > 1:
                        retry_count -= 1
                    else:
                        log.warning("[TaskNibbler] ran out of retries for", unit, data)
                        retry_count = False
                retry.append((unit, retry_count, data))
        if retry:
            with self.lock:
                self.failed.append((retry, datetime.utcnow()))
def add_element(self, factory, x=None, y=None):
    """Create, register, and start editing a new patch element.

    The position defaults to the current pointer location.  Always
    returns True; creation failures are logged, not raised.
    """
    pos_x = self.input_mgr.pointer_x if x is None else x
    pos_y = self.input_mgr.pointer_y if y is None else y

    try:
        element = factory(self, pos_x, pos_y)
    except Exception as err:
        import traceback
        log.warning("add_element: Error while creating with factory", factory)
        log.warning(err)
        log.debug_traceback()
        return True

    # File the new element into the layer and start editing it.
    self.active_layer().add(element)
    self.register(element)
    self.refresh(element)
    self.select(element)
    element.begin_edit()
    return True
def run(self):
    """Reader thread: pump the child process's stdout into the app log.

    Blocking-readline variant.  Lines prefixed "[LOG] " are classified
    by severity and re-logged; JACK messages are logged specially; raw
    output is logged first when self.log_raw is set.  EOF, or a FATAL
    line, shuts the loop down.

    Fix: Python 2 'except Exception, e' and print-statement syntax
    (SyntaxError under Python 3).  Behavior otherwise unchanged.
    """
    from mfp import log
    while not self.join_req:
        try:
            if self.process:
                ll = self.process.stdout.readline()
            else:
                ll = None
            if not ll:
                # EOF: child closed its stdout
                self.join_req = True
            else:
                ll = ll.strip()
                if self.log_raw:
                    log.debug("%s:" % self.log_module, ll.strip())
                if ll.startswith("[LOG] "):
                    # structured log line: strip prefix, dispatch by severity
                    ll = ll[6:]
                    if ll.startswith("FATAL:"):
                        log.error(ll[7:], module=self.log_module)
                        self.join_req = True
                    elif ll.startswith("ERROR:"):
                        log.error(ll[7:], module=self.log_module)
                    elif ll.startswith("WARNING:"):
                        log.warning(ll[9:], module=self.log_module)
                    elif ll.startswith("INFO:"):
                        log.info(ll[6:], module=self.log_module)
                    elif ll.startswith("DEBUG:"):
                        log.debug(ll[7:], module=self.log_module)
                elif ll.startswith("JackEngine::XRun"):
                    log.warning("JACK: " + ll, module=self.log_module)
                elif ll.startswith("JackAudioDriver"):
                    if "Process error" in ll:
                        log.error("JACK: " + ll, module=self.log_module)
        except Exception as e:
            print("RPCExecRemote caught error:", e)
            log.debug("RPCExecRemote: exiting")
            self.join_req = True
def run(self):
    ''' RPCHost.run: perform IO on managed sockets, dispatch data

    Main loop: select() over all managed sockets with a 0.1 s timeout,
    resynchronize on the 8-byte magic, read a length-prefixed JSON
    payload, and hand (payload, peer_id) to the read worker pool.
    When join_req is set, runs the exit handshake with peers and tears
    down published classes.

    NOTE(review): indentation reconstructed from collapsed source --
    confirm nesting against upstream before relying on exact control
    flow.
    '''
    self.read_workers.start()
    if RPCWrapper.rpchost is None:
        # first host created becomes the process-wide default
        RPCWrapper.rpchost = self
    import select
    self.fdsockets = {}
    while not self.join_req:
        rdy = None
        # index any newly-managed sockets by file descriptor
        for s in self.managed_sockets.values():
            if s.fileno() not in self.fdsockets:
                self.fdsockets[s.fileno()] = s
        try:
            sockets = list(self.fdsockets.keys())
            if sockets:
                rdy, _w, _x = select.select(list(self.fdsockets.keys()), [], [], 0.1)
            else:
                # nothing to watch yet; don't spin
                time.sleep(0.1)
        except Exception as e:
            print("select exception:", e)
        if not rdy:
            continue
        syncbytes = 8
        sync = b''
        for rsock in rdy:
            jdata = b''
            retry = 1
            while retry:
                sock = self.fdsockets.get(rsock)
                if sock is None:
                    # socket was unmanaged while we were reading
                    retry = 0
                    jdata = None
                    continue
                try:
                    # slide the sync window; after a sync error we read
                    # one byte at a time until the magic lines up again
                    sync = sync[syncbytes:]
                    syncbit = sock.recv(syncbytes)
                    if not syncbit:
                        # peer closed the connection
                        raise self.RecvError()
                    sync += syncbit
                    if sync != RPCHost.SYNC_MAGIC:
                        syncbytes = 1
                        retry = 1
                        raise self.SyncError()
                    else:
                        syncbytes = 8
                        retry = 0
                    # 8-byte length prefix, then the JSON payload
                    jlen = sock.recv(8)
                    jlen = int(jlen)
                    recvlen = 0
                    while recvlen < jlen:
                        jdata += sock.recv(jlen - recvlen)
                        recvlen = len(jdata)
                        if recvlen < jlen:
                            log.warning(
                                "RPCHost: got short packet (%d of %d)"
                                % (recvlen, jlen))
                except RPCHost.SyncError as e:
                    log.warning("RPCHost: sync error, resyncing")
                    pass
                except (socket.error, RPCHost.RecvError) as e:
                    # connection-level failure: drop the peer
                    log.warning("RPCHost: communication error")
                    retry = 0
                    jdata = None
                    deadpeer = self.peers_by_socket[sock]
                    self.unmanage(deadpeer)
                except Exception as e:
                    log.error("RPCHost: unhandled exception", type(e), e)
                    log.debug(jdata)
                    log.debug_traceback()
                    retry = 0
                    jdata = b""
            # NOTE(review): jlen may be unbound here if the very first
            # recv on this socket failed -- presumably jdata is None in
            # that case so the check short-circuits; confirm.
            if jdata is not None and len(jdata) >= jlen:
                peer_id = self.peers_by_socket.get(sock)
                self.read_workers.submit((jdata, peer_id))
    # ---- shutdown: exit handshake with peers ----
    if self.node_id == 0:
        # master: ask each peer to exit, waiting for each response
        peers = list(self.managed_sockets.keys())
        for node in peers:
            req = Request("exit_request", {})
            self.put(req, node)
            self.wait(req)
            del self.managed_sockets[node]
    elif 0 in self.managed_sockets:
        # worker: tell the master we are going away
        req = Request("exit_notify", {})
        self.put(req, 0)
        self.wait(req)
    # withdraw served classes and clear publisher registrations
    for clsname, cls in list(self.served_classes.items()):
        self.unpublish(cls)
    for clsname, cls in RPCWrapper.rpctype.items():
        cls.publishers = []
    if RPCWrapper.rpchost == self:
        RPCWrapper.rpchost = None
    self.read_workers.finish()
def run(self):
    ''' RPCHost.run: perform IO on managed sockets, dispatch data

    select() over managed sockets with a 0.1 s timeout, resynchronize
    on the sync magic, then read a length-prefixed JSON payload.

    Fix: this variant used Python 2 syntax ('except E, e', print
    statements, str buffers), which is a SyntaxError / type error under
    Python 3.  Converted to 'except ... as e', print(), and bytes
    buffers; unused exception bindings dropped.  Logic is unchanged.
    '''
    self.read_workers.start()
    if RPCWrapper.rpchost is None:
        # first host created becomes the process-wide default
        RPCWrapper.rpchost = self
    import select
    self.fdsockets = {}
    while not self.join_req:
        rdy = None
        # index any newly-managed sockets by file descriptor
        for s in self.managed_sockets.values():
            if s.fileno() not in self.fdsockets:
                self.fdsockets[s.fileno()] = s
        try:
            sockets = list(self.fdsockets.keys())
            if sockets:
                rdy, _w, _x = select.select(sockets, [], [], 0.1)
            else:
                # nothing to watch yet; don't spin
                time.sleep(0.1)
        except Exception as e:
            print("select exception:", e)
        if not rdy:
            continue
        jdata = None
        syncbytes = 8
        sync = b''
        for rsock in rdy:
            retry = 1
            while retry:
                sock = self.fdsockets.get(rsock)
                if sock is None:
                    # socket was unmanaged while we were reading
                    retry = 0
                    jdata = None
                    continue
                try:
                    # slide the sync window; after a sync error we read
                    # one byte at a time until the magic lines up again
                    sync = sync[syncbytes:]
                    syncbit = sock.recv(syncbytes)
                    if not syncbit:
                        # peer closed the connection
                        raise self.RecvError()
                    sync += syncbit
                    if sync != RPCHost.SYNC_MAGIC:
                        syncbytes = 1
                        retry = 1
                        raise self.SyncError()
                    else:
                        syncbytes = 8
                        retry = 0
                    # 8-byte length prefix, then the JSON payload
                    jlen = sock.recv(8)
                    jlen = int(jlen)
                    jdata = sock.recv(jlen)
                except RPCHost.SyncError:
                    log.warning("RPCHost: sync error, resyncing")
                except (socket.error, RPCHost.RecvError):
                    # connection-level failure: drop the peer
                    log.warning("RPCHost: communication error")
                    retry = 0
                    jdata = None
                    deadpeer = self.peers_by_socket[sock]
                    self.unmanage(deadpeer)
                except Exception as e:
                    print("RPCHost: unhandled exception", type(e), e)
                    print(jdata)
                    retry = 0
                    jdata = b""
def run(self):
    ''' RPCHost.run: perform IO on managed sockets, dispatch data

    Main loop: select() over all managed sockets with a 0.1 s timeout,
    resynchronize on the 8-byte magic, read a length-prefixed JSON
    payload, and hand (payload, peer_id) to the read worker pool.
    When join_req is set, runs the exit handshake with peers and tears
    down published classes.

    NOTE(review): indentation reconstructed from collapsed source --
    confirm nesting against upstream before relying on exact control
    flow.
    '''
    self.read_workers.start()
    if RPCWrapper.rpchost is None:
        # first host created becomes the process-wide default
        RPCWrapper.rpchost = self
    import select
    self.fdsockets = {}
    while not self.join_req:
        rdy = None
        # index any newly-managed sockets by file descriptor
        for s in self.managed_sockets.values():
            if s.fileno() not in self.fdsockets:
                self.fdsockets[s.fileno()] = s
        try:
            sockets = list(self.fdsockets.keys())
            if sockets:
                rdy, _w, _x = select.select(list(self.fdsockets.keys()), [], [], 0.1)
            else:
                # nothing to watch yet; don't spin
                time.sleep(0.1)
        except Exception as e:
            print("select exception:", e)
        if not rdy:
            continue
        syncbytes = 8
        sync = b''
        for rsock in rdy:
            jdata = b''
            retry = 1
            while retry:
                sock = self.fdsockets.get(rsock)
                if sock is None:
                    # socket was unmanaged while we were reading
                    retry = 0
                    jdata = None
                    continue
                try:
                    # slide the sync window; after a sync error we read
                    # one byte at a time until the magic lines up again
                    sync = sync[syncbytes:]
                    syncbit = sock.recv(syncbytes)
                    if not syncbit:
                        # peer closed the connection
                        raise self.RecvError()
                    sync += syncbit
                    if sync != RPCHost.SYNC_MAGIC:
                        syncbytes = 1
                        retry = 1
                        raise self.SyncError()
                    else:
                        syncbytes = 8
                        retry = 0
                    # 8-byte length prefix, then the JSON payload
                    jlen = sock.recv(8)
                    jlen = int(jlen)
                    recvlen = 0
                    while recvlen < jlen:
                        jdata += sock.recv(jlen-recvlen)
                        recvlen = len(jdata)
                        if recvlen < jlen:
                            log.warning("RPCHost: got short packet (%d of %d)" % (recvlen, jlen))
                except RPCHost.SyncError as e:
                    log.warning("RPCHost: sync error, resyncing")
                    pass
                except (socket.error, RPCHost.RecvError) as e:
                    # connection-level failure: drop the peer
                    log.warning("RPCHost: communication error")
                    retry = 0
                    jdata = None
                    deadpeer = self.peers_by_socket[sock]
                    self.unmanage(deadpeer)
                except Exception as e:
                    log.error("RPCHost: unhandled exception", type(e), e)
                    log.debug(jdata)
                    log.debug_traceback()
                    retry = 0
                    jdata = b""
            # NOTE(review): jlen may be unbound here if the very first
            # recv on this socket failed -- presumably jdata is None in
            # that case so the check short-circuits; confirm.
            if jdata is not None and len(jdata) >= jlen:
                peer_id = self.peers_by_socket.get(sock)
                self.read_workers.submit((jdata, peer_id))
    # ---- shutdown: exit handshake with peers ----
    if self.node_id == 0:
        # master: ask each peer to exit, waiting for each response
        peers = list(self.managed_sockets.keys())
        for node in peers:
            req = Request("exit_request", {})
            self.put(req, node)
            self.wait(req)
            del self.managed_sockets[node]
    elif 0 in self.managed_sockets:
        # worker: tell the master we are going away
        req = Request("exit_notify", {})
        self.put(req, 0)
        self.wait(req)
    # withdraw served classes and clear publisher registrations
    for clsname, cls in list(self.served_classes.items()):
        self.unpublish(cls)
    for clsname, cls in RPCWrapper.rpctype.items():
        cls.publishers = []
    if RPCWrapper.rpchost == self:
        RPCWrapper.rpchost = None
    self.read_workers.finish()