def doEvent(event, obj):
    try:
        validateEvent(event)
    except ValueError:
        raise ValueError("Unable to do Event: Invalid Event")
    diff = toDiff(event['actions'])
    jd.patch(obj, diff, True)
    return
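# A minimal sketch (hypothetical data, independent of doEvent above) showing
# what the positional True/False third argument to patch() controls:
# json_delta.patch(struct, diff, in_place=False) leaves `struct` untouched and
# returns a patched copy, while in_place=True mutates `struct` directly.
import json_delta

doc = {"name": "web", "replicas": 1}
diff = json_delta.diff(doc, {"name": "web", "replicas": 3})

patched = json_delta.patch(doc, diff, in_place=False)
assert doc["replicas"] == 1 and patched["replicas"] == 3

json_delta.patch(doc, diff, in_place=True)
assert doc["replicas"] == 3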
def record_patch(rec, diff):
    """Return the JSON-compatible structure that results from applying the
    changes in `diff` to the record `rec`.

    The parameters must be structures compatible with json.dumps *or*
    strings compatible with json.loads.

    Note that by design, `old == record_patch(new, record_diff(old, new))`
    """
    rec, diff = _norm_json_params(rec, diff)
    return json_delta.patch(rec, diff, in_place=False)
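# record_diff and _norm_json_params are defined elsewhere; assuming record_diff
# wraps json_delta.diff the way record_patch wraps json_delta.patch, the round
# trip the docstring describes can be checked with json_delta directly.
# Hypothetical data, just a sketch of the library guarantee:
import json_delta

old = {"id": 1, "tags": ["a", "b"]}
new = {"id": 1, "tags": ["a"], "owner": "alice"}

diff = json_delta.diff(old, new)
assert json_delta.patch(old, diff, in_place=False) == new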
def runTest(self):
    diff = json_delta.load_and_diff(self.case, self.target,
                                    minimal=False, verbose=True)
    self.assertEqual(
        json_delta._util.decode_json(self.target),
        json_delta.patch(json_delta._util.decode_json(self.case), diff)
    )
def patch(self, diff):
    """Takes a delta (from diff()) and applies it to update the object."""
    if not diff:
        return
    res = json_delta.patch(self.raw(), diff)
    if self._is_iterable(res):
        return self.new(res)
    return res
def _store_rx_data(self, data, nodename):
    current_gen = shared.REMOTE_GEN.get(nodename, 0)
    our_gen_on_peer = data.get("gen", {}).get(rcEnv.nodename, 0)
    kind = data.get("kind", "full")
    change = False
    if kind == "patch":
        if current_gen == 0:
            # waiting for a full: ignore patches
            return
        if nodename not in shared.CLUSTER_DATA:
            # happens during init. ignore the patch, and ask for a full
            shared.REMOTE_GEN[nodename] = 0
            shared.LOCAL_GEN[nodename] = our_gen_on_peer
            return
        deltas = data.get("deltas", [])
        gens = sorted([int(gen) for gen in deltas])
        gens = [gen for gen in gens if gen > current_gen]
        if len(gens) == 0:
            #self.log.info("no more recent gen in received deltas")
            if our_gen_on_peer > shared.LOCAL_GEN[nodename]:
                shared.LOCAL_GEN[nodename] = our_gen_on_peer
                shared.CLUSTER_DATA[nodename]["gen"][rcEnv.nodename] = our_gen_on_peer
            return
        with shared.CLUSTER_DATA_LOCK:
            for gen in gens:
                #self.log.debug("merge node %s gen %d (%d diffs)", nodename, gen, len(deltas[str(gen)]))
                if gen - 1 != current_gen:
                    self.log.warning(
                        "unsynchronized node %s dataset. local gen %d, received %d. "
                        "ask for a full.",
                        nodename, current_gen, gen)
                    shared.REMOTE_GEN[nodename] = 0
                    shared.LOCAL_GEN[nodename] = our_gen_on_peer
                    shared.CLUSTER_DATA[nodename]["gen"] = {
                        nodename: gen,
                        rcEnv.nodename: our_gen_on_peer,
                    }
                    break
                try:
                    json_delta.patch(shared.CLUSTER_DATA[nodename], deltas[str(gen)])
                    current_gen = gen
                    shared.REMOTE_GEN[nodename] = gen
                    shared.LOCAL_GEN[nodename] = our_gen_on_peer
                    shared.CLUSTER_DATA[nodename]["gen"] = {
                        nodename: gen,
                        rcEnv.nodename: our_gen_on_peer,
                    }
                    self.log.debug(
                        "patch node %s dataset to gen %d, peer has gen %d of our dataset",
                        nodename, shared.REMOTE_GEN[nodename], shared.LOCAL_GEN[nodename])
                    if self.patch_has_nodes_info_change(deltas[str(gen)]):
                        self.on_nodes_info_change()
                    change = True
                except Exception as exc:
                    self.log.warning(
                        "failed to apply node %s dataset gen %d patch: %s. "
                        "ask for a full: %s",
                        nodename, gen, deltas[str(gen)], exc)
                    shared.REMOTE_GEN[nodename] = 0
                    shared.LOCAL_GEN[nodename] = our_gen_on_peer
                    shared.CLUSTER_DATA[nodename]["gen"] = {
                        nodename: gen,
                        rcEnv.nodename: our_gen_on_peer,
                    }
                    return
    elif kind == "ping":
        with shared.CLUSTER_DATA_LOCK:
            shared.REMOTE_GEN[nodename] = 0
            shared.LOCAL_GEN[nodename] = our_gen_on_peer
            if nodename not in shared.CLUSTER_DATA:
                shared.CLUSTER_DATA[nodename] = {}
            shared.CLUSTER_DATA[nodename]["gen"] = {
                nodename: 0,
                rcEnv.nodename: our_gen_on_peer,
            }
            shared.CLUSTER_DATA[nodename]["monitor"] = data["monitor"]
            self.log.debug(
                "reset node %s dataset gen, peer has gen %d of our dataset",
                nodename, shared.LOCAL_GEN[nodename])
            change = True
    else:
        data_gen = data.get("gen", {}).get(nodename)
        if data_gen is None:
            self.log.debug("no 'gen' in full dataset from %s: drop", nodename)
            return
        last_gen = shared.REMOTE_GEN.get(nodename)
        if last_gen is not None and last_gen >= data_gen:
            self.log.debug(
                "already installed or beyond %s gen %d dataset: drop",
                nodename, data_gen)
            return
        node_status = data.get("monitor", {}).get("status")
        if node_status in ("init", "maintenance", "upgrade") and nodename in shared.CLUSTER_DATA:
            self.duplog("info",
                        "preserve last known instances status from "
                        "node %(nodename)s in %(node_status)s state",
                        nodename=nodename, node_status=node_status)
            data["services"]["status"] = shared.CLUSTER_DATA[nodename].get(
                "services", {}).get("status", {})
        with shared.CLUSTER_DATA_LOCK:
            shared.CLUSTER_DATA[nodename] = data
            new_gen = data.get("gen", {}).get(nodename, 0)
            shared.LOCAL_GEN[nodename] = our_gen_on_peer
            self.on_nodes_info_change()
            shared.REMOTE_GEN[nodename] = new_gen
            shared.CLUSTER_DATA[nodename]["gen"] = {
                nodename: new_gen,
                rcEnv.nodename: our_gen_on_peer,
            }
            self.log.debug(
                "install node %s dataset gen %d, peer has gen %d of our dataset",
                nodename, shared.REMOTE_GEN[nodename], shared.LOCAL_GEN[nodename])
            change = True
    if change:
        shared.wake_monitor(
            "node %s %s dataset gen %d received through %s" %
            (nodename, kind, shared.REMOTE_GEN[nodename], self.name))
with tempfile.NamedTemporaryFile(mode="w+", suffix=".tmp") as tmp:
    json.dump(jin, tmp, indent=4, separators=(',', ':'), sort_keys=True)
    tmp.flush()
    call([EDITOR, tmp.name])
    tmp.seek(0)
    jnew = json.load(tmp)

print(json.dumps(jin, indent=4, separators=(',', ':'), sort_keys=True))
print(json.dumps(jnew, indent=4, separators=(',', ':'), sort_keys=True))

diff = jd.diff(jin, jnew)
print(json.dumps(diff, indent=4, separators=(',', ':'), sort_keys=True))

jout = jd.patch(jin, diff, False)
jd.patch(jin, diff, True)
#print(json.dumps(jin, indent=4, separators=(',', ':'), sort_keys=True))
#print(json.dumps(jout, indent=4, separators=(',', ':'), sort_keys=True))

actions = toActions(diff)
diff2 = toDiff(actions)
print(json.dumps(actions, indent=4, separators=(',', ':'), sort_keys=True))
#print(json.dumps(diff2, indent=4, separators=(',', ':'), sort_keys=True))

if diff == diff2:
    print("Diffs match!")
#diff3 = jd.diff(jin, [])
def svcmon(node, options=None):
    global PATCH_Q
    rcColor.use_color = options.color
    if not options.node:
        options.node = "*"
    chars = 0
    last_refresh = 0
    last_patch_id = None
    namespace = options.namespace if options.namespace else os.environ.get("OSVC_NAMESPACE")
    if options.stats and not options.interval:
        options.interval = 3
    if options.interval:
        options.watch = True
    nodes = []
    node.options.update({
        "color": options.color,
    })
    if options.parm_svcs is None:
        kind = os.environ.get("OSVC_KIND", "svc")
        options.parm_svcs = "*/%s/*" % kind
    status_data = node._daemon_status(server=options.server,
                                      selector=options.parm_svcs,
                                      namespace=namespace)
    if status_data is None or status_data.get("status", 0) != 0:
        status, error, info = node.parse_result(status_data)
        raise ex.excError(error)
    nodes_info = nodes_info_from_cluster_data(status_data)
    expanded_svcs = [p for p in status_data.get("monitor", {}).get("services", {})]
    if not nodes:
        nodes = node.nodes_selector(options.node, data=nodes_info)
    if options.watch:
        start_events_thread(node, options.server, options.parm_svcs, namespace)
        preamble = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        stats_data = get_stats(options, node, expanded_svcs)
        prev_stats_data = None
        outs = format_cluster(paths=expanded_svcs,
                              node=nodes,
                              data=status_data,
                              sections=options.sections,
                              selector=options.parm_svcs,
                              namespace=namespace)
        if outs is not None:
            print(CURSORHOME + preamble + CLEAREOLNEW + CLEAREOL)
            print(CLEAREOLNEW.join(outs.split("\n")) + CLEAREOS)
        while True:
            now = time.time()
            try:
                patch = PATCH_Q.get(False, 0.5)
                #for change in patch["data"]:
                #    print(change)
            except Exception as exc:
                # queue empty
                patch = None
            if patch:
                if last_patch_id and patch["id"] != last_patch_id + 1:
                    try:
                        status_data = node._daemon_status(
                            server=options.server,
                            selector=options.parm_svcs,
                            namespace=namespace)
                        last_patch_id = patch["id"]
                    except Exception:
                        # seen on solaris under high load: decode_msg() raising on invalid json
                        pass
                else:
                    try:
                        json_delta.patch(status_data, patch["data"])
                        last_patch_id = patch["id"]
                    except Exception as exc:
                        try:
                            status_data = node._daemon_status(
                                server=options.server,
                                selector=options.parm_svcs,
                                namespace=namespace)
                            last_patch_id = patch["id"]
                        except Exception:
                            # seen on solaris under high load: decode_msg() raising on invalid json
                            pass
            stats_changed = options.interval and now - last_refresh >= options.interval
            if not patch and not stats_changed:
                time.sleep(0.2)
                continue
            if patch:
                if status_data is None:
                    # can happen when the secret is being reset on daemon join
                    time.sleep(0.2)
                    continue
                expanded_svcs = [p for p in status_data.get("monitor", {}).get("services", {})]
                nodes_info = nodes_info_from_cluster_data(status_data)
                nodes = node.nodes_selector(options.node, data=nodes_info)
            if stats_changed:
                prev_stats_data = stats_data
                stats_data = get_stats(options, node, expanded_svcs)
            if chars == 0:
                print(CURSORHOME + CLEAREOS)
                chars = 1
            preamble = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            outs = format_cluster(
                paths=expanded_svcs,
                node=nodes,
                data=status_data,
                prev_stats_data=prev_stats_data,
                stats_data=stats_data,
                sections=options.sections,
                selector=options.parm_svcs,
                namespace=namespace,
            )
            if outs is not None:
                print(CURSORHOME + preamble + CLEAREOLNEW + CLEAREOL)
                print(CLEAREOLNEW.join(outs.split("\n")) + CLEAREOS)
            # min delay
            last_refresh = now
            time.sleep(0.2)
    else:
        outs = format_cluster(paths=expanded_svcs,
                              node=nodes,
                              data=status_data,
                              sections=options.sections,
                              selector=options.parm_svcs,
                              namespace=namespace)
        if outs is not None:
            print(outs)
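# A condensed sketch of the watch-loop strategy used above, with hypothetical
# names (consume_patch and fetch_full are not svcmon functions): queued patches
# are applied in id order with json_delta.patch, and any skipped id or failed
# patch triggers a full status refetch.
import json_delta

def consume_patch(status_data, last_id, patch, fetch_full):
    """Return (status_data, last_id) after handling one queued patch event."""
    if last_id is not None and patch["id"] != last_id + 1:
        return fetch_full(), patch["id"]      # missed an event: resync
    try:
        json_delta.patch(status_data, patch["data"])
        return status_data, patch["id"]
    except Exception:
        return fetch_full(), patch["id"]      # patch failed to apply: resync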