def _on_connection_success_item(self, connection_item, stream):
    self._off_connection_timeout_handler()
    log.debug(u"Connection Success {}".format(self.client_config.address_str))
    try:
        self.stream = stream
        self.stream.set_close_callback(self._on_connection_close)
        self.stream.set_nodelay(True)
        #: send message
        self._sending_connection_item(connection_item)
        #: fetch message
        read_status = yield self._read_message(connection_item)
        if read_status:
            connection_item.callback(RPCMessage(
                CONNECTION_TYPE_IN_RESPONSE,
                self._message.topic,
                self._message.body))
        else:
            log.error("Malformed Client Request")
    except Exception as e:
        log.error(e)
        traceback.print_exc()
    finally:
        self.close()

def _on_item_clicked(self, data, idx, button):
    assert data, (data, idx, button)
    if data.is_equipment:
        # equipment slot
        if button == 'mouse1':
            # equip the item currently held by the mouse
            old_mouse_item = self.get_mouse_item()
            if self._inventory_data.mouse_click_at_equipment(idx):
                current_mouse_item = self.get_mouse_item()
                assert old_mouse_item != current_mouse_item
                self.refresh_inventory()
                self.refresh_mouse()
        log.debug("equipment clicked!")
        return
    else:
        # regular inventory slot
        if button == 'mouse1':
            if keyboard.is_ctrl_down():
                # split half of the stack onto the mouse
                self._inventory_data.half_to_mouse(data.bag, idx)
            else:
                # plain click on the item
                self._inventory_data.mouse_click_at_bag(data.bag, idx)
        elif button == 'mouse3':
            # quick action
            if data.item:
                data.item.on_quick_action(data.bag, idx, self.get_mouse_item())
                if data.item.is_destroyed():
                    data.bag.remove_item_at(idx)
        self.refresh_inventory()
        self.refresh_mouse()

def fail(info=None, max_log_len=2000):
    if info is None:
        info = {}
    info["status"] = const.STATUS_FAIL
    response_body = json_dumps(info)
    log.debug("response:%s", response_body[0:max_log_len])
    return json.loads(response_body)

def do_action(self, action_info, tool, key_type, mouse_entity):
    action_type = action_info['action_type']
    if action_type == 'throw_item_at':
        if mouse_entity:
            G.game_mgr.put_mouse_item_on_ground(self._pos)
            return True
        else:
            return False
    if action_type == 'place':
        if mouse_entity:
            # place operation
            placeable = mouse_entity.get_component(ItemPlaceable)
            assert placeable
            name, data = placeable.get_gen_config()
            if not data:
                data = {}
            data['name'] = name
            pos = action_info['pos']
            assert pos
            G.game_mgr.chunk_mgr.spawn_with_data(pos.get_x(), pos.get_y(), data)
            return True
        return False
    if action_type == 'throw_item_now':
        log.debug("throw item now!")
        return True

def run(timeout=3600) -> State:
    state = loop_for(timeout, run_checks, fail_msg="Health check unsuccessful")
    if state == State.OK:
        log.info("Health check successful.")
    elif state == State.FAIL:
        log.debug("Health check failed.")
    return state

def ___handler_request(self, func_name, **kwargs):
    topic_name = "{}.{}".format(self._service_name, func_name)
    body = ""
    if kwargs:
        body = json.dumps(kwargs)
    log.debug("{}({})".format(topic_name, body))
    request_message = RPCMessage(CONNECTION_TYPE_IN_REQUEST, topic_name, body)

    def _f(_message):
        log.debug("Request Message {}".format(_message.__dict__))
        status = _message.topic
        content = _message.body
        if status == RESPONSE_ERROR_TAG:
            raise gen.Return(content)
        if not content:
            raise gen.Return(content)
        try:
            v = Storage(json.loads(content))
        except Exception:
            # body is not a JSON object; return it as-is
            v = content
        raise gen.Return(v)

    yield self._client.fetch(RPCClientItem(request_message, _f))

def run(args):
    if len(args) < 2:
        print 'MM:00 EXEC: ERROR usage: exec cmd cmd ...'
        print 'Commands are:'
        for c in sorted(commands):
            print '    ' + c + ': ' + commands[c].get('cmd', '<CMD>')
        return
    for i in range(1, len(args)):
        cmdname = args[i]
        try:
            c = commands[cmdname]['cmd']
        except KeyError:
            log.error('MM:00 ERROR: EXEC FAILED unknown or poorly specified cmd: ' + cmdname)
            continue
        log.info('MM:00 EXEC: ' + cmdname + ' cmd = ' + c)
        ca = c.split()
        try:
            p = subprocess.Popen(ca, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
        except Exception, e:
            out = ''
            err = 'Command Failed: ' + repr(e)
        r = out + err
        log.debug('MM:00 EXEC: ' + cmdname + ' output = \n' + r.strip())

def cb(x):
    log.debug('x: %s', x)
    log.debug("time consumed: %s", time.time() - var['ts'])
    roots[idx].remove_node()
    roots[idx] = x
    x.reparent_to(G.render)
    flatten(next_idx)

def verify_complete(self, client: Client) -> State:
    """Are we done with this group yet?"""
    state = State.OK
    jobs = get_maint_job(client, self.group)
    if len(jobs) < self.device_count:
        log.debug(
            "Some devices are still offline.",
            online=len(jobs),
            expected=self.device_count,
        )
        state = State.PENDING
    for job in jobs:
        status = job.get("upgradeStatus")
        job_ver = job.get("desiredVersion")
        node_id = get_path(
            str, get_path(str, job.get("dn", "").split("/"), 2).split("-"), 1)
        log.debug(
            "Upgrade status",
            percent=job.get("instlProgPct"),
            node_id=node_id,
            status=status,
            target_version=job_ver,
        )
        if status != "completeok":
            state = State.PENDING
        if job_ver != self.version_str:
            state = State.PENDING
    return state

def on_equipped(self, slot_type):
    log.debug("equipped at %s", slot_type)
    if self._tool_name:
        G.game_mgr.change_equipment_model(slot_type, self._tool_name)
    elif self._tool_model:
        pass
    else:
        assert False

def check_dvs(client: Client) -> State:
    """VMware DVS state"""
    # Verify state == 'poweredOn' in compHv
    for dvs in client.get_class("compHv"):
        if dvs.get("state", "") != "poweredOn":
            log.warning("vSwitch offline", name=dvs["name"])
            return State.FAIL
    log.debug("All vSwitch(s) online")
    return State.OK

def check_vcenter(client: Client) -> State:
    """VMware vCenter state"""
    # Verify operSt == 'online' in compCtrlr
    for ctrlr in client.get_class("compCtrlr"):
        if ctrlr.get("operSt", "") != "online":
            log.warning("vCenter offline", name=ctrlr["name"])
            return State.FAIL
    log.debug("All vCenter(s) online")
    return State.OK

def check_vpc_health(client: Client) -> State:
    """vPC health"""
    # Verify peerSt == 'up' for vpcDom
    for vpc in client.get_class("vpcDom"):
        if vpc["peerSt"] != "up":
            log.warning("vPC not up", id=vpc["id"], state=vpc["peerSt"])
            return State.FAIL
    log.debug("All vPCs are up")
    return State.OK

def goback():
    'Return to the most recent directory from which a goto() was issued'
    if not dirStack:
        log.warning('Directory stack empty')
        return
    theDir = dirStack.pop()
    os.chdir(theDir)
    log.debug('goback to %s' % theDir)
    return theDir

def run_post_checks(client: Client, snapshot: Snapshot) -> State:
    # Stop at the first comparison that fails
    for check in enabled_post_checks:
        log.debug(f"Comparing: {check.__doc__}...")
        state = check(client, snapshot)
        if state == State.FAIL:
            log.debug("Failed on comparison", check=check.__doc__)
            return state
    return State.OK

def check_fabric_scale(client: Client) -> State:
    """fabric-wide scale"""
    # Verify fabric-wide MO counts are < limits from fvcapRule
    over_scale = False
    metrics = {
        "fvCEp": {"name": "endpoints"},
        "fvAEPg": {"name": "EPGs"},
        "fvBD": {"name": "BDs"},
        "fvCtx": {"name": "VRFs"},
        "fvTenant": {"name": "tenants"},
        # API doesn't provide these limits
        "vzBrCP": {"name": "contracts", "limit": 10000},
        "vzFilter": {"name": "filters", "limit": 10000},
    }
    for record in client.get_class("fvcapRule", cache=True):
        subj = record.get("subj")
        if subj in metrics and record["dn"].startswith("uni"):
            metrics[subj]["limit"] = int(record.get("constraint", 0))

    def get_count(class_name):
        res = client.get(f"/api/class/{class_name}",
                         params={"rsp-subtree-include": "count"})
        return get_path(int, res, 0, "moCount", "attributes", "count")

    for class_name in metrics:
        metrics[class_name]["count"] = get_count(class_name)
    for class_name, metric in metrics.items():
        # TODO validate scenario where limit isn't found
        if "limit" in metric and metric["count"] > metric["limit"]:
            over_scale = True
            log.warning(f"Over scale limit for {class_name}:", **metric)
        elif "limit" in metric and client.args["debug"]:
            log.debug(
                f'Scale for {metric["name"]}:',
                count=metric["count"],
                limit=metric["limit"],
                mo=class_name,
            )
    return State.FAIL if over_scale else State.OK

def __init__(self, name, depth, private=False, directory="", version="", project=""):
    self.name = name
    self.depth = depth
    self.private = private
    self.directory = directory
    self.version = version
    self.uses = list()
    self.project = project
    log.debug(self)

def create_train_op(self, loss_tensor, params, learning_rate,
                    global_step=None,
                    gradient_summary_tag_name='gradient_norm'):
    if learning_rate == 0:
        return tf.no_op()

    # TODO: add normal gradient
    # optimizer
    if self.config.optimization_method == 'adam':
        optimizer = tf.train.AdamOptimizer(learning_rate)
    elif self.config.optimization_method == 'rmsprop':
        optimizer = tf.train.RMSPropOptimizer(learning_rate, momentum=0.9)
    elif self.config.optimization_method == 'sgd':
        optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    else:
        raise ValueError('Invalid optimization method!')

    # compute gradients
    gradients = tf.gradients(
        loss_tensor, params,
        aggregation_method=2  # see issue #492
    )
    if all(v is None for v in gradients):
        # in some cases, we opt not to train some sub-networks at all.
        return tf.no_op()

    # use gradient clipping as well.
    if self.max_grad_norm > 0:
        clipped_grads, norm = tf.clip_by_global_norm(
            gradients, self.max_grad_norm)
    else:
        clipped_grads, norm = gradients, 0
    tf.summary.scalar(gradient_summary_tag_name, norm)
    clipped_grad_and_vars = list(zip(clipped_grads, params))
    train_op = optimizer.apply_gradients(clipped_grad_and_vars,
                                         global_step=global_step)

    # with some debugging information.
    total_num_elements = 0
    for var in params:
        log.debug("  model param %s : %s (total %d)",
                  var.name, var.get_shape().as_list(),
                  var.get_shape().num_elements())
        total_num_elements += var.get_shape().num_elements()
    log.infov("Total # of parameters in the train_op : %d", total_num_elements)

    # TODO: learning rate might be overridden afterwards (e.g. checkpoint)
    return train_op

def check_ntp_state(client: Client) -> State:
    """NTP sync"""
    # Verify srvStatus == 'synced' in datetimeClkPol
    synced_peers = set()
    for ntp in client.get_class("datetimeClkPol"):
        if "synced" in ntp.get("srvStatus", ""):
            synced_peers.add(ntp["dn"])
    if len(synced_peers) == 0:
        log.warning("NTP not synced to at least 1 peer")
        return State.FAIL
    log.debug("NTP synced.")
    return State.OK

def on_websocket_close(self, code: int, reason: str) -> None:
    log.debug(f"connection {self.sock_id} closed: {code} - {reason}")
    if not self.new_sock_added:
        log.debug(f"going to reconnect to router, websocket close code: {code}")
        self.ws_client_farm.remove_ws(str(self.register_uri))
        self.when_consumed_action()
        self.new_sock_added = True
    if not self.req_complete and self.req_to_target is not None:
        if code != STATUS_UNEXPECTED_CONDITION:
            log.info(f"websocket closed before the target response was processed; this may be "
                     f"because the user closed their browser. Going to cancel request to "
                     f"target {self.req_to_target.url}")
        self.req_to_target.abort(Exception("Socket to Router closed"))

def goto(theDir, mkdir=False):
    '''Move the application to the given directory. If mkdir is true,
    any missing path segments will be created.'''
    if not os.path.exists(theDir):
        if mkdir:
            assure(theDir)
        else:
            raise CommandFailure('Can not goto missing directory: "%s"' % theDir)
    dirStack.append(os.getcwd())
    os.chdir(theDir)
    log.debug('goto %s' % theDir)
    return dirStack

def _on_connection_success(self, stream):
    self._off_connection_timeout_handler()
    log.debug(u"Connection Success {}".format(
        self.client_config.address_str))
    self.stream = stream
    self.stream.set_close_callback(self._on_connection_close)
    # disable Nagle's algorithm so small messages are sent immediately
    self.stream.set_nodelay(True)
    #: test the connection
    self.client.ping()

def remote(args):
    if len(args) < 2:
        log.error('MM:00 EXEC: ERROR: usage: exec anynode cmd arg ...')
        return
    host = args[0]
    del args[0]
    cmd = ''
    for arg in args:
        cmd += arg + ' '
    log.info('MM:' + host + ' REXEC: ' + cmd)
    r = generic(host, 'REXEC', 'exec ' + cmd + '\n')
    if r is not None:
        log.debug('MM:' + host + ' REXEC: output = \n' + r.strip())

def killp(args):
    if len(args) < 2:
        log.error('MM:00 EXEC: ERROR: usage: killp anynode ID ...')
        return
    host = args[0]
    del args[0]
    cmd = ''
    for arg in args:
        cmd += arg + ' '
    log.info('MM:' + host + ' KILLP: ' + cmd)
    r = generic(host, 'KILLP', 'killp ' + cmd + '\n')
    if r is not None:
        log.debug('MM:' + host + ' KILLP: output = \n' + r.strip())

def verify_completion(client: Client) -> State:
    res = client.get(
        f"/api/node/mo/expcont/expstatus-tsexp-{job_name}",
        params={
            "query-target": "subtree",
            "target-subtree-class": "dbgexpTechSupStatus",
        },
    )
    status = get_path(str, res[-1], "dbgexpTechSupStatus", "attributes", "exportStatus")
    log.debug(f"tech support status: {status}")
    if status == "success":
        return State.OK
    return State.PENDING

def check_running_firmware(client: Client) -> State:
    """current running firmware"""
    # Verify only one version from firmwareRunning and firmwareCtrlrRunning
    versions = set()
    for record in client.get_class("firmwareRunning"):
        versions.add(record["peVer"])
    for record in client.get_class("firmwareCtrlrRunning"):
        versions.add(record["version"])
    if len(versions) > 1:
        log.warning("Multiple firmware versions found", versions=list(versions))
    elif client.args["debug"] and len(versions) > 0:
        log.debug("Firmware:", version=versions.pop())
    return State.OK

def compare_routes(client: Client, snapshot: Snapshot) -> State:
    """ISIS inter-pod routes"""
    current_dns = [r.get("dn", "") for r in get_interpod_routes(client)]
    has_missing = False
    for route in snapshot["isis_routes"]:
        dn = route.get("dn", "")
        if dn not in current_dns:
            # Route is missing!
            log.debug("missing ISIS route", dn=route.get("dn"), prefix=route.get("pfx"))
            has_missing = True
    if has_missing:
        log.info("ISIS inter-pod routes are missing. Waiting for fabric to converge...")
        return State.FAIL
    return State.OK

def on_websocket_text(self, msg: str) -> None:
    ptc_req = ProtocolRequest(msg)
    # fire the request to the target service only when the end marker is
    # RequestHasNoBodyMarker ("_2") or RequestBodyEnededMarker ("_3")
    if self.req_to_target is None:
        self._on_req_received()
        self._new_req_to_target(ptc_req)
    if ptc_req.req_has_no_body():
        self._send_req_to_target()
    elif ptc_req.req_body_pending():
        log.debug(f"request body pending, sockId={self.sock_id}")
    elif ptc_req.req_body_ended():
        log.debug(f"no further request body is coming, sockId={self.sock_id}")
        self._send_req_to_target()

def _mouse_click_event(self, status):
    if self._key == 'space':
        log.debug("space event: %s", status)
    if status == 'up':
        if self._key_timer <= self._click_max_duration:
            # click event
            if self._on_click:
                self._on_click()
        else:
            # hold event finished
            if self._on_hold_done:
                self._on_hold_done()
        self._key_timer = 0
    self._is_down = status == 'down'

def _send_req_to_target(self) -> None:
    def callback(result: IntermediateRequest.Result):
        if result.is_succeeded:
            self.req_complete = True
            log.debug("closing websocket because response is fully processed")
            self.close(status=STATUS_NORMAL, reason=b"Proxy complete")
        else:
            self.req_complete = False
            if self.sock:
                self.close(status=STATUS_UNEXPECTED_CONDITION, reason=b"Proxy failure")

    log.debug("request headers received")
    self.req_to_target.fire_req_from_connector_to_target_service(callback)
    log.debug("request body is fully sent")

def verify_completion(client: Client) -> State:
    jobs = client.get(
        f"/api/node/mo/uni/backupst/jobs-[{backup_dn}]",
        params={
            "query-target": "children",
            "target-subtree-class": "configJob",
        },
    )
    last_job_status = get_path(str, jobs[-1], "configJob", "attributes", "operSt")
    if last_job_status == "success":
        return State.OK
    else:
        log.debug(f"status: {last_job_status}")
        return State.PENDING

def check_maintenance_groups(client: Client) -> State:
    """switches are in maintenance groups"""
    # Verify all switches from topSystem are also in maintUpgJob objects
    job_dns = []
    for job in client.get_class("maintUpgJob"):
        if job.get("maintGrp", "") != "" and job["dn"].startswith("topology"):
            job_dns.append(get_node_dn(job["dn"]))
    for device in client.get_class("topSystem"):
        if device["role"] in ("spine", "leaf"):
            if get_node_dn(device["dn"]) not in job_dns:
                log.warning("Device not in maintenance group", name=device["name"])
                return State.FAIL
    log.debug("All devices in maintenance groups")
    return State.OK

def compare_devices(client: Client, snapshot: Snapshot) -> State:
    """devices"""
    has_missing = False
    current_dns = [r.get("dn", "") for r in get_devices(client)]
    for device in snapshot["devices"]:
        snapshot_dn = device.get("dn", "")
        if snapshot_dn not in current_dns:
            # Device is missing!
            log.debug("missing device", dn=device.get("dn"), name=device.get("name"))
            has_missing = True
    if has_missing:
        log.info(
            "Some devices are not currently available. Waiting for fabric to converge..."
        )
        return State.FAIL
    return State.OK

def check_tcam_scale(client: Client) -> State:
    """per-leaf TCAM scale"""
    # Verify polUsageCum <= polUsageCapCum for eqptcapacityPolUsage5min
    over_limit = False
    for record in client.get_class("eqptcapacityPolUsage5min"):
        node_dn = get_node_dn(record["dn"])
        count = get_path(int, record, "polUsageCum")
        limit = get_path(int, record, "polUsageCapCum")
        if count > 0 and count >= limit:
            over_limit = True
            log.warning(f"Over TCAM scale on {node_dn}", count=count, limit=limit)
        if client.args["debug"]:
            log.debug(f"TCAM scale on {node_dn}", count=count, limit=limit)
    return State.FAIL if over_limit else State.OK

def router(args):
    if len(args) < 2:
        log.error('MM:00 ROUTER: ERROR: usage: router arg arg ...')
        return
    host = args[0]
    if host not in bgprouters:
        log.error('MM:' + host + ' ERROR: ROUTER ' + host + ' : must be a BGP router')
        return
    del args[0]
    cmd = ''
    for arg in args:
        cmd += '"' + arg + '" '
    log.info('MM:' + host + ' ROUTER: ' + cmd)
    r = generic(host, 'ROUTER', 'router ' + cmd + '\n')
    if r is not None:
        log.debug('MM:' + host + ' ROUTER: output = \n' + r.strip())

def rexec(args):
    if len(args) < 3:
        print 'MM:00 REXEC: ERROR usage: rexec cmd host host ...'
        print 'Commands are:'
        for c in sorted(commands):
            print '    ' + c + ': ' + commands[c].get('cmd', '<CMD>')
        return
    cmdname = args[1]
    try:
        c = commands[cmdname]['cmd']
    except KeyError:
        log.error('MM:00 ERROR: REXEC FAILED unknown or poorly specified cmd: ' + cmdname)
        return
    for i in range(2, len(args)):
        host = args[i]
        log.info('MM:' + host + ' REXEC ' + cmdname + ' cmd = ' + c)
        r = generic(host, 'REXEC', 'exec ' + c + '\n')
        if r is not None:
            log.debug('MM:' + host + ' REXEC ' + cmdname + ' output = \n' + r.strip())

def run(self):
    """Show the menu on top of the tray icon."""
    creator = popupmenu.Creator()
    creator.append("&Reload", commander_system.CMD_RELOAD)
    creator.append(
        "Open &Snippet Folder", commander_system.CMD_OPEN_SNIPPET_DIRECTORY
    )
    creator.append(
        "Open &Hotkey Config File", commander_system.CMD_OPEN_HOTKEY_CONFIG
    )
    creator.append_separator()
    creator.append("Open &Install Directory", commander_system.CMD_OPEN_DIRECTORY)
    creator.append("&Version", commander_system.CMD_SHOW_VERSION)
    creator.append("&Quit", commander_system.CMD_QUIT)
    menudata = None
    try:
        menudata = creator.get_menudata()
    except ValueError as e:
        creator.destroy()  # @todo use a `with` block for RAII-style cleanup
        raise exceptions.ProgrammersMistake(str(e))
    tracker = popupmenu.Tracker(trayicongui.hwndinst.get())
    command = None
    mouseposx, mouseposy = win32api.GetCursorPos()
    try:
        command = tracker.track(menudata, mouseposx, mouseposy)
        log.debug("tracked command:" + str(command))
    except popupmenu.MenuTrackError as e:
        log.debug(str(e))
    creator.destroy()
    self._commanderchain.run(command)

def get_title(url):
    log.debug("  fetching page...")
    message = urllib.urlopen(url)
    content = message.read()
    title_el = re.search('<title>(.*)</title>', content,
                         re.DOTALL | re.IGNORECASE)
    if title_el:
        title = title_el.group(1).strip()
    else:
        title = ''
    charset = message.info().getparam('charset')
    if not charset:
        httpequiv_el = re.search(
            'http-equiv\s*=\s*["\']Content-Type["\']\s*content\s*=\s*["\'].*charset\s*=\s*(.*)["\']',
            content, re.DOTALL | re.IGNORECASE)
        if httpequiv_el:
            charset = httpequiv_el.group(1).strip()
    log.debug('  page charset : %s', charset)
    if charset:
        title = title.decode(charset)
    title = title.strip()
    log.debug("  reading page title... : %s", title)
    return title

def get_uses(pkg_dir, environ):
    'Return the packages that the package at the given directory uses'
    this_pkg = os.path.basename(pkg_dir)
    this_ver = package_version(pkg_dir, environ)
    this_project = package_project(pkg_dir, environ)
    log.debug("pkg = %s ver = %s project = %s" % (this_pkg, this_ver, this_project))
    uses = [UsedPackage(this_pkg, 0, False, "", this_ver, this_project)]
    pack2proj = {this_pkg: this_project}
    res = cmt("show uses", environ, pkg_dir + '/cmt', True)
    for line in res.split('\n'):
        line = line.strip()
        words = line.split(' ')
        if len(words) == 1:
            continue
        if line[0] != '#':
            pack = words[1]
            # CMT packages are printed out in a non-standard way, and we
            # don't need them anyway.
            if len(pack) > 2 and pack[:3] == 'CMT':
                continue
            proj = words[4][:-1]
            if proj[-1] == '/':
                proj = proj[:-1]
            proj = os.path.basename(proj)
            pack2proj[pack] = proj
            continue
        log.debug('words = %s' % words)
        depth = 1
        for w in words[1:]:  # count leading spaces to find the nesting depth
            if w:
                break
            depth += 1
        if words[depth] != 'use':
            continue
        name = words[depth + 1]
        ver = words[depth + 2]
        try:
            dir = words[depth + 3]
        except IndexError:
            dir = ""
        private = False
        try:
            if words[depth + 4] == '(private)':
                private = True
        except IndexError:
            pass
        depth = 1 + (depth - 1) / 2
        uses.append(UsedPackage(name, depth, private, dir, ver))
    # rebuild the use tree from the flat, depth-annotated list
    stack = []
    for u in uses:
        try:
            u.project = pack2proj[u.name]
        except KeyError:
            u.project = "Unknown"
        if not stack:
            log.debug('stack: %s' % u)
            stack.append(u)
            continue
        while stack and u.depth - stack[-1].depth != 1:
            tmp = stack.pop()
            log.debug('pop: %s(%d) for %s(%d)' % (tmp.name, tmp.depth, u.name, u.depth))
        if stack:
            stack[-1].uses.append(u)
            log.debug('add uses: %s' % stack[-1])
        stack.append(u)
        log.debug('stack: %s' % u)
    return uses

def local(args):
    if len(args) < 1:
        log.error('MM:00 EXEC: ERROR usage: local cmd arg ...')
        return
    cmd = ''
    for arg in args:
        cmd += arg + ' '
    log.info('MM:00 LOCAL: ' + cmd)
    try:
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
    except Exception, e:
        out = ''
        err = 'Command Failed: ' + repr(e)
    r = out + err
    log.debug('MM:00 LOCAL: output =\n' + r.strip())

# execute participant api
def blackholing(args):
    if len(args) < 3:
        log.error('MM:00 EXEC: ERROR usage: blackholing participant_id remove/insert id[,id...]')
        return
    part_id = args[0]  # participant id
    part_action = args[1]  # action: insert or remove
    rule_ids = []
    for policy_id in args[2].split(','):  # rule ids
        rule_ids.append(int(policy_id) + 2**12)  # additional 4096 for the cookie id

def _on_connection_close(self):
    log.debug("Connection Timeout(close) {}".format(
        self.client_config.address_str))