def main():
    """Entry point: parse CLI options, set up rotating-file logging and
    start the gNMI server (gNMITarget.run() blocks)."""
    parser = argparse.ArgumentParser()
    parserGrp = parser.add_argument_group("secure grpc")
    parser.add_argument('targetURL', help="target url, typically localhost:<port>")
    parserGrp.add_argument('--tls', action="store_true", help="enable tls connection")
    parserGrp.add_argument('--cert', help="path to the certificate")
    parserGrp.add_argument('--pvtkey', help="path to the private key file")
    parser.add_argument('--log-level', help="set log level", default=3, type=int)
    args = parser.parse_args()

    log_path = '/var/log/nocsys_sonic_gnmi_server.log'
    if args.log_level < 0:
        # negative level == debug/test mode: truncate the old log file
        # (stdout is expected to be redirected to syslog by systemd)
        with open(log_path, 'w'):
            pass

    log_level_map = [logging.NOTSET, logging.DEBUG, logging.INFO,
                     logging.WARNING, logging.ERROR, logging.CRITICAL]
    log_fmt = '%(asctime)s.%(msecs)03d %(levelname)-5s [%(filename)s %(lineno)d %(funcName)s] %(message)s'
    # BUGFIX: a negative --log-level used to index log_level_map from the
    # end (e.g. -2 silently became ERROR); any out-of-range value now maps
    # to CRITICAL, matching the documented intent of the upper-bound check.
    if 0 <= args.log_level < len(log_level_map):
        log_lvl = log_level_map[args.log_level]
    else:
        log_lvl = logging.CRITICAL

    # remove any log handlers created automatically so only ours is active
    logging.getLogger().handlers = []
    handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=1024000,
                                                   backupCount=2)
    handler.setFormatter(
        logging.Formatter(fmt=log_fmt, datefmt='%y-%m-%d %H:%M:%S'))
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(log_lvl)

    util_utl.utl_log(args)

    # last argument flags debug/test mode (negative log level)
    gTarget = gNMITarget(args.targetURL, args.tls, args.cert, args.pvtkey,
                         args.log_level < 0)
    gTarget.run()
def __init__(self, is_dbg_test):
    """Connect to the SONiC config/app DBs, wait for PORT_TABLE readiness,
    then build the full OpenConfig YANG tree.

    is_dbg_test: debug/test-mode flag forwarded to the util_* creators.
    """
    self.my_args = dispArgs()
    self.my_args.cfgdb = swsssdk.ConfigDBConnector()
    self.my_args.cfgdb.connect()
    self.my_args.appdb = swsssdk.SonicV2Connector(host='127.0.0.1')
    self.my_args.appdb.connect(self.my_args.appdb.APPL_DB)
    self.my_args.appdb.connect(self.my_args.appdb.COUNTERS_DB)
    self.my_args.appdb.connect(self.my_args.appdb.ASIC_DB)

    # poll until the port table is ready; log on the first try and then
    # every 3rd retry (chk_cnt % 3 == 1) to avoid flooding the log
    is_pcfg_done = False
    chk_cnt = 0
    while True:
        pcfg_val = self.my_args.appdb.get_all(self.my_args.appdb.APPL_DB,
                                              "PORT_TABLE:PortConfigDone")
        # BUGFIX(idiom): compare against None with `is not`, not `!=`
        is_pcfg_done = pcfg_val is not None
        chk_cnt += 1
        if is_pcfg_done or chk_cnt % 3 == 1:
            util_utl.utl_log("PORT TABLE was%sready...(%s)" %
                             ([" not ", " "][is_pcfg_done], chk_cnt),
                             logging.CRITICAL)
        if is_pcfg_done:
            break
        time.sleep(10)

    # create the full yang tree
    # for performance, only update the tree node requested
    self.oc_yph = YANGPathHelper()
    for k in ocTable.keys():
        if ocTable[k]["cls"]:
            ocTable[k]["cls"](path_helper=self.oc_yph)

    # create obj for "/" to only return subtree of depth 1
    openconfig_root_dpt_1(self.oc_yph)

    # create all interfaces to speed up processing request for interfaces later
    util_interface.interface_create_all_infs(self.oc_yph, is_dbg_test, self.my_args)

    # create default network instance
    util_nwi.nwi_create_dflt_nwi(self.oc_yph, is_dbg_test)

    # create default objects
    util_qos.qos_create_dflt_obj(self.oc_yph, is_dbg_test)

    # check if new teammgrd is used (probe inside the docker-teamd image)
    test_cmd = 'docker run --rm=true --privileged=true --entrypoint="/bin/bash" "docker-teamd" -c "[ -f /usr/bin/teammgrd ]"'
    util_interface.IS_NEW_TEAMMGRD = util_utl.utl_execute_cmd(test_cmd)
def vlan_delete(root_yph, pkey_ar, disp_args):
    """Delete VLAN pkey_ar[0] from the config DB.

    Removes every member port entry (and its IPs) first, then the VLAN
    table entry itself.  Returns True on success, False on any failure.
    """
    try:
        vlan_name = pkey_ar[0]
        vlan_info = disp_args.cfgdb.get_entry(CFGDB_TABLE_NAME_VLAN, vlan_name)
        if not vlan_info:
            utl_log("cannot find vlan {}".format(vlan_name))
            return False

        # drop the member bindings before removing the VLAN itself
        for interface_name in vlan_info.get('members', []):
            disp_args.cfgdb.set_entry(CFGDB_TABLE_NAME_VLAN_MBR,
                                      (vlan_name, interface_name), None)
            interface_db_clear_ip(disp_args.cfgdb, interface_name)

        disp_args.cfgdb.set_entry(CFGDB_TABLE_NAME_VLAN, vlan_name, None)
    except Exception as ex:
        # BUGFIX: was a bare `except:` that silently swallowed everything
        # (incl. KeyboardInterrupt); narrow it and log the cause
        utl_log("vlan_delete failed: {}".format(ex))
        return False
    return True
def vlan_delete_member(root_yph, pkey_ar, disp_args):
    """Remove port pkey_ar[0] from VLAN pkey_ar[1] in the config DB.

    Updates (or drops) the VLAN's 'members' list and deletes the
    VLAN-member table entry.  Returns True on success, False on failure.
    """
    try:
        vlan_name = pkey_ar[1]
        interface_name = pkey_ar[0]
        vlan_info = disp_args.cfgdb.get_entry(CFGDB_TABLE_NAME_VLAN, vlan_name)
        if not vlan_info:
            utl_log("cannot find vlan {}".format(vlan_name))
            return False

        interface_names = vlan_info.get('members', [])
        if interface_name in interface_names:
            interface_names.remove(interface_name)
            if not interface_names:
                # last member removed: drop the key entirely
                del vlan_info['members']
            else:
                vlan_info['members'] = interface_names
            disp_args.cfgdb.set_entry(CFGDB_TABLE_NAME_VLAN, vlan_name, vlan_info)

        disp_args.cfgdb.set_entry(CFGDB_TABLE_NAME_VLAN_MBR,
                                  (vlan_name, interface_name), None)
    except Exception as ex:
        # BUGFIX: was a bare `except:`; narrow it and log the cause
        utl_log("vlan_delete_member failed: {}".format(ex))
        return False
    return True
def vlan_set_member(oc_yph, pkey_ar, val, is_create, disp_args):
    """Add/update VLAN members from a serialized SonicVlan.VlanMember proto.

    For each member: ensures the port is in the VLAN's 'members' list and
    writes its tagging mode to the VLAN-member table.  Returns True on
    success, False on any failure.
    """
    try:
        members = SonicVlan.VlanMember.FromString(val)
        for member in members.vlan_member_list:
            member_info = {}
            vlan_name = member.vlan_name
            interface_name = member.port
            # NOTE(review): re-reads `vlan_member_list` on the element
            # itself — presumably a nested field of the proto; confirm
            # against the SonicVlan schema
            data = member.vlan_member_list
            member_info["tagging_mode"] = str(VLANTaggingMode(data.tagging_mode))

            vlan_info = disp_args.cfgdb.get_entry(CFGDB_TABLE_NAME_VLAN, vlan_name)
            if not vlan_info:
                utl_log("cannot find vlan {}".format(vlan_name))
                continue

            interface_names = vlan_info.get('members', [])
            if interface_name not in interface_names:
                interface_names.append(interface_name)
                vlan_info["members"] = interface_names
                disp_args.cfgdb.set_entry(CFGDB_TABLE_NAME_VLAN, vlan_name, vlan_info)

            disp_args.cfgdb.set_entry(CFGDB_TABLE_NAME_VLAN_MBR,
                                      (vlan_name, interface_name), member_info)
    except Exception as ex:
        # BUGFIX: was a bare `except:`; narrow it and log the cause
        utl_log("vlan_set_member failed: {}".format(ex))
        return False
    return True
def TimerEventHandler(self, timer_q):
    """Background tick loop for subscription timers.

    Once a second: drains new records from timer_q, decrements each
    pending record's 'cur-tick', and when it hits zero posts the record's
    req/subs back onto its work queue.  Records whose client context is
    no longer active are dropped (and logged).  Runs until
    self.is_stopped becomes true.
    """
    pending = []
    while not self.is_stopped:
        # absorb any newly registered timer records
        while timer_q:
            pending.append(timer_q.pop())

        survivors = []
        for rec in pending:
            if not rec['context'].is_active():
                # client went away; forget its timer
                util_utl.utl_log("subscribe client exit %s" % rec['subs'].path)
                continue
            rec['cur-tick'] -= 1
            if rec['cur-tick'] == 0:
                # timer fired: hand the work back to the subscribe handler
                rec['workq'].put({'req': rec['req'], 'subs': rec['subs']})
            else:
                survivors.append(rec)

        # replace contents in place (callers may hold a reference)
        pending[0:] = survivors
        time.sleep(1)
def Subscribe(self, request, context):
    """gNMI Subscribe RPC entry point: log and delegate to the processor."""
    util_utl.utl_log("Recv'ed Subscribe Request")
    response_stream = self.__processSubscribeRequestObj(request, context)
    return response_stream
def Set(self, request, context):
    """gNMI Set RPC entry point: log and delegate to the set processor."""
    util_utl.utl_log("Recv'ed Set Request")
    set_response = self.__processSetRequestObj(request)
    return set_response
def Capabilities(self, request, context):
    """gNMI Capabilities RPC entry point: log and return the capabilities."""
    # BUGFIX: log message misspelled "Capabiality"
    util_utl.utl_log("Recv'ed Capability Request")
    return self.__getCapabilitiesResponseObj()
def __processSubscribeRequestObj(self, reqSubObj, context):
    """Generator servicing a gNMI Subscribe RPC stream.

    Iterates the incoming request stream and yields SubscribeResponse
    messages.  STREAM subscriptions are re-armed via self.Timer_Q (see
    TimerEventHandler), which posts work records back onto my_work_q.

    NOTE(review): Python 2 `print` statements; stream/poll/aliases
    handling is clearly work-in-progress (placeholder "Test" values,
    empty branches) — confirm intended semantics before relying on it.
    """
    # TODO: process more than one req ?
    for req in reqSubObj:
        util_utl.utl_log(req)
        #pdb.set_trace()
        k = req.WhichOneof("request")
        util_utl.utl_log(k)
        #val = getattr(req, k)
        #print val
        if k == 'subscribe':
            # queue each subscription as an individual work record
            my_work_q = Queue.Queue()
            for subs in req.subscribe.subscription:
                work_rec = {'req': req, 'subs': subs}
                my_work_q.put(work_rec)
            while not self.is_stopped:
                # wait here until work_rec occurs (from Timer_Q or enter here first time)
                try:
                    cur_work_rec = my_work_q.get(True, 1)
                except Queue.Empty:
                    continue
                cur_req = cur_work_rec['req']
                cur_subs = cur_work_rec['subs']
                # debug trace of the subscription mode (Python 2 print)
                print ["stream", "once", "poll"][cur_req.subscribe.mode]
                subResp = gnmi_pb2.SubscribeResponse()
                subResp.update.timestamp = int(time.time())
                if cur_req.subscribe.mode == 0:     # stream
                    # send first response then wait for next round
                    # 1) put each subs into Timer_Q ???
                    # 2) wait for timer event occurs
                    print ["target defined", "on change", "sample"][cur_subs.mode]
                    print cur_subs.path
                    # on_change : check heartbeat_interval
                    # sample : check sample_interval, suppress_redundant (heartbeat_interval)
                    # target defined: per leaf
                    path_ar = []
                    path_ar += EncodePath(cur_req.subscribe.prefix.elem)
                    path_ar += EncodePath(cur_subs.path.elem)
                    subResp.update.prefix.CopyFrom(cur_req.subscribe.prefix)
                    update = subResp.update.update.add()
                    update.path.CopyFrom(cur_subs.path)
                    update.val.string_val = "Test"  # placeholder payload
                    yield subResp
                    # re-arm: TimerEventHandler decrements 'cur-tick' once a
                    # second and re-posts the record to my_work_q at zero
                    trec = {'req': cur_req, 'subs': cur_subs, 'cur-tick': 10,
                            'context': context, 'workq': my_work_q}
                    self.Timer_Q.append(trec)
                    pass
                elif cur_req.subscribe.mode == 1:   # once
                    # send a final response with sync_response set to True
                    # not need to check the subscription's mode
                    if my_work_q.empty():
                        subResp.sync_response = True
                        yield subResp
                        return
                    else:
                        update = subResp.update.update.add()
                        update.val.string_val = "Test"  # placeholder payload
                        yield subResp
                    path_lst = []
                    pass
                elif cur_req.subscribe.mode == 2:   # poll
                    # send first response then wait for next round
                    pass
        elif k == 'poll':
            # on demand request
            pass
        elif k == 'aliases':
            pass
def __processSetRequestObj(self, reqSetObj):
    """Process a gNMI SetRequest and build the SetResponse.

    Operations are handled in the gNMI-specified order: delete, replace,
    update (refer to gnmi-specification.md).  Delete and replace are
    currently only logged; update dispatches through
    self.myDispatcher.SetValByPath.  Per-update result codes go into the
    response; the overall code is ABORTED if any update failed, else OK.
    """
    IsAnyErr = False
    pathPrefix = EncodePath(reqSetObj.prefix.elem)
    util_utl.utl_log(reqSetObj)

    # Now Build the set response
    #  one error  => error code
    #  all other  => aborted (10)
    #  over all   => aborted (10)
    setResp = gnmi_pb2.SetResponse()
    setResp.timestamp = int(time.time())

    # input : path (delete) -- currently logged only
    for delete in reqSetObj.delete:
        delPath = pathPrefix + EncodePath(delete.elem)
        util_utl.utl_log(delPath)

    # input : path, val
    # When the `replace` operation omits values that have been previously
    # set, they MUST be treated as deleted from the data tree. Otherwise,
    # omitted data elements MUST be created with their default values on
    # the target.  (currently logged only)
    for replace in reqSetObj.replace:
        repPath = pathPrefix + EncodePath(replace.path.elem)
        k = replace.val.WhichOneof("value")
        util_utl.utl_log(k)
        val = getattr(replace.val, k)
        util_utl.utl_log(val)
        util_utl.utl_log(repPath)

    # input : same as replace
    for update in reqSetObj.update:
        updPath = pathPrefix + EncodePath(update.path.elem)
        # 1. check if path is valid
        # 2. issue command to do configuration
        # only support '/interfaces/interface[name=Ethernet7]/ethernet/config/aggregate-id'
        pkey_ar = EncodePathKey(update.path.elem)
        set_val = getattr(update.val, update.val.WhichOneof("value"))
        yp_str = EncodeYangPath(updPath)
        util_utl.utl_log("set req path :" + yp_str)
        util_utl.utl_log("set req val :" + set_val)

        # BUGFIX: release the lock even if SetValByPath raises; the old
        # acquire()/release() pair would leak the lock on an exception and
        # deadlock every later Set/Get
        self.lock.acquire()
        try:
            ret_set = self.myDispatcher.SetValByPath(yp_str, pkey_ar, set_val)
        finally:
            self.lock.release()

        if ret_set:
            ret_set = grpc.StatusCode.OK
        else:
            IsAnyErr = True
            ret_set = grpc.StatusCode.INVALID_ARGUMENT

        self.__AddOneSetResp(setResp, update.path, 3, ret_set, None)
        util_utl.utl_log("set req code :" + str(ret_set))

    # Fill error message
    # refer to google.golang.org/grpc/codes
    # overall result
    if IsAnyErr:
        ret_code = grpc.StatusCode.ABORTED
    else:
        ret_code = grpc.StatusCode.OK

    setResp.message.code = ret_code.value[0]
    setResp.message.message = ret_code.value[1]
    return setResp
def __processGetRequestObj(self, reqGetObj):
    """Process a gNMI GetRequest and build the GetResponse.

    For each requested path: resolves the YANG subtree under the
    dispatcher lock, serializes it to JSON and appends a notification.
    On the first failing path, fills getResp.error and stops.
    """
    pfx_ar = EncodePath(reqGetObj.prefix.elem)
    t = reqGetObj.type

    #FIXME: Build the get response for all the paths
    getResp = gnmi_pb2.GetResponse()
    for path in reqGetObj.path:
        er_code = grpc.StatusCode.INVALID_ARGUMENT
        path_ar = pfx_ar + EncodePath(path.elem)
        pkey_ar = EncodePathKey(path.elem)
        yp_str = EncodeYangPath(path_ar)
        util_utl.utl_log("get req path :" + yp_str)

        tmp_json = None
        self.lock.acquire()
        try:
            oc_yph = self.myDispatcher.GetRequestYph(path_ar, pkey_ar)
            if isinstance(oc_yph, grpc.StatusCode):
                # dispatcher signalled an error directly
                er_code = oc_yph
            else:
                tmp_obj = oc_yph.get(yp_str) if oc_yph else []
                # TODO: if got more than one obj ?
                if len(tmp_obj) >= 1:
                    tmp_json = {}
                    for obj in tmp_obj:
                        obj_json = ExtractJson(obj, None)
                        if obj_json:
                            # remove "'" for the key in the _yang_path
                            obj_path = re.sub(r'\[([\w-]*)=\'([^]]*)\'\]',
                                              r'[\1=\2]', obj._yang_path())
                            if len(tmp_obj) == 1 and obj_path == yp_str:
                                # single exact match: return the bare object
                                tmp_json = obj_json
                            else:
                                tmp_json[obj_path] = obj_json
        except Exception:
            # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt)
            er_code = grpc.StatusCode.INTERNAL
            tmp_json = None
        finally:
            self.lock.release()

        if tmp_json is not None:
            notif = getResp.notification.add()
            notif.timestamp = int(time.time())
            notif.prefix.CopyFrom(reqGetObj.prefix)
            update = notif.update.add()
            # NOTE(review): always copies path[0], even when iterating a
            # later path — looks wrong for multi-path requests; confirm
            update.path.CopyFrom(reqGetObj.path[0])
            util_utl.utl_log("get req json :" + json.dumps(tmp_json))
            update.val.json_val = json.dumps(tmp_json)
            er_code = grpc.StatusCode.OK

        util_utl.utl_log("get req code :" + str(er_code))
        if er_code != grpc.StatusCode.OK:
            getResp.error.code = er_code.value[0]
            getResp.error.message = er_code.value[1]
            break

    return getResp