central collector. Message destination(s) are controlled by the RMR routing table.
Message contents must comply with the JSON schema in file metric-schema.json.
"""
from ctypes import c_void_p
import json
import time
from mdclogpy import Logger
from ricxappframe.rmr import rmr
from ricxappframe.metric.exceptions import EmptyReport

##############
# PRIVATE API
##############

mdc_logger = Logger(name=__name__)
RETRIES = 4

##############
# PUBLIC API
##############

# constants
RIC_METRICS = 120  # message type

# Publish dict keys as constants for convenience of client code.
KEY_REPORTER = "reporter"
KEY_GENERATOR = "generator"
KEY_TIMESTAMP = "timestamp"
KEY_DATA = "data"
KEY_DATA_ID = "id"
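The constants above hint at the shape of a metrics report: a JSON object naming the reporter and generator, a timestamp, and a list of data items. The following hedged sketch assembles such a payload by hand for sending with message type RIC_METRICS; the field values and the timestamp unit are assumptions, and metric-schema.json remains the authority on the exact structure.

# Illustrative only: build a metrics payload from the key constants above.
# Relies on the json/time imports and KEY_* constants of this module fragment.
report = {
    KEY_REPORTER: "my-xapp",                    # assumed reporter identity
    KEY_GENERATOR: "my-xapp-worker",            # assumed generator identity
    KEY_TIMESTAMP: int(time.time() * 1000),     # assumed: milliseconds since the epoch
    KEY_DATA: [
        {KEY_DATA_ID: "ue_count", "value": 42}  # per-item fields beyond the id are assumptions
    ],
}
payload = json.dumps(report).encode()
# An xapp built on the framework could then ship it, e.g. self.rmr_send(payload, RIC_METRICS)
# (rmr_send appears later in this document); error handling is left to the caller.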
class MyXapp(object):

    def __init__(self, name, service, address, port, config, loglevel):
        signal.signal(signal.SIGQUIT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGINT, self.signal_handler)
        self.running = False
        self.configData = Config(name, config)
        if self.configData.controls.get('logger'):
            level = self.configData.controls['logger'].get('level')
        if port is None and self.configData.cfg['messaging'].get('ports'):
            for item in self.configData.cfg['messaging'].get('ports'):
                if item['name'] == "http":
                    port = int(item['port'])
        if address is None and self.configData.cfg.get('name'):
            address = self.configData.cfg.get('name')
        # save the listen address and port for later use
        self.port = port
        self.address = address
        self.logger = Logger(name, loglevel)
        # setup the symptomdata
        symptomCfg = self.GetSymptomConfig()
        self.symptomHndl = Symptomdata(service, name, "/tmp/", symptomCfg['url'], symptomCfg['timeout'])
        # create the threaded HTTP server and set the uri handler callbacks
        self.server = ricrest.ThreadedHTTPServer(address, port)
        # trick to get our own handler with self defined
        self.server.handler.add_handler(self.server.handler, "GET", "config", "/ric/v1/config", self.configGetHandler)
        self.server.handler.add_handler(self.server.handler, "GET", "healthAlive", "/ric/v1/health/alive", self.healthyGetAliveHandler)
        self.server.handler.add_handler(self.server.handler, "GET", "healthReady", "/ric/v1/health/ready", self.healthyGetReadyHandler)
        self.server.handler.add_handler(self.server.handler, "GET", "symptomdata", "/ric/v1/symptomdata", self.symptomdataGetHandler)
        # start rest server
        self.server.start()
        # start RMR
        self.startRMR(service, 4)
        self.running = True
        # now we can subscribe
        self.Subscribe()

    def startRMR(self, service, level):
        # handle the RMR_SEED_RT and RMR_RTG_SVC which is different in mcxapp
        data = None
        os.environ["RMR_SRC_ID"] = service
        os.environ["RMR_LOG_VLEVEL"] = str(level)
        os.environ["RMR_RTG_SVC"] = "4561"
        rmrseed = os.environ.get('RMR_SEED_RT')
        if rmrseed is not None:
            data = read_file(rmrseed)
            if data is None:
                self.logger.warning("RMR seed file %s does not exist or is empty" % (rmrseed))
        else:
            self.logger.info("RMR_SEED_RT seed file not set in environment")
            data = read_file('uta-rtg.rt')
            if data is not None:
                os.environ['RMR_SEED_RT'] = "./uta-rtg.rt"
                self.logger.info("Setting the default RMR_SEED_RT=uta-rtg.rt - content:")
            else:
                self.logger.info("Try to export the RMR_SEED_RT file if your RMR is not getting ready")
        self.rmrInit(b"4560")

    def signal_handler(self, sig, frame):
        if self.running is True:
            self.server.stop()
            rmr.rmr_close(self.rmr_mrc)
        self.running = False
        sys.exit(0)

    def rmrInit(self, initbind):
        # init rmr
        self.rmr_mrc = rmr.rmr_init(initbind, rmr.RMR_MAX_RCV_BYTES, 0x00)
        while rmr.rmr_ready(self.rmr_mrc) == 0:
            time.sleep(1)
            self.logger.info('RMR not yet ready')
        rmr.rmr_set_stimeout(self.rmr_mrc, 1)
        rmr.rmr_set_vlevel(5)
        self.logger.info('RMR ready')

    def GetSymptomConfig(self):
        if self.configData.cfg['controls'].get('symptomdata').get('lwsd'):
            return self.configData.cfg['controls'].get('symptomdata').get('lwsd')

    def GetSubsConfig(self):
        if self.configData.cfg['controls'].get('subscription'):
            return self.configData.cfg['controls'].get('subscription')

    def Subscribe(self):
        self.subsCfgDetail = self.GetSubsConfig()
        if self.subsCfgDetail is not None:
            # this is an example subscription; fill the attributes according to your use case
            self.subscriber = subscribe.NewSubscriber(self.subsCfgDetail['url'] + 'ric/v1')
            # add also our own subscription response callback handler
            if self.subscriber.ResponseHandler(self.subsResponseCB, self.server) is not True:
                self.logger.error("Error when trying to set the subscription response callback")
            # setup the subscription data
            subEndPoint = self.subscriber.SubscriptionParamsClientEndpoint(self.subsCfgDetail['clientEndpoint'], self.port, 4061)
            subsDirective = self.subscriber.SubscriptionParamsE2SubscriptionDirectives(10, 2, False)
            subsequentAction = self.subscriber.SubsequentAction("continue", "w10ms")
            actionDefinitionList = self.subscriber.ActionToBeSetup(1, "policy", (11, 12, 13, 14, 15), subsequentAction)
            subsDetail = self.subscriber.SubscriptionDetail(12110, (1, 2, 3, 4, 5), actionDefinitionList)
            # subscription data ready, make the subscription
            subObj = self.subscriber.SubscriptionParams("sub10", subEndPoint, "gnb123456", 1231, subsDirective, subsDetail)
            self.logger.info("Sending the subscription to %s" % (self.subsCfgDetail['url'] + 'ric/v1'))
            self.logger.info(subObj.to_dict())
            # subscribe
            data, reason, status = self.subscriber.Subscribe(subObj)
            # returns the json data, make it a dictionary
            self.logger.info("Getting the subscription response")
            self.logger.info(json.loads(data))

    def Unsubscribe(self):
        reason, status = self.subscriber.UnSubscribe("ygwefwebw")

    def QuerySubscribtions(self):
        data, reason, status = self.subscriber.QuerySubscriptions()

    def healthyGetReadyHandler(self, name, path, data, ctype):
        response = server.initResponse()
        response['payload'] = ("{'status': 'ready'}")
        return response

    def healthyGetAliveHandler(self, name, path, data, ctype):
        response = server.initResponse()
        response['payload'] = ("{'status': 'alive'}")
        return response

    def subsResponseCB(self, name, path, data, ctype):
        response = server.initResponse()
        response['payload'] = ("{}")
        return response

    def getSymptomData(self, uriparams):
        paramlist = urllib.parse.parse_qs(uriparams)
        [x.upper() for x in paramlist]
        fromtime = 0
        totime = 0
        print(paramlist)
        if paramlist.get('fromTime'):
            fromtime = getSeconds(paramlist.get('fromTime')[0])
        if paramlist.get('toTime'):
            totime = getSeconds(paramlist.get('toTime')[0])
        zipfile = self.symptomHndl.collect("symptomdata" + '-%Y-%m-%d-%H-%M-%S.zip', ('examples/.*.py',), fromtime, totime)
        if zipfile is not None:
            (zipfile, size, data) = self.symptomHndl.read()
            return (zipfile, size, data)
        return (None, 0, None)

    def symptomdataGetHandler(self, name, path, data, ctype):
        response = ricrest.initResponse()
        (zipfile, size, filedata) = self.getSymptomData(path[20:])
        if filedata is not None:
            response['payload'] = filedata
            response['ctype'] = 'application/zip'
            response['attachment'] = "symptomdata.zip"
            response['mode'] = 'binary'
            return response
        logging.error("Symptom data does not exist")
        response['response'] = 'System error - symptomdata does not exist'
        response['status'] = 500
        return response

    def configGetHandler(self, name, path, data, ctype):
        response = server.initResponse()
        response['payload'] = (self.configData.get_config())
        return response
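A hedged sketch of how this example class might be launched follows; the xapp name, service id, config path, and log level are hypothetical values, and the loop simply parks the main thread while the HTTP server, RMR, and subscription run until a signal arrives.

# Hypothetical launcher for the example class above; every literal value here is
# illustrative, not a documented default.
if __name__ == "__main__":
    from mdclogpy import Level
    app = MyXapp("myxapp", "service-ricxapp-myxapp-http", None, None,
                 "/opt/ric/config/config-file.json", Level.INFO)
    while app.running:
        time.sleep(10)  # signal_handler() stops the server and exits on SIGQUIT/SIGTERM/SIGINT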
#
# If the xapp has the internal data for the symptomdata collection REST call response, it can use the
# helper methods getFileList and collect to get the needed files or a ready-made zipped package for the response.
#
import os
import re
import time
import requests
import json
from requests.exceptions import HTTPError
from zipfile import ZipFile
from threading import Timer
from datetime import datetime
from mdclogpy import Logger

logging = Logger(name=__name__)


class RepeatTimer(Timer):
    # timer class for housekeeping and file rotating
    def run(self):
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)


class Symptomdata(object):
    # service is the local POD service id, path is the temporary storage, host should be the trblmgr service name
    def __init__(self, service="", servicehost="", path="/tmp/", lwsduri=None, timeout=30):
        """ init
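Below is a minimal, hedged sketch of driving Symptomdata directly, mirroring the calls the example xapp above makes; the service names and file pattern are assumptions, and the collect()/read() usage is taken from that example rather than from a full API reference.

# Hypothetical standalone use of Symptomdata; literal values are illustrative only.
symptom = Symptomdata(service="myxapp", servicehost="myxapp-pod", path="/tmp/",
                      lwsduri=None, timeout=30)
# collect files matching the regex patterns into a timestamped zip, as the example handler does
zipname = symptom.collect("symptomdata" + '-%Y-%m-%d-%H-%M-%S.zip', ('examples/.*.py',), 0, 0)
if zipname is not None:
    zipname, size, data = symptom.read()  # read back the zipped package for the REST response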
class TestMdcLog(unittest.TestCase):
    """Unit tests for mdclog.py"""

    def setUp(self):
        self.logger = Logger()

    def tearDown(self):
        pass

    def test_that_get_level_returns_the_current_log_level(self):
        # default level is ERROR
        self.assertEqual(self.logger.get_level(), Level.ERROR)
        self.logger.set_level(Level.INFO)
        self.assertEqual(self.logger.get_level(), Level.INFO)
        self.logger.set_level(Level.WARNING)
        self.assertEqual(self.logger.get_level(), Level.WARNING)
        self.logger.set_level(Level.ERROR)
        self.assertEqual(self.logger.get_level(), Level.ERROR)
        self.logger.set_level(Level.DEBUG)
        self.assertEqual(self.logger.get_level(), Level.DEBUG)

    def test_that_set_level_does_not_accept_incorrect_level(self):
        self.logger.set_level(Level.INFO)
        self.logger.set_level(55)
        self.assertEqual(self.logger.get_level(), Level.INFO)

    @patch('mdclogpy.Logger._output_log')
    def test_that_logs_with_lower_than_current_level_are_not_logged(self, output_mock):
        self.logger.set_level(Level.WARNING)
        self.logger.log(Level.DEBUG, "DEBUG")
        self.logger.log(Level.INFO, "INFO")
        self.logger.log(Level.WARNING, "WARNING")
        self.logger.log(Level.ERROR, "ERROR")
        self.assertEqual(2, output_mock.call_count)
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        self.assertEqual(logs[0]["msg"], "WARNING")
        self.assertEqual(logs[1]["msg"], "ERROR")

    @patch('mdclogpy.Logger._output_log')
    def test_that_log_contains_correct_criticality(self, output_mock):
        self.logger.set_level(Level.DEBUG)
        self.logger.log(Level.DEBUG, "debug test log")
        self.logger.log(Level.INFO, "info test log")
        self.logger.log(Level.WARNING, "warning test log")
        self.logger.log(Level.ERROR, "error test log")
        self.logger.debug("another debug test log")
        self.logger.info("another info test log")
        self.logger.warning("another warning test log")
        self.logger.error("another error test log")
        self.assertEqual(8, output_mock.call_count)
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        self.assertEqual(logs[0]["crit"], "DEBUG")
        self.assertEqual(logs[1]["crit"], "INFO")
        self.assertEqual(logs[2]["crit"], "WARNING")
        self.assertEqual(logs[3]["crit"], "ERROR")
        self.assertEqual(logs[4]["crit"], "DEBUG")
        self.assertEqual(logs[5]["crit"], "INFO")
        self.assertEqual(logs[6]["crit"], "WARNING")
        self.assertEqual(logs[7]["crit"], "ERROR")

    @patch('time.time')
    @patch('mdclogpy.Logger._output_log')
    def test_that_log_contains_correct_timestamp(self, output_mock, mock_time):
        mock_time.return_value = 1554806251.4388545
        self.logger.error("timestamp test")
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        self.assertEqual(logs[0]["ts"], 1554806251439)

    @patch('mdclogpy.Logger._output_log')
    def test_that_log_contains_correct_message(self, output_mock):
        self.logger.error("message test")
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        print(logs)
        self.assertEqual(logs[0]["msg"], "message test")

    @patch('mdclogpy.Logger._output_log')
    def test_that_log_message_is_escaped_to_valid_json_string(self, output_mock):
        self.logger.set_level(Level.DEBUG)
        self.logger.info('\\ and "')
        logs = TestMdcLogUtils.get_logs(output_mock.call_args_list)
        self.assertTrue(r'\\ and \"' in logs[0])
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        self.assertEqual(logs[0]["msg"], '\\ and "')

    @patch('mdclogpy.Logger._output_log')
    def test_that_empty_mdc_is_logged_correctly(self, output_mock):
        self.logger.mdclog_format_init(configmap_monitor=True)
        self.logger.error("empty mdc test")
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        self.assertEqual(logs[0]["msg"], 'empty mdc test')

    @patch('mdclogpy.Logger._output_log')
    def test_that_config_map_is_monitored_correctly(self, output_mock):
        src = open("//tmp//log", "w")
        src.write("log-level: debug\n")
        src.close()
        self.logger.filename = "/tmp/log"
        self.logger.dirname = "/tmp/"
        self.logger.mdc = {"PID": "", "SYSTEM_NAME": "", "HOST_NAME": "",
                           "SERVICE_NAME": "", "CONTAINER_NAME": "", "POD_NAME": ""}
        self.logger.get_env_params_values()
        self.logger.parse_file()
        self.logger.error("Hello")
        self.assertEqual(self.logger.get_level(), Level.DEBUG)

    @patch('mdclogpy.Logger._output_log')
    def test_that_mdc_values_are_logged_correctly(self, output_mock):
        self.logger.add_mdc("key1", "value1")
        self.logger.add_mdc("key2", "value2")
        self.logger.error("mdc test")
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        self.assertEqual(logs[0]["mdc"]["key1"], "value1")
        self.assertEqual(logs[0]["mdc"]["key2"], "value2")

    @patch('mdclogpy.Logger._output_log')
    def test_that_mdc_pid_logged_correctly(self, output_mock):
        self.logger.mdclog_format_init(configmap_monitor=True)
        self.logger.error("mdc test")
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        self.assertTrue(logs[0]["mdc"]["PID"])

    def test_that_mdc_values_can_be_added_and_removed(self):
        self.logger.add_mdc("key1", "value1")
        self.logger.add_mdc("key2", "value2")
        self.assertEqual(self.logger.get_mdc("key2"), "value2")
        self.assertEqual(self.logger.get_mdc("key1"), "value1")
        self.assertEqual(self.logger.get_mdc("non_existent"), None)
        self.logger.remove_mdc("key1")
        self.assertEqual(self.logger.get_mdc("key1"), None)
        self.logger.remove_mdc("non_existent")
        self.logger.clean_mdc()
        self.assertEqual(self.logger.get_mdc("key2"), None)

    @patch('mdclogpy.Logger._output_log')
    def test_update_mdc_log_level_severity(self, output_mock):
        self.logger.update_mdc_log_level_severity("error")
        self.logger.update_mdc_log_level_severity("warning")
        self.logger.update_mdc_log_level_severity("info")
        self.logger.update_mdc_log_level_severity("debug")
        self.logger.update_mdc_log_level_severity("")

    @patch('mdclogpy.Logger._output_log')
    def test_output_log(self, output_mock):
        self.logger._output_log("Logger")

    @patch('mdclogpy.Logger._output_log')
    def test_register_log_change_notify(self, output_mock):
        self.logger.dirname = "/tmp/"
        self.logger.filename = "/tmp/log"
        self.logger.register_log_change_notify()

    @patch('mdclogpy.Logger._output_log')
    def test_multiple_logger_instances(self, output_mock):
        logger1 = Logger("logger1")
        logger2 = Logger("logger2")
        logger1.add_mdc("logger1_key1", "logger1_value1")
        logger1.add_mdc("logger1_key2", "logger1_value2")
        logger2.add_mdc("logger2_key1", "logger2_value1")
        logger2.add_mdc("logger2_key2", "logger2_value2")
        mdclogpy.add_mdc("key", "value")
        logger1.error("error msg")
        logger2.error("warning msg")
        mdclogpy.error("info msg")
        logs = TestMdcLogUtils.get_logs_as_json(output_mock.call_args_list)
        self.assertEqual(3, output_mock.call_count)
        self.assertEqual(logs[0]["id"], "logger1")
        self.assertEqual(logs[0]["crit"], "ERROR")
        self.assertEqual(logs[0]["msg"], "error msg")
        self.assertEqual(logs[0]["mdc"]["logger1_key1"], "logger1_value1")
        self.assertEqual(logs[0]["mdc"]["logger1_key2"], "logger1_value2")
        self.assertEqual(len(logs[0]["mdc"]), 2)
        self.assertEqual(logs[1]["id"], "logger2")
        self.assertEqual(logs[1]["crit"], "ERROR")
        self.assertEqual(logs[1]["msg"], "warning msg")
        self.assertEqual(logs[1]["mdc"]["logger2_key1"], "logger2_value1")
        self.assertEqual(logs[1]["mdc"]["logger2_key2"], "logger2_value2")
        self.assertEqual(len(logs[1]["mdc"]), 2)
        self.assertEqual(logs[2]["id"], sys.argv[0])
        self.assertEqual(logs[2]["crit"], "ERROR")
        self.assertEqual(logs[2]["msg"], "info msg")
        self.assertEqual(logs[2]["mdc"]["key"], "value")
        self.assertEqual(len(logs[2]["mdc"]), 1)
class _BaseXapp:
    """
    This class initializes RMR, starts a thread that checks for incoming messages,
    provisions an SDL object and optionally creates a config-file watcher.
    This private base class should not be instantiated by clients directly,
    but it defines many public methods that may be used by clients.

    If environment variable CONFIG_FILE is defined, and that variable contains
    a path to an existing file, a watcher is defined to monitor modifications
    (writes) to that file using the Linux kernel's inotify feature. The watcher
    must be polled by calling method config_check().

    Parameters
    ----------
    rmr_port: int (optional, default is 4562)
        Port on which the RMR library listens for incoming messages.

    rmr_wait_for_ready: bool (optional, default is True)
        If this is True, then init waits until RMR is ready to send, which
        includes having a valid routing file. This can be set to False if
        the client wants to *receive only*.

    use_fake_sdl: bool (optional, default is False)
        if this is True, it uses the DBaaS "fake dict backend" instead of
        Redis or other backends. Set this to True when developing an xapp
        or during unit testing to eliminate the need for DBaaS.

    post_init: function (optional, default is None)
        Runs this user-provided function at the end of the init method;
        its signature should be post_init(self)
    """

    def __init__(self, rmr_port=4562, rmr_wait_for_ready=True, use_fake_sdl=False, post_init=None):
        """
        Documented in the class comment.
        """
        # PUBLIC, can be used by xapps using self.(name):
        self.logger = Logger(name=__name__)
        self._appthread = None

        # Start rmr rcv thread
        self._rmr_loop = xapp_rmr.RmrLoop(port=rmr_port, wait_for_ready=rmr_wait_for_ready)
        self._mrc = self._rmr_loop.mrc  # for convenience

        # SDL
        self.sdl = SDLWrapper(use_fake_sdl)

        # Config
        # The environment variable specifies the path to the Xapp config file
        self._config_path = os.environ.get(Constants.CONFIG_FILE_ENV, None)
        if self._config_path and os.path.isfile(self._config_path):
            self._inotify = inotify_simple.INotify()
            self._inotify.add_watch(self._config_path, inotify_simple.flags.MODIFY)
            self.logger.debug("__init__: watching config file {}".format(self._config_path))
        else:
            self._inotify = None
            self.logger.warning("__init__: NOT watching any config file")

        # used for thread control of Registration of Xapp
        self._keep_registration = True

        # configuration data for xapp registration and deregistration
        self._config_data = None
        if self._config_path and os.path.isfile(self._config_path):
            with open(self._config_path) as json_file:
                self._config_data = json.load(json_file)
        else:
            self._keep_registration = False
            self.logger.error("__init__: Cannot Read config file for xapp Registration")
            self._config_data = {}

        # start the registration thread; keep the Thread object so stop() can join it
        self._appthread = Thread(target=self.registerXapp)
        self._appthread.start()

        # run the optionally provided user post init
        if post_init:
            post_init(self)

    def get_service(self, host, service):
        """
        To find the url for connecting to the service

        Parameters
        ----------
        host: string
            defines the hostname in the url
        service: string
            defines the servicename in the url

        Returns
        -------
        string
            url for the service
        """
        app_namespace = self._config_data.get("APP_NAMESPACE")
        if app_namespace is None:
            app_namespace = Constants.DEFAULT_XAPP_NS
        self.logger.debug("service : {} host : {}, appnamespace : {}".format(service, host, app_namespace))
        if app_namespace is not None and host is not None:
            svc = service.format(app_namespace.upper(), host.upper())
            urlkey = svc.replace("-", "_")
            url = os.environ.get(urlkey).split("//")
            self.logger.debug("Service urlkey : {} and url: {}".format(urlkey, url))
            if len(url) > 1:
                return url[1]
        return ""

    def do_post(self, plt_namespace, url, msg):
        """
        registration of the xapp using the url and json msg

        Parameters
        ----------
        plt_namespace: string
            platform namespace where the xapp is running
        url: string
            url for xapp registration
        msg: string
            json msg containing the xapp details

        Returns
        -------
        bool
            whether or not the xapp is registered
        """
        if url is None:
            self.logger.error("url is empty")
            return False
        if plt_namespace is None:
            self.logger.error("plt_namespace is empty")
            return False
        try:
            request_url = url.format(plt_namespace, plt_namespace)
            resp = requests.post(request_url, json=msg)
            self.logger.debug("Post to '{}' done, status : {}".format(request_url, resp.status_code))
            self.logger.debug("Response Text : {}".format(resp.text))
            return resp.status_code == 200 or resp.status_code == 201
        except requests.exceptions.RequestException as err:
            self.logger.error("Error : {}".format(err))
            return format(err)
        except requests.exceptions.HTTPError as errh:
            self.logger.error("Http Error: {}".format(errh))
            return errh
        except requests.exceptions.ConnectionError as errc:
            self.logger.error("Error Connecting: {}".format(errc))
            return errc
        except requests.exceptions.Timeout as errt:
            self.logger.error("Timeout Error: {}".format(errt))
            return errt

    def register(self):
        """
        function that registers the xapp

        Returns
        -------
        bool
            whether or not the xapp is registered
        """
        hostname = os.environ.get("HOSTNAME")
        xappname = self._config_data.get("name")
        xappversion = self._config_data.get("version")
        pltnamespace = os.environ.get("PLT_NAMESPACE")
        if pltnamespace is None:
            pltnamespace = Constants.DEFAULT_PLT_NS
        self.logger.debug("config details hostname : {} xappname: {} xappversion : {} pltnamespace : {}".format(
            hostname, xappname, xappversion, pltnamespace))

        http_endpoint = self.get_service(hostname, Constants.SERVICE_HTTP)
        rmr_endpoint = self.get_service(hostname, Constants.SERVICE_RMR)
        if http_endpoint == "" or rmr_endpoint == "":
            self.logger.error("Couldn't resolve service endpoints: http_endpoint={} rmr_endpoint={}".format(
                http_endpoint, rmr_endpoint))
            return False

        self.logger.debug(
            "config details hostname : {} xappname: {} xappversion : {} pltnamespace : {} http_endpoint : {} "
            "rmr_endpoint : {} configpath : {}".format(hostname, xappname, xappversion, pltnamespace,
                                                       http_endpoint, rmr_endpoint,
                                                       self._config_data.get("CONFIG_PATH")))
        request_string = {
            "appName": hostname,
            "appVersion": xappversion,
            "configPath": "",
            "appInstanceName": xappname,
            "httpEndpoint": http_endpoint,
            "rmrEndpoint": rmr_endpoint,
            "config": json.dumps(self._config_data)
        }
        self.logger.info("REQUEST STRING :{}".format(request_string))
        return self.do_post(pltnamespace, Constants.REGISTER_PATH, request_string)

    def registerXapp(self):
        """
        registers the xapp
        """
        retries = 5
        while self._keep_registration and retries > 0:
            time.sleep(2)
            retries = retries - 1
            # checking for rmr/sdl/xapp health
            healthy = self.healthcheck()
            if not healthy:
                self.logger.warning("Application='{}' is not ready yet, waiting ...".format(
                    self._config_data.get("name")))
                continue

            self.logger.debug("Application='{}' is now up and ready, continue with registration ...".format(
                self._config_data.get("name")))
            if self.register():
                self.logger.debug("Registration done, proceeding with startup ...")
                break

    def deregister(self):
        """
        Deregisters the xapp

        Returns
        -------
        bool
            whether or not the xapp is deregistered
        """
        healthy = self.healthcheck()
        if not healthy:
            self.logger.error("RMR or SDL or xapp == Not Healthy")
            return None
        if self._config_data is None:
            return None
        name = os.environ.get("HOSTNAME")
        xappname = self._config_data.get("name")
        pltnamespace = os.environ.get("PLT_NAMESPACE")
        if pltnamespace is None:
            pltnamespace = Constants.DEFAULT_PLT_NS
        request_string = {
            "appName": name,
            "appInstanceName": xappname,
        }
        return self.do_post(pltnamespace, Constants.DEREGISTER_PATH, request_string)

    def xapp_shutdown(self):
        """
        Deregisters the xapp while shutting down
        """
        self.deregister()
        self.logger.debug("Wait for xapp to get unregistered")
        time.sleep(10)

    # Public rmr methods

    def rmr_get_messages(self):
        """
        Returns a generator iterable over all items in the queue that have not
        yet been read by the client xapp. Each item is a tuple (S, sbuf) where
        S is a message summary dict and sbuf is the raw message. The caller
        MUST call rmr.rmr_free_msg(sbuf) when finished with each sbuf to
        prevent memory leaks!
        """
        while not self._rmr_loop.rcv_queue.empty():
            (summary, sbuf) = self._rmr_loop.rcv_queue.get()
            yield (summary, sbuf)

    def rmr_send(self, payload, mtype, retries=100):
        """
        Allocates a buffer, sets payload and mtype, and sends

        Parameters
        ----------
        payload: bytes
            payload to set
        mtype: int
            message type
        retries: int (optional)
            Number of times to retry at the application level before excepting RMRFailure

        Returns
        -------
        bool
            whether or not the send worked after retries attempts
        """
        sbuf = rmr.rmr_alloc_msg(vctx=self._mrc, size=len(payload), payload=payload,
                                 gen_transaction_id=True, mtype=mtype)

        for _ in range(retries):
            sbuf = rmr.rmr_send_msg(self._mrc, sbuf)
            if sbuf.contents.state == 0:
                self.rmr_free(sbuf)
                return True

        self.rmr_free(sbuf)
        return False

    def rmr_rts(self, sbuf, new_payload=None, new_mtype=None, retries=100):
        """
        Allows the xapp to return to sender, possibly adjusting the payload and
        message type before doing so. This does NOT free the sbuf for the caller
        as the caller may wish to perform multiple rts per buffer. The client
        needs to free.

        Parameters
        ----------
        sbuf: ctypes c_void_p
            Pointer to an rmr message buffer
        new_payload: bytes (optional)
            New payload to set
        new_mtype: int (optional)
            New message type (replaces the received message)
        retries: int (optional, default 100)
            Number of times to retry at the application level

        Returns
        -------
        bool
            whether or not the send worked after retries attempts
        """
        for _ in range(retries):
            sbuf = rmr.rmr_rts_msg(self._mrc, sbuf, payload=new_payload, mtype=new_mtype)
            if sbuf.contents.state == 0:
                return True

        self.logger.warning("RTS Failed! Summary: {}".format(rmr.message_summary(sbuf)))
        return False

    def rmr_free(self, sbuf):
        """
        Frees an rmr message buffer after use

        Note: this does not need to be a class method, self is not used.
        However if we break it out as a function we need a home for it.

        Parameters
        ----------
        sbuf: ctypes c_void_p
            Pointer to an rmr message buffer
        """
        rmr.rmr_free_msg(sbuf)

    # Convenience (pass-thru) functions for invoking SDL.

    def sdl_set(self, namespace, key, value, usemsgpack=True):
        """
        ** Deprecation warning ** Will be removed in a future release.

        Stores a key-value pair to SDL, optionally serializing the value to
        bytes using msgpack.

        Parameters
        ----------
        namespace: string
            SDL namespace
        key: string
            SDL key
        value:
            Object or byte array to store. See the `usemsgpack` parameter.
        usemsgpack: boolean (optional, default is True)
            Determines whether the value is serialized using msgpack before storing.
            If usemsgpack is True, the msgpack function `packb` is invoked on the
            value to yield a byte array that is then sent to SDL. Stated differently,
            if usemsgpack is True, the value can be anything that is serializable
            by msgpack. If usemsgpack is False, the value must be bytes.
        """
        self.sdl.set(namespace, key, value, usemsgpack)

    def sdl_get(self, namespace, key, usemsgpack=True):
        """
        ** Deprecation warning ** Will be removed in a future release.

        Gets the value for the specified namespace and key from SDL,
        optionally deserializing stored bytes using msgpack.

        Parameters
        ----------
        namespace: string
            SDL namespace
        key: string
            SDL key
        usemsgpack: boolean (optional, default is True)
            If usemsgpack is True, the byte array stored by SDL is deserialized
            using msgpack to yield the original object that was stored.
            If usemsgpack is False, the byte array stored by SDL is returned
            without further processing.

        Returns
        -------
        Value
            See the usemsgpack parameter for an explanation of the returned value type.
            Answers None if the key is not found.
        """
        return self.sdl.get(namespace, key, usemsgpack)

    def sdl_find_and_get(self, namespace, prefix, usemsgpack=True):
        """
        ** Deprecation warning ** Will be removed in a future release.

        Gets all key-value pairs in the specified namespace with keys that
        start with the specified prefix, optionally deserializing stored
        bytes using msgpack.

        Parameters
        ----------
        namespace: string
            SDL namespace
        prefix: string
            the key prefix
        usemsgpack: boolean (optional, default is True)
            If usemsgpack is True, the byte array stored by SDL is deserialized
            using msgpack to yield the original value that was stored.
            If usemsgpack is False, the byte array stored by SDL is returned
            without further processing.

        Returns
        -------
        Dictionary of key-value pairs
            Each key has the specified prefix. The value object (its type)
            depends on the usemsgpack parameter, but is either a Python object
            or raw bytes as discussed above. Answers an empty dictionary if no
            keys matched the prefix.
        """
        return self.sdl.find_and_get(namespace, prefix, usemsgpack)

    def sdl_delete(self, namespace, key):
        """
        ** Deprecation warning ** Will be removed in a future release.

        Deletes the key-value pair with the specified key in the specified namespace.

        Parameters
        ----------
        namespace: string
            SDL namespace
        key: string
            SDL key
        """
        self.sdl.delete(namespace, key)

    def _get_rnib_info(self, node_type):
        """
        Since the difference between get_list_gnb_ids and get_list_enb_ids is only
        the node-type, this function is extracted from the duplicated logic.

        Parameters
        ----------
        node_type: string
            Type of node. This is an EnumDescriptor.
            Available node types:
            - UNKNOWN
            - ENB
            - GNB

        Returns
        -------
        List: (NbIdentity)

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        nbid_strings: Set[bytes] = self.sdl.get_members(sdl_namespaces.E2_MANAGER, node_type, usemsgpack=False)
        ret: List[NbIdentity] = []
        for nbid_string in nbid_strings:
            nbid = NbIdentity()
            nbid.ParseFromString(nbid_string)
            ret.append(nbid)
        return ret

    def get_list_gnb_ids(self):
        """
        Retrieves the list of gNodeB identity entities.

        gNodeB information is stored in SDL by E2Manager. Therefore, gNodeB
        information is stored in SDL's `e2Manager` namespace as serialized protobuf.

        Returns
        -------
        List: (NbIdentity)

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        return self._get_rnib_info(Node.Type.Name(Node.GNB))

    def get_list_enb_ids(self):
        """
        Retrieves the list of eNodeB identity entities.

        eNodeB information is stored in SDL by E2Manager. Therefore, eNodeB
        information is stored in SDL's `e2Manager` namespace as serialized protobuf.

        Returns
        -------
        List: (NbIdentity)

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        return self._get_rnib_info(Node.Type.Name(Node.ENB))

    """
    The following RNIB methods are made to be in line with the Go-based RNIB methods.
    Method names are the same as in repository:
    gerrit.o-ran-sc.org/r/ric-plt/xapp-frame/pkg/rnib
    """

    def GetNodeb(self, inventoryName):
        """
        Returns nodeb info.

        In RNIB, the SDL key is defined the following way: RAN:<inventoryName>

        Parameters
        ----------
        inventoryName: string

        Returns
        -------
        NodebInfo()

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        nbid_string: Set[bytes] = self.sdl_get(sdl_namespaces.E2_MANAGER, 'RAN:' + inventoryName, usemsgpack=False)
        if nbid_string is not None:
            nbinfo = pb_nbi.NodebInfo()
            nbinfo.ParseFromString(nbid_string)
            return nbinfo
        return None

    def GetNodebByGlobalNbId(self, nodeType, plmnId, nbId):
        """
        Returns nodeb identity based on type, plmn id and node id.

        In RNIB, the SDL key is defined the following way: <nodeType>:<plmnId>:<nbId>

        Parameters
        ----------
        nodeType: string
        plmnId: string
        nbId: string

        Returns
        -------
        NbIdentity()

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        nbid_string: Set[bytes] = self.sdl_get(sdl_namespaces.E2_MANAGER,
                                               nodeType + ':' + plmnId + ':' + nbId, usemsgpack=False)
        if nbid_string is not None:
            nbid = NbIdentity()
            nbid.ParseFromString(nbid_string)
            return nbid
        return None

    def GetCellList(self, inventoryName):
        """
        Returns the nodeb served cell list from the saved node data.

        In RNIB, the SDL key is defined the following way: RAN:<inventoryName>

        Parameters
        ----------
        inventoryName: string

        Returns
        -------
        ServedCellInfo() in case of ENB
        ServedNRCell() in case of GNB

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        nodeb = self.GetNodeb(inventoryName)
        if nodeb is not None:
            if nodeb.HasField('enb'):
                return nodeb.enb.served_cells
            elif nodeb.HasField('gnb'):
                return nodeb.gnb.served_nr_cells
        return None

    def GetCellById(self, cell_type, cell_id):
        """
        Returns cell info by cell type and id.

        In RNIB, the SDL keys are defined based on the cell type:
        ENB type: CELL:<cell_id>
        GNB type: NRCELL:<cell_id>

        Parameters
        ----------
        cell_type: string
            Available cell types:
            - ENB
            - GNB
        cell_id: string

        Returns
        -------
        Cell()

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        cellstr = None
        if cell_type == pb_cell.Cell.Type.Name(pb_cell.Cell.LTE_CELL):
            cellstr = 'CELL'
        elif cell_type == pb_cell.Cell.Type.Name(pb_cell.Cell.NR_CELL):
            cellstr = 'NRCELL'
        if cellstr is not None:
            cell_string: Set[bytes] = self.sdl_get(sdl_namespaces.E2_MANAGER, cellstr + ':' + cell_id, usemsgpack=False)
            if cell_string is not None:
                cell = pb_cell.Cell()
                cell.ParseFromString(cell_string)
                return cell
        return None

    def GetListNodebIds(self):
        """
        Returns both ENB and GNB NbIdentity lists.

        Returns
        -------
        List: (NbIdentity)

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        nlist1 = self._get_rnib_info(Node.Type.Name(Node.ENB))
        nlist2 = self._get_rnib_info(Node.Type.Name(Node.GNB))
        for n in nlist2:
            nlist1.append(n)
        return nlist1

    def GetCell(self, inventoryName, pci):
        """
        Returns cell info using pci.

        In RNIB, the SDL key is defined the following way: PCI:<inventoryName>:<pci hex val>

        Parameters
        ----------
        inventoryName: string
        pci: int

        Returns
        -------
        Cell()

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        cell_string: Set[bytes] = self.sdl_get(sdl_namespaces.E2_MANAGER,
                                               'PCI:{0:s}:{1:02x}'.format(inventoryName, pci), usemsgpack=False)
        if cell_string is not None:
            cell = pb_cell.Cell()
            cell.ParseFromString(cell_string)
            return cell
        return None

    def GetRanFunctionDefinition(self, inventoryName, ran_function_oid):
        """
        Returns the GNB ran function definition list based on the ran_function_oid.

        In RNIB, the SDL key is defined the following way: RAN:<inventoryName>

        Parameters
        ----------
        inventoryName: string
        ran_function_oid: int

        Returns
        -------
        array of ran_function_definition matching the ran_function_oid

        Raises
        ------
        SdlTypeError: If function's argument is of an inappropriate type.
        NotConnected: If SDL is not connected to the backend data storage.
        RejectedByBackend: If backend data storage rejects the request.
        BackendError: If the backend data storage fails to process the request.
        """
        nodeb = self.GetNodeb(inventoryName)
        if nodeb is not None:
            if nodeb.HasField('gnb') and nodeb.gnb.ran_functions is not None:
                ranFDList = []
                for rf in nodeb.gnb.ran_functions:
                    if rf.ran_function_oid == ran_function_oid:
                        ranFDList.append(rf.ran_function_definition)
                return ranFDList
        return None

    def healthcheck(self):
        """
        this needs to be understood how this is supposed to work
        """
        return self._rmr_loop.healthcheck() and self.sdl.healthcheck()

    # Convenience function for discovering config change events

    def config_check(self, timeout=0):
        """
        Checks the watcher for configuration-file events. The watcher
        prerequisites and event mask are documented in __init__().

        Parameters
        ----------
        timeout: int (optional)
            Number of seconds to wait for a configuration-file event, default 0.

        Returns
        -------
        List of Events, possibly empty
            An event is a tuple with objects wd, mask, cookie and name.
            For example::

                Event(wd=1, mask=1073742080, cookie=0, name='foo')
        """
        if not self._inotify:
            return []
        events = self._inotify.read(timeout=timeout)
        return list(events)

    def stop(self):
        """
        cleans up and stops the xapp rmr thread (currently). This is critical
        for unit testing as pytest will never return if the thread is running.

        TODO: can we register a ctrl-c handler so this gets called on ctrl-c?
        Because currently two ctrl-c are needed to stop.
        """
        if self._appthread is not None:
            self._appthread.join()

        self.xapp_shutdown()

        self._rmr_loop.stop()
from ricxappframe.xapp_sdl import SDLWrapper

from a1.exceptions import PolicyTypeNotFound, PolicyInstanceNotFound, PolicyTypeAlreadyExists, PolicyTypeIdMismatch, CantDeleteNonEmptyType

# constants
INSTANCE_DELETE_NO_RESP_TTL = int(os.environ.get("INSTANCE_DELETE_NO_RESP_TTL", 5))
INSTANCE_DELETE_RESP_TTL = int(os.environ.get("INSTANCE_DELETE_RESP_TTL", 5))
USE_FAKE_SDL = bool(distutils.util.strtobool(os.environ.get("USE_FAKE_SDL", "False")))
A1NS = "A1m_ns"
TYPE_PREFIX = "a1.policy_type."
INSTANCE_PREFIX = "a1.policy_instance."
METADATA_PREFIX = "a1.policy_inst_metadata."
HANDLER_PREFIX = "a1.policy_handler."

mdc_logger = Logger(name=__name__)
if USE_FAKE_SDL:
    mdc_logger.debug("Using fake SDL")
SDL = SDLWrapper(use_fake_sdl=USE_FAKE_SDL)


# Internal helpers


def _generate_type_key(policy_type_id):
    """
    generate a key for a policy type
    """
    return "{0}{1}".format(TYPE_PREFIX, policy_type_id)


def _generate_instance_key(policy_type_id, policy_instance_id):