def __init__(self, host, port, logger=None):
    """Begin watching the etcd v2 'resource' keyspace on host:port for changes."""
    # Change events are delivered to consumers via this queue.
    self.queue = gevent.queue.Queue()
    self._base_url = "http://{}:{}/v2/keys/resource/".format(host, port)
    # etcd v2 long-poll: ?wait=true blocks until any key under the tree changes.
    self._long_poll_url = self._base_url + "?recursive=true&wait=true"
    # Start the watcher greenlet from etcd index 0.
    self._greenlet = gevent.spawn(self._wait_event, 0)
    self._alive = True
    self._logger = Logger("etcd_watch", logger)
def start(self):
    """Bring up the mDNS engine controllers; calling again while running is a no-op."""
    if self.running:
        return
    self.logger = Logger('mdns-engine')
    self.subscriptionController = MDNSSubscriptionController()
    self.registrationController = MDNSRegistrationController()
    self.interfaceController = MDNSInterfaceController(self.logger)
    self.running = True
def __init__(self, interactive=False):
    """Construct the node facade service; exits immediately when no host IP is known."""
    self.logger = Logger("facade", None)
    if HOST == "":
        self.logger.writeFatal("Unable to start facade due to lack of connectivity")
        sys.exit(1)
    self.running = False
    self.httpServer = None
    self.interface = None
    self.interactive = interactive
    self.registry = None
    self.registry_cleaner = None
    self.node_id = None
    self.mdns = MDNSEngine()
    # Unique mDNS suffix for this host/process so multiple nodes don't collide.
    self.mdnsname_suffix = '_' + str(HOSTNAME) + "_" + str(getpid())
    # Resource type -> mDNS TXT version-counter key.
    self.mappings = {"device": "ver_dvc", "flow": "ver_flw", "source": "ver_src",
                     "sender": "ver_snd", "receiver": "ver_rcv", "self": "ver_slf"}
    self.mdns_updater = MDNSUpdater(self.mdns, "_nmos-node._tcp",
                                    "node" + self.mdnsname_suffix, self.mappings,
                                    PORT, self.logger,
                                    txt_recs={"api_ver": "v1.0,v1.1,v1.2",
                                              "api_proto": "http"})
    self.aggregator = Aggregator(self.logger, self.mdns_updater)
def __init__(self, logger=None, mdns_updater=None, auth_registry=None):
    """Set up the aggregator proxy: local state mirror, auth plumbing, worker greenlets."""
    self.logger = Logger("aggregator_proxy", logger)
    self.mdnsbridge = IppmDNSBridge(logger=self.logger)
    self.aggregator_apiversion = None
    self.service_type = None
    self._set_api_version_and_srv_type(_config.get('nodefacade').get('NODE_REGVERSION'))
    self.aggregator = None
    # Registration must go parents-first so the registry sees a consistent tree.
    self.registration_order = ["device", "source", "flow", "sender", "receiver"]
    self._mdns_updater = mdns_updater
    # '_node_data' is a local mirror of aggregated items.
    self._node_data = {'node': None, 'registered': False,
                       'entities': {'resource': {}}}
    self._running = True
    self._aggregator_list_stale = True
    self._aggregator_failure = False  # set when the aggregator returns an unexpected error
    self._backoff_active = False
    self._backoff_period = 0
    self.auth_registrar = None        # registers this node with the Auth Server
    self.auth_registry = auth_registry  # tracks locally registered OAuth clients
    self.auth_client = None           # OAuth client used for token requests
    self._reg_queue = gevent.queue.Queue()
    self.main_thread = gevent.spawn(self._main_thread)
    self.queue_thread = gevent.spawn(self._process_queue)
def __init__(self, interactive=False):
    """Create one Facade per supported API version and load the service config."""
    self.facades = {ver: Facade("{}/{}".format(APINAME, ver)) for ver in APIVERSIONS}
    self.logger = Logger("grouping", None)
    self.running = False
    self.httpServer = None
    with open(CONFIG_PATH) as config_fp:
        self.config = json.load(config_fp)
def __init__(self, registry, identifier, logger=None, interval=INTERVAL):
    """Schedule periodic garbage collection against `registry`.

    interval -- seconds between checks / collections; 0 means 'never check'.
    """
    self.registry = registry
    self.logger = Logger("garbage_collect", logger)
    self.identifier = identifier
    if interval > 0:
        gevent.spawn_later(interval, self.garbage_collect)
def __init__(self, logger=None):
    """Wire up the connection-management service: facade client plus HTTP server."""
    self.running = False
    from nmoscommon.logger import Logger
    from nmosnode.facade import Facade
    self.logger = Logger("conmanage")
    # NOTE(review): this warning fires unconditionally — looks like a leftover
    # from a try/except import fallback; confirm before relying on it.
    self.logger.writeWarning("Could not find ipppython facade")
    self.facade = Facade("{}/{}".format(CONN_APINAME, CONN_APIVERSIONS[-1]),
                         address="ipc:///tmp/ips-nodefacade",
                         logger=self.logger)
    self.logger.writeDebug("Running Connection Management Service")
    self.httpServer = HttpServer(ConnectionManagementAPI, WS_PORT, '0.0.0.0',
                                 api_args=[self.logger])
def __init__(self, logger=None):
    """Query service state: config, mDNS engine and the HTTP server."""
    self.running = False
    self.logger = Logger("regquery")
    self.logger.writeDebug('Running QueryService')
    self.config = {"priority": 0}
    self._load_config()
    self.mdns = MDNSEngine()
    self.httpServer = HttpServer(QueryServiceAPI, WS_PORT, '0.0.0.0',
                                 api_args=[self.logger])
def __init__(self, resources, aggregator, mdns_updater, node_id, node_data, logger=None):
    """Track node resources on behalf of the facade.

    resources -- iterable of resource type names this registry permits
    node_data -- dict describing the node; must contain an "interfaces" key,
                 i.e. conform to the latest supported API version

    Raises ValueError when node_data lacks "interfaces".
    """
    self.permitted_resources = resources
    self.services = {}
    self.clocks = {"clk0": {"name": "clk0", "ref_type": "internal"}}
    self.aggregator = aggregator
    self.mdns_updater = mdns_updater
    self.node_id = node_id
    # Validate explicitly rather than with `assert`, which is stripped under -O.
    if "interfaces" not in node_data:
        raise ValueError("node_data must contain 'interfaces' (latest supported API version)")
    self.node_data = node_data
    self.logger = Logger("facade_registry", logger)
def __init__(self, logger=None):
    """Query service state with HTTPS mode support.

    https_mode (HTTPS under test only at present):
      enabled  -- HTTPS only in all URLs and mDNS adverts
      disabled -- HTTP only in all URLs and mDNS adverts
      mixed    -- HTTP in all URLs, plus an HTTPS advert for discovery of this API only
    """
    self.running = False
    self.logger = Logger("regquery")
    self.logger.writeDebug('Running QueryService')
    self.config = {"priority": 100, "https_mode": "disabled"}
    self._load_config()
    self.mdns = MDNSEngine()
    self.httpServer = HttpServer(QueryServiceAPI, WS_PORT, '0.0.0.0',
                                 api_args=[self.logger, self.config])
def __init__(self, logger=None, api_version="v1.0"):
    """Attach query sockets and start watching the registry backend for changes."""
    self.logger = Logger("regquery", _parent=logger)
    self.query_sockets = QuerySocketsCommon(WS_PORT, logger=self.logger)
    # Design choice: we could watch only specific top levels (if flat); for now
    # we watch all data, which may be less than ideal.
    self.watcher = ChangeWatcher(reg['host'], reg['port'],
                                 handler=self, logger=self.logger)
    self.watcher.start()
    self.api_version = api_version
def __init__(self, srv_type, address="ipc:///tmp/ips-nodefacade", logger=None):
    """Proxy for talking to the node facade over an IPC socket."""
    self.logger = Logger("facade_proxy", logger)
    self.ipc = None
    self.srv_registered = False  # is the service registered with the facade?
    self.reregister = False      # do resources need re-registering?
    self.address = address
    self.srv_type = srv_type.lower()
    self.srv_type_urn = "urn:x-nmos-opensourceprivatenamespace:service:" + self.srv_type
    self.pid = os.getpid()
    self.resources = {}
    self.timeline = {}
    self.href = None
    self.proxy_path = None
    self.lock = Lock()  # guards access to the IPC socket
def __init__(self, interactive=False):
    """Node facade service; P2P mDNS announcement is optional and HTTPS-aware."""
    self.logger = Logger("facade", None)
    if HOST == "":
        self.logger.writeFatal("Unable to start facade due to lack of connectivity")
        sys.exit(1)
    self.running = False
    self.httpServer = None
    self.interface = None
    self.interactive = interactive
    self.registry = None
    self.registry_cleaner = None
    self.node_id = None
    self.mdns = MDNSEngine()
    # Resource type -> mDNS TXT version-counter key.
    self.mappings = {"device": "ver_dvc", "flow": "ver_flw", "source": "ver_src",
                     "sender": "ver_snd", "receiver": "ver_rcv", "self": "ver_slf"}
    self.mdns_updater = None
    if ENABLE_P2P:
        # Advertise over HTTPS only when HTTPS mode is fully enabled.
        if HTTPS_MODE == "enabled":
            sd_port, sd_proto = DNS_SD_HTTPS_PORT, "https"
        else:
            sd_port, sd_proto = DNS_SD_HTTP_PORT, "http"
        self.mdns_updater = MDNSUpdater(self.mdns, DNS_SD_TYPE, DNS_SD_NAME,
                                        self.mappings, sd_port, self.logger,
                                        txt_recs={"api_ver": ",".join(NODE_APIVERSIONS),
                                                  "api_proto": sd_proto})
    self.aggregator = Aggregator(self.logger, self.mdns_updater)
def setUp(self):
    """Fresh SdpManager under test with a mock sender API and callback capture state."""
    self.logger = Logger("Connection Management Tests")
    self.interface = MockSenderAPI()
    self.dut = SdpManager(self.logger, self.interface)
    self.callbackCalled = False
    self.callbackParams = []
    self.callbackReturn = None
def setUpClass(cls):
    """Runs before each test"""
    cls.logger = Logger("conmanage")
    cls.mockApi = MockSenderAPI()
    cls.mockReceiver = MockSenderAPI()
    cls.activator = MockActivator()
    cls.sdpManager = cls.mockReceiver.transportManagers[0]
    # Start the connection-management API server and wait until it is up.
    cls.httpServer = HttpServer(ConnectionManagementAPI, SENDER_WS_PORT, '0.0.0.0',
                                api_args=[cls.logger])
    cls.httpServer.start()
    while not cls.httpServer.started.is_set():
        cls.httpServer.started.wait()
    cls.dut = cls.httpServer.api
    # Fixed UUIDs so URLs are stable across the test run.
    cls.senderUUID = "8358af5c-6d82-4ef8-b992-13ed40a7246d"
    cls.receiverUUID = "076f7e9d-8a93-42cc-9506-fd57505ccc89"
    cls.dut.addSender(cls.mockApi, cls.senderUUID)
    cls.dut.addReceiver(cls.mockReceiver, cls.receiverUUID)
    cls.baseUrl = "http://127.0.0.1:" + str(SENDER_WS_PORT)
    cls.deviceRoot = cls.baseUrl + '/' + DEVICE_ROOT
    cls.senderRoot = cls.deviceRoot + "single/senders/" + cls.senderUUID
    cls.receiverRoot = cls.deviceRoot + "single/receivers/" + cls.receiverUUID
    cls.maxDiff = None
def __init__(self, logger=None, interactive=False):
    """Aggregation service state.

    Fix: the logger is now created before _load_config() runs. The sibling
    services in this codebase report _load_config failures through
    self.logger, which previously did not exist yet at that point.
    """
    self.logger = Logger("aggregation", logger)
    self.config = {"priority": 0}
    self._load_config()
    self.running = False
    self.httpServer = None
    self.interactive = interactive
    self.mdns = MDNSEngine()
class GroupingAPI(WebAPI):
    """Web API for the grouping service: version roots plus a selfcheck endpoint."""

    def __init__(self):
        self.logger = Logger("grouping")
        self.config = {}
        # The config file is optional; fall back to an empty config on any failure.
        try:
            with open(CONFIG_PATH, 'rt') as config_fp:
                self.config = json.load(config_fp)
        except Exception as e:
            self.logger.writeInfo("Did not read config file: {}".format(e))
        super(GroupingAPI, self).__init__(oauth_config=self.config.get('oauth'))
        self.api_v2_0 = v2_0.Routes(self.logger, self.config.get('oauth'), self.app)
        self.add_routes(self.api_v2_0,
                        basepath="/{}/{}/v2.0".format(APINAMESPACE, APINAME))
        self.app.root_path = get_root_path("ipsgrouping")

    @secure_route('/')
    def root(self):
        return [APINAMESPACE + "/"]

    @secure_route('/' + APINAMESPACE + '/')
    def namespaceroot(self):
        return [APINAME + "/"]

    @secure_route('/' + APINAMESPACE + '/' + APINAME + "/")
    def nameroot(self):
        return [v + "/" for v in APIVERSIONS]

    @secure_route(APIBASE + 'selfcheck/', methods=['GET'])
    def selfcheck(self):
        """ Return diagnostics as JSON, and an overall code: 200 if "all ok". 400 otherwise. """
        result = selfcheck.check(self._sfq)
        code = 200 if result.get('passed') else 400
        return (code, result)
def __init__(self, mdns_bridge, logger=None, apiversion=QUERY_APIVERSION, priority=None):
    """Resolve a usable query API URL via the mDNS bridge.

    Re-queries while the returned href is an IPv6 link-local address
    (TODO FIXME: remove once IPv6 work is complete and Python can use link
    local v6 correctly), giving up after 20 retries.
    """
    self.mdns_bridge = mdns_bridge
    self._query_url = self.mdns_bridge.getHref(QUERY_MDNSTYPE, priority)
    # Fix: renamed the retry counter from `iter`, which shadowed the builtin.
    attempts = 0
    while "fe80:" in self._query_url:
        self._query_url = self.mdns_bridge.getHref(QUERY_MDNSTYPE, priority)
        attempts += 1
        if attempts > 20:
            break
    self.logger = Logger("nmoscommon.query", logger)
    self.apiversion = apiversion
    self.priority = priority
def __init__(self, interactive=False):
    """Node facade service with OAuth-aware mDNS announcement."""
    self.logger = Logger("facade", None)
    if HOST == "":
        self.logger.writeFatal("Unable to start facade due to lack of connectivity")
        sys.exit(1)
    self.running = False
    self.httpServer = None
    self.interface = None
    self.interactive = interactive
    self.registry = None
    self.registry_cleaner = None
    self.node_id = None
    self.mdns = MDNSEngine()
    # Resource type -> mDNS TXT version-counter key.
    self.mappings = {"device": "ver_dvc", "flow": "ver_flw", "source": "ver_src",
                     "sender": "ver_snd", "receiver": "ver_rcv", "self": "ver_slf"}
    self.mdns_updater = None
    self.auth_registry = AuthRegistry(app=None, scope=ALLOWED_SCOPE)
    self.protocol = PROTOCOL
    # Advertise on the port matching the active protocol.
    self.dns_sd_port = DNS_SD_HTTPS_PORT if PROTOCOL == "https" else DNS_SD_HTTP_PORT
    if ENABLE_P2P:
        self.mdns_updater = MDNSUpdater(self.mdns, DNS_SD_TYPE, DNS_SD_NAME,
                                        self.mappings, self.dns_sd_port, self.logger,
                                        txt_recs=self._mdns_txt(NODE_APIVERSIONS,
                                                                self.protocol,
                                                                OAUTH_MODE))
    self.aggregator = Aggregator(self.logger, self.mdns_updater, self.auth_registry)
def __init__(self, registry, logger):
    """Expose every IPC-marked method of this class (and its bases) on the host."""
    self.host = Host(ADDRESS)
    self.registry = registry
    self.logger = Logger("facade_interface", logger)

    def getbases(cl):
        # Transitive base classes; duplicates are harmless here.
        bases = list(cl.__bases__)
        for base in cl.__bases__:
            bases += getbases(base)
        return bases

    # Register any callable attribute carrying an `ipc_method` marker.
    for cl in [self.__class__] + getbases(self.__class__):
        for name in cl.__dict__.keys():
            value = getattr(self, name)
            if callable(value) and hasattr(value, "ipc_method"):
                self.host.ipcmethod(name)(value)
def __init__(self):
    """Load optional JSON config and mount the v2.0 routes on the web app."""
    self.logger = Logger("grouping")
    self.config = {}
    try:
        with open(CONFIG_PATH, 'rt') as config_fp:
            self.config = json.load(config_fp)
    except Exception as e:
        self.logger.writeInfo("Did not read config file: {}".format(e))
    super(GroupingAPI, self).__init__(oauth_config=self.config.get('oauth'))
    self.api_v2_0 = v2_0.Routes(self.logger, self.config.get('oauth'), self.app)
    self.add_routes(self.api_v2_0,
                    basepath="/{}/{}/v2.0".format(APINAMESPACE, APINAME))
    self.app.root_path = get_root_path("ipsgrouping")
def setUp(self):
    """RtpSender under test with a fixed set of candidate source IPs."""
    self.logger = Logger("Connection Management Tests")
    self.dut = RtpSender(self.logger, 2)
    self.dut.supportedSourceIPs = ['127.0.0.1', "::1", "192.168.1.1", "192.168.0.1"]
    self.dut.schemaPath = "../share/ipp-connectionmanagement/schemas/"
    self.hadCallback = False
    self.callbackArgs = None
    self.callbackReturn = None
    self.maxDiff = None
def __init__(self, logger=None, mdns_updater=None):
    """Legacy aggregator proxy: local registration mirror plus worker greenlets."""
    self.logger = Logger("aggregator_proxy", logger)
    self.mdnsbridge = IppmDNSBridge(logger=self.logger)
    self.aggregator = ""
    # Parents-first ordering for registration.
    self.registration_order = ["device", "source", "flow", "sender", "receiver"]
    self._mdns_updater = mdns_updater
    # 'registered' is a local mirror of aggregated items; helper methods
    # elsewhere in the class manipulate it.
    self._registered = {'node': None, 'registered': False,
                        'entities': {'resource': {}}}
    self._running = True
    self._reg_queue = gevent.queue.Queue()
    self.heartbeat_thread = gevent.spawn(self._heartbeat)
    self.queue_thread = gevent.spawn(self._process_queue)
class MDNSInterface(object):
    """Wraps a Zeroconf instance bound to a single local interface IP."""

    def __init__(self, interfaceIp):
        self.logger = Logger('MDNSInterface')
        self.ip = interfaceIp
        self._establish_interface()

    def _establish_interface(self):
        # Bind Zeroconf to exactly this interface; translate the socket-level
        # failure into the service's own exception type.
        try:
            self.zeroconf = Zeroconf([self.ip])
        except socket.error:
            msg = "Could not find interface with IP {}".format(self.ip)
            self.logger.writeError(msg)
            raise InterfaceNotFoundException(msg)

    def registerService(self, info):
        # Thin delegation to zeroconf.
        self.zeroconf.register_service(info)

    def unregisterService(self, info):
        self.zeroconf.unregister_service(info)

    def close(self):
        self.zeroconf.close()
class GarbageCollect(object):
    """Periodically removes dead nodes and orphaned resources from the registry."""

    # Maps each child resource type to its possible parents as
    # (parent_type, foreign_key_field) pairs. Where several entries exist
    # (flows), they are ordered strongest -> weakest for backward compatibility.
    parent_tab = {
        'devices': [('nodes', 'node_id')],
        'senders': [('devices', 'device_id')],
        'receivers': [('devices', 'device_id')],
        'sources': [('devices', 'device_id')],
        'flows': [('devices', 'device_id'), ('sources', 'source_id')]
    }

    def __init__(self, registry, identifier, logger=None, interval=INTERVAL):
        """ interval Number of seconds between checks / collections. An interval of '0' means 'never check'. """
        self.registry = registry
        self.logger = Logger("garbage_collect", logger)
        self.identifier = identifier
        if interval > 0:
            gevent.spawn_later(interval, self.garbage_collect)

    def garbage_collect(self):
        # Check to see if garbage collection hasn't been done recently (by another aggregator)
        # Uses ETCD's prevExist=false function
        # See https://github.com/coreos/etcd/blob/master/Documentation/api.md#atomic-compare-and-swap
        try:
            flag = self.registry.put_garbage_collection_flag(
                host=self.identifier, ttl=LOCK_TIMEOUT)
            if flag.status_code != 201:
                # 201 means we created the flag; anything else means another
                # collector holds it.
                self.logger.writeDebug(
                    "Not collecting - another collector has recently collected"
                )
                return
            # Kick off a collection with a specified timeout.
            try:
                with gevent.Timeout(TIMEOUT, TooLong):
                    self._collect()
            finally:
                self.logger.writeDebug("remove flag")
                self._remove_flag()
        except Exception as e:
            self.logger.writeError(
                "Could not write garbage collect flag: {}".format(e))
        finally:
            # Always schedule another
            gevent.spawn_later(INTERVAL, self.garbage_collect)
            self.logger.writeDebug("scheduled...")

    def _collect(self):
        # One collection pass: delete resources whose parent chain no longer
        # reaches a live node.
        try:
            self.logger.writeDebug("Collecting: {}".format(self.identifier))
            # create list of nodes still alive
            alive_nodes = []
            health_dict = self.registry.get_healths()
            for h in health_dict.get('/health', {}).keys():
                node_name = h.split('/')[-1]
                alive_nodes.append(node_name)
            # TODO: GETs... maybe getting the whole response in one go is better?
            # Maybe doing these async is a good idea? For now, this suffices.
            all_types = [
                "nodes", "devices", "senders", "receivers", "sources", "flows"
            ]
            resources = {
                rtype: self.registry.get_all(rtype)
                for rtype in all_types
            }
            # Get a flat list of (type, resource) pairs for existing resources
            # TODO: combine with above
            all_resources = []
            for res_type, res in resources.items():
                all_resources += [(res_type, x) for x in res]
            # Initialise the removal queue with any dead nodes
            nodes = [x.strip('/') for x in self.registry.getresources("nodes")]
            # TODO: already have this above... kill_q holds resources condemned
            # in this iteration; to_kill accumulates all condemned (type, id) pairs.
            kill_q = [('nodes', node_id) for node_id in nodes
                      if node_id not in alive_nodes]
            # Create a list of (type, id) pairs of resources that should be removed.
            to_kill = []
            # Find orphaned resources
            kill_q += self.__find_dead_resources(all_resources, to_kill)
            # Process the removal queue until a fixed point: removing a parent
            # can orphan further children.
            while kill_q:
                gevent.sleep(0.0)
                # Add these resources to the list of removals
                to_kill += kill_q
                # Reduce search space; this resource can never parent another
                # This proves to be faster in the long run.
                all_resources = [
                    x for x in all_resources
                    if (x[0], x[1]['id']) not in to_kill
                ]
                # Look through remaining resources and get a new kill_q
                kill_q = self.__find_dead_resources(all_resources, to_kill)
            for resource_type, resource_id in to_kill:
                self.logger.writeInfo("removing resource: {}/{}".format(
                    resource_type, resource_id))
                self.registry.delete(resource_type, resource_id)
        except self.registry.RegistryUnavailable:
            self.logger.writeWarning("registry unavailable")
        except TooLong:
            self.logger.writeWarning("took too long")
        except Exception as e:
            self.logger.writeError("unhandled exception: {}".format(e))

    def __find_dead_resources(self, all_resources, to_kill):
        # Return (type, id) pairs from all_resources whose parent is condemned
        # or no longer present.
        def is_alive(parent_def):
            if parent_def in to_kill:
                return False
            parent_type, parent_id = parent_def
            found_parent = next(
                (x for x in all_resources
                 if x[0] == parent_type and x[1]['id'] == parent_id), None)
            return found_parent is not None

        # Build a list of resource to remove
        kill_q = []
        # Look through all remaining resources
        for child_type, child in all_resources:
            # We need never consider nodes; they should have already been marked.
            if child_type == "nodes":
                continue
            child_id = child['id']
            # Get parent for child. There is only ever one; anything with multiple
            # parent entries in the parent table has multiple entries for backward
            # compatibility, in order strongest->weakest.
            # NOTE(review): the (None, None) default would fail to unpack if a
            # child_type were ever missing from parent_tab — confirm all child
            # types are covered by the table.
            parents = [(parent_type, child.get(parent_key))
                       for parent_type, parent_key in self.parent_tab.get(
                           child_type, (None, None))]
            parent = next((x for x in parents if x[1] is not None), None)
            if parent is None or not is_alive(parent):
                kill_q.append((child_type, child_id))
        return kill_q

    def _remove_flag(self):
        # Best-effort removal of the collection lock flag.
        try:
            self.registry.delete_raw("garbage_collection")
        except Exception as e:
            self.logger.writeWarning("Could not remove flag: {}".format(e))
class QueryService:
    """Registry query service: HTTP/websocket API plus DNS-SD adverts.

    https_mode (HTTPS under test only at present):
      enabled  -- Use HTTPS only in all URLs and mDNS adverts
      disabled -- Use HTTP only in all URLs and mDNS adverts
      mixed    -- Use HTTP in all URLs, but additionally advertise an HTTPS
                  endpoint for discovery of this API only
    """

    def __init__(self, logger=None):
        self.running = False
        self.logger = Logger("regquery")
        self.logger.writeDebug('Running QueryService')
        self.config = {"priority": 100, "https_mode": "disabled"}
        self._load_config()
        self.mdns = MDNSEngine()
        self.httpServer = HttpServer(QueryServiceAPI, WS_PORT, '0.0.0.0',
                                     api_args=[self.logger, self.config])

    def start(self):
        # NOTE(review): signals are only registered when self.running is already
        # True; run() sets the flag before calling start() — confirm a direct
        # start() call is not expected to install handlers.
        if self.running:
            gevent.signal(signal.SIGINT, self.sig_handler)
            gevent.signal(signal.SIGTERM, self.sig_handler)
        self.running = True
        self.mdns.start()
        self.logger.writeDebug('Running web socket server on %i' % WS_PORT)
        self.httpServer.start()
        while not self.httpServer.started.is_set():
            self.logger.writeDebug('Waiting for httpserver to start...')
            self.httpServer.started.wait()
        if self.httpServer.failed is not None:
            raise self.httpServer.failed
        self.logger.writeDebug("Running on port: {}".format(self.httpServer.port))
        # Priorities below 100 (and anything non-numeric) advertise as 0.
        # Fix: compare the numeric value — comparing a string priority from the
        # config file against 100 would raise TypeError on Python 3.
        priority = self.config["priority"]
        if not str(priority).isdigit() or int(priority) < 100:
            priority = 0
        if self.config["https_mode"] != "enabled":
            self.mdns.register(DNS_SD_NAME + "_http", DNS_SD_TYPE, DNS_SD_HTTP_PORT,
                               {"pri": priority,
                                "api_ver": ",".join(API_VERSIONS),
                                "api_proto": "http"})
        if self.config["https_mode"] != "disabled":
            self.mdns.register(DNS_SD_NAME + "_https", DNS_SD_TYPE, DNS_SD_HTTPS_PORT,
                               {"pri": priority,
                                "api_ver": ",".join(API_VERSIONS),
                                "api_proto": "https"})

    def run(self):
        """Blocking entry point: start the service and idle until stopped."""
        self.running = True
        self.start()
        while self.running:
            time.sleep(1)
        self._cleanup()

    def _cleanup(self):
        self.mdns.stop()
        self.mdns.close()
        self.httpServer.stop()

    def sig_handler(self):
        self.stop()

    def stop(self):
        # Flag the run() loop to exit; cleanup happens in run().
        self.running = False

    def _load_config(self):
        """Merge optional JSON config from /etc/ips-regquery/config.json into defaults."""
        try:
            config_file = "/etc/ips-regquery/config.json"
            if os.path.isfile(config_file):
                # Fix: use a context manager so the file handle is always
                # closed (it was previously leaked).
                with open(config_file, 'r') as f:
                    extra_config = json.loads(f.read())
                self.config.update(extra_config)
        except Exception as e:
            self.logger.writeDebug("Exception loading config: {}".format(e))
class NodeFacadeService:
    """Long-running node facade service (Python 2): registers this node with an
    aggregator, serves the facade HTTP API and keeps mDNS adverts updated."""

    def __init__(self, interactive=False):
        self.logger = Logger("facade", None)
        if HOST == "":
            self.logger.writeFatal(
                "Unable to start facade due to lack of connectivity")
            sys.exit(1)
        self.running = False
        self.httpServer = None
        self.interface = None
        self.interactive = interactive
        self.registry = None
        self.registry_cleaner = None
        self.node_id = None
        self.mdns = MDNSEngine()
        # Unique suffix so multiple nodes/processes do not collide in mDNS.
        self.mdnsname_suffix = '_' + str(HOSTNAME) + "_" + str(getpid())
        # Resource type -> mDNS TXT version-counter key.
        self.mappings = {
            "device": "ver_dvc",
            "flow": "ver_flw",
            "source": "ver_src",
            "sender": "ver_snd",
            "receiver": "ver_rcv",
            "self": "ver_slf"
        }
        self.mdns_updater = MDNSUpdater(self.mdns, "_nmos-node._tcp",
                                        "node" + self.mdnsname_suffix,
                                        self.mappings, PORT, self.logger,
                                        txt_recs={
                                            "api_ver": "v1.0,v1.1,v1.2",
                                            "api_proto": "http"
                                        })
        self.aggregator = Aggregator(self.logger, self.mdns_updater)

    def sig_handler(self):
        # SIGINT/SIGTERM: request a clean shutdown of the run() loop.
        print 'Pressed ctrl+c'
        self.stop()

    def sig_hup_handler(self):
        # SIGHUP: re-detect the local IP and push updated endpoints to the registry.
        if getLocalIP() != "":
            global HOST
            HOST = updateHost()
            self.registry.modify_node(href=self.generate_href(),
                                      host=HOST,
                                      api={
                                          "versions": NODE_APIVERSIONS,
                                          "endpoints": self.generate_endpoints()
                                      },
                                      interfaces=self.list_interfaces())

    def generate_endpoints(self):
        # Advertised API endpoints, driven by HTTPS_MODE ("mixed" yields both).
        endpoints = []
        if HTTPS_MODE != "enabled":
            endpoints.append({
                "host": HOST,
                "port": 80,  #Everything should go via apache proxy
                "protocol": "http"
            })
        if HTTPS_MODE != "disabled":
            endpoints.append({
                "host": HOST,
                "port": 443,  #Everything should go via apache proxy
                "protocol": "https"
            })
        return endpoints

    def generate_href(self):
        # Canonical URL for this node, scheme chosen by HTTPS_MODE.
        if HTTPS_MODE == "enabled":
            return "https://{}/".format(HOST)
        else:
            return "http://{}/".format(HOST)

    def list_interfaces(self):
        """List physical network interfaces with MAC-derived port IDs, then
        (best-effort) overlay LLDP chassis IDs when lldpcli is available."""
        interfaces = {}
        # Initially populate interfaces from known-good location
        net_path = "/sys/class/net/"
        if os.path.exists(net_path):
            for interface_name in os.listdir(net_path):
                if interface_name != "lo":
                    address_path = net_path + interface_name + "/address"
                    if os.path.exists(address_path):
                        address = open(address_path, "r").readline()
                        interfaces[interface_name] = {
                            "name": interface_name,
                            "chassis_id": None,
                            "port_id":
                            address.lower().strip("\n").replace(":", "-")
                        }
        # Attempt to source proper LLDP data for interfaces
        if os.path.exists("/usr/sbin/lldpcli"):
            try:
                chassis_data = json.loads(
                    check_output(
                        ["/usr/sbin/lldpcli", "show", "chassis", "-f",
                         "json"]))
                chassis_id = chassis_data["local-chassis"]['chassis'].values(
                )[0]["id"]["value"]
                if chassis_data["local-chassis"]['chassis'].values(
                )[0]["id"]["type"] == "mac":
                    # Normalise MAC-style chassis IDs to lower-case dashed form.
                    chassis_id = chassis_id.lower().replace(":", "-")
                interface_data = json.loads(
                    check_output([
                        "/usr/sbin/lldpcli", "show", "statistics", "-f",
                        "json"
                    ]))
                # lldpcli returns a dict for a single interface, a list otherwise.
                if isinstance(interface_data["lldp"]["interface"], dict):
                    for interface_name in interface_data["lldp"][
                            "interface"].keys():
                        if interface_name in interfaces:
                            # Only correct the Chassis ID. Port ID MUST be a MAC address
                            interfaces[interface_name][
                                "chassis_id"] = chassis_id
                else:
                    for interface_block in interface_data["lldp"]["interface"]:
                        interface_name = interface_block.keys()[0]
                        if interface_name in interfaces:
                            # Only correct the Chassis ID. Port ID MUST be a MAC address
                            interfaces[interface_name][
                                "chassis_id"] = chassis_id
            except Exception:
                # LLDP data is optional; keep the sysfs-derived values.
                pass
        return interfaces.values()

    def start(self):
        # NOTE(review): signals are only registered when self.running is already
        # True; run() sets the flag before calling start() — confirm a direct
        # start() call is not expected to install handlers.
        if self.running:
            gevent.signal(signal.SIGINT, self.sig_handler)
            gevent.signal(signal.SIGTERM, self.sig_handler)
            gevent.signal(signal.SIGHUP, self.sig_hup_handler)
        self.mdns.start()
        self.node_id = get_node_id()
        # Node version is the PTP time at startup ("seconds:nanoseconds").
        node_version = str(ptptime.ptp_detail()[0]) + ":" + str(
            ptptime.ptp_detail()[1])
        node_data = {
            "id": self.node_id,
            "label": nmoscommonconfig.config.get('node_label', FQDN),
            "description": nmoscommonconfig.config.get(
                'node_description', "Node on {}".format(FQDN)),
            "tags": nmoscommonconfig.config.get('node_tags', {}),
            "href": self.generate_href(),
            "host": HOST,
            "services": [],
            "hostname": HOSTNAME,
            "caps": {},
            "version": node_version,
            "api": {
                "versions": NODE_APIVERSIONS,
                "endpoints": self.generate_endpoints(),
            },
            "clocks": [
                {
                    "name": "clk0",
                    "ref_type": "internal",
                },
                {
                    "name": "clk1",
                    "ref_type": "ptp",
                    "version": "IEEE1588-2008",
                    "traceable": False,
                    "gmid": "00-00-00-00-00-00-00-00",
                    "locked": False,
                },
                # Extra values will be filled in as needed at point of checking
            ],
            "interfaces": self.list_interfaces()
        }
        self.registry = FacadeRegistry(self.mappings.keys(), self.aggregator,
                                       self.mdns_updater, self.node_id,
                                       node_data, self.logger)
        self.registry_cleaner = FacadeRegistryCleaner(self.registry)
        self.registry_cleaner.start()
        self.httpServer = HttpServer(FacadeAPI, PORT, '0.0.0.0',
                                     api_args=[self.registry])
        self.httpServer.start()
        while not self.httpServer.started.is_set():
            self.logger.writeInfo('Waiting for httpserver to start...')
            self.httpServer.started.wait()
        if self.httpServer.failed is not None:
            raise self.httpServer.failed
        self.logger.writeInfo("Running on port: {}".format(
            self.httpServer.port))
        # Registration failure is non-fatal: the aggregator proxy retries later.
        try:
            self.logger.writeInfo("Registering as {}...".format(self.node_id))
            self.aggregator.register(
                'node', self.node_id,
                **legalise_resource(node_data, "node", NODE_REGVERSION))
        except Exception as e:
            self.logger.writeWarning("Could not register: {}".format(
                e.__repr__()))
        self.interface = FacadeInterface(self.registry, self.logger)
        self.interface.start()

    def run(self):
        # Main entry point: write a pidfile, start everything, heartbeat PTP
        # state once per second until stopped, then clean up.
        self.running = True
        pidfile = "/tmp/ips-nodefacade.pid"
        file(pidfile, 'w').write(str(getpid()))
        self.start()
        daemon.notify("READY=1")
        while self.running:
            self.registry.update_ptp()
            time.sleep(1)
        os.unlink(pidfile)
        self._cleanup()

    def _cleanup(self):
        # Unregister and stop sub-services; each step is best-effort so one
        # failure does not block the rest.
        try:
            self.logger.writeDebug("cleanup: unregister facade " +
                                   self.node_id)
            self.aggregator.unregister('node', self.node_id)
        except Exception as e:
            self.logger.writeWarning("Could not unregister: {}".format(e))
        if self.mdns:
            try:
                self.mdns.stop()
            except Exception as e:
                self.logger.writeWarning("Could not stop mdns: {}".format(e))
        self.registry_cleaner.stop()
        self.interface.stop()
        self.httpServer.stop()
        self.aggregator.stop()
        self.logger.writeInfo("Stopped main()")

    def stop(self):
        # Flag the run() loop to exit; cleanup happens in run().
        self.running = False
def __init__(self, logger=None):
    """mDNS bridge client state: discovered-service cache plus priority config."""
    self.logger = Logger("mdnsbridge", logger)
    self.services = {}
    self.config = {"priority": 0}
    self.config.update(_config)
class IppmDNSBridge(object):
    """Client for the local mdnsbridge API; resolves service types to hrefs.

    Fixes relative to the previous revision: identity comparisons with None
    (`is`/`is not` instead of `==`/`!=`), removal of stray semicolons, a
    truthiness test instead of len()==0, and `random.choice` instead of
    reseeding the global RNG and indexing manually on every call.
    """

    def __init__(self, logger=None):
        self.logger = Logger("mdnsbridge", logger)
        self.services = {}  # srv_type -> list of service records from the bridge
        self.config = {"priority": 0}
        self.config.update(_config)

    def _checkLocalQueryServiceExists(self):
        """Return the local query API URL when one is running, else ''."""
        url = "http://127.0.0.1/x-nmos/query/v1.0/"
        try:
            # Request to localhost:18870/ - if it succeeds, the service exists
            # AND is running AND is accessible
            r = requests.get(url, timeout=0.5)
            if r is not None and r.status_code == 200:
                return url
        except Exception as e:
            self.logger.writeWarning("No local query service running {}".format(e))
        return ""

    def getHref(self, srv_type, priority=None):
        """Return an href for a service of srv_type, or '' when none is found.

        priority >= 100 requests an exact-priority match; otherwise the
        lowest-priority (<100) services are candidates and one is chosen at
        random, then removed from the local cache.
        """
        if priority is None:
            priority = self.config["priority"]
        if self.logger is not None:
            self.logger.writeDebug("IppmDNSBridge priority = {}".format(priority))
        # Check if type is in services. If not add it
        if srv_type not in self.services:
            self.services[srv_type] = []
        # Refresh from the bridge when no cached service can satisfy the request.
        no_results = True
        for service in self.services[srv_type]:
            if priority >= 100:
                if service["priority"] == priority:
                    no_results = False
            elif service["priority"] < 100:
                no_results = False
        if no_results:
            self._updateServices(srv_type)
        # Collect candidates at the best (lowest) priority below 100, or
        # return immediately on an exact high-priority match.
        current_priority = 99
        valid_services = []
        for service in self.services[srv_type]:
            if priority >= 100:
                if service["priority"] == priority:
                    return self._createHref(service["address"], service["port"])
            else:
                if service["priority"] < current_priority:
                    current_priority = service["priority"]
                    valid_services = []
                if service["priority"] == current_priority:
                    valid_services.append(service)
        if not valid_services:
            self.logger.writeWarning("No services found: {}".format(srv_type))
            if srv_type == "nmos-query":
                # Fall back to a query service running on this host.
                return self._checkLocalQueryServiceExists()
            return ""
        # Randomise selection. Delete entry from the services list and return it
        service = random.choice(valid_services)
        href = self._createHref(service["address"], service["port"])
        self.services[srv_type].remove(service)
        return href

    def _createHref(self, address, port):
        """Build an http URL, bracketing IPv6 literals."""
        formatted_address = address
        if ":" in formatted_address:
            formatted_address = "[" + formatted_address + "]"
        return "http://" + formatted_address + ":" + str(port)

    def _updateServices(self, srv_type):
        """Refresh the cache for srv_type from the local mdnsbridge API (best-effort)."""
        req_url = ("http://127.0.0.1/x-nmos-opensourceprivatenamespace/"
                   "mdnsbridge/v1.0/" + srv_type + "/")
        try:
            # Request to localhost/x-nmos-opensourceprivatenamespace/mdnsbridge/v1.0/<type>/
            r = requests.get(req_url, timeout=0.5, proxies={'http': ''})
            if r is not None and r.status_code == 200:
                # If any results, put them in self.services
                self.services[srv_type] = r.json()["representation"]
        except Exception as e:
            self.logger.writeWarning("Exception updating services: {}".format(e))
class GroupingService:
    """Runs the grouping HTTP API and keeps it registered with every node facade."""

    def __init__(self, interactive=False):
        self.facades = {ver: Facade("{}/{}".format(APINAME, ver)) for ver in APIVERSIONS}
        self.logger = Logger("grouping", None)
        self.running = False
        self.httpServer = None
        with open(CONFIG_PATH) as config_fp:
            self.config = json.load(config_fp)

    def _sig_handler(self):
        self.logger.writeInfo("Stopping...")
        self._stop()

    def _start(self):
        # Install signal handlers, bring up the API server and attach storage.
        gevent.signal(signal.SIGINT, self._sig_handler)
        gevent.signal(signal.SIGTERM, self._sig_handler)
        self.httpServer = HttpServer(GroupingAPI, PORT, '0.0.0.0',
                                     ssl=self.config.get("ssl"))
        self.httpServer.start()
        while not self.httpServer.started.is_set():
            self.logger.writeInfo('Waiting for httpserver to start...')
            self.httpServer.started.wait()
        if self.httpServer.failed is not None:
            raise self.httpServer.failed
        self.httpServer.api.app.config.update(self.config)
        db_mongo.init_app(self.httpServer.api.app, logger=self.logger)
        self.logger.writeInfo("Running on port: {}".format(self.httpServer.port))

    def run(self):
        """Blocking entry point: register with each facade, heartbeat every 5s."""
        self.running = True
        self._start()
        for version, facade in self.facades.items():
            facade.register_service("http://127.0.0.1:" + str(PORT),
                                    "{}/{}/{}/".format(APINAMESPACE, APINAME, version))
        itercount = 0
        while self.running:
            if itercount % 5 == 0:
                for facade in self.facades.values():
                    facade.heartbeat_service()
            gevent.sleep(1)
            itercount += 1
            if itercount == 10:
                itercount = 0
        for facade in self.facades.values():
            facade.unregister_service()
        self._cleanup()

    def _cleanup(self):
        self.httpServer.stop()
        self.logger.writeInfo("Stopped")

    def _stop(self):
        self.running = False