def test_register_plugins_from_discovery(grpc_timeout, monkeypatch, mock_client_test_ok, mock_client_meta_ok):
    """Register plugins that we get back from discovery."""
    assert len(plugin.Plugin.manager.plugins) == 0

    # Patch kubernetes discovery to report two TCP plugin addresses.
    monkeypatch.setattr(
        plugin.kubernetes, 'discover',
        lambda: ['10.0.0.1:5001', '10.0.0.2:5001'])

    plugin.register_plugins()

    assert len(plugin.Plugin.manager.plugins) == 2

    # FIX: the plugin-ID literals were mangled by email-address obfuscation
    # ('[email protected]'); reconstructed to the '<tag>+<protocol>@<address>'
    # convention used by the unix-socket tests in this file.
    assert 'vaporio/test-plugin+tcp@10.0.0.1:5001' in plugin.Plugin.manager.plugins
    p = plugin.Plugin.manager.plugins['vaporio/test-plugin+tcp@10.0.0.1:5001']
    assert p.name == 'test-plugin'
    assert p.address == '10.0.0.1:5001'
    assert p.protocol == 'tcp'

    assert 'vaporio/test-plugin+tcp@10.0.0.2:5001' in plugin.Plugin.manager.plugins
    p = plugin.Plugin.manager.plugins['vaporio/test-plugin+tcp@10.0.0.2:5001']
    assert p.name == 'test-plugin'
    assert p.address == '10.0.0.2:5001'
    assert p.protocol == 'tcp'
def test_register_plugins_ok():
    """A unix socket in the default socket directory gets registered."""
    # Create a socket file for the plugin to be discovered at.
    server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock_path = '{}/test'.format(data_dir)
    server.bind(sock_path)

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0
    # The plugin is not yet present in the configuration.
    assert config.options.get('plugin.unix') is None

    plugin.register_plugins()

    # Registration adds the plugin to the config, since its socket lives in
    # the default socket directory.
    assert 'test' in config.options.get('plugin.unix')

    assert len(manager.plugins) == 1
    assert 'test' in manager.plugins

    registered = manager.plugins['test']
    assert registered.name == 'test'
    assert registered.addr == sock_path
    assert registered.mode == 'unix'
def test_register_plugins_already_exists():
    """Re-registering an already-registered plugin leaves state unchanged."""
    # Create the socket file the plugin is discovered at.
    server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock_path = '{}/test'.format(data_dir)
    server.bind(sock_path)

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0

    # Register twice; the second pass must be a no-op on the manager state.
    for _round in range(2):
        plugin.register_plugins()

        assert len(manager.plugins) == 1
        assert 'test' in manager.plugins

        registered = manager.plugins['test']
        assert registered.name == 'test'
        assert registered.addr == sock_path
        assert registered.mode == 'unix'
def test_register_plugins_already_exists(tmpsocket, grpc_timeout, mock_client_test_ok, mock_client_meta_ok):
    """Re-registering an already-registered plugin leaves state unchanged."""
    # Create the socket the plugin is discovered at.
    _, sock_path = tmpsocket.add('test')

    plugin_id = 'vaporio/test-plugin+unix@' + sock_path
    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0

    # Register twice; the second pass must be a no-op on the manager state.
    for _round in range(2):
        plugin.register_plugins()

        assert len(manager.plugins) == 1
        assert plugin_id in manager.plugins

        registered = manager.plugins[plugin_id]
        assert registered.name == 'test-plugin'
        assert registered.address == sock_path
        assert registered.protocol == 'unix'
def test_register_plugins_ok(tmpsocket, grpc_timeout, mock_client_test_ok, mock_client_meta_ok):
    """A unix socket in the default socket directory gets registered."""
    # Create the socket the plugin is discovered at.
    _, sock_path = tmpsocket.add('test')

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0
    # The plugin is not yet present in the configuration.
    assert config.options.get('plugin.unix') is None

    plugin.register_plugins()

    # Registration adds the plugin to the config, since its socket lives in
    # the default socket directory.
    assert sock_path in config.options.get('plugin.unix')

    plugin_id = 'vaporio/test-plugin+unix@' + sock_path
    assert len(manager.plugins) == 1
    assert plugin_id in manager.plugins

    registered = manager.plugins[plugin_id]
    assert registered.name == 'test-plugin'
    assert registered.address == sock_path
    assert registered.protocol == 'unix'
async def _build_capabilities_cache():
    """Construct the list that will become the device capabilities cache.

    Returns:
        list: One dictionary per registered plugin, identifying the plugin
            and enumerating the device kinds it supports along with the
            output types supported by those device kinds.

    Raises:
        errors.InternalApiError: All plugins failed the capabilities request.
    """
    logger.debug(_('Building the device capabilities cache'))

    # Make sure there are plugins to query; register them if the manager is
    # currently empty (e.g. on first use).
    plugin_count = len(Plugin.manager.plugins)
    if plugin_count == 0:
        logger.debug(_('Manager has no plugins - registering plugins'))
        register_plugins()
        plugin_count = len(Plugin.manager.plugins)

    logger.debug(_('Plugins to get capabilities for: {}').format(plugin_count))

    capabilities = []
    # Plugins that failed the capabilities request, keyed by plugin id.
    failures = {}

    # FIXME (etd): as of pylint 2.1.1, this gets marked with 'not-an-iterable'
    # It still appears to work just fine, so need to figure out why it is getting
    # marked as such and what should be done to fix it.
    async for plugin_id, plugin in get_plugins():  # pylint: disable=not-an-iterable
        logger.debug('{} - {}'.format(plugin_id, plugin))
        try:
            devices = [
                {'kind': capability.kind, 'outputs': capability.outputs}
                for capability in plugin.client.capabilities()
            ]
        except grpc.RpcError as err:
            failures[plugin_id] = err
            logger.warning(
                _('Failed to get capability for plugin: {}').format(plugin_id))
            logger.warning(err)
            continue

        capabilities.append({'plugin': plugin.tag, 'devices': devices})

    # If every plugin failed (and there was at least one), something is very
    # likely mis-configured, so surface it as an error.
    if plugin_count != 0 and plugin_count == len(failures):
        raise errors.InternalApiError(
            _('Failed to get capabilities for all plugins: {}').format(
                failures))

    return capabilities
async def scan(rack=None, board=None, force=False):
    """The handler for the Synse Server "scan" API command.

    Args:
        rack (str): The rack to filter the scan results by.
        board (str): The board to filter the scan results by.
        force (bool): Force a re-scan of the meta-information.

    Returns:
        ScanResponse: The "scan" response scheme model.
    """
    logger.debug(_('Scan Command (args: {}, {}, force: {})').format(rack, board, force))
    if force:
        await cache.clear_all_meta_caches()

    # Plugins are registered on scan. If no plugins exist and a scan is
    # performed (e.g. on startup), we will find and register plugins.
    # Additionally, if we are forcing re-scan, we will re-register plugins.
    # This allows us to pick up any dynamically added plugins and clear out
    # any plugins that were removed.
    if force or not plugin.Plugin.manager.plugins:
        logger.debug(_('Re-registering plugins'))
        plugin.register_plugins()

    cache_data = await cache.get_scan_cache()

    # Narrow the results down to the requested rack, if one was given.
    if rack is not None:
        if not cache_data:
            raise errors.FailedScanCommandError(
                _('Unable to filter by resource - no scan results returned')
            )
        found = next((r for r in cache_data['racks'] if r['id'] == rack), None)
        if found is None:
            raise errors.RackNotFoundError(
                _('Rack "{}" not found in scan results').format(rack)
            )
        cache_data = found

    # Narrow the rack results down to the requested board, if one was given.
    if board is not None:
        found = next((b for b in cache_data['boards'] if b['id'] == board), None)
        if found is None:
            raise errors.BoardNotFoundError(
                _('Board "{}" not found in scan results').format(board)
            )
        cache_data = found

    return ScanResponse(
        data=cache_data
    )
def test_register_plugins_no_socks():
    """Register plugins when no sockets are in the plugin path."""
    # Drop a regular (non-socket) file into the plugin path; registration
    # must ignore it.
    filepath = os.path.join(data_dir, 'test.txt')
    open(filepath, 'w').close()

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0
    plugin.register_plugins()
    assert len(manager.plugins) == 0
async def read_cached(start=None, end=None):
    """The handler for the Synse Server "readcached" API command.

    Args:
        start (str): An RFC3339 or RFC3339Nano formatted timestamp
            which defines a starting bound on the cache data to
            return. If no timestamp is specified, there will not
            be a starting bound. (default: None)
        end (str): An RFC3339 or RFC3339Nano formatted timestamp
            which defines an ending bound on the cache data to
            return. If no timestamp is specified, there will not
            be an ending bound. (default: None)

    Yields:
        ReadCachedResponse: The cached reading from the plugin.

    Raises:
        errors.FailedReadCachedCommandError: A plugin's cache dump failed
            over gRPC.
    """
    start, end = start or '', end or ''
    logger.debug(
        _('Read Cached command (start: {}, end: {})').format(start, end))

    # If the plugins have not yet been registered, register them now.
    if len(plugin.Plugin.manager.plugins) == 0:
        logger.debug(_('Re-registering plugins'))
        plugin.register_plugins()

    # For each plugin, we'll want to request a dump of its readings cache.
    async for plugin_name, plugin_handler in plugin.get_plugins():  # pylint: disable=not-an-iterable
        logger.debug(
            _('Getting readings cache for plugin: {}').format(plugin_name))

        # Get the cached data from the plugin
        try:
            for reading in plugin_handler.client.read_cached(start, end):
                # If there is no reading, we're done iterating
                if reading is None:
                    return
                try:
                    __, device = await cache.get_device_info(  # pylint: disable=unused-variable
                        reading.rack, reading.board, reading.device)
                except errors.DeviceNotFoundError:
                    # FIX: the original logged the raw message template; the
                    # '{}-{}-{}' placeholders were never filled in. Format the
                    # message with the reading's rack/board/device.
                    logger.info(
                        _('Did not find device {}-{}-{} locally. Skipping device; '
                          'server cache may be out of sync.').format(
                              reading.rack, reading.board, reading.device))
                    continue
                yield ReadCachedResponse(
                    device=device,
                    device_reading=reading,
                )
        except grpc.RpcError as ex:
            raise errors.FailedReadCachedCommandError(str(ex)) from ex
def test_register_plugins_no_socks(tmpdir, grpc_timeout):
    """Register plugins when no sockets are in the plugin path."""
    # Point the socket directory at a fresh tmpdir that contains only a
    # regular (non-socket) file.
    sockdir = tmpdir.mkdir('socks')
    const.SOCKET_DIR = str(sockdir)

    non_sock = '{}/test.txt'.format(sockdir)
    open(non_sock, 'w').close()

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0
    plugin.register_plugins()
    # The non-socket file must not have been registered as a plugin.
    assert len(manager.plugins) == 0
def test_register_plugins_old():
    """Re-register, removing an old plugin."""
    # Create two plugin sockets.
    sock_foo = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    path_foo = '{}/foo'.format(data_dir)
    sock_foo.bind(path_foo)

    sock_bar = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    path_bar = '{}/bar'.format(data_dir)
    sock_bar.bind(path_bar)

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0

    plugin.register_plugins()

    assert len(manager.plugins) == 2
    assert 'foo' in manager.plugins

    for name, addr in (('foo', path_foo), ('bar', path_bar)):
        registered = manager.plugins[name]
        assert registered.name == name
        assert registered.addr == addr
        assert registered.mode == 'unix'

    # Remove one of the sockets and re-register; the stale plugin should be
    # dropped from the manager.
    try:
        os.unlink(path_foo)
    except OSError:
        if os.path.exists(path_foo):
            raise

    assert len(manager.plugins) == 2
    plugin.register_plugins()
    assert len(manager.plugins) == 1

    assert 'bar' in manager.plugins
    registered = manager.plugins['bar']
    assert registered.name == 'bar'
    assert registered.addr == path_bar
    assert registered.mode == 'unix'
def test_register_plugins_old(tmpsocket, grpc_timeout, mock_client_test_ok, mock_client_meta_ok):
    """Re-register, removing an old plugin."""
    # Create two plugin sockets.
    _, path_foo = tmpsocket.add('foo')
    _, path_bar = tmpsocket.add('bar')

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0

    plugin.register_plugins()
    assert len(manager.plugins) == 2

    for sock_path in (path_foo, path_bar):
        plugin_id = 'vaporio/test-plugin+unix@' + sock_path
        assert plugin_id in manager.plugins
        registered = manager.plugins[plugin_id]
        assert registered.name == 'test-plugin'
        assert registered.address == sock_path
        assert registered.protocol == 'unix'

    # Remove one of the sockets and re-register; the stale plugin should be
    # dropped from the manager.
    try:
        os.unlink(path_foo)
    except OSError:
        if os.path.exists(path_foo):
            raise

    assert len(manager.plugins) == 2
    plugin.register_plugins()
    assert len(manager.plugins) == 1

    plugin_id = 'vaporio/test-plugin+unix@' + path_bar
    assert plugin_id in manager.plugins
    registered = manager.plugins[plugin_id]
    assert registered.name == 'test-plugin'
    assert registered.address == path_bar
    assert registered.protocol == 'unix'
def test_register_plugins_new():
    """Re-register, adding a new plugin."""
    # Start with a single plugin socket.
    sock_foo = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    path_foo = '{}/foo'.format(data_dir)
    sock_foo.bind(path_foo)

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0

    plugin.register_plugins()

    assert len(manager.plugins) == 1
    assert 'foo' in manager.plugins
    registered = manager.plugins['foo']
    assert registered.name == 'foo'
    assert registered.addr == path_foo
    assert registered.mode == 'unix'

    # Add a second socket and re-register; both plugins should now be tracked.
    sock_bar = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    path_bar = '{}/bar'.format(data_dir)
    sock_bar.bind(path_bar)

    assert len(manager.plugins) == 1
    plugin.register_plugins()
    assert len(manager.plugins) == 2

    for name, addr in (('foo', path_foo), ('bar', path_bar)):
        assert name in manager.plugins
        registered = manager.plugins[name]
        assert registered.name == name
        assert registered.addr == addr
        assert registered.mode == 'unix'
def test_register_plugins_new(tmpsocket, grpc_timeout, mock_client_test_ok, mock_client_meta_ok):
    """Re-register, adding a new plugin."""
    # Start with a single plugin socket.
    _, path_foo = tmpsocket.add('foo')

    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0

    plugin.register_plugins()

    assert len(manager.plugins) == 1
    plugin_id = 'vaporio/test-plugin+unix@' + path_foo
    assert plugin_id in manager.plugins
    registered = manager.plugins[plugin_id]
    assert registered.name == 'test-plugin'
    assert registered.address == path_foo
    assert registered.protocol == 'unix'

    # Add a second socket and re-register; both plugins should now be tracked.
    _, path_bar = tmpsocket.add('bar')

    assert len(manager.plugins) == 1
    plugin.register_plugins()
    assert len(manager.plugins) == 2

    for sock_path in (path_foo, path_bar):
        plugin_id = 'vaporio/test-plugin+unix@' + sock_path
        assert plugin_id in manager.plugins
        registered = manager.plugins[plugin_id]
        assert registered.name == 'test-plugin'
        assert registered.address == sock_path
        assert registered.protocol == 'unix'
async def get_plugins():
    """The handler for the Synse Server "plugins" API command.

    Returns:
        PluginsResponse: The "plugins" response scheme model.
    """
    logger.debug(_('Plugins Command'))

    # Register plugins. If no plugins exist, this will attempt to register
    # new ones. If plugins already exist, this will just ensure that all of
    # the tracked plugins are up to date.
    plugin.register_plugins()

    # Build a view of all the plugins registered with the plugin manager.
    # plugin.get_plugins yields (name, plugin) tuples; only the plugin
    # object is needed here.
    plugins = []
    async for _name, plug in plugin.get_plugins():
        plugins.append({
            'name': plug.name,
            'network': plug.mode,
            'address': plug.addr
        })

    return PluginsResponse(data=plugins)
async def _build_metainfo_cache():
    """Construct the dictionary that will become the metainfo cache.

    Returns:
        tuple(dict, dict): A tuple where the first dictionary maps device id
            to the metainfo for that device, and the second maps device id
            to the name of the plugin which manages the device.

    Raises:
        errors.InternalApiError: All plugins failed the metainfo scan.
    """
    logger.debug(_('Building the metainfo cache'))
    metainfo, plugins = {}, {}

    # Make sure there are plugins to scan; register them if the manager is
    # currently empty.
    plugin_count = len(Plugin.manager.plugins)
    if plugin_count == 0:
        logger.debug(_('Manager has no plugins - registering plugins'))
        register_plugins()
        plugin_count = len(Plugin.manager.plugins)

    logger.debug(_('Plugins to scan: {}').format(plugin_count))

    # Plugins that failed to provide metainfo, keyed by plugin name.
    failures = {}

    async for name, plug in get_plugins():
        logger.debug('{} -- {}'.format(name, plug))
        try:
            for device in plug.client.metainfo():
                _id = utils.composite(device.location.rack, device.location.board, device.uid)
                metainfo[_id] = device
                plugins[_id] = name

        # We do not want to fail the scan if a single plugin fails to provide
        # meta-information.
        #
        # FIXME (etd): instead of just logging out the errors, we could either:
        #   - update the response scheme to hold an 'errors' field which will alert
        #     the user of these partial non-fatal errors.
        #   - update the API to add a url to check the currently configured plugins
        #     and their 'health'/'state'.
        #   - both
        except grpc.RpcError as ex:
            failures[name] = ex
            logger.warning(
                _('Failed to get metainfo for plugin: {}').format(name))
            logger.warning(ex)

    # If every plugin failed (and there was at least one), something is very
    # likely mis-configured, so surface it as an error.
    if plugin_count != 0 and plugin_count == len(failures):
        raise errors.InternalApiError(
            _('Failed to scan all plugins: {}').format(failures))

    return metainfo, plugins
async def _build_device_info_cache():
    """Construct the dictionary that will become the device info cache.

    Returns:
        tuple(dict, dict): A tuple where the first dictionary maps device id
            to the info for that device, and the second maps device id to
            the name of the plugin which manages the device.

    Raises:
        errors.InternalApiError: All plugins failed the device scan.
    """
    logger.debug(_('Building the device cache'))
    devices, plugins = {}, {}

    # Register all plugins prior to rebuilding the cache. This ensures that we
    # are using all possible plugins to get device data. If a plugin was
    # previously down, this will add it back to tracking before the cache is
    # rebuilt. See: https://github.com/vapor-ware/synse-server/issues/317
    logger.debug(_('re-registering plugins prior to cache rebuild'))
    register_plugins()

    plugin_count = len(Plugin.manager.plugins)
    logger.debug(_('Plugins to scan: {}').format(plugin_count))

    # Plugins that failed to provide device info, keyed by plugin id.
    failures = {}

    # FIXME (etd): as of pylint 2.1.1, this gets marked with 'not-an-iterable'
    # It still appears to work just fine, so need to figure out why it is getting
    # marked as such and what should be done to fix it.
    async for plugin_id, plug in get_plugins():  # pylint: disable=not-an-iterable
        logger.debug('{} -- {}'.format(plugin_id, plug))
        try:
            for device in plug.client.devices():
                _id = utils.composite(device.location.rack, device.location.board, device.uid)
                devices[_id] = device
                plugins[_id] = plugin_id

        # We do not want to fail the scan if a single plugin fails to provide
        # device information.
        #
        # FIXME (etd): instead of just logging out the errors, we could either:
        #   - update the response scheme to hold an 'errors' field which will alert
        #     the user of these partial non-fatal errors.
        #   - update the API to add a url to check the currently configured plugins
        #     and their 'health'/'state'.
        #   - both
        except grpc.RpcError as ex:
            failures[plugin_id] = ex
            logger.warning(
                _('Failed to get device info for plugin: {}').format(
                    plugin_id))
            logger.warning(ex)

    # If every plugin failed (and there was at least one), something is very
    # likely mis-configured, so surface it as an error.
    if plugin_count != 0 and plugin_count == len(failures):
        raise errors.InternalApiError(
            _('Failed to scan all plugins: {}').format(failures))

    return devices, plugins
async def get_plugins():
    """The handler for the Synse Server "plugins" API command.

    Returns:
        PluginsResponse: The "plugins" response scheme model.
    """
    logger.debug(_('Plugins Command'))

    # Register plugins. If no plugins exist, this will attempt to register
    # new ones. If plugins already exist, this will just ensure that all of
    # the tracked plugins are up to date.
    plugin.register_plugins()

    # Each plugin entry pulls information from a few sources:
    #   - config (network/address), encoded on the plugin model
    #   - metadata, from the plugin model (populated over gRPC)
    #   - health, via a gRPC call made here
    plugins = []

    # FIXME (etd): as of pylint 2.1.1, this gets marked with 'not-an-iterable'
    # It still appears to work just fine, so need to figure out why it is getting
    # marked as such and what should be done to fix it.
    async for item in plugin.get_plugins():  # pylint: disable=not-an-iterable
        # get_plugins yields (id, plugin) tuples; only the plugin is needed.
        plug = item[1]
        version = plug.version

        plugin_data = {
            'tag': plug.tag,
            'name': plug.name,
            'description': plug.description,
            'maintainer': plug.maintainer,
            'vcs': plug.vcs,
            'version': {
                'plugin_version': version.pluginVersion,
                'sdk_version': version.sdkVersion,
                'build_date': version.buildDate,
                'git_commit': version.gitCommit,
                'git_tag': version.gitTag,
                'arch': version.arch,
                'os': version.os,
            },
            'network': {
                'protocol': plug.protocol,
                'address': plug.address
            }
        }

        # Attach the health data, reporting an error status if the gRPC
        # health call itself fails.
        try:
            health = plug.client.health()
        except grpc.RpcError as ex:
            plugin_data['health'] = {
                'timestamp': utils.rfc3339now(),
                'status': 'error',
                'message': str(ex),
                'checks': []
            }
        else:
            checks = []
            for check in health.checks:
                checks.append({
                    'name': check.name,
                    'status': util.plugin_health_status_name(check.status),
                    'message': check.message,
                    'timestamp': check.timestamp,
                    'type': check.type,
                })
            plugin_data['health'] = {
                'timestamp': health.timestamp,
                'status': util.plugin_health_status_name(health.status),
                'message': '',
                'checks': checks
            }

        plugins.append(plugin_data)

    return PluginsResponse(data=plugins)
def test_register_plugins_no_default_socks():
    """Register plugins when the plugin path doesn't exist."""
    manager = plugin.Plugin.manager
    assert len(manager.plugins) == 0
    # A missing socket directory makes registration a quiet no-op.
    plugin.register_plugins()
    assert len(manager.plugins) == 0