async def get_device_info(rack, board, device):
    """Look up the device information for a single device.

    Args:
        rack (str): The rack which the device resides on.
        board (str): The board which the device resides on.
        device (str): The ID of the device to get meta-info for.

    Returns:
        tuple(str, Device): A tuple where the first item is the name of
            the plugin that the device is associated with and the second
            item is the device information for that device.

    Raises:
        errors.DeviceNotFoundError: The given rack-board-device combination
            does not correspond to a known device.
    """
    device_id = utils.composite(rack, board, device)

    # Fetching the device info cache also builds the plugins cache as a
    # side effect, so the plugin lookup below is safe afterwards.
    device_cache = await get_device_info_cache()

    found = device_cache.get(device_id)
    if found is None:
        raise errors.DeviceNotFoundError(
            _('{} does not correspond with a known device').format(
                '/'.join([rack, board, device]))
        )

    # Any device in the cache came from a plugin, so a plugin name should
    # always be resolvable for it here.
    plugin_cache = await _plugins_cache.get(PLUGINS_CACHE_KEY)
    return plugin_cache.get(device_id), found
async def _build_device_info_cache():
    """Construct the dictionary that will become the device info cache.

    Returns:
        tuple(dict, dict): A tuple where the first dictionary is the device
            info dictionary (in which the key is the device id and the value
            is the data associated with that device), and the second dictionary
            is the plugins dictionary (in which the device ID is mapped to the
            name of the plugin which manages it).

    Raises:
        errors.InternalApiError: All plugins failed the device scan.
    """
    logger.debug(_('Building the device cache'))

    devices = {}
    plugins = {}

    # Re-register all plugins before rebuilding the cache so that every
    # available plugin contributes device data. A plugin that was previously
    # down gets picked up again by this step.
    # See: https://github.com/vapor-ware/synse-server/issues/317
    logger.debug(_('re-registering plugins prior to cache rebuild'))
    register_plugins()

    plugin_count = len(Plugin.manager.plugins)
    logger.debug(_('Plugins to scan: {}').format(plugin_count))

    # Maps plugin ID -> the exception raised while scanning that plugin.
    failures = {}

    # Walk every known plugin and collect the device records its client
    # provides. A single failing plugin must not fail the whole scan.
    #
    # FIXME (etd): as of pylint 2.1.1, this gets marked with 'not-an-iterable'
    # It still appears to work just fine, so need to figure out why it is getting
    # marked as such and what should be done to fix it.
    async for plugin_id, plugin in get_plugins():  # pylint: disable=not-an-iterable
        logger.debug('{} -- {}'.format(plugin_id, plugin))

        try:
            for device in plugin.client.devices():
                key = utils.composite(
                    device.location.rack, device.location.board, device.uid)
                devices[key] = device
                plugins[key] = plugin_id

        # FIXME (etd): instead of just logging out the errors, we could either:
        #   - update the response scheme to hold an 'errors' field which will alert
        #     the user of these partial non-fatal errors.
        #   - update the API to add a url to check the currently configured plugins
        #     and their 'health'/'state'.
        #   - both
        except grpc.RpcError as err:
            failures[plugin_id] = err
            logger.warning(
                _('Failed to get device info for plugin: {}').format(plugin_id))
            logger.warning(err)

    # Every plugin failing (when plugins exist at all) points at a
    # misconfiguration, so surface that as an internal error.
    if plugin_count != 0 and plugin_count == len(failures):
        raise errors.InternalApiError(
            _('Failed to scan all plugins: {}').format(failures)
        )

    return devices, plugins
async def _build_metainfo_cache():
    """Construct the dictionary that will become the metainfo cache.

    Returns:
        tuple(dict, dict): A tuple where the first dictionary is the metainfo
            dictionary (in which the key is the device id and the value is the
            data associated with that device), and the second dictionary is the
            plugins dictionary (in which the device ID is mapped to the name of
            the plugin which manages it).

    Raises:
        errors.InternalApiError: All plugins failed the metainfo scan.
    """
    logger.debug(_('Building the metainfo cache'))

    metainfo = {}
    plugins = {}

    # Only register plugins when the manager has none; otherwise use the
    # plugins it already tracks.
    plugin_count = len(Plugin.manager.plugins)
    if plugin_count == 0:
        logger.debug(_('Manager has no plugins - registering plugins'))
        register_plugins()
        plugin_count = len(Plugin.manager.plugins)

    logger.debug(_('Plugins to scan: {}').format(plugin_count))

    # Maps plugin name -> the exception raised while scanning that plugin.
    failures = {}

    # Walk every known plugin and collect the meta information its client
    # provides. A single failing plugin must not fail the whole scan.
    async for name, plugin in get_plugins():
        logger.debug('{} -- {}'.format(name, plugin))

        try:
            for device in plugin.client.metainfo():
                key = utils.composite(
                    device.location.rack, device.location.board, device.uid)
                metainfo[key] = device
                plugins[key] = name

        # FIXME (etd): instead of just logging out the errors, we could either:
        #   - update the response scheme to hold an 'errors' field which will alert
        #     the user of these partial non-fatal errors.
        #   - update the API to add a url to check the currently configured plugins
        #     and their 'health'/'state'.
        #   - both
        except grpc.RpcError as err:
            failures[name] = err
            logger.warning(
                _('Failed to get metainfo for plugin: {}').format(name))
            logger.warning(err)

    # Every plugin failing (when plugins exist at all) points at a
    # misconfiguration, so surface that as an internal error.
    if plugin_count != 0 and plugin_count == len(failures):
        raise errors.InternalApiError(
            _('Failed to scan all plugins: {}').format(failures)
        )

    return metainfo, plugins
def test_composite(params, expected):
    """Test successfully composing various string combinations."""
    assert utils.composite(*params) == expected