Ejemplo n.º 1
0
    def format_readings(self):
        """Format the instance's readings to the read response scheme.

        Returns:
            dict: A properly formatted Read response.
        """
        formatted = []

        for reading in self.readings:
            reading_type = reading.type

            # Resolve the device output definition for this reading type.
            # The output carries the unit and precision metadata; take the
            # first output whose type matches, if any.
            out = next(
                (o for o in self.device.output if o.type == reading_type),
                None)

            # A reading whose type has no matching device output is not
            # supported -- log it and skip it rather than returning it.
            if out is None:
                logger.warning(
                    _('Found unexpected reading type "{}" for device {}').
                    format(reading_type, self.device))
                continue

            # Unit may legitimately be absent, e.g. for readings where a
            # unit makes no sense (LED on/off state).
            unit = None
            if out.unit.symbol or out.unit.name:
                unit = {'symbol': out.unit.symbol, 'name': out.unit.name}

            precision = out.precision

            # The value lives in a protobuf oneof block; determine which
            # field is set and pull it out. No field set means no value.
            oneof_field = reading.WhichOneof('value')
            value = getattr(reading, oneof_field) if oneof_field is not None else None

            # Apply the configured precision to float values, when given.
            if precision and isinstance(value, float):
                value = round(value, precision)

            formatted.append({
                'value': value,
                'timestamp': reading.timestamp,
                'unit': unit,
                'type': reading_type,
                'info': reading.info,
            })

        return formatted
Ejemplo n.º 2
0
async def _build_capabilities_cache():
    """Construct the list that will become the device capabilities cache.

    Returns:
        list: A list of dictionaries, where each dictionary corresponds to
            a registered plugin. The plugin dict will identify the plugin
            and enumerate the device kinds it supports and the output types
            supported by those device kinds.

    Raises:
        errors.InternalApiError: All plugins failed the capabilities request.
    """
    logger.debug(_('Building the device capabilities cache'))
    capabilities = []

    # Ensure the plugin manager is populated before querying; if it holds
    # no plugins yet, attempt registration first.
    plugin_count = len(Plugin.manager.plugins)
    if not plugin_count:
        logger.debug(_('Manager has no plugins - registering plugins'))
        register_plugins()
        plugin_count = len(Plugin.manager.plugins)

    logger.debug(_('Plugins to get capabilities for: {}').format(plugin_count))

    # Plugins that failed the capabilities request, keyed by plugin id.
    failures = {}

    # FIXME (etd): as of pylint 2.1.1, this gets marked with 'not-an-iterable'
    # even though the async iteration works fine; needs investigation.
    async for plugin_id, plugin in get_plugins():  # pylint: disable=not-an-iterable
        logger.debug('{} - {}'.format(plugin_id, plugin))

        # Ask the plugin, via its gRPC client, for its capability info.
        # A failing plugin is recorded and skipped rather than failing
        # the whole cache build.
        try:
            devices = [
                {'kind': capability.kind, 'outputs': capability.outputs}
                for capability in plugin.client.capabilities()
            ]
        except grpc.RpcError as ex:
            failures[plugin_id] = ex
            logger.warning(
                _('Failed to get capability for plugin: {}').format(plugin_id))
            logger.warning(ex)
            continue

        capabilities.append({'plugin': plugin.tag, 'devices': devices})

    # Raise only if every plugin failed (and there was at least one) --
    # that strongly suggests something is mis-configured.
    if plugin_count != 0 and plugin_count == len(failures):
        raise errors.InternalApiError(
            _('Failed to get capabilities for all plugins: {}').format(
                failures))

    return capabilities
Ejemplo n.º 3
0
def register_plugin(address, protocol):
    """Register a plugin. If a plugin with the given address already exists,
    it will not be re-registered, but its ID will still be returned.

    If a plugin fails to register, None is returned.

    Args:
        address (str): The address of the plugin to register.
        protocol (str): The protocol that the plugin uses. This should
            be one of 'unix', 'tcp'.

    Returns:
        str: The ID of the plugin that was registered.
        None: The given address failed to resolve, so no plugin
            was registered.

    Raises:
        ValueError: An invalid protocol is specified. The protocol must
            be one of: 'unix', 'tcp'

    """
    existing = Plugin.manager.get_by_address(address)
    if existing:
        logger.debug(_('{} is already registered').format(existing))
        return existing.id()

    # No plugin is registered at this address yet, so build a client for it.
    # Connecting lets us (a) confirm the plugin is reachable, and (b) pull
    # the metadata needed to construct a new Plugin model.
    if protocol == 'tcp':
        plugin_client = client.PluginTCPClient(address)
    elif protocol == 'unix':
        plugin_client = client.PluginUnixClient(address)
    else:
        raise ValueError(_('Invalid protocol specified for registration: {}').format(protocol))

    # Reachability check: an unreachable plugin or a not-OK test response
    # both result in no registration (best-effort, so any exception is
    # logged rather than propagated).
    try:
        status = plugin_client.test()
        if not status.ok:
            logger.warning(_('gRPC Test response was not OK: {}').format(address))
            return None
    except Exception as e:
        logger.warning(_('Failed to reach plugin at address {}: {}').format(address, e))
        return None

    # Communication is established -- fetch the plugin's metainfo so the
    # Plugin model can be created with it.
    try:
        meta = plugin_client.metainfo()
    except Exception as e:
        logger.warning(_('Failed to get plugin metadata at address {}: {}').format(address, e))
        return None

    new_plugin = Plugin(
        metadata=meta,
        address=address,
        plugin_client=plugin_client
    )

    logger.debug(_('Registered new plugin: {}').format(new_plugin))
    return new_plugin.id()
Ejemplo n.º 4
0
async def _build_device_info_cache():
    """Construct the dictionary that will become the device info cache.

    Returns:
        tuple(dict, dict): A tuple where the first dictionary is the device info
            dictionary (in which the key is the device id and the value is the
            data associated with that device), and the second dictionary is the
            plugins dictionary (in which the device ID is mapped to the name of
            the plugin which manages it).

    Raises:
        errors.InternalApiError: All plugins failed the device scan.
    """
    logger.debug(_('Building the device cache'))
    devices = {}
    plugins = {}

    # Re-register plugins before rebuilding the cache. This guarantees all
    # possible plugins contribute device data; a plugin that was previously
    # down gets added back to tracking before the rebuild.
    # See: https://github.com/vapor-ware/synse-server/issues/317
    logger.debug(_('re-registering plugins prior to cache rebuild'))
    register_plugins()
    plugin_count = len(Plugin.manager.plugins)

    logger.debug(_('Plugins to scan: {}').format(plugin_count))

    # Plugins that failed to provide devices, keyed by plugin id.
    failures = {}

    # FIXME (etd): as of pylint 2.1.1, this gets marked with 'not-an-iterable'
    # even though the async iteration works fine; needs investigation.

    # Walk every known plugin and use its client to collect the device
    # information provided by that backend.
    async for plugin_id, plugin in get_plugins():  # pylint: disable=not-an-iterable
        logger.debug('{} -- {}'.format(plugin_id, plugin))

        try:
            for device in plugin.client.devices():
                key = utils.composite(
                    device.location.rack, device.location.board, device.uid)
                devices[key] = device
                plugins[key] = plugin_id

        # A single failing plugin must not fail the whole scan; record the
        # error and continue with the next plugin.
        #
        # FIXME (etd): instead of just logging out the errors, we could either:
        #   - update the response scheme to hold an 'errors' field which will alert
        #     the user of these partial non-fatal errors.
        #   - update the API to add a url to check the currently configured plugins
        #     and their 'health'/'state'.
        #   - both
        except grpc.RpcError as ex:
            failures[plugin_id] = ex
            logger.warning(
                _('Failed to get device info for plugin: {}').format(
                    plugin_id))
            logger.warning(ex)

    # Raise only when every plugin failed (and there was at least one),
    # since that likely indicates a misconfiguration.
    if plugin_count != 0 and plugin_count == len(failures):
        raise errors.InternalApiError(
            _('Failed to scan all plugins: {}').format(failures))

    return devices, plugins
Ejemplo n.º 5
0
async def _build_metainfo_cache():
    """Construct the dictionary that will become the metainfo cache.

    Returns:
        tuple(dict, dict): A tuple where the first dictionary is the metainfo
            dictionary (in which the key is the device id and the value is the
            data associated with that device), and the second dictionary is the
            plugins dictionary (in which the device ID is mapped to the name of
            the plugin which manages it).

    Raises:
        errors.InternalApiError: All plugins failed the metainfo scan.
    """
    logger.debug(_('Building the metainfo cache'))
    metainfo = {}
    plugins = {}

    # Ensure the plugin manager is populated before scanning; if it holds
    # no plugins yet, attempt registration first.
    plugin_count = len(Plugin.manager.plugins)
    if not plugin_count:
        logger.debug(_('Manager has no plugins - registering plugins'))
        register_plugins()
        plugin_count = len(Plugin.manager.plugins)

    logger.debug(_('Plugins to scan: {}').format(plugin_count))

    # Plugins that failed to provide metainfo, keyed by plugin name.
    failures = {}

    # Walk every known plugin and use its client to collect the meta
    # information provided by that backend.
    async for name, plugin in get_plugins():
        logger.debug('{} -- {}'.format(name, plugin))

        try:
            for device in plugin.client.metainfo():
                key = utils.composite(
                    device.location.rack, device.location.board, device.uid)
                metainfo[key] = device
                plugins[key] = name

        # A single failing plugin must not fail the whole scan; record the
        # error and continue with the next plugin.
        #
        # FIXME (etd): instead of just logging out the errors, we could either:
        #   - update the response scheme to hold an 'errors' field which will alert
        #     the user of these partial non-fatal errors.
        #   - update the API to add a url to check the currently configured plugins
        #     and their 'health'/'state'.
        #   - both
        except grpc.RpcError as ex:
            failures[name] = ex
            logger.warning(
                _('Failed to get metainfo for plugin: {}').format(name))
            logger.warning(ex)

    # Raise only when every plugin failed (and there was at least one),
    # since that likely indicates a misconfiguration.
    if plugin_count != 0 and plugin_count == len(failures):
        raise errors.InternalApiError(
            _('Failed to scan all plugins: {}').format(failures))

    return metainfo, plugins
Ejemplo n.º 6
0
async def read(rack, board, device):
    """The handler for the Synse Server "read" API command.

    Args:
        rack (str): The rack which the device resides on.
        board (str): The board which the device resides on.
        device (str): The device to read.

    Returns:
        ReadResponse: The "read" response scheme model.

    Raises:
        errors.PluginNotFoundError: The plugin that manages the device
            could not be found.
        errors.FailedReadCommandError: The gRPC read failed for any reason
            other than a NOT_FOUND / "no readings found" response.
    """
    logger.debug(
        _('Read Command (args: {}, {}, {})').format(rack, board, device))

    # Lookup the known info for the specified device.
    plugin_name, dev = await cache.get_device_info(rack, board, device)
    logger.debug(
        _('Device {} is managed by plugin {}').format(device, plugin_name))

    # Get the plugin context for the device's specified protocol.
    _plugin = plugin.get_plugin(plugin_name)
    logger.debug(_('Got plugin: {}').format(_plugin))
    if not _plugin:
        raise errors.PluginNotFoundError(
            _('Unable to find plugin named "{}" to read').format(plugin_name))

    try:
        # Perform a gRPC read on the device's managing plugin
        read_data = _plugin.client.read(rack, board, device)
    except grpc.RpcError as ex:

        # FIXME (etd) - this isn't the nicest way of doing this check.
        # this string is returned from the SDK, and its not likely to change
        # anytime soon, so this is "safe" for now, but we should see if there
        # is a better way to check this other than comparing strings..
        # NOTE(review): code()/details() are not guaranteed on every RpcError
        # instance, hence the hasattr guard before calling them.
        if hasattr(ex, 'code') and hasattr(ex, 'details'):
            if grpc.StatusCode.NOT_FOUND == ex.code(
            ) and 'no readings found' in ex.details().lower():

                # FIXME (etd) - with SDK v1.0, is the below correct? We should not longer
                # have to pass the "null" string. I think an empty string should also not
                # indicate no readings.. it should be the NOT_FOUND error (or, at least
                # some kind of error).

                # Currently, in the SDK, there are three different behaviors for
                # devices that do not have readings. Either (a). "null" is returned,
                # (b). an empty string ("") is returned, or (c). a gRPC error is
                # returned with the NOT_FOUND status code. Cases (a) and (b) are
                # handled in the ReadResponse initialization (below). This block
                # handles case (c).
                #
                # The reason for the difference between (a) and (b) is just one
                # of implementation. The empty string is the default value for the
                # gRPC read response, but sometimes it is useful to have an explict
                # value set to make things easier to read.
                #
                # The difference between those and (c) is more distinct. (c) should
                # only happen when a configured device is not being read from at all.
                # Essentially, (c) is the fallback for when device-specific handlers
                # fail to read a configured device.
                #
                # To summarize:
                #   (a), (b)
                #       A device is configured and the plugin's device handlers
                #       can operate on the device. This indicates that the plugin
                #       is working, but the device could be failing or disconnected.
                #
                #   (c)
                #       A device is configured, but the plugin's device handler
                #       can not (or is not) able to operate on the device. This
                #       could indicate either a plugin configuration error or
                #       an error with the plugin logic itself.

                # Create empty readings for each of the device's readings.
                logger.warning(
                    _('Read for {}/{}/{} returned gRPC "no readings found". Will '
                      'apply None as reading value. Note that this response might '
                      'indicate plugin error/misconfiguration.').format(
                          rack, board, device))
                read_data = []
                for output in dev.output:
                    read_data.append(
                        api.Reading(
                            timestamp=utils.rfc3339now(),
                            type=output.type,
                        ))
            else:
                # NOT_FOUND but not "no readings found" (or some other
                # status) -- surface it as a failed read, chaining the cause.
                raise errors.FailedReadCommandError(str(ex)) from ex
        else:
            # The error does not expose code()/details(), so we cannot
            # classify it -- surface it as a failed read.
            raise errors.FailedReadCommandError(str(ex)) from ex

    return ReadResponse(device=dev, readings=read_data)
Ejemplo n.º 7
0
def register_unix_plugins():
    """Register the plugins that use a unix socket for communication.

    Unix plugins can be configured in a variety of ways:
      1.) Listed in the configuration file under plugin.unix
      2.) Via environment variable
      3.) Automatically, by placing the socket in the default socket directory

    Here, we will parse the configurations and the default socket directory,
    add them to the PluginManager, and return a unified list of all known
    unix-configured plugins.

    Returns:
        list[str]: The names of all plugins that were registered.
    """
    logger.debug(_('Registering plugins (unix)'))

    manager = Plugin.manager

    # Track the names of the plugins that have been registered.
    registered = []

    # First, register any plugins that are specified in the Synse Server
    # configuration (file, env).
    configured = config.options.get('plugin.unix', {})
    logger.debug(_('Unix plugins in configuration: {}').format(configured))
    if configured:
        for name, path in configured.items():
            # If the user wants to use the default configuration directory,
            # they can specify something like
            #
            #   plugins:
            #       unix:
            #           plugin_name:
            #
            # This will give us a 'name' here of 'plugin_name' and a 'path'
            # of None.
            if path is None:
                path = const.SOCKET_DIR

            # Check for both 'plugin_name.sock' and 'plugin_name', logging
            # each candidate path as it is checked. (Previously the stale
            # '.sock' path was logged a second time here, which made the
            # debug output misleading.)
            sock_path = os.path.join(path, name + '.sock')
            logger.debug(_('Checking for socket: {}').format(sock_path))
            if not os.path.exists(sock_path):
                sock_path = os.path.join(path, name)
                logger.debug(_('Checking for socket: {}').format(sock_path))
                if not os.path.exists(sock_path):
                    logger.error(
                        _('Unable to find configured socket: {}[.sock]').
                        format(sock_path))
                    continue

            # Check that the file is a socket
            if not stat.S_ISSOCK(os.stat(sock_path).st_mode):
                logger.warning(
                    _('{} is not a socket - skipping').format(sock_path))
                continue

            # We have a plugin socket. If it already exists, there is nothing
            # to do; it is already registered. If it does not exist, we will
            # need to register it.
            if manager.get(name) is None:
                plugin = Plugin(name=name, address=sock_path, mode='unix')
                logger.debug(_('Created new plugin (unix): {}').format(plugin))
            else:
                logger.debug(
                    _('Unix Plugin "{}" already exists - will not re-register'
                      ).format(name))

            registered.append(name)

    # Now go through the default socket directory to pick up any other sockets
    # that may be set for automatic registration.
    if not os.path.exists(const.SOCKET_DIR):
        logger.debug(
            _('No default socket path found -- no plugins registered from {}').
            format(const.SOCKET_DIR))

    else:
        logger.debug(_('Registering plugins from default socket directory'))

        for item in os.listdir(const.SOCKET_DIR):
            logger.debug('  {}'.format(item))
            fqn = os.path.join(const.SOCKET_DIR, item)
            name, __ = os.path.splitext(item)  # pylint: disable=unused-variable

            # Check that the file is a socket
            if not stat.S_ISSOCK(os.stat(fqn).st_mode):
                logger.warning(_('{} is not a socket - skipping').format(fqn))
                continue

            # We have a plugin socket. If it already exists, there is nothing
            # to do; it is already registered. If it does not exist, we will
            # need to register it.
            if manager.get(name) is None:
                # A new plugin gets added to the manager on initialization.
                plugin = Plugin(name=name, address=fqn, mode='unix')
                logger.debug(_('Created new plugin (unix): {}').format(plugin))

                # Add the plugin to the Synse Server configuration. This will
                # allow a caller of the '/config' endpoint to see what plugins
                # are configured. Further, it can help with other processing that
                # might need the list of configured plugins.
                #
                # The value of `None` is used to indicate the default directory.
                config.options.set('plugin.unix.{}'.format(name), None)

            else:
                logger.debug(
                    _('Unix Plugin "{}" already exists - will not re-register'
                      ).format(name))

            registered.append(name)

    return list(set(registered))
Ejemplo n.º 8
0
    def format_readings(self):
        """Format the instance's readings to the read response scheme.

        Returns:
            dict: A properly formatted Read response.
        """
        logger.debug(_('Formatting read response'))
        formatted = {}

        for reading in self.readings:
            reading_type = reading.type

            # Resolve the device output definition for this reading type;
            # it carries the unit, precision, and data type metadata.
            out = next(
                (o for o in self.device.output if o.type == reading_type),
                None)

            # A reading whose type has no matching device output is not
            # supported -- log it and skip it rather than returning it.
            if out is None:
                logger.warning(
                    _('Found unexpected reading type "{}" for device {}').
                    format(reading_type, self.device))
                continue

            precision = out.precision
            data_type = out.data_type

            # Unit may legitimately be absent, e.g. for readings where a
            # unit makes no sense (LED on/off state).
            unit = None
            if out.unit.symbol or out.unit.name:
                unit = {'symbol': out.unit.symbol, 'name': out.unit.name}

            value = reading.value

            # A reading holds no data when the response value is the empty
            # string ("") or the string "null"; normalize both to None.
            if value in ('', 'null'):
                logger.info(
                    _('Reading value for {} came back as empty/null').format(
                        reading_type))
                value = None

            else:
                # Apply the configured precision, when given.
                if precision:
                    try:
                        value = str(round(float(value), precision))
                    except ValueError:
                        logger.warning(
                            _('Invalid value for {}: "{}"').format(
                                data_type, value))

                # Cast the value to its declared data type, defaulting to str.
                try:
                    value = self._data_types.get(data_type, str)(value)
                except ValueError:
                    logger.warning(
                        _('Failed to cast "{}" to {}').format(
                            value, data_type))

            formatted[reading_type] = {
                'value': value,
                'timestamp': reading.timestamp,
                'unit': unit
            }

        return formatted
Ejemplo n.º 9
0
async def fan_sensors():
    """The handler for the Synse Server "fan sensors" API command.

    Returns:
        dict: A dictionary of device readings for all fan sensors, keyed
            under 'racks' by rack id (sorted), with start/end/read times
            shimmed into each rack's entry.

    Raises:
        ValueError: Two sensors map to the same fan-sensor key on a rack,
            which would silently overwrite a reading.
    """
    # Auto fan uses the MAX11610 thermistors and SDP610 differential pressure
    # sensors. This isn't a *great* way of doing things since its hardcoded,
    # but this should be enough to get things in place and working in the short
    # term.
    #
    # In the long term, it would be good to expand read functionality in some
    # way such that we can do something like:
    #
    #   GET synse/2.0/read?type=temperature
    #
    # to read all the devices of a given type or model.

    start_time = datetime.datetime.now()
    _cache = await cache.get_metainfo_cache()
    scan_cache = await cache.get_scan_cache()

    readings = []
    new_readings = dict()
    new_readings['racks'] = OrderedDict()

    logger.debug('--- FAN SENSORS start ---')
    for _, v in _cache.items():

        logger.debug('FAN SENSORS')
        logger.debug('fan_sensors cache item: {}'.format(v))

        is_temp = v.type.lower() == 'temperature' and v.model.lower(
        ) == 'max11610'
        is_pressure = v.type.lower() == 'pressure' and v.model.lower(
        ) == 'sdp610'

        if is_temp or is_pressure:
            rack = v.location.rack  # string (vec1-c1-wrigley for example)
            board = v.location.board  # string (vec for example)
            device = v.uid  # string (uuid - only unique to one rack)

            # Find the device in the scan_cache. Any of these lookups may
            # come up empty (None) if the device is not in the scan cache.
            scan_cache_board = None
            scan_cache_device = None
            scan_cache_rack = next(
                (r for r in scan_cache['racks'] if r['id'] == rack), None)
            if scan_cache_rack is not None:
                scan_cache_board = next(
                    (b for b in scan_cache_rack['boards'] if b['id'] == board),
                    None)
                if scan_cache_board is not None:
                    scan_cache_device = next(
                        (d for d in scan_cache_board['devices']
                         if d['id'] == device), None)

            # BUGFIX: guard each lookup result -- previously this called
            # .get() unconditionally, raising AttributeError on None when
            # the rack/board/device was missing from the scan cache.
            logger.debug(
                'scan_cache_rack_id, board_id, device_info: {}, {}, {}'.format(
                    scan_cache_rack.get('id') if scan_cache_rack else None,
                    scan_cache_board.get('id') if scan_cache_board else None,
                    scan_cache_device.get('info') if scan_cache_device else None))

            try:
                resp = await read(rack, board, device)
            except Exception as e:
                # Best effort: a single unreadable sensor should not fail
                # the whole fan_sensors response.
                logger.warning(
                    'Failed to get reading for {}-{}-{} for fan_sensors {}.'.
                    format(rack, board, device, e))
            else:
                single_reading = resp.data  # Single sensor reading.
                logger.debug('fan_sensors data: {}.'.format(single_reading))
                # Wedge in the VEC name that we received this data from.
                # That way auto_fan can map the data to a VEC.
                single_reading['location'] = {
                    'rack': rack,
                    'board': board,
                    'device': device,
                }
                single_reading['scan_cache_device'] = scan_cache_device
                logger.debug(
                    'fan_sensors data with vec: {}.'.format(single_reading))
                readings.append(single_reading)

                # If the rack is not a key in new readings, add it.
                if rack not in new_readings['racks']:
                    new_readings['racks'][rack] = dict()

                # Translate single_reading['scan_cache_device']['info']
                # and add it under the rack key which is:
                # new_readings['racks'][rack][translation] \
                #     = single_reading['data'][single_reading['type']]['value']
                logger.debug(
                    'single_reading[scan_cache_device][info]: {}'.format(
                        single_reading['scan_cache_device']['info']))
                logger.debug(
                    'single_reading[data][single_reading[type]][value]: {}'.
                    format(single_reading['data'][single_reading['type']]
                           ['value']))

                # Add sensor reading to result set.
                fan_sensor_key = _translate_device_info(
                    single_reading['scan_cache_device']['info'])
                reading_value = single_reading['data'][
                    single_reading['type']]['value']
                if fan_sensor_key is not None and reading_value is not None:
                    # Be sure not to overwrite any existing reading in the current result set.
                    # That would imply a mapping issue or some other bug.
                    if fan_sensor_key in new_readings['racks'][rack]:
                        message = 'fan_sensors avoiding overwrite of existing reading [{}] at ' \
                                  'new_readings[racks][{}][{}] with [{}]'.format(
                                      new_readings['racks'][rack][fan_sensor_key],
                                      rack, fan_sensor_key, reading_value)
                        logger.error(message)
                        raise ValueError(message)
                    # No existing reading in the result set, safe to add it.
                    new_readings['racks'][rack][fan_sensor_key] = reading_value

    logger.debug('--- FAN SENSORS end ---')
    # Sort the new_readings racks by racks['id']
    new_readings['racks'] = OrderedDict(sorted(new_readings['racks'].items()))
    end_time = datetime.datetime.now()
    read_time = (end_time - start_time).total_seconds() * 1000

    # Shim in the start, end, read times into each response now that we have them.
    # Even though they are all the same, they didn't used to be and auto fan wants
    # each for logging purposes.
    for rack in new_readings['racks']:
        new_readings['racks'][rack]['start_time'] = str(start_time)
        new_readings['racks'][rack]['end_time'] = str(end_time)
        new_readings['racks'][rack]['read_time'] = read_time

    return new_readings