Example 1
def register_plugins():
    """Register all of the configured plugins.

    Plugins can either use a unix socket or TCP for communication. Unix
    socket based plugins will be detected from the presence of the socket
    file in a well-known directory, or via configuration. TCP based plugins
    will need to be made known to Synse Server via configuration.

    Upon initialization, the Plugin instances are automatically registered
    with the PluginManager.
    """
    # Register plugins from local config (file, env)
    unix = register_unix()
    tcp = register_tcp()

    # Get addresses of plugins to register via service discovery
    discovered = []
    addresses = kubernetes.discover()
    for address in addresses:
        plugin_id = register_plugin(address, 'tcp')
        if plugin_id is None:
            logger.error(_('Failed to register plugin with address: {}').format(address))
            continue
        discovered.append(plugin_id)

    diff = set(Plugin.manager.plugins) - set(unix + tcp + discovered)

    # Now that we have found all current plugins, we will want to clear out
    # any old plugins which may no longer be present.
    logger.debug(_('Plugins to purge from manager: {}').format(diff))
    Plugin.manager.purge(diff)

    logger.debug(_('Plugin registration complete'))
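For context, a minimal sketch of how register_plugins() might be driven from a background task, assuming the same module namespace as above; the wrapper name and the 30-second interval are illustrative, not taken from the source.

import asyncio

async def periodic_plugin_registration(interval=30):
    """Hypothetical loop that re-registers plugins on a fixed interval."""
    while True:
        try:
            register_plugins()
        except Exception as e:
            # A failed registration pass should not kill the loop; log and retry.
            logger.error('Plugin registration failed: {}'.format(e))
        await asyncio.sleep(interval)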
Example 2
    def err(request, exception):
        """Handler for a 500 and 400 error."""
        logger.error('Exception for request: {}'.format(request))
        logger.exception(exception)

        if hasattr(exception, 'error_id'):
            error_id = exception.error_id
        else:
            error_id = errors.UNKNOWN

        return _make_error(error_id, exception)
Example 3
    def err_404(request, exception):
        """Handler for a 404 error."""
        logger.error('Exception for request: {}'.format(request))
        logger.exception(exception)

        if hasattr(exception, 'error_id'):
            error_id = exception.error_id
        else:
            error_id = errors.URL_NOT_FOUND

        return _make_error(error_id, exception)
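These handlers follow the (request, exception) signature of Sanic error handlers. Assuming a Sanic application object and that the handlers are importable, wiring them up could look like the sketch below; the application name and the choice of exception classes are illustrative.

from sanic import Sanic
from sanic.exceptions import NotFound, ServerError

app = Sanic('synse-example')  # hypothetical application object

# Send 404s to the dedicated handler and everything else to the generic one.
app.error_handler.add(NotFound, err_404)
app.error_handler.add(ServerError, err)
app.error_handler.add(Exception, err)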
Example 4
async def periodic_cache_invalidation():
    """Periodically invalidate the caches so they are rebuilt."""
    interval = 3 * 60  # 3 minutes

    while True:
        await asyncio.sleep(interval)
        logger.info('task [periodic cache invalidation]: Clearing device caches')

        try:
            await clear_all_meta_caches()
        except Exception as e:
            logger.error(
                'task [periodic cache invalidation]: Failed to clear device caches, '
                'will try again in {}s: {}'
                .format(interval, e)
            )
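Because the coroutine loops forever, it needs to be scheduled as a background task rather than awaited inline. A minimal sketch with plain asyncio follows; the helper name is hypothetical and the real server would schedule this on its own event loop.

import asyncio

def schedule_cache_invalidation(loop=None):
    """Schedule the invalidation coroutine as a background task (sketch)."""
    loop = loop or asyncio.get_event_loop()
    # Keep a reference to the task; it runs for the lifetime of the server.
    return loop.create_task(periodic_cache_invalidation())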
Example 5
def _translate_device_info(device_info):
    """This translates the device info of the scan device to the field output
    in a fan sensors result set.

    Args:
        device_info (str): The info field of the device in a scan.

    Returns:
        str: The field output in a fan sensors result set.
        None: Unknown device info.
    """
    # Thermistors are numbered 0 through 11. The trailing space in the prefix
    # keeps 'Rack Temperature 1 ' from also matching 'Rack Temperature 10 ...'.
    for i in range(12):
        if device_info.startswith('Rack Temperature {} '.format(i)):
            return 'thermistor_{}'.format(i)

    if device_info == 'Rack Differential Pressure Bottom':
        return 'differential_pressure_0'
    if device_info == 'Rack Differential Pressure Middle':
        return 'differential_pressure_1'
    if device_info == 'Rack Differential Pressure Top':
        return 'differential_pressure_2'

    logger.error('Unknown device_info: {}'.format(device_info))
    return None
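A quick illustration of the mapping; only the prefixes and the exact differential-pressure strings are matched, so the trailing words in the temperature example below are made up.

assert _translate_device_info('Rack Temperature 3 Middle') == 'thermistor_3'
assert _translate_device_info('Rack Differential Pressure Top') == 'differential_pressure_2'
assert _translate_device_info('Fan Speed') is None  # unmatched: logged, returns None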
Example 6
def register_tcp():
    """Register the plugins that use TCP for communication.

    Returns:
        list[str]: The ids of all plugins that were registered.
    """
    registered = []

    configured = config.options.get('plugin.tcp', [])
    if not configured:
        logger.info(_('No plugin configurations for TCP'))
        return registered

    logger.debug(_('TCP plugin configuration: {}').format(configured))
    for address in configured:
        plugin_id = register_plugin(address, 'tcp')
        if plugin_id is None:
            logger.error(_('Failed to register plugin with address: {}').format(address))
            continue
        registered.append(plugin_id)

    logger.info('Registered tcp plugins: {}'.format(registered))
    return registered
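The plugin.tcp option is read as a flat list of addresses, so a configuration equivalent to the following would register two TCP plugins. The addresses are illustrative, and config.options.set is used here only to mirror what the file or environment configuration would produce.

# Hypothetical addresses; normally these come from the config file or env.
config.options.set('plugin.tcp', [
    'emulator-plugin:5001',
    '192.168.1.10:5002',
])
registered = register_tcp()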
Example 7
def register_unix():
    """Register the plugins that use a unix socket for communication.

    Unix plugins can be configured in a variety of ways:
      1.) Listed in the configuration file under plugin.unix
      2.) Via environment variable
      3.) Automatically, by placing the socket in the default socket directory

    Here, we will parse the configurations and the default socket directory,
    add them to the PluginManager, and return a unified list of all known
    unix-configured plugins.

    Returns:
        list[str]: The ids of all plugins that were registered.
    """
    registered = []

    configured = config.options.get('plugin.unix', [])
    if not configured:
        logger.info(_('No plugin configurations for unix'))

    logger.debug(_('unix plugin configuration: {}').format(configured))
    for address in configured:
        # The config here should be the path to the unix socket, which is our address.
        # First, check that the socket exists and that the address is a socket file.
        if not os.path.exists(address):
            logger.error(_('Socket {} not found').format(address))
            continue

        if not stat.S_ISSOCK(os.stat(address).st_mode):
            logger.error(_('{} is not a socket').format(address))
            continue

        plugin_id = register_plugin(address, 'unix')
        if plugin_id is None:
            logger.error(_('Failed to register plugin with address: {}').format(address))
            continue
        registered.append(plugin_id)

    # Now, go through the default socket directory and pick up any sockets that
    # may be set for automatic registration.
    if not os.path.exists(const.SOCKET_DIR):
        logger.debug(
            _('No default socket path found, no plugins will be registered from {}')
            .format(const.SOCKET_DIR)
        )
    else:
        logger.debug(
            _('Registering plugins from default socket directory ({})')
            .format(const.SOCKET_DIR)
        )

        for item in os.listdir(const.SOCKET_DIR):
            logger.debug('  {}'.format(item))
            address = os.path.join(const.SOCKET_DIR, item)

            # Check if the file is a socket
            if not stat.S_ISSOCK(os.stat(address).st_mode):
                logger.debug(_('{} is not a socket - skipping').format(address))
                continue

            plugin_id = register_plugin(address, 'unix')
            if plugin_id is None:
                logger.error(_('Failed to register plugin with address: {}').format(address))
                continue
            registered.append(plugin_id)

            # We want the plugins registered from this default directory to
            # be surfaced in the config, so we will add it there.
            if config.options.get('plugin.unix') is None:
                config.options.set('plugin.unix', [address])
            else:
                config.options.get('plugin.unix').append(address)

    logger.info('Registered unix plugins: {}'.format(registered))
    return registered
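Here plugin.unix is read as a list of socket file paths, and anything dropped into const.SOCKET_DIR is picked up with no configuration at all. A small sketch of an explicit configuration follows; the path is illustrative and the socket file must already exist, or that entry is skipped.

# Hypothetical socket path; the file must exist and be a socket.
config.options.set('plugin.unix', ['/tmp/synse/plugin/emulator.sock'])
registered = register_unix()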
Example 8
def register_unix_plugins():
    """Register the plugins that use a unix socket for communication.

    Unix plugins can be configured in a variety of ways:
      1.) Listed in the configuration file under plugin.unix
      2.) Via environment variable
      3.) Automatically, by placing the socket in the default socket directory

    Here, we will parse the configurations and the default socket directory,
    add them to the PluginManager, and return a unified list of all known
    unix-configured plugins.

    Returns:
        list[str]: The names of all plugins that were registered.
    """
    logger.debug(_('Registering plugins (unix)'))

    manager = Plugin.manager

    # Track the names of the plugins that have been registered.
    registered = []

    # First, register any plugins that are specified in the Synse Server
    # configuration (file, env).
    configured = config.options.get('plugin.unix', {})
    logger.debug(_('Unix plugins in configuration: {}').format(configured))
    if configured:
        for name, path in configured.items():
            # If the user wants to use the default configuration directory,
            # they can specify something like
            #
            #   plugins:
            #       unix:
            #           plugin_name:
            #
            # This will give us a 'name' here of 'plugin_name' and a 'path'
            # of None.
            if path is None:
                path = const.SOCKET_DIR

            # Check for both 'plugin_name' and 'plugin_name.sock'
            sock_path = os.path.join(path, name + '.sock')
            logger.debug(_('Checking for socket: {}').format(sock_path))
            if not os.path.exists(sock_path):
                sock_path = os.path.join(path, name)
                logger.debug(_('Checking for socket: {}').format(sock_path))
                if not os.path.exists(sock_path):
                    logger.error(
                        _('Unable to find configured socket: {}[.sock]').
                        format(sock_path))
                    continue

            # Check that the file is a socket
            if not stat.S_ISSOCK(os.stat(sock_path).st_mode):
                logger.warning(
                    _('{} is not a socket - skipping').format(sock_path))
                continue

            # We have a plugin socket. If it already exists, there is nothing
            # to do; it is already registered. If it does not exist, we will
            # need to register it.
            if manager.get(name) is None:
                plugin = Plugin(name=name, address=sock_path, mode='unix')
                logger.debug(_('Created new plugin (unix): {}').format(plugin))
            else:
                logger.debug(
                    _('Unix Plugin "{}" already exists - will not re-register'
                      ).format(name))

            registered.append(name)

    # Now go through the default socket directory to pick up any other sockets
    # that may be set for automatic registration.
    if not os.path.exists(const.SOCKET_DIR):
        logger.debug(
            _('No default socket path found -- no plugins registered from {}').
            format(const.SOCKET_DIR))

    else:
        logger.debug(_('Registering plugins from default socket directory'))

        for item in os.listdir(const.SOCKET_DIR):
            logger.debug('  {}'.format(item))
            fqn = os.path.join(const.SOCKET_DIR, item)
            name, __ = os.path.splitext(item)  # pylint: disable=unused-variable

            # Check that the file is a socket
            if not stat.S_ISSOCK(os.stat(fqn).st_mode):
                logger.warning(_('{} is not a socket - skipping').format(fqn))
                continue

            # We have a plugin socket. If it already exists, there is nothing
            # to do; it is already registered. If it does not exist, we will
            # need to register it.
            if manager.get(name) is None:
                # A new plugin gets added to the manager on initialization.
                plugin = Plugin(name=name, address=fqn, mode='unix')
                logger.debug(_('Created new plugin (unix): {}').format(plugin))

                # Add the plugin to the Synse Server configuration. This will
                # allow a caller of the '/config' endpoint to see what plugins
                # are configured. Further, it can help with other processing that
                # might need the list of configured plugins.
                #
                # The value of `None` is used to indicate the default directory.
                config.options.set('plugin.unix.{}'.format(name), None)

            else:
                logger.debug(
                    _('Unix Plugin "{}" already exists - will not re-register'
                      ).format(name))

            registered.append(name)

    return list(set(registered))
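Unlike register_unix above, this older variant reads plugin.unix as a mapping of plugin name to socket directory, where None means the default socket directory. A sketch of an equivalent configuration follows; the names and the directory are illustrative.

# Name -> directory mapping; None resolves to const.SOCKET_DIR. The plugin
# names and directory below are hypothetical.
config.options.set('plugin.unix', {
    'emulator': '/tmp/synse/plugin',  # checks emulator.sock, then emulator
    'autodetected': None,             # checked under the default socket dir
})
registered = register_unix_plugins()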
Example 9
async def write(rack, board, device, data):
    """The handler for the Synse Server "write" API command.

    Args:
        rack (str): The rack which the device resides on.
        board (str): The board which the device resides on.
        device (str): The device to write to.
        data (dict): The data to write to the device.

    Returns:
        WriteResponse: The "write" response scheme model.
    """
    logger.debug(
        _('Write Command (args: {}, {}, {}, data: {})').format(
            rack, board, device, data))

    # Lookup the known info for the specified device
    plugin_name, __ = await cache.get_device_info(rack, board, device)  # pylint: disable=unused-variable

    # Get the plugin context for the device's specified protocol
    _plugin = plugin.get_plugin(plugin_name)
    if not _plugin:
        raise errors.PluginNotFoundError(
            _('Unable to find plugin named "{}"').format(plugin_name))

    # The data comes in as the POSTed dictionary which includes an 'action'
    # and/or 'raw'/'data' field. Here, we convert it to the appropriate modeling for
    # transport to the plugin.
    write_action = data.get('action')
    if not isinstance(write_action, str):
        raise errors.InvalidArgumentsError(
            _('"action" value must be a string, but was {}').format(
                type(write_action)))

    # Get the data out. If the 'data' field is present, we will use it. Otherwise, we will
    # look for a 'raw' field, for backwards compatibility. If 'data' exists, 'raw' is ignored.
    write_data = data.get('data')
    if write_data is None:
        write_data = data.get('raw')

    if write_data is not None:
        # The write data must come in as a string; it is encoded to bytes
        # below for transport to the plugin.
        if not isinstance(write_data, str):
            raise errors.InvalidArgumentsError(
                _('"raw"/"data" value must be a string, but was {}').format(
                    type(write_data)))
        write_data = str.encode(write_data)

    wd = WriteData(action=write_action, data=write_data)
    logger.info(
        _('Writing to {}: {}').format('/'.join((rack, board, device)), wd))

    # Perform a gRPC write on the device's managing plugin
    try:
        t = _plugin.client.write(rack, board, device, [wd])
    except grpc.RpcError as ex:
        raise errors.FailedWriteCommandError(str(ex)) from ex

    # Now that we have the transaction info, we want to map it to the corresponding
    # process so any subsequent transaction check will know where to look.
    for _id, ctx in t.transactions.items():
        context = {'action': ctx.action, 'data': ctx.data}
        ok = await cache.add_transaction(_id, context, _plugin.id())
        if not ok:
            logger.error(
                _('Failed to add transaction {} to the cache').format(_id))

    return WriteResponse(transactions=t.transactions)
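The POSTed body is expected to carry an 'action' string and optionally a 'data' (or legacy 'raw') string. A minimal sketch of calling the handler from another coroutine follows; the rack, board, device, and payload values are made up.

async def example_write():
    # Hypothetical identifiers and payload.
    return await write(
        rack='rack-1',
        board='vec',
        device='29d1a03e8cddfbf1cf68e14e60e5f5cc',
        data={'action': 'color', 'data': 'ff00ff'},
    )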
Example 10
async def fan_sensors():
    """The handler for the Synse Server "fan sensors" API command.

    Returns:
        dict: A dictionary of device readings for all fan sensors.
    """
    # Auto fan uses the MAX11610 thermistors and SDP610 differential pressure
    # sensors. This isn't a *great* way of doing things since it's hardcoded,
    # but this should be enough to get things in place and working in the short
    # term.
    #
    # In the long term, it would be good to expand read functionality in some
    # way such that we can do something like:
    #
    #   GET synse/2.0/read?type=temperature
    #
    # to read all the devices of a given type or model.

    start_time = datetime.datetime.now()
    _cache = await cache.get_metainfo_cache()
    scan_cache = await cache.get_scan_cache()

    readings = []
    new_readings = dict()
    new_readings['racks'] = OrderedDict()

    logger.debug('--- FAN SENSORS start ---')
    for _, v in _cache.items():

        logger.debug('FAN SENSORS')
        logger.debug('fan_sensors cache item: {}'.format(v))

        is_temp = v.type.lower() == 'temperature' and v.model.lower() == 'max11610'
        is_pressure = v.type.lower() == 'pressure' and v.model.lower() == 'sdp610'

        if is_temp or is_pressure:
            rack = v.location.rack  # string (vec1-c1-wrigley for example)
            board = v.location.board  # string (vec for example)
            device = v.uid  # string (uuid - only unique to one rack)

            # Find the device in the scan_cache.
            scan_cache_board = None
            scan_cache_device = None
            scan_cache_rack = next(
                (r for r in scan_cache['racks'] if r['id'] == rack), None)
            if scan_cache_rack is not None:
                scan_cache_board = next(
                    (b for b in scan_cache_rack['boards'] if b['id'] == board),
                    None)
                if scan_cache_board is not None:
                    scan_cache_device = next(
                        (d for d in scan_cache_board['devices']
                         if d['id'] == device), None)
            logger.debug(
                'scan_cache_rack_id, board_id, device_info: {}, {}, {}'.format(
                    scan_cache_rack.get('id') if scan_cache_rack else None,
                    scan_cache_board.get('id') if scan_cache_board else None,
                    scan_cache_device.get('info') if scan_cache_device else None))

            try:
                resp = await read(rack, board, device)
            except Exception as e:
                logger.warning(
                    'Failed to get reading for {}-{}-{} for fan_sensors {}.'.
                    format(rack, board, device, e))
            else:
                single_reading = resp.data  # Single sensor reading.
                logger.debug('fan_sensors data: {}.'.format(single_reading))
                # Wedge in the VEC name that we received this data from.
                # That way auto_fan can map the data to a VEC.
                single_reading['location'] = {
                    'rack': rack,
                    'board': board,
                    'device': device,
                }
                single_reading['scan_cache_device'] = scan_cache_device
                logger.debug(
                    'fan_sensors data with vec: {}.'.format(single_reading))
                readings.append(single_reading)

                # If the rack is not a key in new readings, add it.
                if rack not in new_readings['racks']:
                    new_readings['racks'][rack] = dict()

                # Translate single_reading['scan_cache_device']['info']
                # and add it under the rack key which is:
                # new_readings['racks'][rack][translation] \
                #     = single_reading['data'][single_reading['type']]['value']
                logger.debug(
                    'single_reading[scan_cache_device][info]: {}'.format(
                        single_reading['scan_cache_device']['info']))
                logger.debug(
                    'single_reading[data][single_reading[type]][value]: {}'.
                    format(single_reading['data'][single_reading['type']]
                           ['value']))

                # Add sensor reading to result set.
                fan_sensor_key = _translate_device_info(
                    single_reading['scan_cache_device']['info'])
                reading_value = single_reading['data'][
                    single_reading['type']]['value']
                if fan_sensor_key is not None and reading_value is not None:
                    # Be sure not to overwrite any existing reading in the current result set.
                    # That would imply a mapping issue or some other bug.
                    if fan_sensor_key in new_readings['racks'][rack]:
                        message = 'fan_sensors avoiding overwrite of existing reading [{}] at ' \
                                  'new_readings[racks][{}][{}] with [{}]'.format(
                                      new_readings['racks'][rack][fan_sensor_key],
                                      rack, fan_sensor_key, reading_value)
                        logger.error(message)
                        raise ValueError(message)
                    # No existing reading in the result set, safe to add it.
                    new_readings['racks'][rack][fan_sensor_key] = reading_value

    logger.debug('--- FAN SENSORS end ---')
    # Sort the new_readings racks by racks['id']
    new_readings['racks'] = OrderedDict(sorted(new_readings['racks'].items()))
    end_time = datetime.datetime.now()
    read_time = (end_time - start_time).total_seconds() * 1000

    # Shim in the start, end, read times into each response now that we have them.
    # Even though they are all the same, they didn't use to be, and auto fan wants
    # each for logging purposes.
    for rack in new_readings['racks']:
        new_readings['racks'][rack]['start_time'] = str(start_time)
        new_readings['racks'][rack]['end_time'] = str(end_time)
        new_readings['racks'][rack]['read_time'] = read_time

    return new_readings
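The returned structure nests translated sensor keys under each rack and shims the timing fields in alongside them. Roughly, the result has the following shape; the rack name and values are illustrative.

# Illustrative shape of the fan_sensors() result.
example_result = {
    'racks': OrderedDict([
        ('vec1-c1-wrigley', {
            'thermistor_0': 22.5,
            'differential_pressure_1': 0.04,
            'start_time': '2018-01-01 12:00:00.000000',
            'end_time': '2018-01-01 12:00:00.250000',
            'read_time': 250.0,
        }),
    ]),
}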