Example 1
async def read_cached(start=None, end=None):
    """The handler for the Synse Server "readcached" API command.

    Args:
        start (str): An RFC3339 or RFC3339Nano formatted timestamp
            which defines a starting bound on the cache data to
            return. If no timestamp is specified, there will not
            be a starting bound. (default: None)
        end (str): An RFC3339 or RFC3339Nano formatted timestamp
            which defines an ending bound on the cache data to
            return. If no timestamp is specified, there will not
            be an ending bound. (default: None)

    Yields:
        ReadCachedResponse: The cached reading from the plugin.
    """
    start, end = start or '', end or ''
    logger.debug(
        _('Read Cached command (start: {}, end: {})').format(start, end))

    # If the plugins have not yet been registered, register them now.
    if len(plugin.Plugin.manager.plugins) == 0:
        logger.debug(_('Re-registering plugins'))
        plugin.register_plugins()

    # For each plugin, we'll want to request a dump of its readings cache.
    async for plugin_name, plugin_handler in plugin.get_plugins():  # pylint: disable=not-an-iterable
        logger.debug(
            _('Getting readings cache for plugin: {}').format(plugin_name))

        # Get the cached data from the plugin
        try:
            for reading in plugin_handler.client.read_cached(start, end):
                # If there is no reading, we're done iterating
                if reading is None:
                    return

                try:
                    __, device = await cache.get_device_info(  # pylint: disable=unused-variable
                        reading.rack, reading.board, reading.device)
                except errors.DeviceNotFoundError:
                    logger.info(
                        _('Did not find device {}-{}-{} locally. Skipping device; '
                          'server cache may be out of sync.').format(
                              reading.rack, reading.board, reading.device))
                    continue

                yield ReadCachedResponse(
                    device=device,
                    device_reading=reading,
                )

        except grpc.RpcError as ex:
            raise errors.FailedReadCachedCommandError(str(ex)) from ex
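
A minimal consumption sketch (not part of the source): the async generator above can be drained by a caller as shown below; the collect_cached_readings helper name is an assumption.

async def collect_cached_readings(start=None, end=None):
    """Collect all cached readings yielded by read_cached into a list."""
    readings = []
    async for response in read_cached(start=start, end=end):
        # Each item is a ReadCachedResponse pairing a device with its reading.
        readings.append(response)
    return readings
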
Example 2
async def periodic_cache_invalidation():
    """Periodically invalidate the caches so they are rebuilt."""
    interval = 3 * 60  # 3 minutes

    while True:
        await asyncio.sleep(interval)
        logger.info('task [periodic cache invalidation]: Clearing device caches')

        try:
            await clear_all_meta_caches()
        except Exception as e:
            logger.error(
                'task [periodic cache invalidation]: Failed to clear device caches, '
                'will try again in {}s: {}'
                .format(interval, e)
            )
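
A hedged sketch of how this coroutine might be scheduled at application startup; the loop/task wiring below is an assumption, not taken from the source.

import asyncio

def schedule_periodic_cache_invalidation(loop=None):
    """Schedule the cache invalidation loop as a background task."""
    loop = loop or asyncio.get_event_loop()
    # The task runs forever alongside normal request handling.
    return loop.create_task(periodic_cache_invalidation())
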
Example 3
    def make_channel(self):
        """Make the channel for the grpc client stub."""
        # If Synse Server is configured to communicate with the plugin using
        # TLS, set up a secure channel, otherwise use an insecure channel.
        # FIXME (etd) - we'll probably want to support using a CA here?
        if config.options.get('grpc.tls'):
            logger.info(_('TLS enabled for gRPC'))

            cert = config.options.get('grpc.tls.cert')
            logger.info(_('Using cert file: {}').format(cert))
            with open(cert, 'rb') as f:
                plugin_cert = f.read()

            creds = grpc.ssl_channel_credentials(root_certificates=plugin_cert)
            self.channel = grpc.secure_channel(self._fmt_address(), creds)
        else:
            self.channel = grpc.insecure_channel(self._fmt_address())
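
A self-contained sketch (not from the source) of the two channel modes this method chooses between, using only the grpc calls seen above; the address, flag, and certificate path are assumptions.

import grpc

address = 'localhost:5001'   # assumed plugin address
use_tls = True               # stands in for the 'grpc.tls' config check

if use_tls:
    with open('/synse/certs/plugin.crt', 'rb') as f:  # assumed cert path
        creds = grpc.ssl_channel_credentials(root_certificates=f.read())
    channel = grpc.secure_channel(address, creds)
else:
    channel = grpc.insecure_channel(address)
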
Example 4
def register_tcp():
    """Register the plugins that use TCP for communication.

    Returns:
        list[str]: The ids of all plugins that were registered.
    """
    registered = []

    configured = config.options.get('plugin.tcp', [])
    if not configured:
        logger.info(_('No plugin configurations for TCP'))
        return registered

    logger.debug(_('TCP plugin configuration: {}').format(configured))
    for address in configured:
        plugin_id = register_plugin(address, 'tcp')
        if plugin_id is None:
            logger.error(_('Failed to register plugin with address: {}').format(address))
            continue
        registered.append(plugin_id)

    logger.info('Registered tcp plugins: {}'.format(registered))
    return registered
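
Illustrative only: register_tcp() consumes a list of plugin addresses under the 'plugin.tcp' config key. The addresses below are made up, and setting them programmatically via config.options.set is just one way to provide them.

config.options.set('plugin.tcp', [
    'emulator-plugin:5001',   # assumed plugin address
    '192.168.1.10:5002',      # assumed plugin address
])

registered_ids = register_tcp()
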
Example 5
def make_app():
    """Create a new instance of the Synse Server Sanic application.

    This is the means by which all Synse Server applications should
    be created.

    Returns:
        Sanic: A Sanic application setup and configured to serve
            Synse Server routes.
    """
    app = Sanic(__name__, log_config=LOGGING)
    app.config.LOGO = None

    # Get the application configuration(s)
    config.options.add_config_paths('.', '/synse/config')
    config.options.env_prefix = 'SYNSE'
    config.options.auto_env = True

    config.options.parse(requires_cfg=False)
    config.options.validate()

    # Set up application logging
    setup_logger()

    # Set the language environment variable to that set in the config, if
    # it is not already set. This is how we specify the language/locale for
    # the application.
    # FIXME (etd): this isn't a great way of doing things, especially if Synse
    # Server is being run in a non-containerized environment.
    lang = os.environ.get('LANGUAGE')
    if lang:
        logger.info('LANGUAGE set from env: {}'.format(lang))
    else:
        lang = config.options.get('locale')
        logger.info('LANGUAGE set from config: {}'.format(lang))
        os.environ['LANGUAGE'] = lang

    # Register the blueprints
    app.blueprint(aliases.bp)
    app.blueprint(base.bp)
    app.blueprint(core.bp)

    _disable_favicon(app)
    _register_error_handling(app)

    configure_cache()

    logger.info('Synse Configuration: {}'.format(config.options.config))
    return app
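
A minimal sketch (not from the source) of serving the application this factory returns; the host and port values are assumptions.

if __name__ == '__main__':
    app = make_app()
    app.run(host='0.0.0.0', port=5000)
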
Example 6
def register_unix():
    """Register the plugins that use a unix socket for communication.

    Unix plugins can be configured in a variety of ways:
      1.) Listed in the configuration file under plugin.unix
      2.) Via environment variable
      3.) Automatically, by placing the socket in the default socket directory

    Here, we will parse the configurations and the default socket directory,
    add them to the PluginManager, and return a unified list of all known
    unix-configured plugins.

    Returns:
        list[str]: The ids of all plugins that were registered.
    """
    registered = []

    configured = config.options.get('plugin.unix', [])
    if not configured:
        logger.info(_('No plugin configurations for unix'))

    logger.debug(_('unix plugin configuration: {}').format(configured))
    for address in configured:
        # The config here should be the path to the unix socket, which is our address.
        # First, check that the socket exists and that the address is a socket file.
        if not os.path.exists(address):
            logger.error(_('Socket {} not found').format(address))
            continue

        if not stat.S_ISSOCK(os.stat(address).st_mode):
            logger.error(_('{} is not a socket').format(address))
            continue

        plugin_id = register_plugin(address, 'unix')
        if plugin_id is None:
            logger.error(_('Failed to register plugin with address: {}').format(address))
            continue
        registered.append(plugin_id)

    # Now, go through the default socket directory and pick up any sockets that
    # may be set for automatic registration.
    if not os.path.exists(const.SOCKET_DIR):
        logger.debug(
            _('No default socket path found, no plugins will be registered from {}')
            .format(const.SOCKET_DIR)
        )
    else:
        logger.debug(
            _('Registering plugins from default socket directory ({})')
            .format(const.SOCKET_DIR)
        )

        for item in os.listdir(const.SOCKET_DIR):
            logger.debug('  {}'.format(item))
            address = os.path.join(const.SOCKET_DIR, item)

            # Check if the file is a socket
            if not stat.S_ISSOCK(os.stat(address).st_mode):
                logger.debug(_('{} is not a socket - skipping').format(address))
                continue

            plugin_id = register_plugin(address, 'unix')
            if plugin_id is None:
                logger.error(_('Failed to register plugin with address: {}').format(address))
                continue
            registered.append(plugin_id)

            # We want the plugins registered from this default directory to
            # be surfaced in the config, so we will add it there.
            if config.options.get('plugin.unix') is None:
                config.options.set('plugin.unix', [address])
            else:
                config.options.get('plugin.unix').append(address)

    logger.info('Registered unix plugins: {}'.format(registered))
    return registered
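
A hedged sketch of the "automatic" registration path described in the docstring: a plugin that binds a unix socket inside the default socket directory is picked up without explicit configuration. The socket name below is an assumption, and the directory is expected to already exist.

import os
import socket

sock_path = os.path.join(const.SOCKET_DIR, 'emulator.sock')  # assumed socket name
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(sock_path)  # creates the socket file that register_unix() will discover

registered_ids = register_unix()
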
Example 7
    def format_readings(self):
        """Format the instance's readings to the read response scheme.

        Returns:
            dict: A properly formatted Read response.
        """
        logger.debug(_('Formatting read response'))
        formatted = {}

        dev_output = self.device.output
        for reading in self.readings:
            rt = reading.type

            # These fields may not be specified, e.g. in cases where a reading
            # unit would not make sense, such as LED state (on/off)
            unit = None
            precision = None
            data_type = None

            found = False
            for out in dev_output:
                if out.type == rt:
                    symbol = out.unit.symbol
                    name = out.unit.name
                    precision = out.precision
                    data_type = out.data_type

                    if symbol or name:
                        unit = {'symbol': symbol, 'name': name}

                    found = True
                    break

            # If the reading type does not match the supported types, we will not
            # return it and will just skip over it.
            if not found:
                logger.warning(
                    _('Found unexpected reading type "{}" for device {}')
                    .format(rt, self.device))
                continue

            value = reading.value

            # Handle cases where no data was read. Currently, we consider the reading
            # to have no data if:
            #   - the ReadResponse value comes back as an empty string (e.g. "")
            #   - the ReadResponse value comes back as the string "null".
            if value == '' or value == 'null':
                logger.info(
                    _('Reading value for {} came back as empty/null').format(
                        rt))
                value = None

            else:
                # Set the specified precision
                if precision:
                    try:
                        value = str(round(float(value), precision))
                    except ValueError:
                        logger.warning(
                            _('Invalid value for {}: "{}"').format(
                                data_type, value))

                # Cast to the specified type
                try:
                    value = self._data_types.get(data_type, str)(value)
                except ValueError:
                    logger.warning(
                        _('Failed to cast "{}" to {}').format(
                            value, data_type))

            formatted[rt] = {
                'value': value,
                'timestamp': reading.timestamp,
                'unit': unit
            }

        return formatted
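
Illustrative only: the shape of the dictionary format_readings() returns for a hypothetical temperature reading; all values below are made up.

example_formatted = {
    'temperature': {
        'value': 20.3,
        'timestamp': '2018-06-28T12:59:47.625842798Z',
        'unit': {
            'symbol': 'C',
            'name': 'celsius',
        },
    },
}
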
Example 8
async def write(rack, board, device, data):
    """The handler for the Synse Server "write" API command.

    Args:
        rack (str): The rack which the device resides on.
        board (str): The board which the device resides on.
        device (str): The device to write to.
        data (dict): The data to write to the device.

    Returns:
        WriteResponse: The "write" response scheme model.
    """
    logger.debug(
        _('Write Command (args: {}, {}, {}, data: {})').format(
            rack, board, device, data))

    # Lookup the known info for the specified device
    plugin_name, __ = await cache.get_device_info(rack, board, device)  # pylint: disable=unused-variable

    # Get the plugin context for the device's specified protocol
    _plugin = plugin.get_plugin(plugin_name)
    if not _plugin:
        raise errors.PluginNotFoundError(
            _('Unable to find plugin named "{}"').format(plugin_name))

    # The data comes in as the POSTed dictionary which includes an 'action'
    # and/or 'raw'/'data' field. Here, we convert it to the appropriate modeling for
    # transport to the plugin.
    write_action = data.get('action')
    if not isinstance(write_action, str):
        raise errors.InvalidArgumentsError(
            _('"action" value must be a string, but was {}').format(
                type(write_action)))

    # Get the data out. If the 'data' field is present, we will use it. Otherwise, we will
    # look for a 'raw' field, for backwards compatibility. If 'data' exists, 'raw' is ignored.
    write_data = data.get('data')
    if write_data is None:
        write_data = data.get('raw')

    if write_data is not None:
        # The write data comes in as a string; validate that here and encode
        # it to bytes below for transport to the plugin.
        if not isinstance(write_data, str):
            raise errors.InvalidArgumentsError(
                _('"raw"/"data" value must be a string, but was {}').format(
                    type(write_data)))
        write_data = str.encode(write_data)

    wd = WriteData(action=write_action, data=write_data)
    logger.info(
        _('Writing to {}: {}').format('/'.join((rack, board, device)), wd))

    # Perform a gRPC write on the device's managing plugin
    try:
        t = _plugin.client.write(rack, board, device, [wd])
    except grpc.RpcError as ex:
        raise errors.FailedWriteCommandError(str(ex)) from ex

    # Now that we have the transaction info, we want to map it to the corresponding
    # process so any subsequent transaction check will know where to look.
    for _id, ctx in t.transactions.items():
        context = {'action': ctx.action, 'data': ctx.data}
        ok = await cache.add_transaction(_id, context, _plugin.id())
        if not ok:
            logger.error(
                _('Failed to add transaction {} to the cache').format(_id))

    return WriteResponse(transactions=t.transactions)
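
Illustrative only: the kind of POSTed payload this handler expects and how it might be invoked; the device coordinates and the color action are made-up values.

payload = {
    'action': 'color',   # assumed action supported by the target device
    'data': 'f38ac2',    # assumed hex color string; encoded to bytes by the handler
}

# e.g., awaited from within a route handler:
#   response = await write('rack-1', 'vec', 'eb100067', payload)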