def test_back_to_back(self):
    a, b = socket.socketpair()
    self.assertGreaterEqual(a.fileno(), 0)
    self.assertGreaterEqual(b.fileno(), 0)

    c1 = Client()
    c1.standalone_server = True
    c1.enable_server()
    c1.register_service('test', TestService())
    c1.connect('fd://{0}'.format(a.fileno()))
    self.assertTrue(c1.connected)

    c2 = Client()
    c2.connect('fd://{0}'.format(b.fileno()))
    self.assertTrue(c2.connected)
    self.assertEqual(c2.call_sync('test.hello', 'freenas'), 'Hello World, freenas')

    c2.disconnect()
    a.close()
    c1.disconnect()
    b.close()
def setup_back_to_back(self, streaming=False):
    a, b = socket.socketpair()
    self.assertGreaterEqual(a.fileno(), 0)
    self.assertGreaterEqual(b.fileno(), 0)

    c1 = Client()
    c1._s = a
    c1.enable_server()
    c1.standalone_server = True
    if streaming:
        c1.streaming = True
        c1.rpc.streaming_enabled = True

    c1.register_service('test', TestService())
    c1.connect('fd://{0}'.format(a.fileno()))
    self.assertTrue(c1.connected)

    c2 = Client()
    c2._s = b
    c2.streaming = True
    c2.connect('fd://{0}'.format(b.fileno()))
    self.assertTrue(c2.connected)
    return c1, c2
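# A minimal usage sketch (not part of the original suite) showing how a test could build on the
# setup_back_to_back() helper above. The 'test.hello' RPC and the Client call_sync()/disconnect()
# API come from the surrounding tests; the method name and the streaming flag here are
# illustrative assumptions.
def test_back_to_back_helper_sketch(self):
    c1, c2 = self.setup_back_to_back(streaming=True)
    self.assertEqual(c2.call_sync('test.hello', 'freenas'), 'Hello World, freenas')
    c2.disconnect()
    c1.disconnect()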
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1
        self.connection_id = None
        self.jobs = []
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None
        self.connected_at = None
        self.cv = Condition()
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None

    def start(self, configpath, sockpath):
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance('control', ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name='server thread', daemon=True).start()

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        while True:
            try:
                self.middleware_endpoint.connect('unix:')
                self.middleware_endpoint.login_service('debugd')
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service('debugd.management', ControlService(self))
                self.middleware_endpoint.resume_service('debugd.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal('Cannot open config file: {0}'.format(str(err)))
            self.logger.fatal('Exiting.')
            sys.exit(1)

    def connect(self, discard=False):
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info('Connecting to {0}'.format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect('fd://', fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service('debug', DebugService(self))
                self.client.call_sync('server.login', str(self.connection_id), socket.gethostname(), get_version(), 'none')
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                self.logger.warning('Failed to initiate support connection: {0}'.format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning('Support connection lost, retrying in 10 seconds')
            time.sleep(10)

    def disconnect(self):
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        with self.cv:
            self.state = state
            self.cv.notify_all()
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('neighbord')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')
        setproctitle.setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.client.wait_forever()
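# A minimal sketch of a neighbord discovery plugin as loaded by scan_plugin_dir() above. The _init()
# hook, register_plugin() and the register(regtype, name, port, properties) signature mirror the
# calls in the surrounding code; the MDNSPlugin name and its body are illustrative assumptions.
class MDNSPlugin(object):
    def __init__(self, context):
        self.context = context
        self.logger = logging.getLogger('MDNSPlugin')

    def register(self, regtype, name, port, properties=None):
        # A real plugin would announce the service on the network here
        self.logger.info('Announcing %s (%s) on port %d', name, regtype, port)


def _init(context):
    context.register_plugin('mdns', MDNSPlugin)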
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None
        self.configfile = None
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.renderers = {}
        self.managed_files = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)
                if name in self.managed_files.keys():
                    continue

                if ext in TEMPLATE_RENDERERS.keys():
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        if file_path not in self.managed_files.keys():
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers.keys():
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            self.logger.warn('Cannot generate file {0}: {1}'.format(file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc', help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')
        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.emitters = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management', ManagementService(self))
                self.client.register_service('alertd.alert', AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def emit_alert(self, alert):
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))
        for i in self.datastore.query('alert.filters'):
            for predicate in i.get('predicates', []):
                if predicate['operator'] not in operators_table:
                    continue

                if not operators_table[predicate['operator']](alert[predicate['property']], predicate['value']):
                    break
            else:
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning('Invalid emitter {0} for alert filter {1}'.format(i['emitter'], i['id']))
                        continue

                    self.logger.debug('Alert <id:{0}> matched filter {1}'.format(alert['id'], i['id']))
                    if alert['send_count'] > 0:
                        emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error('Cannot emit alert <id:{0}> using {1}: {2}'.format(
                        alert['id'], i['emitter'], str(err))
                    )

        alert['send_count'] += 1
        alert['last_emitted_at'] = datetime.utcnow()

        if alert['one_shot']:
            alert['active'] = False

        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))
        alert.update({
            'active': False,
            'cancelled': datetime.utcnow()
        })
        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(name, cls))

    def reminder_thread(self):
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]
                if not interval:
                    continue

                if last_emission + timedelta(seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/alertd.log', 'DEBUG')
        setproctitle('alertd')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        self.client.wait_forever()
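# A minimal sketch of an alertd emitter plugin. register_emitter() and the emit_first()/emit_again()
# interface used by emit_alert() above come from the surrounding code; the EmailEmitter name and its
# method bodies are illustrative assumptions.
class EmailEmitter(object):
    def __init__(self, context):
        self.context = context
        self.logger = logging.getLogger('EmailEmitter')

    def emit_first(self, alert, parameters):
        # Called for the first notification of an alert (send_count == 0)
        self.logger.info('Would deliver alert <id:{0}> with parameters {1}'.format(alert['id'], parameters))

    def emit_again(self, alert, parameters):
        # Called for repeated notifications of an already-sent alert
        self.emit_first(alert, parameters)


def _init(context):
    context.register_emitter('email', EmailEmitter)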
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server()
                self.client.register_service('dscached.account', AccountService(self))
                self.client.register_service('dscached.group', GroupService(self))
                self.client.register_service('dscached.debug', DebugService())
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')
        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.client.wait_forever()
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('neighbord')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.plugins = {}

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')
        setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.checkin()
        self.client.wait_forever()
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.emitters = {}

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management', ManagementService(self))
                self.client.register_service('alertd.alert', AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def emit_alert(self, alert):
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(alert['id'], alert['clazz']))
        for i in self.datastore.query(
            'alert.filters',
            ('or', [('clazz', '=', None), ('clazz', '=', alert['clazz'])])
        ):
            for pr in i.get('predicates', []):
                if pr['operator'] not in operators_table:
                    continue

                try:
                    if not operators_table[pr['operator']](alert.properties.get(pr['property']), pr['value']):
                        break
                except:
                    continue
            else:
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning('Invalid emitter {0} for alert filter {1}'.format(i['emitter'], i['id']))
                        continue

                    self.logger.debug('Alert <id:{0}> matched filter {1}'.format(alert['id'], i['id']))
                    if alert['send_count'] > 0:
                        if not alert['one_shot']:
                            emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error('Cannot emit alert <id:{0}> using {1}: {2}'.format(
                        alert['id'], i['emitter'], str(err)))

        alert['send_count'] += 1
        alert['last_emitted_at'] = datetime.utcnow()
        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(alert['id'], alert['clazz']))
        alert.update({'active': False, 'cancelled': datetime.utcnow()})
        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(name, cls))

    def reminder_thread(self):
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]
                if not interval:
                    continue

                if last_emission + timedelta(seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('alertd', 'DEBUG')
        setproctitle('alertd')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        self.client.wait_forever()
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        self.active_tasks = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(jobstores={'default': store, 'temp': MemoryJobStore()}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': '******',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.checkin()
        self.client.wait_forever()
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('clid')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.ml = None
        self.context = None

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('clid', 'DEBUG')
        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        self.active_tasks = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def register_schemas(self):
        self.client.register_schema('calendar-task', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {'type': 'string'},
                'name': {'type': 'string'},
                'args': {'type': 'array'},
                'description': {'type': 'string'},
                'enabled': {'type': 'boolean'},
                'hidden': {'type': 'boolean'},
                'protected': {'type': 'boolean'},
                'status': {'$ref': 'calendar-task-status'},
                'schedule': {
                    'type': 'object',
                    'additionalProperties': False,
                    'properties': {
                        'coalesce': {'type': ['boolean', 'integer', 'null']},
                        'year': {'type': ['string', 'integer', 'null']},
                        'month': {'type': ['string', 'integer', 'null']},
                        'day': {'type': ['string', 'integer', 'null']},
                        'week': {'type': ['string', 'integer', 'null']},
                        'day_of_week': {'type': ['string', 'integer', 'null']},
                        'hour': {'type': ['string', 'integer', 'null']},
                        'minute': {'type': ['string', 'integer', 'null']},
                        'second': {'type': ['string', 'integer', 'null']},
                        'timezone': {'type': ['string', 'null']}
                    }
                }
            }
        })

        self.client.register_schema('calendar-task-status', {
            'type': 'object',
            'properties': {
                'next_run_time': {'type': 'string'},
                'last_run_status': {'type': 'string'},
                'current_run_status': {'type': ['string', 'null']},
                'current_run_progress': {'type': ['object', 'null']}
            }
        })

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        tid = self.client.submit_task(*args)
        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alerts.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(kwargs['name'], result['error']['message']),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.register_schemas()
        self.client.wait_forever()
class Context(object):
    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        self.active_tasks = {}

    def init_datastore(self):
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': '******',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.client.wait_forever()
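# A minimal sketch (assumption, not part of the original file) of how a calendar task could be
# registered with the BackgroundScheduler built in init_scheduler() above, using the standard
# APScheduler add_job() API. run_job() receives the dispatcher task name and its arguments
# positionally and the job id/name through kwargs, matching its use of kwargs['id'] and
# kwargs.get('name'); the task name, arguments and schedule below are illustrative.
def schedule_job_sketch(context):
    context.scheduler.add_job(
        context.run_job,
        trigger='cron',
        args=['volume.scrub', 'tank'],
        kwargs={'id': 'scrub-tank', 'name': 'Scrub tank'},
        id='scrub-tank',
        name='Scrub tank',
        hour=3,
        minute=0
    )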
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('clid')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.plugin_dirs = []
        self.ml = None
        self.context = None

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/clid.log', 'DEBUG')
        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()