class Main(object):
    """etcd daemon entry point.

    Renders managed configuration-file templates on demand.  Owns the
    datastore/configstore handles, the dispatcher RPC client and the map of
    managed template files discovered under the configured plugin dirs.
    """

    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None            # /etc mount point (from CLI)
        self.configfile = None      # path of the middleware config file
        self.config = None          # parsed JSON configuration
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher RPC client
        self.plugin_dirs = []       # template search roots from the config
        self.renderers = {}         # template extension -> renderer instance
        self.managed_files = {}     # relative name (sans ext) -> template path

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit on failure."""
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        """Connect to the dispatcher and register etcd services.

        Retries forever with a one second delay; returns once connected.
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        """Instantiate one renderer per registered template extension."""
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        """Load the JSON config file and remember the plugin directories.

        Exits the process if the file is unreadable or not valid JSON.
        """
        try:
            # Context manager guarantees the file is closed even when
            # json.load() raises.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # BUGFIX: exceptions have no .message attribute on Python 3;
            # str(err) works everywhere.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error(
                'Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        """Scan every configured plugin directory for template files."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Register every renderable template found under ``dir``."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)
                if name in self.managed_files:
                    # First match wins; don't overwrite an earlier plugin dir.
                    continue

                if ext in TEMPLATE_RENDERERS:
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        """Render the template registered for ``file_path`` and return the text.

        Raises RpcException(ENOENT) for unknown files.  A renderer failure is
        logged and a placeholder comment is returned instead of propagating.
        """
        if file_path not in self.managed_files:
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers:
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            # logger.warn() is a deprecated alias; use warning().
            self.logger.warning('Cannot generate file {0}: {1}'.format(
                file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Parse CLI arguments, bootstrap all subsystems and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc', help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')
        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
class Main(object):
    """alertd daemon entry point.

    Matches emitted alerts against stored alert filters and dispatches them
    through registered emitter plugins, re-emitting unacknowledged alerts on
    a severity-based reminder schedule.
    """

    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None          # path of the middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher RPC client
        self.plugin_dirs = []       # emitter plugin search roots
        self.emitters = {}          # emitter name -> emitter instance

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        """Start the daemon thread that periodically re-emits active alerts."""
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        """Load the JSON config file and remember the plugin directories.

        Exits the process if the file is unreadable or not valid JSON.
        """
        try:
            # Context manager closes the file even when json.load() raises.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # BUGFIX: exceptions have no .message attribute on Python 3;
            # str(err) works everywhere.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher and register alertd services.

        Retries forever with a one second delay; returns once connected.
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management', ManagementService(self))
                self.client.register_service('alertd.alert', AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory for emitter plugins."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every ``.py`` plugin in ``dir`` and run its ``_init`` hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                # Deliberately catch everything (was a bare ``except:``):
                # one broken plugin must not stop the scan.  The failure is
                # still logged with a full traceback.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def emit_alert(self, alert):
        """Dispatch ``alert`` through every matching filter's emitter.

        A filter matches when all of its predicates hold (predicates with
        unknown operators are ignored).  Bumps ``send_count``, timestamps the
        emission and persists the alert afterwards.
        """
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))
        for i in self.datastore.query('alert.filters'):
            for predicate in i.get('predicates', []):
                if predicate['operator'] not in operators_table:
                    continue

                if not operators_table[predicate['operator']](alert[predicate['property']], predicate['value']):
                    break
            else:
                # for/else: reached only when no predicate failed.
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning('Invalid emitter {0} for alert filter {1}'.format(i['emitter'], i['id']))
                        continue

                    self.logger.debug('Alert <id:{0}> matched filter {1}'.format(alert['id'], i['id']))
                    if alert['send_count'] > 0:
                        emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error('Cannot emit alert <id:{0}> using {1}: {2}'.format(
                        alert['id'], i['emitter'], str(err))
                    )

        alert['send_count'] += 1
        alert['last_emitted_at'] = datetime.utcnow()

        if alert['one_shot']:
            alert['active'] = False

        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        """Mark ``alert`` inactive/cancelled and persist it."""
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(alert['id'], alert['class']))

        alert.update({
            'active': False,
            'cancelled': datetime.utcnow()
        })

        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        """Instantiate emitter class ``cls`` and register it under ``name``."""
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(name, cls))

    def reminder_thread(self):
        """Periodically re-emit active, undismissed alerts past their interval."""
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]

                if not interval:
                    continue

                if last_emission + timedelta(seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        """Notify the service manager that startup completed."""
        checkin()

    def main(self):
        """Parse CLI arguments, bootstrap all subsystems and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/alertd.log', 'DEBUG')

        setproctitle('alertd')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        self.client.wait_forever()
class Main:
    """networkd daemon entry point (dhclient-based variant).

    Tracks physical network interfaces in the datastore, configures DHCP via
    an external dhclient process, and exposes configuration services over the
    dispatcher.
    """

    def __init__(self):
        self.config = None          # parsed JSON configuration
        self.client = None          # dispatcher RPC client
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None   # routing socket event source
        self.logger = logging.getLogger('networkd')

    def configure_dhcp(self, interface):
        """Start dhclient on ``interface``; return True on success.

        A pre-existing pid file is taken to mean dhclient already runs there.
        """
        # Check if dhclient is running
        if os.path.exists(os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))):
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        # XXX: start dhclient through launchd in the future
        ret = subprocess.call(['/sbin/dhclient', interface])
        return ret == 0

    def interface_detached(self, name):
        """Log removal of an interface (logger.warn alias is deprecated)."""
        self.logger.warning('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        """Log arrival of an interface."""
        self.logger.warning('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        """Return True if any DHCP interface should also assign the gateway."""
        for i in self.datastore.query('network.interfaces'):
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                return True

        return False

    def scan_interfaces(self):
        """Sync the datastore with the set of physical NICs present."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in list(netif.list_interfaces().values()):
            # We want only physical NICs
            if i.cloned:
                continue

            existing.append(i.name)
            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'type': i.type.name
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing)):
            self.datastore.delete('network.interfaces', i['id'])

    def parse_config(self, filename):
        """Load the JSON config file; exit on unreadable or malformed input."""
        try:
            # Context manager closes the file even when json.load() raises.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # BUGFIX: exceptions have no .message attribute on Python 3;
            # str(err) works everywhere.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

    def init_datastore(self):
        """Open the datastore per the config and wrap it in a ConfigStore."""
        try:
            self.datastore = get_datastore(self.config['datastore']['driver'], self.config['datastore']['dsn'])
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect to the dispatcher and register networkd services.

        Retries forever; ``resume`` additionally resumes services after a
        reconnect.
        """
        while True:
            try:
                self.client.connect('127.0.0.1')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except socket.error as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        """Start the routing-socket event source thread."""
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Register JSON schemas describing interfaces with the dispatcher."""
        self.client.register_schema('network-aggregation-protocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('network-interface-flags', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceFlags.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-capabilities', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceCapability.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-mediaopts', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceMediaOption.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-type', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('network-interface-status', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {'type': 'string'},
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                # BUGFIX: referenced 'network-interface-media-options', but
                # the schema registered above is 'network-interface-mediaopts'.
                'media_options': {'$ref': 'network-interface-mediaopts'},
                'capabilities': {'$ref': 'network-interface-capabilities'},
                'flags': {'$ref': 'network-interface-flags'},
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-alias'}
                }
            }
        })

    def main(self):
        """Parse CLI arguments, bootstrap all subsystems and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/networkd.log', 'DEBUG')
        setproctitle.setproctitle('networkd')
        self.parse_config(args.c)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.logger.info('Started')
        self.client.wait_forever()
class Main(object):
    """networkd daemon entry point (built-in DHCP client variant).

    Manages per-interface DHCP client instances, keeps the interface table in
    the datastore in sync with the system, and exposes configuration services
    over the dispatcher.
    """

    def __init__(self):
        self.config = None              # path of the middleware config file
        self.client = None              # dispatcher RPC client
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None       # routing socket event source
        self.dhcp_clients = {}          # interface name -> dhcp client
        self.dhcp_lock = threading.RLock()
        self.configure_network = None   # set externally before main() runs
        self.logger = logging.getLogger('networkd')
        self.default_interface = None
        self.cv = Condition()

    def dhclient_pid(self, interface):
        """Return the pid recorded in dhclient's pid file, or None."""
        path = os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))
        if not os.path.exists(path):
            return None

        try:
            with open(path) as f:
                pid = int(f.read().strip())
                return pid
        except (IOError, ValueError):
            # Unreadable or garbage pid file counts as "not running".
            return None

    def dhclient_running(self, interface):
        """Return True if a dhclient process for ``interface`` is alive."""
        pid = self.dhclient_pid(interface)
        if not pid:
            return False

        try:
            # Signal 0 only checks for process existence.
            os.kill(pid, 0)
            return True
        except OSError:
            return False

    def configure_dhcp(self, interface, block=False, timeout=None):
        """Start a DHCP client on ``interface``.

        When ``block`` is True, wait up to ``timeout`` for a lease and return
        whether one was obtained; otherwise return True immediately after the
        client is started.  A second call for an already-configured interface
        is a no-op returning True.
        """
        if interface in self.dhcp_clients:
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        def bind(old_lease, lease):
            # Called by the DHCP client whenever a lease is acquired/renewed.
            # BUGFIX: the format string has four placeholders; the original
            # passed a stray fifth argument (interface) which was ignored.
            self.logger.info('{0} DHCP lease on {1} from {2}, valid for {3} seconds'.format(
                'Renewed' if old_lease else 'Acquired',
                interface,
                client.server_address,
                lease.lifetime
            ))

            if old_lease is None or lease.client_ip != old_lease.client_ip:
                self.logger.info('Assigning IP address {0} to interface {1}'.format(lease.client_ip, interface))
                alias = lease.client_interface
                iface = netif.get_interface(interface)

                if old_lease:
                    # Drop the address granted by the previous lease first.
                    try:
                        addr = first_or_default(lambda a: a.address == old_lease.client_ip, iface.addresses)
                        if addr:
                            iface.remove_address(addr)
                    except OSError as err:
                        self.logger.error('Cannot remove alias {0}: {1}'.format(old_lease.client_ip, err.strerror))

                try:
                    iface.add_address(netif.InterfaceAddress(netif.AddressFamily.INET, alias))
                except OSError as err:
                    self.logger.error('Cannot add alias to {0}: {1}'.format(interface, err.strerror))

            if lease.router and self.configstore.get('network.dhcp.assign_gateway'):
                try:
                    rtable = netif.RoutingTable()
                    newroute = default_route(lease.router)
                    if rtable.default_route_ipv4 != newroute:
                        if rtable.default_route_ipv4:
                            self.logger.info('DHCP default route changed from {0} to {1}'.format(
                                rtable.default_route_ipv4, newroute
                            ))
                            rtable.delete(rtable.default_route_ipv4)
                            rtable.add(default_route(lease.router))
                        else:
                            self.logger.info('Adding default route via {0}'.format(lease.router))
                            rtable.add(default_route(lease.router))
                except OSError as err:
                    self.logger.error('Cannot configure default route: {0}'.format(err.strerror))

            if lease.dns_addresses and self.configstore.get('network.dhcp.assign_dns'):
                inp = []
                addrs = []
                proc = subprocess.Popen(
                    ['/sbin/resolvconf', '-a', interface],
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE
                )

                for i in lease.dns_addresses:
                    # Filter out bogus DNS server addresses
                    if str(i) in ('127.0.0.1', '0.0.0.0', '255.255.255.255'):
                        continue

                    inp.append('nameserver {0}'.format(i))
                    addrs.append(i)

                if lease.domain_name:
                    inp.append('search {0}'.format(lease.domain_name))

                proc.communicate('\n'.join(inp).encode('ascii'))
                proc.wait()
                self.client.emit_event('network.dns.configured', {
                    'addresses': addrs,
                })
                self.logger.info('Updated DNS configuration')
            else:
                subprocess.call(['/sbin/resolvconf', '-d', interface])
                self.client.emit_event('network.dns.configured', {
                    'addresses': [],
                })
                self.logger.info('Deleted DNS configuration')

        def reject(reason):
            # Called when the DHCP server rejects the request; tear down and,
            # in non-blocking mode, retry after a minute.
            self.logger.info('DHCP request rejected on {0}: {1}'.format(interface, reason))
            self.deconfigure_dhcp(interface)
            if not block:
                t = threading.Timer(60, self.configure_dhcp, args=(interface,))
                t.start()

        def unbind(lease, reason):
            reasons = {
                dhcp.client.UnbindReason.EXPIRE: 'expired',
                dhcp.client.UnbindReason.REVOKE: 'revoked'
            }
            self.logger.info('DHCP lease on {0}: {1}'.format(interface, reasons.get(reason, 'revoked')))

        def state_change(state):
            # Propagate DHCP state transitions as change events.
            self.client.emit_event('network.interface.changed', {
                'operation': 'update',
                'ids': [interface]
            })
            self.client.emit_event('network.changed', {
                'operation': 'update'
            })

        with self.dhcp_lock:
            # Hostname short-form is used as the DHCP client identifier.
            client = dhcp.client.Client(interface, lambda: socket.gethostname().split('.')[0])
            client.on_bind = bind
            client.on_unbind = unbind
            client.on_reject = reject
            client.on_state_change = state_change
            client.start()
            self.dhcp_clients[interface] = client

        if block:
            ret = client.wait_for_bind(timeout)
            if ret is None:
                # Timed out waiting for a lease; undo the registration.
                client.stop()
                del self.dhcp_clients[interface]

            return ret is not None

        return True

    def deconfigure_dhcp(self, interface):
        """Release the lease, stop the client and forget ``interface``."""
        with self.dhcp_lock:
            client = self.dhcp_clients[interface]
            client.release()
            client.stop()
            del self.dhcp_clients[interface]

    def renew_dhcp(self, interface):
        """Request a lease renewal; raises RpcException if not possible."""
        if interface not in self.dhcp_clients:
            raise RpcException(errno.ENXIO, 'Interface {0} is not configured for DHCP'.format(interface))

        if not self.dhcp_clients[interface].lease:
            raise RpcException(errno.ENOENT, 'Cannot renew without a lease')

        self.dhcp_clients[interface].request(renew=True, timeout=30)

    def interface_detached(self, name):
        """Log removal of an interface (logger.warn alias is deprecated)."""
        self.logger.warning('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        """Log arrival of an interface."""
        self.logger.warning('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        """Return True if any DHCP interface should also assign the gateway."""
        for i in self.datastore.query('network.interfaces'):
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                return True

        return False

    def scan_interfaces(self):
        """Sync the datastore with the set of physical NICs present."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in list(netif.list_interfaces().values()):
            existing.append(i.name)

            # We want only physical NICs
            if i.cloned:
                continue

            # Skip management/NAT and virtualization helper interfaces.
            if i.name in ('mgmt0', 'nat0'):
                continue

            if i.name.startswith(('tap', 'brg')):
                continue

            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'name': None,
                    'cloned': False,
                    'type': i.type.name,
                    'dhcp': False,
                    'noipv6': False,
                    'rtadv': False,
                    'mtu': i.mtu,
                    'media': None,
                    'mediaopts': [],
                    'aliases': [],
                    'capabilities': {
                        'add': [],
                        'del': []
                    }
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing), ('cloned', '=', False)):
            self.datastore.delete('network.interfaces', i['id'])

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect to the dispatcher and register networkd services.

        Retries forever; ``resume`` additionally resumes services after a
        reconnect.
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        """Start the routing-socket event source thread."""
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Register JSON schemas describing interfaces with the dispatcher."""
        self.client.register_schema('NetworkAggregationProtocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('NetworkLaggPortFlags', {
            'type': 'array',
            'items': {'$ref': 'NetworkLaggPortFlagsItems'}
        })

        self.client.register_schema('NetworkLaggPortFlagsItems', {
            'type': 'string',
            'enum': list(netif.LaggPortFlags.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceFlags', {
            'type': 'array',
            'items': {'$ref': 'NetworkInterfaceFlagsItems'}
        })

        self.client.register_schema('NetworkInterfaceFlagsItems', {
            'type': 'string',
            'enum': list(netif.InterfaceFlags.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceCapabilities', {
            'type': 'array',
            'items': {'$ref': 'NetworkInterfaceCapabilitiesItems'}
        })

        self.client.register_schema('NetworkInterfaceCapabilitiesItems', {
            'type': 'string',
            'enum': list(netif.InterfaceCapability.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceMediaopts', {
            'type': 'array',
            'items': {'$ref': 'NetworkInterfaceMediaoptsItems'}
        })

        self.client.register_schema('NetworkInterfaceMediaoptsItems', {
            'type': 'string',
            'enum': list(netif.InterfaceMediaOption.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceNd6Flag', {
            'type': 'array',
            'items': {'$ref': 'NetworkInterfaceNd6FlagItems'}
        })

        self.client.register_schema('NetworkInterfaceNd6FlagItems', {
            'type': 'string',
            'enum': list(netif.NeighborDiscoveryFlags.__members__.keys())
        })

        self.client.register_schema('NetworkInterfaceType', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('NetworkInterfaceDhcpState', {
            'type': 'string',
            'enum': [
                'INIT',
                'SELECTING',
                'REQUESTING',
                'INIT_REBOOT',
                'REBOOTING',
                'BOUND',
                'RENEWING',
                'REBINDING'
            ]
        })

        self.client.register_schema('NetworkInterfaceStatus', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {'$ref': 'NetworkInterfaceStatusLinkstate'},
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                'active_media_type': {'type': 'string'},
                'active_media_subtype': {'type': 'string'},
                'media_options': {'$ref': 'NetworkInterfaceMediaopts'},
                'supported_media': {
                    'type': 'array',
                    'items': {'type': 'string'}
                },
                'cloned': {'type': 'boolean'},
                'capabilities': {'$ref': 'NetworkInterfaceCapabilities'},
                'flags': {'$ref': 'NetworkInterfaceFlags'},
                'dhcp': {
                    'type': 'object',
                    'properties': {
                        'state': {'$ref': 'NetworkInterfaceDhcpState'},
                        'server_address': {'type': 'string'},
                        'server_name': {'type': 'string'},
                        'lease_starts_at': {'type': 'datetime'},
                        'lease_ends_at': {'type': 'datetime'}
                    }
                },
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'NetworkInterfaceAlias'}
                },
                'nd6_flags': {
                    'type': 'array',
                    'items': {'$ref': 'NetworkInterfaceNd6Flag'}
                },
                'ports': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {
                                'type': 'object',
                                'properties': {
                                    'name': {'type': 'string'},
                                    'flags': {'$ref': 'NetworkLaggPortFlags'}
                                }
                            }
                        }
                    ]
                },
                'members': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {'type': 'string'}
                        }
                    ]
                },
                'parent': {'type': ['string', 'null']},
                'tag': {'type': ['integer', 'null']}
            }
        })

        self.client.register_schema('NetworkInterfaceStatusLinkstate', {
            'type': 'string',
            'enum': list(netif.InterfaceLinkState.__members__.keys())
        })

    def checkin(self):
        """Notify the service manager that startup completed."""
        checkin()

    def main(self):
        """Parse CLI arguments, bootstrap all subsystems and serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('networkd', 'DEBUG')
        setproctitle('networkd')
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.client.resume_service('networkd.debug')

        # NOTE(review): self.configure_network is initialized to None in
        # __init__; it is presumably injected before main() runs — confirm.
        for i in self.configure_network():
            self.logger.info('Initial network configuration: {0}, {1}'.format(*i))

        self.checkin()
        self.logger.info('Started')
        self.client.wait_forever()
class Context(object):
    """debugd daemon context.

    Maintains an msock-based support tunnel to SUPPORT_PROXY_ADDRESS (with a
    keepalive/reconnect thread driven by a condition variable and a small
    ConnectionState machine), a local control server on a Unix socket, and an
    RPC connection to the middleware dispatcher.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()          # multiplexed support socket
        self.msock.on_closed = self.on_msock_close  # drives LOST state transitions
        self.rpc_fd = -1                            # msock channel used for RPC
        self.connection_id = None                   # UUID identifying this session
        self.jobs = []                              # jobs started via run_job()
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None                       # keepalive thread handle
        self.connected_at = None
        self.cv = Condition()                       # guards self.state
        self.rpc = RpcContext()
        self.client = Client()                      # RPC client over the msock channel
        self.server = Server()                      # local control server
        self.middleware_endpoint = None             # dispatcher RPC client

    def start(self, configpath, sockpath):
        """Read config and start the local control server on ``sockpath``.

        SIGUSR2 is wired to initiate a support connection on demand.
        """
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance('control', ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name='server thread', daemon=True).start()

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        """Connect to the dispatcher and register the debugd service.

        Retries forever with a one second delay; returns once connected.
        """
        while True:
            try:
                self.middleware_endpoint.connect('unix:')
                self.middleware_endpoint.login_service('debugd')
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service(
                    'debugd.management', ControlService(self))
                self.middleware_endpoint.resume_service('debugd.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config at ``path``; exit the process on any failure."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal('Cannot open config file: {0}'.format(str(err)))
            self.logger.fatal('Exiting.')
            sys.exit(1)

    def connect(self, discard=False):
        """Start the support-connection keepalive thread.

        ``discard=True`` forgets the previous connection id so a fresh one is
        generated on the next attempt.
        """
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        """Keepalive loop: (re)establish the support tunnel until OFFLINE.

        On success, blocks on the condition variable until the state leaves
        CONNECTED; returns when the state becomes OFFLINE (explicit
        disconnect), otherwise retries after 10 seconds.
        """
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info('Connecting to {0}'.format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect('fd://', fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service('debug', DebugService(self))
                self.client.call_sync('server.login', str(self.connection_id), socket.gethostname(), get_version(), 'none')
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                # Catch-all is deliberate: any failure during tunnel setup is
                # logged and retried below rather than killing the thread.
                self.logger.warning('Failed to initiate support connection: {0}'.format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Sleep until the connection is LOST or explicitly closed.
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning('Support connection lost, retrying in 10 seconds')
            time.sleep(10)

    def disconnect(self):
        """Tear down the support tunnel and mark the context OFFLINE."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        """msock closed callback: flag the connection as LOST (triggers retry)."""
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        """Track ``job``, attach this context to it and start it."""
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        """Atomically update the connection state and wake all waiters."""
        with self.cv:
            self.state = state
            self.cv.notify_all()
class Main(object):
    """dscached daemon: directory-service caching daemon.

    Hosts account/group/host/idmap RPC services over both a local server
    socket and the dispatcher connection, and manages a set of directory
    plugins with TTL caches for users, groups and hosts.
    """

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None                 # dispatcher Client (init_dispatcher)
        self.server = None                 # local RPC server (init_server)
        self.plugin_dirs = []
        self.plugins = {}                  # plugin name -> class
        self.directories = []              # Directory instances
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.home_directory_root = None
        self.account_service = AccountService(self)
        self.group_service = GroupService(self)
        self.rpc.register_service_instance('dscached.account', self.account_service)
        self.rpc.register_service_instance('dscached.group', self.group_service)
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.idmap', IdmapService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_active_directories(self):
        """Return all directories currently in BOUND state."""
        return list(filter(lambda d: d and d.state == DirectoryState.BOUND, self.directories))

    def get_searched_directories(self):
        """Return BOUND directories in configured search order."""
        return list(filter(lambda d: d and d.state == DirectoryState.BOUND, (self.get_directory_by_name(n) for n in self.get_search_order())))

    def get_search_order(self):
        return self.search_order

    def get_directory_by_domain(self, domain_name):
        """Return the first directory whose domain_name matches, or None."""
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        """Return the first directory with the given name, or None."""
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        """Map a uid or gid onto the directory whose id range contains it.

        Returns None when neither uid nor gid matches any range (and when
        both arguments are None).
        """
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid, self.directories)

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid, self.directories)

    def get_home_directory(self, directory, username):
        """Build '<root>/<user>@<domain>' or '/nonexistent' when no root is set."""
        if not self.home_directory_root:
            return '/nonexistent'

        return os.path.join(self.home_directory_root, f'{username}@{directory.domain_name}')

    def wait_for_etcd(self):
        """Block until the etcd.generation service is available (or already is)."""
        self.client.test_or_wait_for_event(
            'plugin.service_resume',
            lambda args: args['name'] == 'etcd.generation',
            lambda: 'etcd.generation' in self.client.call_sync('discovery.get_services')
        )

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit(1) on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Auto-reconnect when the dispatcher connection drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local streaming RPC server on a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        """Load JSON config and pick up plugin dirs; exit(1) on error."""
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # NOTE(review): Python 3 exceptions have no .message attribute —
            # this line would raise AttributeError on the error path; should
            # be str(err). Confirm before relying on this error message.
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher (retrying forever) and resume services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.idmap')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Load every plugin found in the configured plugin directories."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import every .py file in `dir` and call its _init(self) hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                # A broken plugin must not take the daemon down.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Register a directory plugin class under `name` (called by plugins)."""
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        self.client.register_schema(name, schema)

    def register_schemas(self):
        """Push all locally-declared JSON schema objects to the dispatcher."""
        from freenas.dispatcher.model import context
        for name, schema in (s.__named_json_schema__() for s in context.local_json_schema_objects):
            self.logger.debug(f'Registering schema: {name}')
            self.client.register_schema(name, schema)

    def init_directories(self):
        """Instantiate and configure a Directory for each datastore entry.

        Failures are skipped silently so one bad directory does not block
        the rest.
        """
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                self.directories.append(directory)
                directory.configure()
            except:
                continue

    def load_config(self):
        """Read cache/search settings from the configstore into attributes."""
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')
        self.home_directory_root = self.configstore.get('system.home_directory_root')

    def checkin(self):
        """Signal service readiness to the service manager."""
        checkin()

    def main(self):
        """Daemon entry point: parse args, wire everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('dscached', 'DEBUG')
        setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.register_schemas()
        self.wait_for_etcd()
        self.init_directories()
        self.checkin()
        self.client.wait_forever()
class Main(object):
    """neighbord daemon: advertises this host's services via discovery plugins."""

    def __init__(self):
        # Fixed: the original assigned self.config and self.logger twice;
        # the second logger assignment replaced the named 'neighbord' logger
        # with the root logger. Duplicates removed.
        self.logger = logging.getLogger('neighbord')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher Client (init_dispatcher)
        self.plugin_dirs = []
        self.plugins = {}           # plugin name -> plugin instance

    def parse_config(self, filename):
        """Load JSON config into self.config and pick up plugin dirs.

        Exits the process with status 1 when the file is unreadable or not
        valid JSON.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Fixed: was err.message, which does not exist on Python 3
            # exceptions and raised AttributeError on this error path.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit(1) on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Auto-reconnect when the dispatcher connection drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        """Load every plugin found in the configured plugin directories."""
        for plugin_dir in self.plugin_dirs:
            self.scan_plugin_dir(plugin_dir)

    def scan_plugin_dir(self, dir):
        """Import every .py file in `dir` and call its _init(self) hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                # A broken plugin must not take the daemon down.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Instantiate a discovery plugin class and register it under `name`."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce one service record through every registered plugin."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Advertise the standard set of services for this host (best-effort)."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            # Advertising is best-effort; log and carry on.
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        """Connect to the dispatcher (retrying forever) and expose RPC services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def checkin(self):
        """Signal service readiness to the service manager."""
        checkin()

    def main(self):
        """Daemon entry point: parse args, wire everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')
        setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.checkin()
        self.client.wait_forever()
class Context(object):
    """serviced daemon context: job supervisor built around a kqueue loop.

    Tracks child processes via EVFILT_PROC kevents, manages dependency
    targets ("provides"), and exposes management RPC both locally and via
    the dispatcher.
    """

    def __init__(self):
        self.server = None                 # local RPC server (init_server)
        self.client = None                 # dispatcher Client (init_dispatcher)
        self.jobs = {}                     # job id -> Job
        self.provides = set()              # satisfied dependency targets
        self.lock = RLock()                # guards jobs/provides
        self.kq = select.kqueue()
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        """Create the dispatcher client and connect; no-op when already connected."""
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            # Auto-reconnect when the dispatcher connection drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local streaming RPC server on a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        """Mark dependency `targets` as satisfied and start now-runnable jobs.

        The update is deferred by 2 seconds via a Timer.
        """
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    # Start stopped jobs whose requirements are now all met.
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        """Return the Job owning `pid`, or None."""
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        """Main kqueue loop: dispatch process events to jobs, adopt orphans.

        Runs forever; EINTR from kevent() is suppressed and the loop retried.
        """
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        job = self.job_by_pid(ev.ident)
                        if job:
                            # Known pid: let its job handle the event.
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            # ev.data is the parent pid of the new child.
                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                continue

                            with self.lock:
                                # Adopt the child as an anonymous job.
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job
                                self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        """Subscribe to exit/exec/fork events (with child tracking) for `pid`."""
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0
        )
        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        """Remove the kevent subscription for `pid`, ignoring missing entries."""
        ev = select.kevent(pid, select.KQ_FILTER_PROC, select.KQ_EV_DELETE, 0, 0, 0)
        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        """Broadcast an event locally and, when connected, to the dispatcher."""
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        """Connect to the dispatcher (retrying forever) and resume services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        """Launch the one-shot bootstrap job on a background thread."""
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })
                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        """Disconnect, close the server and terminate the process."""
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        """Daemon entry point: parse args, start server, run the event loop."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
class Context(object):
    """schedulerd daemon context: runs calendar tasks via APScheduler and
    submits them to the dispatcher as middleware tasks."""

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher Client (init_dispatcher)
        self.scheduler = None       # APScheduler BackgroundScheduler
        self.active_tasks = {}      # scheduler job id -> middleware task id

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit(1) on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Auto-reconnect when the dispatcher connection drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start the background scheduler with persistent + in-memory stores."""
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(jobstores={'default': store, 'temp': MemoryJobStore()}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Connect to the dispatcher (retrying forever) and expose RPC services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Scheduler callback: submit a middleware task and wait for it.

        args[0] is the task name, the rest are task arguments; kwargs carry
        the scheduler job 'id' and human-readable 'name'. Emits a CRITICAL
        alert when the task does not finish successfully, then records the
        run in the datastore.
        """
        tid = self.client.call_sync('task.submit_with_env', args[0], args[1:], {
            'RUN_AS_USER': '******',
            'CALENDAR_TASK_NAME': kwargs.get('name')
        })

        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', tid),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        self.client.emit_event(name, params)

    def checkin(self):
        """Signal service readiness to the service manager."""
        checkin()

    def main(self):
        """Daemon entry point: parse args, wire everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.checkin()
        self.client.wait_forever()
class Main(object):
    """alertd daemon: matches alerts against filters and dispatches them to
    emitter plugins, with periodic reminders for unhandled alerts."""

    def __init__(self):
        self.logger = logging.getLogger('alertd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher Client (init_dispatcher)
        self.plugin_dirs = []
        self.emitters = {}          # emitter name -> emitter instance

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit(1) on failure."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Auto-reconnect when the dispatcher connection drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_reminder(self):
        """Start the background reminder thread (daemon)."""
        t = threading.Thread(target=self.reminder_thread)
        t.daemon = True
        t.start()

    def parse_config(self, filename):
        """Load JSON config and pick up plugin dirs; exit(1) on error."""
        try:
            f = open(filename, 'r')
            self.config = json.load(f)
            f.close()
        except IOError as err:
            # NOTE(review): Python 3 exceptions have no .message attribute —
            # this line would raise AttributeError on the error path; should
            # be str(err).
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['alertd']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher (retrying forever) and expose RPC services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('alertd')
                self.client.enable_server()
                self.client.register_service('alertd.management', ManagementService(self))
                self.client.register_service('alertd.alert', AlertService(self))
                self.client.register_service('alertd.debug', DebugService())
                self.client.resume_service('alertd.management')
                self.client.resume_service('alertd.alert')
                self.client.resume_service('alertd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Load every plugin found in the configured plugin directories."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Import every .py file in `dir` and call its _init(self) hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                # NOTE(review): the imp module is deprecated since Python 3.4;
                # importlib-based loading would be the modern equivalent.
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                # A broken plugin must not take the daemon down.
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def emit_alert(self, alert):
        """Send `alert` through every filter whose predicates all match.

        Filters with clazz None act as catch-all. The for/else construct
        fires the emitter only when no predicate broke out of the loop.
        Updates send bookkeeping on the alert afterwards.
        """
        self.logger.debug('Emitting alert <id:{0}> (class {1})'.format(alert['id'], alert['clazz']))
        for i in self.datastore.query('alert.filters', ('or', [('clazz', '=', None), ('clazz', '=', alert['clazz'])])):
            for pr in i.get('predicates', []):
                if pr['operator'] not in operators_table:
                    continue

                try:
                    # A failing predicate rejects this filter (break skips
                    # the else: block below).
                    if not operators_table[pr['operator']](alert.properties.get(pr['property']), pr['value']):
                        break
                except:
                    continue
            else:
                try:
                    emitter = self.emitters.get(i['emitter'])
                    if not emitter:
                        self.logger.warning('Invalid emitter {0} for alert filter {1}'.format(i['emitter'], i['id']))
                        continue

                    self.logger.debug('Alert <id:{0}> matched filter {1}'.format(alert['id'], i['id']))
                    if alert['send_count'] > 0:
                        # Repeat emission, unless the alert is one-shot.
                        if not alert['one_shot']:
                            emitter.emit_again(alert, i['parameters'])
                    else:
                        emitter.emit_first(alert, i['parameters'])
                except BaseException as err:
                    # Failed to emit alert using alert emitter
                    # XXX: generate another alert about that
                    self.logger.error('Cannot emit alert <id:{0}> using {1}: {2}'.format(alert['id'], i['emitter'], str(err)))

        alert['send_count'] += 1
        alert['last_emitted_at'] = datetime.utcnow()
        self.datastore.update('alerts', alert['id'], alert)

    def cancel_alert(self, alert):
        """Mark an alert inactive/cancelled and persist it."""
        self.logger.debug('Cancelling alert <id:{0}> (class {1})'.format(alert['id'], alert['clazz']))
        alert.update({'active': False, 'cancelled': datetime.utcnow()})
        self.datastore.update('alerts', alert['id'], alert)

    def register_emitter(self, name, cls):
        """Instantiate an emitter class and register it under `name`."""
        self.emitters[name] = cls(self)
        self.logger.info('Registered emitter {0} (class {1})'.format(name, cls))

    def reminder_thread(self):
        """Periodically re-emit active, undismissed alerts past their
        severity-specific reminder interval."""
        while True:
            time.sleep(REMINDER_SECONDS)
            for i in self.datastore.query('alerts'):
                if not i['active'] or i['dismissed']:
                    continue

                last_emission = i.get('last_emitted_at') or i['created_at']
                interval = REMINDER_SCHEDULE[i['severity']]
                if not interval:
                    continue

                if last_emission + timedelta(seconds=interval) <= datetime.utcnow():
                    self.emit_alert(i)

    def checkin(self):
        """Signal service readiness to the service manager."""
        checkin()

    def main(self):
        """Daemon entry point: parse args, wire everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('alertd', 'DEBUG')
        setproctitle('alertd')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.init_reminder()
        self.checkin()
        self.client.wait_forever()
class Main:
    """networkd daemon: manages network interface configuration, DHCP and
    routing-socket monitoring, exposing it over the dispatcher."""

    def __init__(self):
        self.config = None
        self.client = None              # dispatcher Client (init_dispatcher)
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None       # RoutingSocketEventSource
        self.logger = logging.getLogger('networkd')

    def dhclient_pid(self, interface):
        """Return the pid from dhclient's pidfile for `interface`, or None."""
        path = os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))
        if not os.path.exists(path):
            return None

        try:
            with open(path) as f:
                pid = int(f.read().strip())
                return pid
        except (IOError, ValueError):
            return None

    def dhclient_running(self, interface):
        """True when a live dhclient process owns `interface`'s pidfile."""
        pid = self.dhclient_pid(interface)
        if not pid:
            return False

        try:
            # Signal 0 only probes for process existence.
            os.kill(pid, 0)
            return True
        except OSError:
            return False

    def configure_dhcp(self, interface):
        """Start dhclient for `interface` unless one is already running.

        Returns True on success (or when already configured).
        """
        # Check if dhclient is running
        if self.dhclient_running(interface):
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        def unblock_signals():
            # The daemon blocks these; the child must handle them normally.
            signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM, signal.SIGINT])

        ret = subprocess.call(['/sbin/dhclient', interface], close_fds=True, preexec_fn=unblock_signals)
        return ret == 0

    def interface_detached(self, name):
        self.logger.warn('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        self.logger.warn('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        """True when any DHCP interface is allowed to set the default gateway."""
        for i in self.datastore.query('network.interfaces'):
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                return True

        return False

    def scan_interfaces(self):
        """Sync the network.interfaces collection with physically present NICs."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in list(netif.list_interfaces().values()):
            existing.append(i.name)

            # We want only physical NICs
            if i.cloned:
                continue

            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'type': i.type.name,
                    'dhcp': False,
                    'noipv6': False,
                    'rtadv': False,
                    'mtu': None,
                    'media': None,
                    'aliases': []
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing), ('cloned', '=', False)):
            self.datastore.delete('network.interfaces', i['id'])

    def init_datastore(self):
        """Open the datastore and wrap it in a ConfigStore; exit(1) on failure."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect to the dispatcher, retrying forever.

        :param resume: resume RPC services after re-registering (used when
            reconnecting after a dropped connection).
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Auto-reconnect (resuming services) when the connection drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        """Start the routing socket listener thread."""
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Register all networkd JSON schemas with the dispatcher."""
        self.client.register_schema('network-aggregation-protocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('network-interface-flags', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceFlags.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-capabilities', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceCapability.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-mediaopts', {
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(netif.InterfaceMediaOption.__members__.keys())
            }
        })

        self.client.register_schema('network-interface-type', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('network-interface-status', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {
                    'type': 'string',
                    'enum': list(netif.InterfaceLinkState.__members__.keys())
                },
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                'media_options': {'$ref': 'network-interface-mediaopts'},
                'capabilities': {'$ref': 'network-interface-capabilities'},
                'flags': {'$ref': 'network-interface-flags'},
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-alias'}
                }
            }
        })

    def main(self):
        """Daemon entry point: parse args, wire everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/networkd.log', 'DEBUG')
        setproctitle.setproctitle('networkd')
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.client.resume_service('networkd.debug')
        self.logger.info('Started')
        self.client.wait_forever()
class Main(object):
    """dsd daemon: directory-service configuration daemon entry point."""

    def __init__(self):
        self.config = None          # parsed JSON config (dict)
        self.client = None          # dispatcher Client (init_dispatcher)
        self.datastore = None
        self.configstore = None
        self.logger = logging.getLogger('dsd')

    def parse_config(self, filename):
        """Load the JSON config file into self.config; exit(1) on failure."""
        try:
            # with-statement replaces manual open/close, which leaked the
            # file handle when json.load() raised.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Fixed: was err.message, which does not exist on Python 3
            # exceptions and raised AttributeError on this error path.
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

    def init_datastore(self, resume=False):
        """Open the datastore described by the config; exit(1) on failure.

        `resume` is accepted for interface compatibility but unused here.
        """
        try:
            self.datastore = get_datastore(self.config['datastore']['driver'],
                                           self.config['datastore']['dsn'])
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect to the dispatcher, retrying forever.

        :param resume: resume RPC services after re-registering (used when
            reconnecting after a dropped connection).
        """
        while True:
            try:
                self.client.connect('127.0.0.1')
                self.client.login_service('dsd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('dsd.configuration', DSDConfigurationService(self))
                self.client.register_service('dsd.debug', DebugService())
                if resume:
                    self.client.resume_service('dsd.configuration')
                    self.client.resume_service('dsd.debug')

                return
            except socket.error as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Auto-reconnect (resuming services) when the connection drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def register_schemas(self):
        # XXX do stuff here? To be determined ...
        pass

    def report_error(self, message, exception):
        """Drop a best-effort crash report into /var/tmp/crash; never raises."""
        if not os.path.isdir('/var/tmp/crash'):
            try:
                os.mkdir('/var/tmp/crash')
            except:
                return

        report = {
            'timestamp': str(datetime.datetime.now()),
            'type': 'exception',
            'application': 'dsd',
            'message': message,
            'exception': str(exception),
            'traceback': traceback.format_exc()
        }

        try:
            # Fixed: mode='w' is required — json.dump() writes str, and the
            # default NamedTemporaryFile mode ('w+b') raised TypeError which
            # the except below silently swallowed, so reports were never
            # written.
            with tempfile.NamedTemporaryFile(mode='w', dir='/var/tmp/crash',
                                             suffix='.json', prefix='report-',
                                             delete=False) as f:
                json.dump(report, f, indent=4)
        except:
            pass

    def main(self):
        """Daemon entry point: parse args, wire everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/dsd.log', 'DEBUG')
        setproctitle.setproctitle('dsd')
        self.parse_config(args.c)
        self.init_datastore()
        self.init_dispatcher()
        self.client.resume_service('dsd.configuration')
        self.logger.info('Started')
        self.client.wait_forever()
class Main(object):
    """clid daemon: hosts a server-side CLI evaluation context over RPC."""

    def __init__(self):
        # Fixed: the original assigned self.config and self.logger twice;
        # the second logger assignment replaced the named 'clid' logger with
        # the root logger. Duplicates removed.
        self.logger = logging.getLogger('clid')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher Client (init_dispatcher)
        self.plugin_dirs = []
        self.ml = None              # CLI MainLoop (init_cli)
        self.context = None         # CLI Context (init_cli)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Auto-reconnect when the dispatcher connection drops.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Build the CLI Context and MainLoop on top of the dispatcher client."""
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Connect to the dispatcher (retrying forever) and expose RPC services."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Daemon entry point: parse args, wire everything up, serve forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('clid', 'DEBUG')
        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
class Main(object):
    """etcd daemon: renders managed configuration files from templates on demand."""

    def __init__(self):
        self.logger = logging.getLogger('etcd')
        self.root = None            # /etc mount point (from the command line)
        self.configfile = None      # path to the middleware config file
        self.config = None          # parsed JSON configuration
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher connection
        self.plugin_dirs = []       # template directories from the config
        self.renderers = {}         # template extension -> renderer instance
        self.managed_files = {}     # relative file name -> absolute template path

    def init_datastore(self):
        """Open the datastore; exit since etcd cannot operate without it."""
        try:
            self.datastore = datastore.get_datastore(self.configfile)
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def connect(self):
        """Connect to the dispatcher and register etcd RPC services, retrying forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('etcd')
                self.client.enable_server()
                self.client.register_service('etcd.generation', FileGenerationService(self))
                self.client.register_service('etcd.management', ManagementService(self))
                self.client.register_service('etcd.debug', DebugService())
                self.client.resume_service('etcd.generation')
                self.client.resume_service('etcd.management')
                self.client.resume_service('etcd.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_renderers(self):
        """Instantiate one renderer per registered template type."""
        for name, impl in TEMPLATE_RENDERERS.items():
            self.renderers[name] = impl(self)

    def parse_config(self, filename):
        """Load the JSON config file and remember the plugin directories.

        Exits the process if the file is unreadable or not valid JSON.
        """
        try:
            # Fix: use a context manager so the file is closed even when
            # json.load raises; the original open()/close() pair leaked on error.
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Fix: Python 3 exceptions have no .message attribute; use str(err).
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['etcd']['plugin-dirs']

    def scan_plugins(self):
        """Scan every configured template directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Register every file under dir whose extension has a known renderer."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for root, dirs, files in os.walk(dir):
            for name in files:
                abspath = os.path.join(root, name)
                path = os.path.relpath(abspath, dir)
                name, ext = os.path.splitext(path)

                # First template found for a given target file wins.
                if name in self.managed_files:
                    continue

                if ext in TEMPLATE_RENDERERS:
                    self.managed_files[name] = abspath
                    self.logger.info('Adding managed file %s [%s]', name, ext)

    def generate_file(self, file_path):
        """Render the template registered for file_path and return its contents.

        Raises RpcException(ENOENT) for unknown files; rendering failures are
        reported in-band as a comment so the target file still gets written.
        """
        if file_path not in self.managed_files:
            raise RpcException(errno.ENOENT, 'No such file')

        template_path = self.managed_files[file_path]
        name, ext = os.path.splitext(template_path)
        if ext not in self.renderers:
            raise RuntimeError("Can't find renderer for {0}".format(file_path))

        renderer = self.renderers[ext]
        try:
            return renderer.render_template(template_path)
        except Exception as e:
            # Fix: logger.warn is a deprecated alias of logger.warning.
            self.logger.warning('Cannot generate file {0}: {1}'.format(file_path, str(e)))
            return "# FILE GENERATION FAILED: {0}\n".format(str(e))

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: parse arguments, bring services up, block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        parser.add_argument('mountpoint', metavar='MOUNTPOINT', default='/etc', help='/etc mount point')
        args = parser.parse_args()
        configure_logging('/var/log/etcd.log', 'DEBUG')
        setproctitle.setproctitle('etcd')
        self.root = args.mountpoint
        self.configfile = args.c
        self.parse_config(args.c)
        self.scan_plugins()
        self.init_renderers()
        self.init_datastore()
        self.init_dispatcher()
        self.client.wait_forever()
class Main(object):
    """dscached daemon (basic variant): loads directory-service plugins and
    exposes account/group RPC services over the dispatcher."""

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None          # path to the middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher connection
        self.plugin_dirs = []       # plugin directories from the config
        self.plugins = {}           # plugin name -> plugin instance

    def init_datastore(self):
        """Open the datastore; exit since the daemon cannot operate without it."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def parse_config(self, filename):
        """Load the JSON config file and remember the plugin directories.

        Exits the process if the file is unreadable or not valid JSON.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Fix: Python 3 exceptions have no .message attribute; use str(err).
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher and register dscached services, retrying forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server()
                self.client.register_service('dscached.account', AccountService(self))
                self.client.register_service('dscached.group', GroupService(self))
                self.client.register_service('dscached.debug', DebugService())
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every .py file in dir as a plugin and call its _init hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                # Best effort: a broken plugin must not take the daemon down.
                # (Explicit BaseException replaces the original bare except.)
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Instantiate cls with this context and register it under name."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def main(self):
        """Daemon entry point: parse arguments, bring services up, block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')
        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.client.wait_forever()
class Main(object):
    """dscached daemon (full variant): manages directory-service plugins and
    Directory instances, serves account/group/host lookups over its own RPC
    server as well as the dispatcher connection, and caches results."""

    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None              # path to the middleware config file
        self.datastore = None
        self.configstore = None
        # Local RPC context with streaming enabled for large enumerations.
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None              # dispatcher connection
        self.server = None              # local RPC server
        self.plugin_dirs = []
        self.plugins = {}               # plugin name -> plugin class (see register_plugin)
        self.directories = []           # configured Directory instances
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200           # seconds; overridden by load_config()
        self.search_order = []          # user-configured extra search order
        self.cache_enumerations = True
        self.cache_lookups = True
        self.rpc.register_service_instance('dscached.account', AccountService(self))
        self.rpc.register_service_instance('dscached.group', GroupService(self))
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_enabled_directories(self):
        """Return configured Directory objects in effective search order,
        skipping names that do not resolve to a directory."""
        return list(filter(None, (self.get_directory_by_name(n) for n in self.get_search_order())))

    def get_search_order(self):
        """'local' and 'system' always come first, then the configured order."""
        return ['local', 'system'] + self.search_order

    def get_directory_by_domain(self, domain_name):
        """Return the directory serving domain_name, or None."""
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        """Return the directory named name, or None."""
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        """Return the directory owning the given uid or gid range.

        Returns None when neither uid nor gid is given or no range matches.
        """
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def init_datastore(self):
        """Open the datastore; exit since the daemon cannot operate without it."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local RPC server on address in a daemon thread."""
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.start(address)
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        """Load the JSON config file and remember the plugin directories.

        Exits the process if the file is unreadable or not valid JSON.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Fix: Python 3 exceptions have no .message attribute; use str(err).
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        """Connect to the dispatcher, expose self.rpc and resume services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every .py file in dir as a plugin and call its _init hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                # Best effort: a broken plugin must not take the daemon down.
                # (Explicit BaseException replaces the original bare except.)
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Register a plugin class under name.

        NOTE(review): unlike the other daemons' register_plugin, this stores
        the class itself rather than cls(self) — kept as-is since consumers
        may instantiate it per directory; confirm against Directory usage.
        """
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        """Forward a schema registration to the dispatcher."""
        self.client.register_schema(name, schema)

    def init_directories(self):
        """Instantiate and configure a Directory for every datastore entry."""
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                directory.configure()
                self.directories.append(directory)
            except BaseException:
                # Fix: the original swallowed the error silently; log it so a
                # misconfigured directory is visible, then keep going.
                self.logger.error('Cannot initialize directory entry', exc_info=True)
                continue

    def load_config(self):
        """Pull tunables from the config store."""
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')

    def main(self):
        """Daemon entry point: parse arguments, bring services up, block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/dscached.log', 'DEBUG')
        setproctitle.setproctitle('dscached')
        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.init_directories()
        self.client.wait_forever()
class Context(object):
    """Runtime context for the schedulerd daemon.

    Owns the datastore/dispatcher connections and the APScheduler instance,
    and tracks dispatcher task ids of currently running calendar jobs.
    """

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.client = None
        self.scheduler = None
        # calendar job id -> dispatcher task id, for the duration of the run
        self.active_tasks = {}

    def init_datastore(self):
        """Open datastore and config store; exit if the datastore is unavailable."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect on connection loss."""
        def handle_error(reason, **kwargs):
            if reason not in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                return
            self.logger.warning('Connection to dispatcher lost')
            self.connect()

        self.client = Client()
        self.client.on_error(handle_error)
        self.connect()

    def init_scheduler(self):
        """Start a background APScheduler backed by the MongoDB job store."""
        jobstore = MongoDBJobStore(
            database='freenas',
            collection='calendar_tasks',
            client=self.datastore.client
        )
        self.scheduler = BackgroundScheduler(jobstores={'default': jobstore}, timezone=pytz.utc)
        self.scheduler.start()

    def connect(self):
        """Connect to the dispatcher and expose scheduler services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as error:
                self.logger.warning(
                    'Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(error))
                )
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Submit a calendar task to the dispatcher, wait for it and record the run."""
        task_id = self.client.call_sync(
            'task.submit_with_env',
            args[0],
            args[1:],
            {
                'RUN_AS_USER': '******',
                'CALENDAR_TASK_NAME': kwargs.get('name')
            }
        )
        self.active_tasks[kwargs['id']] = task_id
        self.client.call_sync('task.wait', task_id, timeout=None)
        result = self.client.call_sync('task.status', task_id)

        if result['state'] != 'FINISHED':
            # Raise an alert for failed runs; alert delivery itself is best effort.
            try:
                self.client.call_sync('alert.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(
                        kwargs.get('name', task_id),
                        result['error']['message']
                    ),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: parse arguments, bring services up, block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.client.wait_forever()
class Main(object):
    """neighbord daemon: advertises this host's services (mDNS-style) via
    discovery plugins and exposes discovery RPC over the dispatcher."""

    def __init__(self):
        # Fix: the original assigned self.config and self.logger twice; the
        # second logging.getLogger() replaced the named 'neighbord' logger
        # with the root logger, losing the logger name in all output.
        self.logger = logging.getLogger('neighbord')
        self.config = None          # path to the middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher connection
        self.plugin_dirs = []       # plugin directories from the config
        self.plugins = {}           # plugin name -> plugin instance

    def parse_config(self, filename):
        """Load the JSON config file and remember the plugin directories.

        Exits the process if the file is unreadable or not valid JSON.
        """
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            # Fix: Python 3 exceptions have no .message attribute; use str(err).
            self.logger.error('Cannot read config file: %s', str(err))
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['neighbord']['plugin-dirs']

    def init_datastore(self):
        """Open the datastore; exit since the daemon cannot operate without it."""
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection."""
        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def scan_plugins(self):
        """Scan every configured plugin directory."""
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        """Load every .py file in dir as a plugin and call its _init hook."""
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except BaseException:
                # Best effort: a broken plugin must not take the daemon down.
                # (Explicit BaseException replaces the original bare except.)
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        """Instantiate cls with this context and register it under name."""
        self.plugins[name] = cls(self)
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_service(self, name, regtype, port, properties=None):
        """Announce one service through every registered discovery plugin."""
        for plugin in self.plugins.values():
            plugin.register(regtype, name, port, properties)

    def register(self):
        """Announce the standard set of services for this host (best effort)."""
        try:
            hostname = socket.gethostname()
            general = self.client.call_sync('system.general.get_config')
            properties = {
                'version': self.client.call_sync('system.info.version'),
                'description': general['description'],
                'tags': ','.join(general['tags'])
            }

            self.register_service(hostname, 'freenas', 80, properties)
            self.register_service(hostname, 'http', 80)
            self.register_service(hostname, 'ssh', 22)
            self.register_service(hostname, 'sftp-ssh', 22)
        except BaseException as err:
            self.logger.error('Failed to register services: {0}'.format(str(err)))

    def connect(self):
        """Connect to the dispatcher and register neighbord services, retrying forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('neighbord')
                self.client.enable_server()
                self.client.register_service('neighbord.management', ManagementService(self))
                self.client.register_service('neighbord.discovery', DiscoveryService(self))
                self.client.register_service('neighbord.debug', DebugService())
                self.client.resume_service('neighbord.management')
                self.client.resume_service('neighbord.discovery')
                self.client.resume_service('neighbord.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Daemon entry point: parse arguments, bring services up, block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/neighbord.log', 'DEBUG')
        setproctitle.setproctitle('neighbord')
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.scan_plugins()
        self.register()
        self.client.wait_forever()
class Context(object):
    """serviced daemon context: supervises jobs via a kqueue event loop and
    serves job-management RPC both locally and over the dispatcher."""

    def __init__(self):
        self.server = None                      # local RPC server
        self.client = None                      # dispatcher connection (lazy, see init_dispatcher)
        self.jobs = {}                          # job id -> Job
        self.provides = set()                   # dependency targets currently satisfied
        self.lock = RLock()
        self.kq = select.kqueue()               # BSD kqueue for process tracking
        self.devnull = os.open('/dev/null', os.O_RDWR)
        self.logger = logging.getLogger('Context')
        self.rpc = RpcContext()
        self.rpc.register_service_instance('serviced.management', ManagementService(self))
        self.rpc.register_service_instance('serviced.job', JobService(self))

    def init_dispatcher(self):
        """Connect to the dispatcher unless a live connection already exists."""
        if self.client and self.client.connected:
            return

        def on_error(reason, **kwargs):
            # Reconnect automatically whenever the dispatcher goes away.
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        """Start the local streaming RPC server on address in a daemon thread.

        The socket is world-accessible (0o777) so unprivileged tools can talk
        to serviced.
        """
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def provide(self, targets):
        """Mark dependency targets as satisfied and start jobs waiting on them.

        The update is deferred by 2 seconds (Timer) — presumably to let a batch
        of related targets settle before starting dependents; confirm intent.
        """
        def doit():
            self.logger.debug('Adding dependency targets: {0}'.format(', '.join(targets)))
            with self.lock:
                self.provides |= targets
                for job in list(self.jobs.values()):
                    # A stopped job starts once all of its requirements are provided.
                    if job.state == JobState.STOPPED and job.requires <= self.provides:
                        job.start()

        if targets:
            Timer(2, doit).start()

    def job_by_pid(self, pid):
        """Return the Job owning pid, or None."""
        job = first_or_default(lambda j: j.pid == pid, self.jobs.values())
        return job

    def event_loop(self):
        """Main kqueue loop: dispatch process events to jobs and adopt
        untracked children spawned by tracked processes."""
        while True:
            with contextlib.suppress(InterruptedError):
                for ev in self.kq.control(None, MAX_EVENTS):
                    self.logger.log(TRACE, 'New event: {0}'.format(ev))
                    if ev.filter == select.KQ_FILTER_PROC:
                        # Known pid: let its job handle the event.
                        job = self.job_by_pid(ev.ident)
                        if job:
                            job.pid_event(ev)
                            continue

                        if ev.fflags & select.KQ_NOTE_CHILD:
                            if ev.fflags & select.KQ_NOTE_EXIT:
                                continue

                            # ev.data is the parent pid for NOTE_CHILD events.
                            pjob = self.job_by_pid(ev.data)
                            if not pjob:
                                self.untrack_pid(ev.ident)
                                continue

                            # Stop tracking at session ID boundary
                            try:
                                if pjob.pgid != os.getpgid(ev.ident):
                                    self.untrack_pid(ev.ident)
                                    continue
                            except ProcessLookupError:
                                # Child already gone; nothing to adopt.
                                continue

                            # Adopt the child as an anonymous job under its parent.
                            with self.lock:
                                job = Job(self)
                                job.load_anonymous(pjob, ev.ident)
                                self.jobs[job.id] = job

                            self.logger.info('Added job {0}'.format(job.label))

    def track_pid(self, pid):
        """Register pid with kqueue for exit/exec/fork tracking (follows forks)."""
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_ADD | select.KQ_EV_ENABLE,
            select.KQ_NOTE_EXIT | select.KQ_NOTE_EXEC | select.KQ_NOTE_FORK | select.KQ_NOTE_TRACK,
            0, 0
        )

        self.kq.control([ev], 0)

    def untrack_pid(self, pid):
        """Remove pid from kqueue tracking; ignore if it is already gone."""
        ev = select.kevent(
            pid,
            select.KQ_FILTER_PROC,
            select.KQ_EV_DELETE,
            0, 0, 0
        )

        # kqueue raises FileNotFoundError when the event is not registered.
        with contextlib.suppress(FileNotFoundError):
            self.kq.control([ev], 0)

    def emit_event(self, name, args):
        """Broadcast an event locally and, when connected, to the dispatcher."""
        self.server.broadcast_event(name, args)
        if self.client and self.client.connected:
            self.client.emit_event(name, args)

    def connect(self):
        """Connect to the dispatcher and resume serviced services, retrying forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('serviced')
                self.client.enable_server(self.rpc)
                self.client.resume_service('serviced.job')
                self.client.resume_service('serviced.management')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def bootstrap(self):
        """Create and run the one-shot bootstrap job in a background thread."""
        def doit():
            with self.lock:
                job = Job(self)
                job.load({
                    'Label': 'org.freenas.serviced.bootstrap',
                    'ProgramArguments': BOOTSTRAP_JOB,
                    'OneShot': True,
                    'RunAtLoad': True,
                })
                self.jobs[job.id] = job

        Thread(target=doit).start()

    def shutdown(self):
        """Tear down connections and exit the process."""
        self.client.disconnect()
        self.server.close()
        sys.exit(0)

    def main(self):
        """Daemon entry point: start the local server, bootstrap, run the event loop."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()
        configure_logging('/var/log/serviced.log', 'DEBUG', file=True)
        bsd.setproctitle('serviced')
        self.logger.info('Started')
        self.init_server(args.s)
        self.bootstrap()
        self.event_loop()
class Context(object):
    """schedulerd daemon context (schema-registering variant): owns the
    datastore/dispatcher connections and the APScheduler instance."""

    def __init__(self):
        self.logger = logging.getLogger('schedulerd')
        self.config = None          # path to the middleware config file
        self.datastore = None
        self.configstore = None
        self.client = None          # dispatcher connection
        self.scheduler = None       # APScheduler BackgroundScheduler
        # calendar job id -> dispatcher task id while the task runs
        self.active_tasks = {}

    def init_datastore(self):
        """Open datastore and config store; exit if the datastore is unavailable."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect on connection loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_scheduler(self):
        """Start a background APScheduler backed by the MongoDB job store."""
        store = MongoDBJobStore(database='freenas', collection='calendar_tasks', client=self.datastore.client)
        self.scheduler = BackgroundScheduler(jobstores={'default': store}, timezone=pytz.utc)
        self.scheduler.start()

    def register_schemas(self):
        """Publish the calendar-task JSON schemas to the dispatcher."""
        self.client.register_schema('calendar-task', {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'id': {'type': 'string'},
                'name': {'type': 'string'},
                'args': {'type': 'array'},
                'description': {'type': 'string'},
                'enabled': {'type': 'boolean'},
                'hidden': {'type': 'boolean'},
                'protected': {'type': 'boolean'},
                'status': {'$ref': 'calendar-task-status'},
                'schedule': {
                    'type': 'object',
                    'additionalProperties': False,
                    'properties': {
                        'coalesce': {'type': ['boolean', 'integer', 'null']},
                        'year': {'type': ['string', 'integer', 'null']},
                        'month': {'type': ['string', 'integer', 'null']},
                        'day': {'type': ['string', 'integer', 'null']},
                        'week': {'type': ['string', 'integer', 'null']},
                        'day_of_week': {'type': ['string', 'integer', 'null']},
                        'hour': {'type': ['string', 'integer', 'null']},
                        'minute': {'type': ['string', 'integer', 'null']},
                        'second': {'type': ['string', 'integer', 'null']},
                        'timezone': {'type': ['string', 'null']}
                    }
                }
            }
        })

        self.client.register_schema('calendar-task-status', {
            'type': 'object',
            'properties': {
                'next_run_time': {'type': 'string'},
                'last_run_status': {'type': 'string'},
                'current_run_status': {'type': ['string', 'null']},
                'current_run_progress': {'type': ['object', 'null']}
            }
        })

    def connect(self):
        """Connect to the dispatcher and expose scheduler services; retry forever."""
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('schedulerd')
                self.client.enable_server()
                self.client.register_service('scheduler.management', ManagementService(self))
                self.client.register_service('scheduler.debug', DebugService())
                self.client.resume_service('scheduler.management')
                self.client.resume_service('scheduler.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def run_job(self, *args, **kwargs):
        """Submit a calendar task, wait for it and record the run in the datastore.

        Emits a CRITICAL alert (best effort) when the task does not finish
        successfully.
        """
        tid = self.client.submit_task(*args)
        self.active_tasks[kwargs['id']] = tid
        self.client.call_sync('task.wait', tid, timeout=None)
        result = self.client.call_sync('task.status', tid)
        if result['state'] != 'FINISHED':
            try:
                self.client.call_sync('alerts.emit', {
                    'name': 'scheduler.task.failed',
                    'severity': 'CRITICAL',
                    'description': 'Task {0} has failed: {1}'.format(kwargs['name'], result['error']['message']),
                })
            except RpcException as e:
                self.logger.error('Failed to emit alert', exc_info=True)

        del self.active_tasks[kwargs['id']]
        self.datastore.insert('schedulerd.runs', {
            'job_id': kwargs['id'],
            'task_id': result['id']
        })

    def emit_event(self, name, params):
        """Forward an event to the dispatcher."""
        self.client.emit_event(name, params)

    def main(self):
        """Daemon entry point: parse arguments, bring services up, block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-f', action='store_true', default=False, help='Run in foreground')
        args = parser.parse_args()
        configure_logging('/var/log/schedulerd.log', 'DEBUG')
        setproctitle.setproctitle('schedulerd')
        self.config = args.c
        self.init_datastore()
        self.init_scheduler()
        self.init_dispatcher()
        self.register_schemas()
        self.client.wait_forever()
class Main(object):
    """networkd daemon: manages network interfaces, DHCP clients, routing and
    DNS configuration, and publishes network schemas/services to the dispatcher."""

    def __init__(self):
        self.config = None                  # path to the middleware config file
        self.client = None                  # dispatcher connection
        self.datastore = None
        self.configstore = None
        self.rtsock_thread = None           # routing socket event source
        self.dhcp_clients = {}              # interface name -> dhcp client
        self.dhcp_lock = threading.RLock()  # guards dhcp_clients
        self.logger = logging.getLogger('networkd')
        self.default_interface = None
        self.cv = Condition()

    def dhclient_pid(self, interface):
        """Return the pid recorded in dhclient's pidfile for interface, or None."""
        path = os.path.join('/var/run', 'dhclient.{0}.pid'.format(interface))
        if not os.path.exists(path):
            return None

        try:
            with open(path) as f:
                pid = int(f.read().strip())
                return pid
        except (IOError, ValueError):
            return None

    def dhclient_running(self, interface):
        """True if the pid from the dhclient pidfile is alive (signal 0 probe)."""
        pid = self.dhclient_pid(interface)
        if not pid:
            return False

        try:
            os.kill(pid, 0)
            return True
        except OSError:
            return False

    def configure_dhcp(self, interface, block=False, timeout=None):
        """Start a DHCP client on interface.

        When block is True, wait up to timeout for a lease and return whether
        one was obtained; otherwise return True immediately after starting.
        """
        if interface in self.dhcp_clients:
            self.logger.info('Interface {0} already configured by DHCP'.format(interface))
            return True

        def bind(old_lease, lease):
            # Called on lease acquisition and renewal.
            # NOTE(review): the format string has 4 placeholders but receives
            # 5 arguments (trailing `interface` is ignored by str.format).
            self.logger.info('{0} DHCP lease on {1} from {2}, valid for {3} seconds'.format(
                'Renewed' if old_lease else 'Acquired',
                interface,
                client.server_address,
                lease.lifetime,
                interface
            ))

            if old_lease is None or lease.client_ip != old_lease.client_ip:
                self.logger.info('Assigning IP address {0} to interface {1}'.format(lease.client_ip, interface))
                alias = lease.client_interface
                iface = netif.get_interface(interface)
                if old_lease:
                    # Drop the address from the previous lease before adding the new one.
                    try:
                        addr = first_or_default(lambda a: a.address == old_lease.client_ip, iface.addresses)
                        if addr:
                            iface.remove_address(addr)
                    except OSError as err:
                        self.logger.error('Cannot remove alias {0}: {1}'.format(old_lease.client_ip, err.strerror))

                try:
                    iface.add_address(netif.InterfaceAddress(netif.AddressFamily.INET, alias))
                except OSError as err:
                    self.logger.error('Cannot add alias to {0}: {1}'.format(interface, err.strerror))

            if lease.router and self.configstore.get('network.dhcp.assign_gateway'):
                # Install/replace the IPv4 default route offered by the lease.
                try:
                    rtable = netif.RoutingTable()
                    newroute = default_route(lease.router)
                    if rtable.default_route_ipv4 != newroute:
                        if rtable.default_route_ipv4:
                            self.logger.info('DHCP default route changed from {0} to {1}'.format(
                                rtable.default_route_ipv4,
                                newroute
                            ))
                            rtable.delete(rtable.default_route_ipv4)
                            rtable.add(default_route(lease.router))
                        else:
                            self.logger.info('Adding default route via {0}'.format(lease.router))
                            rtable.add(default_route(lease.router))
                except OSError as err:
                    self.logger.error('Cannot configure default route: {0}'.format(err.strerror))

            if lease.dns_addresses and self.configstore.get('network.dhcp.assign_dns'):
                # Feed the lease's DNS servers to resolvconf(8).
                inp = []
                addrs = []
                proc = subprocess.Popen(
                    ['/sbin/resolvconf', '-a', interface],
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE
                )

                for i in lease.dns_addresses:
                    # Filter out bogus DNS server addresses
                    if str(i) in ('127.0.0.1', '0.0.0.0', '255.255.255.255'):
                        continue

                    inp.append('nameserver {0}'.format(i))
                    addrs.append(i)

                if lease.domain_name:
                    inp.append('search {0}'.format(lease.domain_name))

                proc.communicate('\n'.join(inp).encode('ascii'))
                proc.wait()
                self.client.emit_event('network.dns.configured', {
                    'addresses': addrs,
                })
                self.logger.info('Updated DNS configuration')
            else:
                # No DNS in lease (or assignment disabled): withdraw our entry.
                subprocess.call(['/sbin/resolvconf', '-d', interface])
                self.client.emit_event('network.dns.configured', {
                    'addresses': [],
                })
                self.logger.info('Deleted DNS configuration')

        def reject(reason):
            # Lease request rejected: tear down and retry in 60s (async mode only).
            self.logger.info('DHCP request rejected on {0}: {1}'.format(interface, reason))
            self.deconfigure_dhcp(interface)
            if not block:
                t = threading.Timer(60, self.configure_dhcp, args=(interface,))
                t.start()

        def unbind(lease, reason):
            reasons = {
                dhcp.client.UnbindReason.EXPIRE: 'expired',
                dhcp.client.UnbindReason.REVOKE: 'revoked'
            }

            self.logger.info('DHCP lease on {0}: {1}'.format(interface, reasons.get(reason, 'revoked')))

        def state_change(state):
            # Propagate interface state transitions as middleware events.
            self.client.emit_event('network.interface.changed', {
                'operation': 'update',
                'ids': [interface]
            })
            self.client.emit_event('network.changed', {
                'operation': 'update'
            })

        with self.dhcp_lock:
            # Hostname is supplied lazily via a callable.
            client = dhcp.client.Client(interface, lambda: socket.gethostname().split('.')[0])
            client.on_bind = bind
            client.on_unbind = unbind
            client.on_reject = reject
            client.on_state_change = state_change
            client.start()
            self.dhcp_clients[interface] = client

        if block:
            ret = client.wait_for_bind(timeout)
            if ret is None:
                # Timed out waiting for a lease: undo the registration.
                client.stop()
                del self.dhcp_clients[interface]

            return ret is not None

        return True

    def deconfigure_dhcp(self, interface):
        """Release the lease, stop the client and forget the interface."""
        with self.dhcp_lock:
            client = self.dhcp_clients[interface]
            client.release()
            client.stop()
            del self.dhcp_clients[interface]

    def renew_dhcp(self, interface):
        """Force a lease renewal; raises RpcException if DHCP is not active."""
        if interface not in self.dhcp_clients:
            raise RpcException(errno.ENXIO, 'Interface {0} is not configured for DHCP'.format(interface))

        if not self.dhcp_clients[interface].lease:
            raise RpcException(errno.ENOENT, 'Cannot renew without a lease')

        self.dhcp_clients[interface].request(renew=True, timeout=30)

    def interface_detached(self, name):
        # NOTE(review): logger.warn is a deprecated alias of logger.warning.
        self.logger.warn('Interface {0} detached from the system'.format(name))

    def interface_attached(self, name):
        # NOTE(review): logger.warn is a deprecated alias of logger.warning.
        self.logger.warn('Interface {0} attached to the system'.format(name))

    def using_dhcp_for_gateway(self):
        """True if any configured interface uses DHCP and gateway assignment is on."""
        for i in self.datastore.query('network.interfaces'):
            if i.get('dhcp') and self.configstore.get('network.dhcp.assign_gateway'):
                return True

        return False

    def scan_interfaces(self):
        """Synchronize the datastore with the set of physical NICs present."""
        self.logger.info('Scanning available network interfaces...')
        existing = []

        # Add newly plugged NICs to DB
        for i in list(netif.list_interfaces().values()):
            existing.append(i.name)

            # We want only physical NICs
            if i.cloned:
                continue

            if i.name in ('mgmt0', 'nat0'):
                continue

            if i.name.startswith(('tap', 'brg')):
                continue

            if not self.datastore.exists('network.interfaces', ('id', '=', i.name)):
                self.logger.info('Found new interface {0} ({1})'.format(i.name, i.type.name))
                self.datastore.insert('network.interfaces', {
                    'enabled': False,
                    'id': i.name,
                    'name': None,
                    'cloned': False,
                    'type': i.type.name,
                    'dhcp': False,
                    'noipv6': False,
                    'rtadv': False,
                    'mtu': i.mtu,
                    'media': None,
                    'mediaopts': [],
                    'aliases': [],
                    'capabilities': {
                        'add': [],
                        'del': []
                    }
                })

        # Remove unplugged NICs from DB
        for i in self.datastore.query('network.interfaces', ('id', 'nin', existing), ('cloned', '=', False)):
            self.datastore.delete('network.interfaces', i['id'])

    def init_datastore(self):
        """Open datastore and config store; exit if the datastore is unavailable."""
        try:
            self.datastore = get_datastore(self.config)
        except DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def connect(self, resume=False):
        """Connect to the dispatcher and register networkd services; retry forever.

        resume=True additionally resumes the services (used on reconnect).
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('networkd')
                self.client.enable_server()
                self.register_schemas()
                self.client.register_service('networkd.configuration', ConfigurationService(self))
                self.client.register_service('networkd.debug', DebugService())
                if resume:
                    self.client.resume_service('networkd.configuration')
                    self.client.resume_service('networkd.debug')

                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def init_dispatcher(self):
        """Create the dispatcher client; reconnect (with resume) on connection loss."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect(resume=True)

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_routing_socket(self):
        """Start the routing-socket listener thread."""
        self.rtsock_thread = RoutingSocketEventSource(self)
        self.rtsock_thread.start()

    def register_schemas(self):
        """Publish the network-related JSON schemas to the dispatcher.

        Enum values are derived from the netif bindings so they always match
        the running kernel's capabilities.
        """
        self.client.register_schema('network-aggregation-protocols', {
            'type': 'string',
            'enum': list(netif.AggregationProtocol.__members__.keys())
        })

        self.client.register_schema('network-lagg-port-flags', {
            'type': 'array',
            'items': {'$ref': 'network-lagg-port-flags-items'}
        })

        self.client.register_schema('network-lagg-port-flags-items', {
            'type': 'string',
            'enum': list(netif.LaggPortFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-flags', {
            'type': 'array',
            'items': {'$ref': 'network-interface-flags-items'}
        })

        self.client.register_schema('network-interface-flags-items', {
            'type': 'string',
            'enum': list(netif.InterfaceFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-capabilities', {
            'type': 'array',
            'items': {'$ref': 'network-interface-capabilities-items'}
        })

        self.client.register_schema('network-interface-capabilities-items', {
            'type': 'string',
            'enum': list(netif.InterfaceCapability.__members__.keys())
        })

        self.client.register_schema('network-interface-mediaopts', {
            'type': 'array',
            'items': {'$ref': 'network-interface-mediaopts-items'}
        })

        self.client.register_schema('network-interface-mediaopts-items', {
            'type': 'string',
            'enum': list(netif.InterfaceMediaOption.__members__.keys())
        })

        self.client.register_schema('network-interface-nd6-flag', {
            'type': 'array',
            'items': {'$ref': 'network-interface-nd6-flag-items'}
        })

        self.client.register_schema('network-interface-nd6-flag-items', {
            'type': 'string',
            'enum': list(netif.NeighborDiscoveryFlags.__members__.keys())
        })

        self.client.register_schema('network-interface-type', {
            'type': 'string',
            'enum': [
                'LOOPBACK',
                'ETHER',
                'VLAN',
                'BRIDGE',
                'LAGG'
            ]
        })

        self.client.register_schema('network-interface-dhcp-state', {
            'type': 'string',
            'enum': [
                'INIT',
                'SELECTING',
                'REQUESTING',
                'INIT_REBOOT',
                'REBOOTING',
                'BOUND',
                'RENEWING',
                'REBINDING'
            ]
        })

        self.client.register_schema('network-interface-status', {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'link_state': {'$ref': 'network-interface-status-linkstate'},
                'link_address': {'type': 'string'},
                'mtu': {'type': 'integer'},
                'media_type': {'type': 'string'},
                'media_subtype': {'type': 'string'},
                'active_media_type': {'type': 'string'},
                'active_media_subtype': {'type': 'string'},
                'media_options': {'$ref': 'network-interface-mediaopts'},
                'cloned': {'type': 'boolean'},
                'capabilities': {'$ref': 'network-interface-capabilities'},
                'flags': {'$ref': 'network-interface-flags'},
                'dhcp': {
                    'type': 'object',
                    'properties': {
                        'state': {'$ref': 'network-interface-dhcp-state'},
                        'server_address': {'type': 'string'},
                        'server_name': {'type': 'string'},
                        'lease_starts_at': {'type': 'datetime'},
                        'lease_ends_at': {'type': 'datetime'}
                    }
                },
                'aliases': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-alias'}
                },
                'nd6_flags': {
                    'type': 'array',
                    'items': {'$ref': 'network-interface-nd6-flag'}
                },
                'ports': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {
                                'type': 'object',
                                'properties': {
                                    'name': {'type': 'string'},
                                    'flags': {'$ref': 'network-lagg-port-flags'}
                                }
                            }
                        }
                    ]
                },
                'members': {
                    'oneOf': [
                        {'type': 'null'},
                        {
                            'type': 'array',
                            'members': {'type': 'string'}
                        }
                    ]
                },
                'parent': {'type': ['string', 'null']},
                'tag': {'type': ['integer', 'null']}
            }
        })

        self.client.register_schema('network-interface-status-linkstate', {
            'type': 'string',
            'enum': list(netif.InterfaceLinkState.__members__.keys())
        })

    def main(self):
        """Daemon entry point: parse arguments, bring services up, block forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        configure_logging('/var/log/networkd.log', 'DEBUG')
        setproctitle.setproctitle('networkd')
        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.scan_interfaces()
        self.init_routing_socket()
        self.client.resume_service('networkd.configuration')
        self.client.resume_service('networkd.debug')
        self.logger.info('Started')
        self.client.wait_forever()
class Main(object):
    """clid daemon: hosts the CLI engine and exposes it over dispatcher RPC."""

    def __init__(self):
        # BUG FIX: the original assigned self.config twice and self.logger
        # twice — logging.getLogger('clid') was immediately clobbered by
        # logging.getLogger() (the root logger), defeating the named logger.
        # Keep the named 'clid' logger and drop the duplicate initializations.
        self.logger = logging.getLogger('clid')
        self.config = None           # path to the middleware config file (set in main())
        self.datastore = None
        self.configstore = None
        self.client = None           # dispatcher RPC client
        self.plugin_dirs = []
        self.ml = None               # CLI MainLoop instance (set in init_cli())
        self.context = None          # CLI Context instance (set in init_cli())

    def init_dispatcher(self):
        """Create the dispatcher client and establish the initial connection.

        Reconnects automatically whenever the connection is closed or the
        session is logged out.
        """
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_cli(self):
        """Bootstrap the CLI: discover plugins and build the main loop object."""
        self.logger.info('Initializing CLI instance')
        self.context = Context()
        self.context.connection = self.client
        self.context.plugin_dirs = PLUGIN_DIRS
        self.context.discover_plugins()
        self.context.start_entity_subscribers()
        self.context.login_plugins()
        self.ml = MainLoop(self.context)
        self.logger.info('CLI instance ready')

    def connect(self):
        """Connect to the dispatcher and register/resume clid's RPC services.

        Retries forever at one-second intervals until the connection succeeds.
        """
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('clid')
                self.client.enable_server()
                self.client.call_sync('management.enable_features', ['streaming_responses'])
                self.client.register_service('clid.management', ManagementService(self))
                self.client.register_service('clid.eval', EvalService(self))
                self.client.register_service('clid.debug', DebugService())
                self.client.resume_service('clid.management')
                self.client.resume_service('clid.eval')
                self.client.resume_service('clid.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def main(self):
        """Daemon entry point: parse arguments, set up logging, run forever."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        args = parser.parse_args()
        self.config = args.c
        configure_logging('/var/log/clid.log', 'DEBUG')
        setproctitle('clid')
        self.init_dispatcher()
        self.init_cli()
        self.client.wait_forever()
class Context(object):
    """Support-connection context for the debugd daemon.

    Maintains an outbound msock connection to the support proxy (with a
    keep-alive/reconnect loop), a local control socket server, and an RPC
    connection to the middleware dispatcher.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.msock = msock.client.Client()          # multiplexed socket to the support proxy
        self.msock.on_closed = self.on_msock_close
        self.rpc_fd = -1                            # msock channel used for RPC
        self.connection_id = None                   # UUID identifying this support session
        self.jobs = []                              # jobs started via run_job()
        self.state = ConnectionState.OFFLINE
        self.config = None
        self.keepalive = None                       # keep-alive thread (see connect())
        self.connected_at = None
        self.cv = Condition()                       # guards state transitions
        self.rpc = RpcContext()
        self.client = Client()
        self.server = Server()
        self.middleware_endpoint = None             # dispatcher RPC client

    def start(self, configpath, sockpath):
        """Read the config and start the local control-socket server thread.

        SIGUSR2 triggers an on-demand support connection attempt.
        """
        signal.signal(signal.SIGUSR2, lambda signo, frame: self.connect())
        self.read_config(configpath)
        self.server.rpc = RpcContext()
        self.server.rpc.register_service_instance("control", ControlService(self))
        self.server.start(sockpath)
        threading.Thread(target=self.server.serve_forever, name="server thread", daemon=True).start()

    def init_dispatcher(self):
        """Create the dispatcher client and connect, reconnecting on loss/logout."""
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning("Connection to dispatcher lost")
                self.connect_dispatcher()

        self.middleware_endpoint = Client()
        self.middleware_endpoint.on_error(on_error)
        self.connect_dispatcher()

    def connect_dispatcher(self):
        """Connect to the dispatcher and register debugd's management service.

        Retries forever at one-second intervals until the connection succeeds.
        """
        while True:
            try:
                self.middleware_endpoint.connect("unix:")
                self.middleware_endpoint.login_service("debugd")
                self.middleware_endpoint.enable_server()
                self.middleware_endpoint.register_service("debugd.management", ControlService(self))
                self.middleware_endpoint.resume_service("debugd.management")
                return
            except (OSError, RpcException) as err:
                self.logger.warning("Cannot connect to dispatcher: {0}, retrying in 1 second".format(str(err)))
                time.sleep(1)

    def read_config(self, path):
        """Load the JSON config file at *path* into self.config; exit(1) on failure."""
        try:
            with open(path) as f:
                self.config = json.load(f)
        except (IOError, OSError, ValueError) as err:
            self.logger.fatal("Cannot open config file: {0}".format(str(err)))
            self.logger.fatal("Exiting.")
            sys.exit(1)

    def connect(self, discard=False):
        """Start the support connection keep-alive thread.

        When *discard* is True the previous connection id is dropped so a
        fresh session id is generated on the next attempt.
        """
        if discard:
            self.connection_id = None

        self.keepalive = threading.Thread(target=self.connect_keepalive, daemon=True)
        self.keepalive.start()

    def connect_keepalive(self):
        """Keep-alive loop: connect to the support proxy and reconnect on loss.

        Runs until the state is deliberately set to OFFLINE (see disconnect());
        on any failure or lost connection it retries after 10 seconds.
        """
        while True:
            try:
                if not self.connection_id:
                    self.connection_id = uuid.uuid4()

                self.msock.connect(SUPPORT_PROXY_ADDRESS)
                self.logger.info("Connecting to {0}".format(SUPPORT_PROXY_ADDRESS))
                self.rpc_fd = self.msock.create_channel(0)
                time.sleep(1)  # FIXME
                self.client = Client()
                self.client.connect("fd://", fobj=self.rpc_fd)
                self.client.channel_serializer = MSockChannelSerializer(self.msock)
                self.client.standalone_server = True
                self.client.enable_server()
                self.client.register_service("debug", DebugService(self))
                self.client.call_sync(
                    "server.login",
                    str(self.connection_id),
                    socket.gethostname(),
                    get_version(),
                    "none"
                )
                self.set_state(ConnectionState.CONNECTED)
            except BaseException as err:
                # Deliberately broad: any failure during setup tears the msock
                # down and falls through to the retry sleep below.
                self.logger.warning("Failed to initiate support connection: {0}".format(err), exc_info=True)
                self.msock.disconnect()
            else:
                self.connected_at = datetime.now()
                with self.cv:
                    # Block until the connection is lost or shut down.
                    self.cv.wait_for(lambda: self.state in (ConnectionState.LOST, ConnectionState.OFFLINE))
                    if self.state == ConnectionState.OFFLINE:
                        return

            self.logger.warning("Support connection lost, retrying in 10 seconds")
            time.sleep(10)

    def disconnect(self):
        """Shut down the support connection and mark the session OFFLINE."""
        self.connected_at = None
        self.set_state(ConnectionState.OFFLINE)
        self.client.disconnect()
        self.msock.destroy_channel(0)
        self.msock.disconnect()
        self.jobs.clear()

    def on_msock_close(self):
        """msock close callback: flag the connection as LOST so the keep-alive retries."""
        self.connected_at = None
        self.set_state(ConnectionState.LOST)

    def run_job(self, job):
        """Track *job*, attach this context to it and start it."""
        self.jobs.append(job)
        job.context = self
        job.start()

    def set_state(self, state):
        """Atomically set the connection state and wake all waiters on the condition."""
        with self.cv:
            self.state = state
            self.cv.notify_all()