def stopService(self):
    """
    Stop all child services, then stop the subprocess, if it's running.
    """
    if self.delayedShutdown:
        # We're still in the process of initializing the database, so
        # delay shutdown until the shutdownDeferred fires.
        d = self.shutdownDeferred = Deferred()
        d.addCallback(lambda ignored: MultiService.stopService(self))
    else:
        d = MultiService.stopService(self)

    def superStopped(result):
        # If pg_ctl's startup wasn't successful, don't bother to stop the
        # database.  (This also happens in command-line tools.)
        if self.shouldStopDatabase:
            monitor = _PostgresMonitor()
            pgCtl = self.pgCtl()
            # FIXME: why is this 'logfile' and not self.logfile?
            self.reactor.spawnProcess(
                monitor, pgCtl,
                [pgCtl, "-l", "logfile", "stop"],
                env=self.env, path=self.workingDir.path,
                uid=self.uid, gid=self.gid,
            )
            return monitor.completionDeferred
    return d.addCallback(superStopped)

def stopService(self):
    """
    Stop all child services, then stop the subprocess, if it's running.
    """
    if self.delayedShutdown:
        # We're still in the process of initializing the database, so
        # delay shutdown until the shutdownDeferred fires.
        d = self.shutdownDeferred = Deferred()
        d.addCallback(lambda ignored: MultiService.stopService(self))
    else:
        d = MultiService.stopService(self)

    def superStopped(result):
        # If pg_ctl's startup wasn't successful, don't bother to stop the
        # database.  (This also happens in command-line tools.)
        if self.shouldStopDatabase:
            monitor = PostgresMonitor()
            args = [
                self._pgCtl, "stop",
                "--log={}".format(self.logFile),
            ]
            log.info("Requesting postgres stop via: {args}", args=args)
            self.reactor.spawnProcess(
                monitor, self._pgCtl, args,
                env=self.env, path=self.workingDir.path,
                uid=self.uid, gid=self.gid,
            )
            return monitor.completionDeferred
    return d.addCallback(superStopped)

def stopService(self):
    """
    Stop all child services, then stop the subprocess, if it's running.
    """
    if self.delayedShutdown:
        # We're still in the process of initializing the database, so
        # delay shutdown until the shutdownDeferred fires.
        d = self.shutdownDeferred = Deferred()
        d.addCallback(lambda ignored: MultiService.stopService(self))
    else:
        d = MultiService.stopService(self)

    def superStopped(result):
        # If pg_ctl's startup wasn't successful, don't bother to stop the
        # database.  (This also happens in command-line tools.)
        if self.shouldStopDatabase:
            monitor = _PostgresMonitor()
            pgCtl = self.pgCtl()
            # FIXME: why is this 'logfile' and not self.logfile?
            self.reactor.spawnProcess(
                monitor, pgCtl,
                [pgCtl, '-l', 'logfile', 'stop'],
                env=self.env, path=self.workingDir.path,
                uid=self.uid, gid=self.gid,
            )
            return monitor.completionDeferred
    return d.addCallback(superStopped)

def stopService(self):
    """
    Stop all child services, then stop the subprocess, if it's running.
    """
    if self.delayedShutdown:
        # We're still in the process of initializing the database, so
        # delay shutdown until the shutdownDeferred fires.
        d = self.shutdownDeferred = Deferred()
        d.addCallback(lambda ignored: MultiService.stopService(self))
    else:
        d = MultiService.stopService(self)

    def superStopped(result):
        # If pg_ctl's startup wasn't successful, don't bother to stop the
        # database.  (This also happens in command-line tools.)
        if self.shouldStopDatabase:
            # Compare pg_ctl inode with one we saw at the start; if different
            # (or missing), fall back to SIGTERM
            try:
                newInode = os.stat(self._pgCtl).st_ino
            except OSError:
                # Missing
                newInode = -1

            if self._pgCtlInode != newInode:
                # send SIGTERM to postgres
                log.info("Postgres control script mismatch")
                if self._postgresPid:
                    log.info("Sending SIGTERM to Postgres")
                    try:
                        os.kill(self._postgresPid, signal.SIGTERM)
                    except OSError:
                        pass
                return succeed(None)
            else:
                # use pg_ctl stop
                monitor = PostgresMonitor()
                args = [
                    self._pgCtl, "stop",
                    "--log={}".format(self.logFile),
                ]
                log.info("Requesting postgres stop via: {args}", args=args)
                self.reactor.spawnProcess(
                    monitor, self._pgCtl, args,
                    env=self.env, path=self.workingDir.path,
                    uid=self.uid, gid=self.gid,
                )
                return monitor.completionDeferred
    return d.addCallback(superStopped)

class ServerManagerFrameImpl(ServerManagerFrame):
    def __init__(self, *args, **kw):
        super(self.__class__, self).__init__(*args, **kw)
        #self.hostsDialog = HostsDialogImpl(self)
        #servers = dict([((gethostbyname(k[0]), k[1]), v) for k, v in servers.iteritems()])
        # TODO?: make this Service a serialized Application then load from XML
        self.services = MultiService()
        for klass, addresses in servers.iteritems():
            module, klass = klass.rsplit('.', 1)
            self.services.addService(
                __import__(module).__dict__[klass](addresses))

        # setup log window
        class Redirect(object):
            def write(inner, *args, **kw):
                self.text_ctrl_log.AppendText(args[0])

            def flush(self, *args, **kw):
                pass

        log.startLogging(Redirect())

    def isValidClick(self, button, other):
        if other.GetValue():
            button.SetValue(True)
            other.SetValue(False)
            return True
        else:
            button.SetValue(not button.GetValue())
            return False

    def StartServer(self, event):
        # NOTE: button is already flipped by the time we get here
        button = self.button_start
        other = self.button_stop
        if not self.isValidClick(button, other):
            return
        self.services.startService()

    def StopServer(self, event):
        # NOTE: button is already flipped by the time we get here
        button = self.button_stop
        other = self.button_start
        if not self.isValidClick(button, other):
            return
        self.services.stopService()

    def OnHostsButton(self, event):
        self.hostsDialog.Show(not self.hostsDialog.IsShown())

def stopService(self):
    def finish_up(d):
        self.console("mark2 stopped.")

    d = MultiService.stopService(self)
    d.addCallback(finish_up)
    return d

def stopService(self):
    try:
        self._starting.cancel()
        yield MultiService.stopService(self)
        yield self._starting
    finally:
        self.config.api_client_endpoint = None

class TestServicesBase:

    run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=5)

    def setUp(self):
        super(TestServicesBase, self).setUp()
        self.observers = theLogPublisher.observers[:]
        self.services = MultiService()
        self.services.privilegedStartService()
        self.services.startService()

    def tearDown(self):
        super(TestServicesBase, self).tearDown()
        d = self.services.stopService()
        # The log file must be read in right after services have stopped,
        # before the temporary directory where the log lives is removed.
        d.addBoth(lambda ignore: self.addDetailFromLog())
        d.addBoth(lambda ignore: self.assertNoObserversLeftBehind())
        return d

    def addDetailFromLog(self):
        content = content_from_file(self.log_filename, buffer_now=True)
        self.addDetail("log", content)

    def assertNoObserversLeftBehind(self):
        self.assertEqual(self.observers, theLogPublisher.observers)

def stopService(self):
    """Stop listening on both ports."""
    logger.info("- - - - - SERVER STOPPING")
    yield MultiService.stopService(self)
    if self.heartbeat_writer:
        self.heartbeat_writer.loseConnection()
        self.heartbeat_writer = None
    logger.info("- - - - - SERVER STOPPED")

def stopService(self, clean_stop=False):
    # Commander was stopped manually, rather than having lost its connection
    if clean_stop:
        count = yield self.cl_client.user_count()
        if count:
            LOG.debug("Notifying users about quitting")
            self.cl_client.chat_all(
                _("{commander} is quitting. Goodbye everyone!").format(
                    commander=unicode(settings.COMMANDER_NAME)))
    yield MultiService.stopService(self)
    if not self.shared_storage.flushdb():
        LOG.error("Failed to flush commander's shared storage")

def stopService(self):
    """
    Stop all child services, then stop the subprocess, if it's running.
    """
    d = MultiService.stopService(self)

    def superStopped(result):
        # Probably want to stop and wait for startup if that hasn't
        # completed yet...
        monitor = _PostgresMonitor()
        pg_ctl = which("pg_ctl")[0]
        reactor.spawnProcess(
            monitor, pg_ctl,
            [pg_ctl, '-l', 'logfile', 'stop'],
            self.env,
            uid=self.uid, gid=self.gid,
        )
        return monitor.completionDeferred
    return d.addCallback(superStopped)

def stop(self, clean=False):
    """
    Overloaded base method for stopping the commander service with all of
    its subservices. Call this method when the connection with the server
    is lost or the commander is going to exit.

    Input:
    clean:      'True' if the commander was stopped manually, 'False' if
                the connection with the server was lost.
    """
    if clean:
        count = yield self.cl_client.users_count()
        if count:
            LOG.debug("Notifying users about quitting")
            self.cl_client.chat_all(
                _("Commander is quitting. Goodbye everyone!"))
    yield MultiService.stopService(self)
    self.confs.clear()
    self.clear_shared_storage()

class TestUDPRedirect(TxTestCase):

    def setUp(self):
        self.service = MultiService()
        self.received = []

        class Collect(DatagramProtocol):
            def datagramReceived(cself, data, host_port):
                self.got_data(data)

        self.port = reactor.listenUDP(0, Collect())
        self.processor = TestMessageProcessor()
        self.router = Router(
            self.processor,
            r"any => redirect_udp 127.0.0.1 %s" % (self.port.getHost().port,),
            service=self.service)
        self.service.startService()
        return self.router.ready

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.service.stopService()
        self.port.stopListening()

    def test_redirect(self):
        """
        Any message gets forwarded to the UDP target with the redirect rule.
        """
        message = "gorets:1|c"
        d = defer.Deferred()

        def got_data(data):
            self.assertEqual(data, message)
            d.callback(True)

        self.got_data = got_data
        self.router.process(message)
        return d

def stopService(self):
    self.periodicTask.cancel()  # cancel the task scheduled to run every T seconds
    return BaseMultiService.stopService(self)

def _main_async(reactor, argv=None, _abort_for_test=False):
    if argv is None:
        argv = sys.argv

    if not _abort_for_test:
        # Some log messages would be discarded if we did not set up things early.
        configure_logging()

    # Option parsing is done before importing the main modules so as to avoid the cost of initializing gnuradio if we are aborting early. TODO: Make that happen for createConfig too.
    argParser = argparse.ArgumentParser(prog=argv[0])
    argParser.add_argument('config_path', metavar='CONFIG',
        help='path of configuration directory or file')
    argParser.add_argument('--create', dest='createConfig', action='store_true',
        help='write template configuration file to CONFIG and exit')
    argParser.add_argument('-g, --go', dest='openBrowser', action='store_true',
        help='open the UI in a web browser')
    argParser.add_argument('--force-run', dest='force_run', action='store_true',
        help='Run DSP even if no client is connected (for debugging).')
    args = argParser.parse_args(args=argv[1:])

    # Verify we can actually run.
    # Note that this must be done before we actually load core modules, because we might get an import error then.
    version_report = yield _check_versions()
    if version_report:
        print >>sys.stderr, version_report
        sys.exit(1)

    # Write config file and exit if asked ...
    if args.createConfig:
        write_default_config(args.config_path)
        log.msg('Created default configuration at: ' + args.config_path)
        sys.exit(0)  # TODO: Consider using a return value or something instead

    # ... else read config file
    config_obj = Config(reactor)
    execute_config(config_obj, args.config_path)
    yield config_obj._wait_and_validate()

    log.msg('Constructing...')
    app = config_obj._create_app()
    reactor.addSystemEventTrigger('during', 'shutdown', app.close_all_devices)

    log.msg('Restoring state...')
    pfg = PersistenceFileGlue(
        reactor=reactor,
        root_object=app,
        filename=config_obj._state_filename,
        get_defaults=_app_defaults)

    log.msg('Starting web server...')
    services = MultiService()
    for maker in config_obj._service_makers:
        IService(maker(app)).setServiceParent(services)
    services.startService()

    log.msg('ShinySDR is ready.')

    for service in services:
        # TODO: should have an interface (currently no proper module to put it in)
        service.announce(args.openBrowser)

    if args.force_run:
        log.msg('force_run')
        from gnuradio.gr import msg_queue
        # TODO kludge, make this less digging into guts
        app.get_receive_flowgraph().monitor.get_fft_distributor().subscribe(msg_queue(limit=2))

    if _abort_for_test:
        services.stopService()
        yield pfg.sync()
        defer.returnValue(app)
    else:
        yield defer.Deferred()  # never fires

def stopService(self):
    if self.parent.is_connected:
        self.cl_client.chat_all(_("Minicommander quits. Good bye!"))
    yield MultiService.stopService(self)

def stopService(self):
    signal.signal(signal.SIGHUP, self.previousHandler)
    MultiService.stopService(self)

def stopService(self):
    log.msg("Shutting down DBus service ...")
    return MultiService.stopService(self)

class RegionEventLoop:
    """An event loop running in a region controller process.

    Typically several processes will be running the web application --
    chiefly Django -- across several machines, with multiple threads of
    execution in each process. There is a single event loop for each
    *process*, allowing convenient control of the event loop -- a Twisted
    reactor running in a thread -- and to which to attach and query
    services.

    :cvar factories: A sequence of ``(name, factory)`` tuples. Used to
        populate :py:attr:`.services` at start time.

    :ivar services: A :py:class:`~twisted.application.service.MultiService`
        which forms the root of the service tree.
    """

    factories = {
        "database-tasks": {
            "only_on_master": False,
            "factory": make_DatabaseTaskService,
            "requires": [],
        },
        "region-controller": {
            "only_on_master": True,
            "factory": make_RegionControllerService,
            "requires": ["postgres-listener"],
        },
        "rpc": {
            "only_on_master": False,
            "factory": make_RegionService,
            "requires": ["rpc-advertise"],
        },
        "rpc-advertise": {
            "only_on_master": False,
            "factory": make_RegionAdvertisingService,
            "requires": [],
        },
        "nonce-cleanup": {
            "only_on_master": True,
            "factory": make_NonceCleanupService,
            "requires": [],
        },
        "dns-publication-cleanup": {
            "only_on_master": True,
            "factory": make_DNSPublicationGarbageService,
            "requires": [],
        },
        "status-monitor": {
            "only_on_master": True,
            "factory": make_StatusMonitorService,
            "requires": [],
        },
        "import-resources": {
            "only_on_master": True,
            "factory": make_ImportResourcesService,
            "requires": [],
        },
        "import-resources-progress": {
            "only_on_master": True,
            "factory": make_ImportResourcesProgressService,
            "requires": [],
        },
        "postgres-listener": {
            "only_on_master": False,
            "factory": make_PostgresListenerService,
            "requires": [],
        },
        "web": {
            "only_on_master": False,
            "factory": make_WebApplicationService,
            "requires": ["postgres-listener", "status-worker"],
        },
        "service-monitor": {
            "only_on_master": True,
            "factory": make_ServiceMonitorService,
            "requires": ["rpc-advertise"],
        },
        "status-worker": {
            "only_on_master": False,
            "factory": make_StatusWorkerService,
            "requires": ["database-tasks"],
        },
        "networks-monitor": {
            "only_on_master": False,
            "factory": make_NetworksMonitoringService,
            "requires": [],
        },
        "active-discovery": {
            "only_on_master": True,
            "factory": make_ActiveDiscoveryService,
            "requires": ["postgres-listener"],
        },
        "reverse-dns": {
            "only_on_master": True,
            "factory": make_ReverseDNSService,
            "requires": ["postgres-listener"],
        },
        "rack-controller": {
            "only_on_master": False,
            "factory": make_RackControllerService,
            "requires": ["postgres-listener", "rpc-advertise"],
        },
        "ntp": {
            "only_on_master": True,
            "factory": make_NetworkTimeProtocolService,
            "requires": [],
        },
    }

    def __init__(self):
        super(RegionEventLoop, self).__init__()
        self.services = MultiService()
        self.handle = None

    @asynchronous
    def populateService(self, name):
        """Prepare a service."""
        factoryInfo = self.factories[name]
        try:
            service = self.services.getServiceNamed(name)
        except KeyError:
            # Get all dependent services for this service.
            dependencies = []
            for require in factoryInfo["requires"]:
                dependencies.append(self.populateService(require))

            # Create the service with dependencies.
            service = factoryInfo["factory"](*dependencies)
            service.setName(name)
            service.setServiceParent(self.services)
        return service

    @asynchronous
    def populate(self):
        """Prepare services."""
        if is_master_process():
            for name in self.factories.keys():
                self.populateService(name)
        else:
            for name, item in self.factories.items():
                if not item["only_on_master"]:
                    self.populateService(name)

    @asynchronous
    def prepare(self):
        """Perform start_up of the region process."""
        from maasserver.start_up import start_up
        return start_up()

    @asynchronous
    def startMultiService(self, result):
        """Start the multi service."""
        self.services.startService()

    @asynchronous
    def start(self):
        """start()

        Start all services in the region's event-loop.
        """
        self.populate()
        self.handle = reactor.addSystemEventTrigger(
            'before', 'shutdown', self.services.stopService)
        return self.prepare().addCallback(self.startMultiService)

    @asynchronous
    def stop(self):
        """stop()

        Stop all services in the region's event-loop.
        """
        if self.handle is not None:
            handle, self.handle = self.handle, None
            reactor.removeSystemEventTrigger(handle)
        return self.services.stopService()

    @asynchronous
    def reset(self):
        """reset()

        Stop all services, then disown them all.
        """
        def disown_all_services(_):
            for service in list(self.services):
                service.disownServiceParent()

        def reset_factories(_):
            try:
                # Unshadow class attribute.
                del self.factories
            except AttributeError:
                # It wasn't shadowed.
                pass

        d = self.stop()
        d.addCallback(disown_all_services)
        d.addCallback(reset_factories)
        return d

    @property
    def name(self):
        """A name for identifying this service in a distributed system."""
        return "%s:pid=%d" % (gethostname(), os.getpid())

    @property
    def running(self):
        """Is this running?"""
        return bool(self.services.running)

def stopService(self):
    d = MultiService.stopService(self)
    self.logObserver.stop()
    return d

def stopService(self):
    MultiService.stopService(self)

def _main_async(reactor, argv=None, _abort_for_test=False):
    if argv is None:
        argv = sys.argv

    if not _abort_for_test:
        # Configure logging. Some log messages would be discarded if we did not set up things early
        # TODO: Consult best practices for Python and Twisted logging.
        # TODO: Logs which are observably relevant should be sent to the client (e.g. the warning of refusing to have more receivers active)
        logging.basicConfig(level=logging.INFO)
        log.startLoggingWithObserver(log.PythonLoggingObserver(loggerName='shinysdr').emit, False)

    # Option parsing is done before importing the main modules so as to avoid the cost of initializing gnuradio if we are aborting early. TODO: Make that happen for createConfig too.
    argParser = argparse.ArgumentParser(prog=argv[0])
    argParser.add_argument('configFile', metavar='CONFIG',
        help='path of configuration file')
    argParser.add_argument('--create', dest='createConfig', action='store_true',
        help='write template configuration file to CONFIG and exit')
    argParser.add_argument('-g, --go', dest='openBrowser', action='store_true',
        help='open the UI in a web browser')
    argParser.add_argument('--force-run', dest='force_run', action='store_true',
        help='Run DSP even if no client is connected (for debugging).')
    args = argParser.parse_args(args=argv[1:])

    # We don't actually use shinysdr.devices directly, but we want it to be guaranteed available in the context of the config file.
    import shinysdr.devices as lazy_devices
    import shinysdr.source as lazy_source  # legacy shim

    # Load config file
    if args.createConfig:
        with open(args.configFile, 'w') as f:
            f.write(make_default_config())
        log.msg('Created default configuration file at: ' + args.configFile)
        sys.exit(0)  # TODO: Consider using a return value or something instead
    else:
        configObj = Config(reactor)
        execute_config(configObj, args.configFile)
        yield configObj._wait_and_validate()

        stateFile = configObj._state_filename

    def noteDirty():
        if stateFile is not None:
            # just immediately write (revisit this when more performance is needed)
            with open(stateFile, 'w') as f:
                json.dump(top.state_to_json(), f)

    def restore(root, get_defaults):
        if stateFile is not None:
            if os.path.isfile(stateFile):
                root.state_from_json(json.load(open(stateFile, 'r')))
                # make a backup in case this code version misreads the state and loses things on save (but only if the load succeeded, in case the file but not its backup is bad)
                shutil.copyfile(stateFile, stateFile + '~')
            else:
                root.state_from_json(get_defaults(root))

    log.msg('Constructing flow graph...')
    top = configObj._create_top_block()

    log.msg('Restoring state...')
    restore(top, top_defaults)

    log.msg('Starting web server...')
    services = MultiService()
    for maker in configObj._service_makers:
        IService(maker(top, noteDirty)).setServiceParent(services)
    services.startService()

    log.msg('ShinySDR is ready.')

    for service in services:
        # TODO: should have an interface (currently no proper module to put it in)
        service.announce(args.openBrowser)

    if args.force_run:
        log.msg('force_run')
        from gnuradio.gr import msg_queue
        top.add_audio_queue(msg_queue(limit=2), 44100)
        top.set_unpaused(True)

    if _abort_for_test:
        services.stopService()
        defer.returnValue((top, noteDirty))
    else:
        yield defer.Deferred()  # never fires

def stopService(self):
    self.evt_log.setServiceParent(self)
    return MultiService.stopService(self)

def stopService(self):
    "Stops service"
    MultiService.stopService(self)
    log.msg("stopping service")

def stopService(self):
    MultiService.stopService(self)
    logger.debug("stopped window management service")

def stopService(self):
    MultiService.stopService(self)

def stopService(self):
    log.msg("AirPlayService '%s' was stopped" % (self.name_, ))
    return MultiService.stopService(self)

def _main_async(reactor, argv=None, _abort_for_test=False):
    if argv is None:
        argv = sys.argv

    if not _abort_for_test:
        # Configure logging. Some log messages would be discarded if we did not set up things early
        # TODO: Consult best practices for Python and Twisted logging.
        # TODO: Logs which are observably relevant should be sent to the client (e.g. the warning of refusing to have more receivers active)
        logging.basicConfig(level=logging.INFO)
        log.startLoggingWithObserver(
            log.PythonLoggingObserver(loggerName='shinysdr').emit, False)

    # Option parsing is done before importing the main modules so as to avoid the cost of initializing gnuradio if we are aborting early. TODO: Make that happen for createConfig too.
    argParser = argparse.ArgumentParser(prog=argv[0])
    argParser.add_argument('config_path', metavar='CONFIG',
        help='path of configuration directory or file')
    argParser.add_argument(
        '--create', dest='createConfig', action='store_true',
        help='write template configuration file to CONFIG and exit')
    argParser.add_argument('-g, --go', dest='openBrowser', action='store_true',
        help='open the UI in a web browser')
    argParser.add_argument(
        '--force-run', dest='force_run', action='store_true',
        help='Run DSP even if no client is connected (for debugging).')
    args = argParser.parse_args(args=argv[1:])

    # Verify we can actually run.
    # Note that this must be done before we actually load core modules, because we might get an import error then.
    version_report = yield _check_versions()
    if version_report:
        print >> sys.stderr, version_report
        sys.exit(1)

    # We don't actually use shinysdr.devices directly, but we want it to be guaranteed available in the context of the config file.
    # pylint: disable=unused-variable
    import shinysdr.devices as lazy_devices
    import shinysdr.source as lazy_source  # legacy shim

    # Load config file
    if args.createConfig:
        write_default_config(args.config_path)
        log.msg('Created default configuration at: ' + args.config_path)
        sys.exit(0)  # TODO: Consider using a return value or something instead
    else:
        configObj = Config(reactor)
        execute_config(configObj, args.config_path)
        yield configObj._wait_and_validate()

        stateFile = configObj._state_filename

    def restore(root, get_defaults):
        if stateFile is not None:
            if os.path.isfile(stateFile):
                root.state_from_json(json.load(open(stateFile, 'r')))
                # make a backup in case this code version misreads the state and loses things on save (but only if the load succeeded, in case the file but not its backup is bad)
                shutil.copyfile(stateFile, stateFile + '~')
            else:
                root.state_from_json(get_defaults(root))

    log.msg('Constructing...')
    app = configObj._create_app()
    singleton_reactor.addSystemEventTrigger(
        'during', 'shutdown', app.close_all_devices)

    log.msg('Restoring state...')
    restore(app, _app_defaults)

    # Set up persistence
    if stateFile is not None:
        def eventually_write():
            log.msg('Scheduling state write.')

            def actually_write():
                log.msg('Performing state write...')
                current_state = pcd.get()
                with open(stateFile, 'w') as f:
                    json.dump(current_state, f)
                log.msg('...done')

            reactor.callLater(_PERSISTENCE_DELAY, actually_write)

        pcd = PersistenceChangeDetector(app, eventually_write, the_subscription_context)
        # Start implicit write-to-disk loop, but don't actually write.
        # This is because it is useful in some failure modes to not immediately overwrite a good state file with a bad one on startup.
        pcd.get()

    log.msg('Starting web server...')
    services = MultiService()
    for maker in configObj._service_makers:
        IService(maker(app)).setServiceParent(services)
    services.startService()

    log.msg('ShinySDR is ready.')

    for service in services:
        # TODO: should have an interface (currently no proper module to put it in)
        service.announce(args.openBrowser)

    if args.force_run:
        log.msg('force_run')
        from gnuradio.gr import msg_queue
        # TODO kludge, make this less digging into guts
        app.get_receive_flowgraph().monitor.get_fft_distributor().subscribe(
            msg_queue(limit=2))

    if _abort_for_test:
        services.stopService()
        defer.returnValue(app)
    else:
        yield defer.Deferred()  # never fires

def stopService(self):
    MultiService.stopService(self)
    self.reconnecting_factory.stopTrying()
    self.cluster_status.receive(ClusterStatusInputs.SHUTDOWN)

def _main_async(reactor, argv=None, _abort_for_test=False):
    if argv is None:
        argv = sys.argv

    if not _abort_for_test:
        # Some log messages would be discarded if we did not set up things early.
        configure_logging()

    # Option parsing is done before importing the main modules so as to avoid the cost of initializing gnuradio if we are aborting early. TODO: Make that happen for createConfig too.
    argParser = argparse.ArgumentParser(prog=argv[0])
    argParser.add_argument('config_path', metavar='CONFIG',
        help='path of configuration directory or file')
    argParser.add_argument('--create', dest='createConfig', action='store_true',
        help='write template configuration file to CONFIG and exit')
    argParser.add_argument('-g, --go', dest='openBrowser', action='store_true',
        help='open the UI in a web browser')
    argParser.add_argument('--force-run', dest='force_run', action='store_true',
        help='Run DSP even if no client is connected (for debugging).')
    args = argParser.parse_args(args=argv[1:])

    # Verify we can actually run.
    # Note that this must be done before we actually load core modules, because we might get an import error then.
    version_report = yield _check_versions()
    if version_report:
        print(version_report, file=sys.stderr)
        sys.exit(1)

    # Write config file and exit if asked ...
    if args.createConfig:
        write_default_config(args.config_path)
        _log.info('Created default configuration at: {config_path}', config_path=args.config_path)
        sys.exit(0)  # TODO: Consider using a return value or something instead

    # ... else read config file
    config_obj = Config(reactor=reactor, log=_log)
    execute_config(config_obj, args.config_path)
    yield config_obj._wait_and_validate()

    _log.info('Constructing...')
    app = config_obj._create_app()
    reactor.addSystemEventTrigger('during', 'shutdown', app.close_all_devices)

    _log.info('Restoring state...')
    pfg = PersistenceFileGlue(
        reactor=reactor,
        root_object=app,
        filename=config_obj._state_filename,
        get_defaults=_app_defaults)

    _log.info('Starting web server...')
    services = MultiService()
    for maker in config_obj._service_makers:
        IService(maker(app)).setServiceParent(services)
    services.startService()

    _log.info('ShinySDR is ready.')

    for service in services:
        # TODO: should have an interface (currently no proper module to put it in)
        service.announce(args.openBrowser)

    if args.force_run:
        _log.debug('force_run')
        # TODO kludge, make this less digging into guts
        app.get_receive_flowgraph().get_monitor().state()['fft'].subscribe2(lambda v: None, the_subscription_context)

    if _abort_for_test:
        services.stopService()
        yield pfg.sync()
        defer.returnValue(app)
    else:
        yield defer.Deferred()  # never fires

def stopService(self):
    self._starting.cancel()
    MultiService.stopService(self)
    return self._starting

def stopService(self):
    log.msg("Shutting down accomplishments daemon ...")
    return MultiService.stopService(self)

def _main_async(reactor, argv=None, _abort_for_test=False):
    if argv is None:
        argv = sys.argv

    if not _abort_for_test:
        # Configure logging. Some log messages would be discarded if we did not set up things early
        # TODO: Consult best practices for Python and Twisted logging.
        # TODO: Logs which are observably relevant should be sent to the client (e.g. the warning of refusing to have more receivers active)
        logging.basicConfig(level=logging.INFO)
        log.startLoggingWithObserver(log.PythonLoggingObserver(loggerName='shinysdr').emit, False)

    # Option parsing is done before importing the main modules so as to avoid the cost of initializing gnuradio if we are aborting early. TODO: Make that happen for createConfig too.
    argParser = argparse.ArgumentParser(prog=argv[0])
    argParser.add_argument('configFile', metavar='CONFIG',
        help='path of configuration file')
    argParser.add_argument('--create', dest='createConfig', action='store_true',
        help='write template configuration file to CONFIG and exit')
    argParser.add_argument('-g, --go', dest='openBrowser', action='store_true',
        help='open the UI in a web browser')
    argParser.add_argument('--force-run', dest='force_run', action='store_true',
        help='Run DSP even if no client is connected (for debugging).')
    args = argParser.parse_args(args=argv[1:])

    # Verify we can actually run.
    # Note that this must be done before we actually load core modules, because we might get an import error then.
    yield check_versions()

    # We don't actually use shinysdr.devices directly, but we want it to be guaranteed available in the context of the config file.
    import shinysdr.devices as lazy_devices
    import shinysdr.source as lazy_source  # legacy shim

    # Load config file
    if args.createConfig:
        with open(args.configFile, 'w') as f:
            f.write(make_default_config())
        log.msg('Created default configuration file at: ' + args.configFile)
        sys.exit(0)  # TODO: Consider using a return value or something instead
    else:
        configObj = Config(reactor)
        execute_config(configObj, args.configFile)
        yield configObj._wait_and_validate()

        stateFile = configObj._state_filename

    def noteDirty():
        if stateFile is not None:
            # just immediately write (revisit this when more performance is needed)
            with open(stateFile, 'w') as f:
                json.dump(top.state_to_json(), f)

    def restore(root, get_defaults):
        if stateFile is not None:
            if os.path.isfile(stateFile):
                root.state_from_json(json.load(open(stateFile, 'r')))
                # make a backup in case this code version misreads the state and loses things on save (but only if the load succeeded, in case the file but not its backup is bad)
                shutil.copyfile(stateFile, stateFile + '~')
            else:
                root.state_from_json(get_defaults(root))

    log.msg('Constructing flow graph...')
    top = configObj._create_top_block()
    singleton_reactor.addSystemEventTrigger('during', 'shutdown', top.close_all_devices)

    log.msg('Restoring state...')
    restore(top, top_defaults)

    log.msg('Starting web server...')
    services = MultiService()
    for maker in configObj._service_makers:
        IService(maker(top, noteDirty)).setServiceParent(services)
    services.startService()

    log.msg('ShinySDR is ready.')

    for service in services:
        # TODO: should have an interface (currently no proper module to put it in)
        service.announce(args.openBrowser)

    if args.force_run:
        log.msg('force_run')
        from gnuradio.gr import msg_queue
        top.monitor.get_fft_distributor().subscribe(msg_queue(limit=2))

    if _abort_for_test:
        services.stopService()
        defer.returnValue((top, noteDirty))
    else:
        yield defer.Deferred()  # never fires

class SqlFuse(FileSystem):
    MOUNT_OPTIONS = {"allow_other": None, "suid": None, "dev": None, "exec": None, "fsname": "fuse.sql"}

    rooter = DummyRooter()
    record = DummyRecorder()
    collector = DummyQuit()
    cleaner = DummyQuit()
    changer = DummyChanger()
    ichanger = DummyChanger()
    updatefinder = DummyQuit()
    copier = DummyQuit()
    db = DummyQuit()
    servers = []

    readonly = True

    # 0: no atime; 1: only if <mtime; 2: always
    atime = 1
    # 0: no atime: 1: when reading; 2: also when traversing
    diratime = 0

    slow = False
    shutting_down = False

    topology = None  # .topology wants to be an OrderedDict,
    neighbors = None  # but that's py2.7 and I'm too lazy to backport that.
    missing_neighbors = None

    def __init__(self, *a, **k):
        self._slot = {}
        self._slot_next = 1
        self._busy = {}
        self._update = {}
        self._xattr_name = {}
        self._xattr_id = {}
        self._xattr_lock = DeferredLock()  # protects name/id translation

        self.FileType = SqlFile
        self.DirType = SqlDir
        self.ENTRY_VALID = (10, 0)
        self.ATTR_VALID = (10, 0)

        self.remote = RemoteDict(self)
        # Note: Calling super().__init__ will happen later, in init_db()

    # map fdnum ⇒ filehandle
    def new_slot(self, x):
        """\
        Remember a file/dir handler. Return an ID.
        """
        self._slot_next += 1
        while self._slot_next in self._slot:
            if self._slot_next == 999999999:
                self._slot_next = 1
            else:
                self._slot_next += 1
        self._slot[self._slot_next] = x
        return self._slot_next

    def old_slot(self, x):
        """\
        Fetch a file/dir handler, given its ID.
        """
        return self._slot[x]

    def del_slot(self, x):
        """\
        Fetch a file/dir handler, given its ID.
        As this will be the last access, also delete the mapping.
        """
        res = self._slot[x]
        del self._slot[x]
        return res

#   def _inode_path(self, path, tail=0):
#       path = path.split('/')
#       while path:
#           name = path.pop()
#           if name != '':
#               break
#       if not tail:
#           path.append(name)
#       depth=0
#       q=[""]
#       qa = {"root":self.inode}
#       for p in path:
#           if p == '':
#               continue
#           depth += 1
#           q.append("JOIN tree AS t%d ON t%d.inode = t%d.parent and t%d.name=${t%d_name}" % (depth, depth-1, depth, depth, depth))
#           qa["t"+str(depth)+"_name"] = p
#       q[0]="SELECT t%d.inode from tree as t0" % (depth,)
#       q.append("where t0.inode=${root}")
#       ino, = self.db.DoFn(" ".join(q),**qa)
#       return ino,name

    ### a few FUSE calls which are not handled by the inode object

    def rename(self, inode_old, name_old, inode_new, name_new, ctx=None):
        # This is atomic, as it's a transaction
        @inlineCallbacks
        def do_rename(db):
            try:
                yield inode_new._unlink(name_new, ctx=ctx, db=db)
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
                    raise
            yield db.Do(
                "update tree set name=${nname},parent=${ninode} where name=${oname} and parent=${oinode}",
                nname=name_new, ninode=inode_new.inum,
                oname=name_old, oinode=inode_old.inum,
            )

            def adj_size():
                inode_old.mtime = nowtuple()
                inode_old.size -= len(name_old) + 1
                inode_new.mtime = nowtuple()
                inode_new.size += len(name_new) + 1
            db.call_committed(adj_size)
            returnValue(None)
        return self.db(do_rename, DB_RETRIES)

## not supported, we're not file-backed
#   def bmap(self, *a,**k):
#       log_call()
#       raise IOError(errno.EOPNOTSUPP)

## not used, because the 'default_permissions' option is set
#   def access(self, inode, mode, ctx):
#       log_call()
#       raise IOError(errno.EOPNOTSUPP)

    @inlineCallbacks
    def statfs(self):
        """\
        File system status.
        We recycle some values, esp. free space, from the underlying storage.
        """
        s = {}
        osb = os.statvfs(self.store)
        s["bsize"] = BLOCKSIZE
        s["frsize"] = BLOCKSIZE
        s["blocks"], s["files"] = yield self.db(
            lambda db: db.DoFn("select nblocks,nfiles from root where id=${root}", root=self.root_id),
            DB_RETRIES
        )
        s["bfree"] = (osb.f_bfree * osb.f_bsize) // BLOCKSIZE
        s["bavail"] = (osb.f_bavail * osb.f_bsize) // BLOCKSIZE
        s["ffree"] = osb.f_ffree
        # s['favail'] = osb.f_favail
        s["namelen"] = int(self.info.namelen)  # see SQL schema

        s["blocks"] += s["bfree"]
        s["files"] += s["ffree"]
        returnValue(s)

    ## xattr back-end. The table uses IDs because they're much shorter than the names.
    ## This code only handles the name/ID caching; actual attribute access is in the inode.

    @inlineCallbacks
    def xattr_name(self, xid, db):
        """\
        xattr key-to-name translation.
        Data consistency states that there must be one.
        """
        try:
            returnValue(self._xattr_name[xid])
        except KeyError:
            pass

        yield self._xattr_lock.acquire()
        try:
            try:
                returnValue(self._xattr_name[xid])
            except KeyError:
                pass

            name, = yield db.DoFn("select name from xattr_name where id=${xid}", xid=xid)

            self._xattr_name[xid] = name
            self._xattr_id[name] = xid

            def _drop():
                del self._xattr_name[xid]
                del self._xattr_id[name]
            db.call_rolledback(_drop)
        finally:
            self._xattr_lock.release()

        returnValue(name)

    @inlineCallbacks
    def xattr_id(self, name, db, add=False):
        """\
        xattr name-to-key translation.
        Remembers null mappings, or creates a new one if @add is set.
        """
        if len(name) == 0 or len(name) > self.info.attrnamelen:
            raise IOError(errno.ENAMETOOLONG)
        try:
            returnValue(self._xattr_id[name])
        except KeyError:
            pass

        try:
            yield self._xattr_lock.acquire()
            try:
                returnValue(self._xattr_id[name])
            except KeyError:
                pass

            try:
                xid, = yield db.DoFn("select id from xattr_name where name=${name}", name=name)
            except NoData:
                if not add:
                    self._xattr_id[name] = None
                    returnValue(None)
                xid = yield db.Do("insert into xattr_name(name) values(${name})", name=name)

            self._xattr_name[xid] = name
            self._xattr_id[name] = xid

            def _drop():
                del self._xattr_name[xid]
                del self._xattr_id[name]
            db.call_rolledback(_drop)
        finally:
            self._xattr_lock.release()

        returnValue(xid)

    def call_node(self, dest, name, *a, **k):
        if dest in self.missing_neighbors:
            raise NoLink(dest, "missing")
        try:
            node = self.topology[dest]
            rem = self.remote[node]
        except KeyError:
            trace("error", "NoLink! %s %s %s %s", dest, name, repr(a), repr(k))
            self.missing_neighbors.add(dest)
            raise NoLink(dest, "missing 2")

        if dest == node:
            return getattr(rem, "do_" + name)(*a, **k)
        else:
            return rem.remote_exec(node, name, *a, **k)

    @inlineCallbacks
    def each_node(self, chk, name, *a, **k):
        e = None
        if not self.topology:
            raise RuntimeError("No topology information available")
        # for dest in self.topology.keys():
        for dest in self.neighbors:
            try:
                trace("remote", "%d: calling %s %s %s", dest, name, repr(a), repr(k))
                res = yield self.call_node(dest, name, *a, **k)
            except Exception as ee:
                trace("remote", "%d: error %s", dest, ee)
                # If any link is down, that's the error I return.
                en = sys.exc_info()
                if e is None or isinstance(en[1], NoLink):
                    e = en
            else:
                trace("remote", "%d: %s", dest, res)
                if chk and chk(res):
                    returnValue(res)
        if e is None:
            returnValue(None)
        raise e[0], e[1], e[2]

    @inlineCallbacks
    def init_db(self, db, node):
        """\
        Setup the database part of the file system's operation.
        """
        # TODO: setup a copying thread
        self.db = db
        # reactor.addSystemEventTrigger('before', 'shutdown', db.stopService)
        self.node = node

        @inlineCallbacks
        def do_init_db(db):
            try:
                self.node_id, self.root_id, self.root_inum, self.store, self.port = yield db.DoFn(
                    "select node.id,root.id,root.inode,node.files,node.port from node,root where root.id=node.root and node.name=${name}",
                    name=node,
                )
            except NoData:
                raise RuntimeError("data for '%s' is missing" % (self.node,))

            nnodes, = yield db.DoFn(
                "select count(*) from node where root=${root} and id != ${node}",
                root=self.root_id, node=self.node_id
            )
            self.single_node = not nnodes

            try:
                mode, = yield db.DoFn("select mode from inode where id=${inode}", inode=self.root_inum)
            except NoData:
                raise RuntimeError("database has not been initialized: inode %d is missing" % (self.inode,))
            if mode == 0:
                yield db.Do(
                    "update inode set mode=${dir} where id=${inode}",
                    dir=stat.S_IFDIR | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO,
                    inode=self.root_inum,
                )

            self.info = Info()
            yield self.info._load(db)
            if self.info.version != DBVERSION:
                raise RuntimeError("Need database version %s, got %s" % (DBVERSION, self.info.version))

            root = SqlInode(self, self.root_inum)
            yield root._load(db)
            returnValue(root)

        root = yield self.db(do_init_db)
        super(SqlFuse, self).__init__(root=root, filetype=SqlFile, dirtype=SqlDir)

    @inlineCallbacks
    def init(self, opt):
        """\
        Last step before running the file system mainloop.
        """
        if opt.atime:
            self.atime = {"no": 0, "mtime": 1, "yes": 2}[opt.atime]
        if opt.diratime:
            self.diratime = {"no": 0, "read": 1, "access": 2}[opt.diratime]
        if opt.slow:
            self.slow = True

        self.services = MultiService()
        for a, b in (
            ("rooter", RootUpdater),
            ("updatefinder", UpdateCollector),
            ("changer", CacheRecorder),
            ("record", Recorder),
            ("collector", NodeCollector),
            ("ichanger", InodeWriter),
            ("copier", CopyWorker),
            ("cleaner", InodeCleaner),
        ):
            b = b(self)
            setattr(self, a, b)
            b.setServiceParent(self.services)

        reactor.addSystemEventTrigger("before", "shutdown", self.umount)
        yield self.services.startService()
        yield self.connect_all()
        self.record.trigger()

    @inlineCallbacks
    def umount(self):
        self.readonly = True
        if self.shutting_down:
            trace("shutdown", "called twice")
            return
        try:
            self.shutting_down = True

            trace("shutdown", "stopping services")
            yield self.services.stopService()

            trace("shutdown", "disconnect peers")
            self.disconnect_all()
            for k in self.remote.keys():
                del self.remote[k]

            n = 0
            for c in reactor.getDelayedCalls():
                n += 1
                trace("shutdown", "speed-up %s", c)
                c.reset(0)
            if n:
                trace("shutdown", "speed-up wait %d", n)
                yield reactor.callLater(n / 10, lambda: None)

            trace("shutdown", "run idle")
            yield IdleWorker.run()

            trace("shutdown", "flush inodes")
            yield self.db(flush_inodes)

            trace("shutdown", "super")
            yield super(SqlFuse, self).stop(False)

            trace("shutdown", "stop DB")
            yield self.db.stopService()

            trace("shutdown", "done")
        except Exception as e:
            log.err(e, "Shutting down")
            traceback.print_exc()

    @inlineCallbacks
    def connect_all(self):
        from sqlfuse.connect import METHODS
        for m in METHODS:
            try:
                m = __import__("sqlfuse.connect." + m, fromlist=("NodeServerFactory",))
                m = m.NodeServerFactory(self)
                yield m.connect()
            except NoLink:
                log.err(None, "No link to nodes %s" % (m,))
            except Exception:
                f = failure.Failure()
                log.err(f, "No link to nodes %s" % (m,))
            else:
                self.servers.append(m)
        pass

    def disconnect_all(self):
        srv = self.servers
        self.servers = []
        for s in srv:
            try:
                yield s.disconnect()
            except Exception:
                log.err(None, "Disconnect")

    def mount(self, handler, flags):
        """\
        FUSE callback.
        """
        self.handler = handler
        return {
            "flags": FUSE_ATOMIC_O_TRUNC | FUSE_ASYNC_READ | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES,
            "max_write": MAX_BLOCK,
        }

def stopService(self):
    self.periodicTask.stop()  # stop the task that runs every T seconds
    return BaseMultiService.stopService(self)

def stopService(self):
    MultiService.stopService(self)
    logger.debug("stopped console service")

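
# A minimal sketch (not taken from any of the projects above) of the pattern these
# examples illustrate: do your own teardown, call the base MultiService.stopService(),
# and return or chain its Deferred so the reactor waits for the child services to
# finish stopping. MyService and self.cleanup() are hypothetical names for illustration.
from twisted.application.service import MultiService
from twisted.internet import defer


class MyService(MultiService):
    @defer.inlineCallbacks
    def stopService(self):
        # MultiService.stopService returns a Deferred that fires once every
        # child service's stopService has completed.
        yield MultiService.stopService(self)
        # ...then release anything this service owns (hypothetical helper).
        # yield self.cleanup()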