def test_dataflow_unsub(self):
    """
    Check the dataflow is automatically unsubscribed when the subscriber
    disappears.
    """
    producer = model.createInNewContainer("testscont", MyComponent, {"name": "MyComp"})
    consumer = model.createInNewContainer("testscont2", MyComponent, {"name": "MyComp2"})
    self.count = 0
    self.data_arrays_sent = 0
    producer.data.reset()

    # _count_listeners() is a special hidden function that directly asks the
    # original DataFlow
    self.assertEqual(producer.data._count_listeners(), 0)

    # subscribe remotely, and let some data flow for a while (no explicit unsub)
    consumer.sub(producer.data)
    time.sleep(0.5)
    logging.info("received %d arrays", consumer.get_data_count())
    logging.info("orig has %d listeners", producer.data._count_listeners())

    # kill the subscriber side: the DataFlow must detect it by itself
    consumer.terminate()
    model.getContainer("testscont2").terminate()
    logging.info("comp2 should now be disappeared")
    time.sleep(0.4)
    self.assertEqual(producer.data._count_listeners(), 0)

    producer.terminate()
    model.getContainer("testscont").terminate()
    time.sleep(0.1)  # give it some time to terminate
def test_component(self):
    """
    Check that two HwComponents in separate containers can reference each
    other through .affects, and that the proxies behave like the originals.
    """
    affected = model.createInNewContainer("testscont", model.HwComponent,
                                          {"name": "MyHwComp", "role": "affected"})
    self.assertEqual(affected.name, "MyHwComp")
    affecter = model.createInNewContainer("testscont2", model.HwComponent,
                                          {"name": "MyHwComp2", "role": "affecter"})
    self.assertEqual(affecter.name, "MyHwComp2")

    affecter._set_affects(set([affected]))
    self.assertEqual(len(affecter.affects), 1)
    for proxy in affecter.affects:
        # the proxy must still look like a regular component
        self.assertTrue(isinstance(proxy, model.ComponentBase))
        self.assertEqual(proxy.name, "MyHwComp")
        self.assertEqual(proxy.role, "affected")

    # a fresh lookup from the container must see the same relations
    fetched = model.getObject("testscont2", "MyHwComp2")
    self.assertEqual(fetched.name, "MyHwComp2")
    self.assertEqual(len(fetched.affects), 1)

    affected.terminate()
    affecter.terminate()
    model.getContainer("testscont").terminate()
    model.getContainer("testscont2").terminate()
    time.sleep(0.1)  # give it some time to terminate
def test_dataflow(self):
    """
    Check that a DataFlow reached through a double indirection (a component
    proxy obtained via .affects) can be subscribed and unsubscribed, and that
    no more data arrives after unsubscribing.
    """
    comp = model.createInNewContainer("testscont", MyComponent, {"name": "MyComp"})
    self.assertEqual(comp.name, "MyComp")
    comp2 = model.createInNewContainer("testscont2", model.HwComponent,
                                       {"name": "MyHwComp2", "role": "affecter"})
    self.assertEqual(comp2.name, "MyHwComp2")
    comp2._set_affects(set([comp]))
    self.assertEqual(len(comp2.affects), 1)
    comp_indir = list(comp2.affects)[0]  # proxy of a proxy of comp

    self.count = 0
    self.data_arrays_sent = 0
    comp_indir.data.reset()
    comp_indir.data.subscribe(self.receive_data)
    time.sleep(0.5)
    comp_indir.data.unsubscribe(self.receive_data)
    count_end = self.count
    # Fixed: was a Python-2-only print statement; the print() call prints the
    # same text on Python 2 and is valid Python 3.
    print("received %d arrays over %d" % (self.count, self.data_arrays_sent))
    time.sleep(0.1)
    # no data should arrive anymore after unsubscribing
    self.assertEqual(count_end, self.count)

    comp.terminate()
    comp2.terminate()
    model.getContainer("testscont").terminate()
    model.getContainer("testscont2").terminate()
    time.sleep(0.1)  # give it some time to terminate
def test_component(self):
    """
    Instantiate one component per container, link them via _set_affects, and
    verify the cross-container references resolve correctly.
    """
    hw1 = model.createInNewContainer("testscont", model.HwComponent,
                                     {"name": "MyHwComp", "role": "affected"})
    self.assertEqual(hw1.name, "MyHwComp")
    hw2 = model.createInNewContainer("testscont2", model.HwComponent,
                                     {"name": "MyHwComp2", "role": "affecter"})
    self.assertEqual(hw2.name, "MyHwComp2")

    hw2._set_affects(set([hw1]))
    self.assertEqual(len(hw2.affects), 1)
    for affected_proxy in hw2.affects:
        self.assertTrue(isinstance(affected_proxy, model.ComponentBase))
        self.assertEqual(affected_proxy.name, "MyHwComp")
        self.assertEqual(affected_proxy.role, "affected")

    # re-fetching the component from its container keeps the affects intact
    hw2_again = model.getObject("testscont2", "MyHwComp2")
    self.assertEqual(hw2_again.name, "MyHwComp2")
    self.assertEqual(len(hw2_again.affects), 1)

    hw1.terminate()
    hw2.terminate()
    model.getContainer("testscont").terminate()
    model.getContainer("testscont2").terminate()
    time.sleep(0.1)  # leave some time for the containers to terminate
def scan():
    """
    Scan for connected devices and list them
    Output like:
    Classname: 'Name of Device' init={arg: value, arg2: value2}
    return (int): 0 on success
    """
    # only here, to avoid importing everything for other commands
    from odemis import driver
    num = 0
    # we scan by using every HwComponent class which has a .scan() method
    for module_name in driver.__all__:
        module = importlib.import_module("." + module_name, "odemis.driver")
        for cls_name, cls in inspect.getmembers(module, inspect.isclass):
            if issubclass(cls, model.HwComponent) and hasattr(cls, "scan"):
                logging.info("Scanning for %s.%s components", module_name, cls_name)
                # do it in a separate container so that we don't have to load
                # all drivers in the same process (andor cams don't like it)
                container_name = "scanner%d" % num
                num += 1
                scanner = model.createInNewContainer(container_name, Scanner, {"cls": cls})
                devices = scanner.scan()
                scanner.terminate()
                model.getContainer(container_name).terminate()
                for name, args in devices:
                    # Fixed: was a Python-2-only print statement; the print()
                    # call produces the same output and is valid Python 3.
                    print("%s.%s: '%s' init=%s" % (module_name, cls_name, name, str(args)))
    return 0
def test_dataflow_unsub(self):
    """
    Verify that a DataFlow drops its remote subscriber automatically once the
    subscriber's container has disappeared.
    """
    comp_a = model.createInNewContainer("testscont", MyComponent, {"name": "MyComp"})
    comp_b = model.createInNewContainer("testscont2", MyComponent, {"name": "MyComp2"})
    self.count = 0
    self.data_arrays_sent = 0
    comp_a.data.reset()
    # hidden helper: queries the listener count on the original DataFlow
    self.assertEqual(comp_a.data._count_listeners(), 0)

    comp_b.sub(comp_a.data)
    time.sleep(0.5)
    n_arrays = comp_b.get_data_count()
    logging.info("received %d arrays", n_arrays)
    n_listeners = comp_a.data._count_listeners()
    logging.info("orig has %d listeners", n_listeners)

    # terminate the subscriber side WITHOUT an explicit unsubscribe
    comp_b.terminate()
    model.getContainer("testscont2").terminate()
    logging.info("comp2 should now be disappeared")
    time.sleep(0.4)
    # the DataFlow must have noticed the subscriber is gone
    self.assertEqual(comp_a.data._count_listeners(), 0)

    comp_a.terminate()
    model.getContainer("testscont").terminate()
    time.sleep(0.1)  # some extra time to fully terminate
def test_dataflow(self):
    """
    Subscribe/unsubscribe to a DataFlow through a proxy-of-a-proxy and check
    that data stops arriving once unsubscribed.
    """
    comp = model.createInNewContainer("testscont", MyComponent, {"name": "MyComp"})
    self.assertEqual(comp.name, "MyComp")
    comp2 = model.createInNewContainer("testscont2", model.HwComponent,
                                       {"name": "MyHwComp2", "role": "affecter"})
    self.assertEqual(comp2.name, "MyHwComp2")
    comp2._set_affects(set([comp]))
    self.assertEqual(len(comp2.affects), 1)
    comp_indir = list(comp2.affects)[0]  # double indirection to comp

    self.count = 0
    self.data_arrays_sent = 0
    comp_indir.data.reset()
    comp_indir.data.subscribe(self.receive_data)
    time.sleep(0.5)
    comp_indir.data.unsubscribe(self.receive_data)
    count_end = self.count
    # Fixed: converted the Python-2-only print statement to a print() call
    # (same output on Python 2, valid on Python 3).
    print("received %d arrays over %d" % (self.count, self.data_arrays_sent))
    time.sleep(0.1)
    # the counter must not have moved since the unsubscribe
    self.assertEqual(count_end, self.count)

    comp.terminate()
    comp2.terminate()
    model.getContainer("testscont").terminate()
    model.getContainer("testscont2").terminate()
    time.sleep(0.1)  # give it some time to terminate
def test_multi_components(self):
    """
    Create a component with children in one container, and check the children
    are reachable and cleaned up by the container.
    """
    father = model.createInNewContainer("testmulti", FatherComponent,
                                        {"name": "Father", "children_num": 3})
    self.assertEqual(father.name, "Father")
    self.assertEqual(len(father.children), 3, "Component should have 3 children")

    for child in father.children:
        self.assertLess(child.value, 3)
        # each child must also be directly reachable from the container
        direct = model.getObject("testmulti", child.name)
        self.assertEqual(direct.name, child.name)

    father.terminate()
    # the children are not terminated explicitly: the container must catch it
    model.getContainer("testmulti").terminate()
def kill_backend():
    """
    Ask the running back-end container to terminate.
    return (int): 0 on success, 127 if the back-end could not be stopped
    """
    try:
        backend = model.getContainer(model.BACKEND_NAME)
        backend.terminate()
    except Exception:
        # Fixed: narrowed from a bare "except:" so that SystemExit and
        # KeyboardInterrupt are not silently swallowed.
        logging.error("Failed to stop the back-end")
        return 127
    return 0
def test_multi_components(self):
    """
    Instantiate a parent with 3 children in a single container; the container
    is responsible for terminating the children.
    """
    parent = model.createInNewContainer("testmulti", FatherComponent,
                                        {"name": "Father", "children_num": 3})
    self.assertEqual(parent.name, "Father")
    self.assertEqual(len(parent.children), 3, "Component should have 3 children")

    for kid in parent.children:
        self.assertLess(kid.value, 3)
        # look the child up directly by name in the container
        kid_direct = model.getObject("testmulti", kid.name)
        self.assertEqual(kid_direct.name, kid.name)

    parent.terminate()
    # children deliberately left running: the container must handle them
    model.getContainer("testmulti").terminate()
def test_instantiate_simple_component(self):
    """Create a simple component in a new container and access it remotely."""
    comp = model.createInNewContainer("testscont", FamilyValueComponent, {"name": "MyComp"})
    self.assertEqual(comp.name, "MyComp")

    # the same component fetched through the registry must match
    remote = model.getObject("testscont", "MyComp")
    self.assertEqual(remote.name, "MyComp")

    cont = model.getContainer("testscont")
    comp.terminate()
    cont.terminate()
def test_instantiate_simple_component(self):
    """Instantiate a FamilyValueComponent in its own container and look it up."""
    created = model.createInNewContainer("testscont", FamilyValueComponent,
                                         {"name": "MyComp"})
    self.assertEqual(created.name, "MyComp")
    fetched = model.getObject("testscont", "MyComp")
    self.assertEqual(fetched.name, "MyComp")
    # tidy up: component first, then its container
    container = model.getContainer("testscont")
    created.terminate()
    container.terminate()
def test_va(self):
    """
    Check VigilantAttributes work through a double indirection: read, write,
    range/choices access, and write-back to the original component.
    """
    comp = model.createInNewContainer("testscont", SimpleHwComponent,
                                      {"name": "MyHwComp", "role": "affected"})
    self.assertEqual(comp.name, "MyHwComp")
    comp2 = model.createInNewContainer("testscont2", model.HwComponent,
                                       {"name": "MyHwComp2", "role": "affecter"})
    self.assertEqual(comp2.name, "MyHwComp2")
    comp2._set_affects(set([comp]))
    self.assertEqual(len(comp2.affects), 1)
    comp_indir = list(comp2.affects)[0]  # proxy of a proxy

    self.assertIsInstance(comp_indir.prop, VigilantAttributeBase)
    self.assertIsInstance(comp_indir.cont, VigilantAttributeBase)
    self.assertIsInstance(comp_indir.enum, VigilantAttributeBase)

    prop = comp_indir.prop
    self.assertEqual(prop.value, 42)
    prop.value += 1
    self.assertEqual(prop.value, 43)
    self.assertEqual(comp.prop.value, 43)  # the write must reach the original

    self.assertEqual(comp_indir.cont.value, 2.0)
    self.assertIsInstance(comp_indir.cont.range, tuple)
    # A continuous VA has no .choices, so accessing it should fail.
    # Fixed: the original put self.fail() inside the try with a bare
    # "except: pass", which swallowed the AssertionError and made this
    # negative check impossible to fail. try/except/else reports it properly.
    try:
        len(comp_indir.cont.choices)
    except Exception:
        pass
    else:
        self.fail("Accessing choices should fail")

    self.assertEqual(comp_indir.enum.value, "a")

    comp.terminate()
    comp2.terminate()
    model.getContainer("testscont").terminate()
    model.getContainer("testscont2").terminate()
def test_roattributes(self):
    """Read-only attributes must be readable through the proxy chain."""
    owner = model.createInNewContainer("testscont", MyComponent, {"name": "MyComp"})
    self.assertEqual(owner.name, "MyComp")
    affecter = model.createInNewContainer("testscont2", model.HwComponent,
                                          {"name": "MyHwComp2", "role": "affecter"})
    self.assertEqual(affecter.name, "MyHwComp2")

    affecter._set_affects(set([owner]))
    self.assertEqual(len(affecter.affects), 1)
    for proxy in affecter.affects:
        self.assertTrue(isinstance(proxy, model.ComponentBase))
        self.assertEqual(proxy.name, "MyComp")

    # read the roattribute on the original component
    self.assertEqual(owner.my_value, "ro", "Reading attribute failed")

    owner.terminate()
    affecter.terminate()
    model.getContainer("testscont").terminate()
    model.getContainer("testscont2").terminate()
def test_instantiate_component(self):
    """Create a component in a new container, read an roattribute, and ping."""
    comp = model.createInNewContainer("testcont", MyComponent, {"name": "MyComp"})
    self.assertEqual(comp.name, "MyComp")
    # roattribute access through the proxy
    self.assertEqual(comp.my_value, "ro", "Reading attribute failed")

    # the registry must return the same component
    same_comp = model.getObject("testcont", "MyComp")
    self.assertEqual(same_comp.name, "MyComp")

    container = model.getContainer("testcont")
    container.ping()  # the container itself must answer
    comp.terminate()
    container.terminate()
def test_roattributes(self):
    """Check that roattributes survive the proxying between containers."""
    comp_ro = model.createInNewContainer("testscont", MyComponent, {"name": "MyComp"})
    self.assertEqual(comp_ro.name, "MyComp")
    comp_link = model.createInNewContainer("testscont2", model.HwComponent,
                                           {"name": "MyHwComp2", "role": "affecter"})
    self.assertEqual(comp_link.name, "MyHwComp2")
    comp_link._set_affects(set([comp_ro]))
    self.assertEqual(len(comp_link.affects), 1)

    for indirect in comp_link.affects:
        # still a component, with the right identity
        self.assertTrue(isinstance(indirect, model.ComponentBase))
        self.assertEqual(indirect.name, "MyComp")

    value_read = comp_ro.my_value
    self.assertEqual(value_read, "ro", "Reading attribute failed")

    comp_ro.terminate()
    comp_link.terminate()
    model.getContainer("testscont").terminate()
    model.getContainer("testscont2").terminate()
def test_instantiate_component(self):
    """Instantiate MyComponent remotely and exercise basic container access."""
    created = model.createInNewContainer("testcont", MyComponent, {"name": "MyComp"})
    self.assertEqual(created.name, "MyComp")

    ro_val = created.my_value
    self.assertEqual(ro_val, "ro", "Reading attribute failed")

    looked_up = model.getObject("testcont", "MyComp")
    self.assertEqual(looked_up.name, "MyComp")

    cont = model.getContainer("testcont")
    cont.ping()  # sanity check the container connection
    created.terminate()
    cont.terminate()
def test_va(self):
    """
    Check VigilantAttributes through two levels of proxies: type, read/write,
    propagation to the original, range, and absence of choices.
    """
    comp = model.createInNewContainer("testscont", SimpleHwComponent,
                                      {"name": "MyHwComp", "role": "affected"})
    self.assertEqual(comp.name, "MyHwComp")
    comp2 = model.createInNewContainer("testscont2", model.HwComponent,
                                       {"name": "MyHwComp2", "role": "affecter"})
    self.assertEqual(comp2.name, "MyHwComp2")
    comp2._set_affects(set([comp]))
    self.assertEqual(len(comp2.affects), 1)
    comp_indir = list(comp2.affects)[0]

    self.assertIsInstance(comp_indir.prop, VigilantAttributeBase)
    self.assertIsInstance(comp_indir.cont, VigilantAttributeBase)
    self.assertIsInstance(comp_indir.enum, VigilantAttributeBase)

    prop = comp_indir.prop
    self.assertEqual(prop.value, 42)
    prop.value += 1
    self.assertEqual(prop.value, 43)
    # the change through the proxy must be visible on the original VA
    self.assertEqual(comp.prop.value, 43)

    self.assertEqual(comp_indir.cont.value, 2.0)
    self.assertIsInstance(comp_indir.cont.range, tuple)
    # There is no such thing as .choices on a continuous VA: it should fail.
    # Fixed: self.fail() used to sit inside a try with a bare "except: pass",
    # so its AssertionError was swallowed and the test could never report the
    # problem. The else clause runs only if no exception was raised.
    try:
        len(comp_indir.cont.choices)
    except Exception:
        pass
    else:
        self.fail("Accessing choices should fail")

    self.assertEqual(comp_indir.enum.value, "a")

    comp.terminate()
    comp2.terminate()
    model.getContainer("testscont").terminate()
    model.getContainer("testscont2").terminate()
def instantiate_comp(self, name):
    """ Instantiate a component
    name (str): name that will be given to the component instance
    returns (HwComponent): an instance of the component
    Raises:
        SemanticError in case an error in the model is detected.
    """
    # look up the component description in the parsed model (AST)
    attr = self.ast[name]
    class_name = attr["class"]
    class_comp = get_class(class_name)

    # create the arguments:
    # name (str)
    # role (str)
    # children:
    #  * explicit creation: (dict str -> HwComponent) internal name -> comp
    #  * delegation: (dict str -> dict) internal name -> init arguments
    # anything else is passed as is
    args = self.make_args(name)

    if self.dry_run and not class_name == "Microscope":
        # mock class for everything but Microscope (because it is safe)
        args["_realcls"] = class_comp
        class_comp = model.MockComponent

    try:
        if self.create_sub_containers and self.is_leaf(name):
            # new container has the same name as the component
            comp = model.createInNewContainer(name, class_comp, args)
            self.sub_containers.add(model.getContainer(name))
        elif self.root_container:
            # shared container: let it do the instantiation remotely
            comp = self.root_container.instantiate(class_comp, args)
        else:
            # no container at all: plain local instantiation
            comp = class_comp(**args)
    except Exception:
        # log which component failed, then let the caller deal with it
        logging.error("Error while instantiating component %s.", name)
        raise

    # Add all the children to our list of components. Useful only if child
    # created by delegation, but can't hurt to add them all.
    self.components |= getattr(comp, "children", set())

    return comp
def main(args):
    """
    Contains the console handling code for the daemon
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """
    #print args
    # arguments handling
    parser = argparse.ArgumentParser(description=odemis.__fullname__)

    parser.add_argument('--version', dest="version", action='store_true',
                        help="show program's version number and exit")
    dm_grp = parser.add_argument_group('Daemon management')
    dm_grpe = dm_grp.add_mutually_exclusive_group()
    dm_grpe.add_argument("--kill", "-k", dest="kill", action="store_true", default=False,
                         help="Kill the running back-end")
    dm_grpe.add_argument("--check", dest="check", action="store_true", default=False,
                         help="Check for a running back-end (only returns exit code)")
    dm_grpe.add_argument("--daemonize", "-D", action="store_true", dest="daemon",
                         default=False, help="Daemonize the back-end")
    opt_grp = parser.add_argument_group('Options')
    opt_grp.add_argument('--validate', dest="validate", action="store_true", default=False,
                         help="Validate the microscope description file and exit")
    # NOTE: --debug belongs to the mutually-exclusive daemon group (dm_grpe),
    # so it cannot be combined with --kill/--check/--daemonize
    dm_grpe.add_argument("--debug", action="store_true", dest="debug", default=False,
                         help="Activate debug mode, where everything runs in one process")
    opt_grp.add_argument("--log-level", dest="loglev", metavar="LEVEL", type=int,
                         default=0, help="Set verbosity level (0-2, default = 0)")
    opt_grp.add_argument("--log-target", dest="logtarget", metavar="{auto,stderr,filename}",
                         default="auto", help="Specify the log target (auto, stderr, filename)")
    # The settings file is opened here because root privileges are dropped at some point after
    # the initialization.
    opt_grp.add_argument("--settings", dest='settings', type=argparse.FileType('a+'),
                         help="Path to the settings file "
                              "(stores values of persistent properties and metadata). "
                              "Default is %s, if writable." % DEFAULT_SETTINGS_FILE)
    parser.add_argument("model", metavar="file.odm.yaml", nargs='?', type=open,
                        help="Microscope model instantiation file (*.odm.yaml)")

    options = parser.parse_args(args[1:])

    # Cannot use the internal feature, because it doesn't support multiline
    if options.version:
        print(odemis.__fullname__ + " " + odemis.__version__ + "\n" +
              odemis.__copyright__ + "\n" +
              "Licensed under the " + odemis.__license__)
        return 0

    # Set up logging before everything else
    if options.loglev < 0:
        parser.error("log-level must be positive.")
    loglev_names = [logging.WARNING, logging.INFO, logging.DEBUG]
    # clamp the requested level to the most verbose one supported
    loglev = loglev_names[min(len(loglev_names) - 1, options.loglev)]

    # auto = {odemis.log if daemon, stderr otherwise}
    if options.logtarget == "auto":
        # default to SysLogHandler ?
        if options.daemon:
            options.logtarget = "odemis.log"
        else:
            options.logtarget = "stderr"
    if options.logtarget == "stderr":
        handler = logging.StreamHandler()
    else:
        if sys.platform.startswith('linux'):
            # On Linux, we use logrotate, so nothing much to do
            handler = WatchedFileHandler(options.logtarget)
        else:
            # Rotate the log, with max 5*50Mb used.
            # Note: we used to rely on RotatingFileHandler, but due to multi-
            # processes, it would be rotated multiple times every time it reached the
            # limit. So now, just do it at startup, and hope it doesn't reach huge
            # size in one run.
            rotateLog(options.logtarget, maxBytes=50 * (2**20), backupCount=5)
            handler = FileHandler(options.logtarget)
    logging.getLogger().setLevel(loglev)
    handler.setFormatter(logging.Formatter("%(asctime)s\t%(levelname)s\t%(module)s:%(lineno)d:\t%(message)s"))
    logging.getLogger().addHandler(handler)

    if loglev <= logging.DEBUG:
        # Activate also Pyro logging
        # TODO: options.logtarget
        pyrolog = logging.getLogger("Pyro4")
        pyrolog.setLevel(min(pyrolog.getEffectiveLevel(), logging.INFO))

    # Useful to debug cases of multiple conflicting installations
    logging.info("Starting Odemis back-end v%s (from %s) using Python %d.%d",
                 odemis.__version__, __file__, sys.version_info[0], sys.version_info[1])

    if options.validate and (options.kill or options.check or options.daemon):
        logging.error("Impossible to validate a model and manage the daemon simultaneously")
        return 1

    # Daemon management
    # python-daemon is a fancy library but seems to do too many things for us.
    # We just need to contact the backend and see what happens
    status = get_backend_status()
    if options.check:
        logging.info("Status of back-end is %s", status)
        return status_to_xtcode[status]

    try:
        if options.kill:
            if status != BACKEND_RUNNING:
                raise IOError("No running back-end to kill")
            backend = model.getContainer(model.BACKEND_NAME)
            backend.terminate()
            return 0

        # check if there is already a backend running
        if status == BACKEND_RUNNING:
            raise IOError("Back-end already running, cannot start a new one")

        if options.model is None:
            raise ValueError("No microscope model instantiation file provided")
        if options.settings is None:
            # No --settings given: try the default path. NOTE(review): "r+"
            # requires the file to already exist (unlike the "a+" used by the
            # --settings FileType) — presumably deliberate, so a missing file
            # just disables persistence; confirm.
            try:
                options.settings = open(DEFAULT_SETTINGS_FILE, "r+")
            except IOError as ex:
                logging.warning("%s. Will not be able to use persistent data", ex)

        if options.debug:
            cont_pol = BackendRunner.CONTAINER_ALL_IN_ONE
        else:
            cont_pol = BackendRunner.CONTAINER_SEPARATED

        # let's become the back-end for real
        runner = BackendRunner(options.model, options.settings, options.daemon,
                               dry_run=options.validate, containement=cont_pol)
        runner.run()
    except ValueError as exp:
        logging.error("%s", exp)
        return 127
    except IOError as exp:
        logging.error("%s", exp)
        return 129
    except Exception:
        logging.exception("Unexpected error while performing action.")
        return 130

    return 0
def main(args):
    """
    Contains the console handling code for the daemon
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """
    #print args
    # arguments handling
    parser = argparse.ArgumentParser(description=odemis.__fullname__)

    parser.add_argument('--version', dest="version", action='store_true',
                        help="show program's version number and exit")
    dm_grp = parser.add_argument_group('Daemon management')
    dm_grpe = dm_grp.add_mutually_exclusive_group()
    dm_grpe.add_argument("--kill", "-k", dest="kill", action="store_true", default=False,
                         help="Kill the running back-end")
    dm_grpe.add_argument("--check", dest="check", action="store_true", default=False,
                         help="Check for a running back-end (only returns exit code)")
    dm_grpe.add_argument("--daemonize", "-D", action="store_true", dest="daemon",
                         default=False, help="Daemonize the back-end")
    opt_grp = parser.add_argument_group('Options')
    opt_grp.add_argument('--validate', dest="validate", action="store_true", default=False,
                         help="Validate the microscope description file and exit")
    # NOTE: --debug is part of the mutually-exclusive daemon group (dm_grpe)
    dm_grpe.add_argument("--debug", action="store_true", dest="debug", default=False,
                         help="Activate debug mode, where everything runs in one process")
    opt_grp.add_argument("--log-level", dest="loglev", metavar="LEVEL", type=int,
                         default=0, help="Set verbosity level (0-2, default = 0)")
    opt_grp.add_argument("--log-target", dest="logtarget", metavar="{auto,stderr,filename}",
                         default="auto", help="Specify the log target (auto, stderr, filename)")
    # The settings file is opened here because root privileges are dropped at some point after
    # the initialization.
    opt_grp.add_argument("--settings", dest='settings', type=argparse.FileType('a+'),
                         help="Path to the settings file "
                              "(stores values of persistent properties and metadata). "
                              "Default is %s, if writable." % DEFAULT_SETTINGS_FILE)
    parser.add_argument("model", metavar="file.odm.yaml", nargs='?', type=open,
                        help="Microscope model instantiation file (*.odm.yaml)")

    options = parser.parse_args(args[1:])

    # Cannot use the internal feature, because it doesn't support multiline
    if options.version:
        print(odemis.__fullname__ + " " + odemis.__version__ + "\n" +
              odemis.__copyright__ + "\n" +
              "Licensed under the " + odemis.__license__)
        return 0

    # Set up logging before everything else
    if options.loglev < 0:
        parser.error("log-level must be positive.")
    loglev_names = [logging.WARNING, logging.INFO, logging.DEBUG]
    # clamp to the most verbose supported level
    loglev = loglev_names[min(len(loglev_names) - 1, options.loglev)]

    # auto = {odemis.log if daemon, stderr otherwise}
    if options.logtarget == "auto":
        # default to SysLogHandler ?
        if options.daemon:
            options.logtarget = "odemis.log"
        else:
            options.logtarget = "stderr"
    if options.logtarget == "stderr":
        handler = logging.StreamHandler()
    else:
        # NOTE(review): os.sys is just an alias of the sys module
        if os.sys.platform.startswith('linux'):
            # On Linux, we use logrotate, so nothing much to do
            handler = WatchedFileHandler(options.logtarget)
        else:
            # Rotate the log, with max 5*50Mb used.
            # Note: we used to rely on RotatingFileHandler, but due to multi-
            # processes, it would be rotated multiple times every time it reached the
            # limit. So now, just do it at startup, and hope it doesn't reach huge
            # size in one run.
            rotateLog(options.logtarget, maxBytes=50 * (2 ** 20), backupCount=5)
            handler = FileHandler(options.logtarget)
    logging.getLogger().setLevel(loglev)
    handler.setFormatter(logging.Formatter('%(asctime)s (%(module)s) %(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)

    if loglev <= logging.DEBUG:
        # Activate also Pyro logging
        # TODO: options.logtarget
        pyrolog = logging.getLogger("Pyro4")
        pyrolog.setLevel(min(pyrolog.getEffectiveLevel(), logging.INFO))

    # Useful to debug cases of multiple conflicting installations
    logging.info("Starting Odemis back-end v%s (from %s)", odemis.__version__, __file__)

    if options.validate and (options.kill or options.check or options.daemon):
        logging.error("Impossible to validate a model and manage the daemon simultaneously")
        return 1

    # Daemon management
    # python-daemon is a fancy library but seems to do too many things for us.
    # We just need to contact the backend and see what happens
    status = get_backend_status()
    if options.check:
        logging.info("Status of back-end is %s", status)
        return status_to_xtcode[status]

    try:
        if options.kill:
            if status != BACKEND_RUNNING:
                raise IOError("No running back-end to kill")
            backend = model.getContainer(model.BACKEND_NAME)
            backend.terminate()
            return 0

        # check if there is already a backend running
        if status == BACKEND_RUNNING:
            raise IOError("Back-end already running, cannot start a new one")

        if options.model is None:
            raise ValueError("No microscope model instantiation file provided")
        if options.settings is None:
            # No --settings given: open (or create, thanks to "a+") the default
            # settings file; failing that, run without persistent data
            try:
                options.settings = open(DEFAULT_SETTINGS_FILE, "a+")
            except IOError as ex:
                logging.warning("%s. Will not be able to use persistent data", ex)

        if options.debug:
            cont_pol = BackendRunner.CONTAINER_ALL_IN_ONE
        else:
            cont_pol = BackendRunner.CONTAINER_SEPARATED

        # let's become the back-end for real
        runner = BackendRunner(options.model, options.settings, options.daemon,
                               dry_run=options.validate, containement=cont_pol)
        runner.run()
    except ValueError as exp:
        logging.error("%s", exp)
        return 127
    except IOError as exp:
        logging.error("%s", exp)
        return 129
    except Exception:
        logging.exception("Unexpected error while performing action.")
        return 130

    return 0
def main(args):
    """
    Contains the console handling code for the daemon
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """
    # arguments handling
    parser = argparse.ArgumentParser(description=odemis.__fullname__)

    parser.add_argument('--version', dest="version", action='store_true',
                        help="show program's version number and exit")
    dm_grp = parser.add_argument_group('Daemon management')
    dm_grpe = dm_grp.add_mutually_exclusive_group()
    dm_grpe.add_argument("--kill", "-k", dest="kill", action="store_true", default=False,
                         help="Kill the running back-end")
    dm_grpe.add_argument("--check", dest="check", action="store_true", default=False,
                         help="Check for a running back-end (only returns exit code)")
    dm_grpe.add_argument("--daemonize", "-D", action="store_true", dest="daemon",
                         default=False, help="Daemonize the back-end after startup")
    opt_grp = parser.add_argument_group('Options')
    opt_grp.add_argument('--validate', dest="validate", action="store_true", default=False,
                         help="Validate the microscope description file and exit")
    dm_grpe.add_argument("--debug", action="store_true", dest="debug", default=False,
                         help="Activate debug mode, where everything runs in one process")
    opt_grp.add_argument("--log-level", dest="loglev", metavar="LEVEL", type=int, default=0,
                         help="Set verbosity level (0-2, default = 0)")
    opt_grp.add_argument("--log-target", dest="logtarget", metavar="{auto,stderr,filename}",
                         default="auto", help="Specify the log target (auto, stderr, filename)")
    parser.add_argument("model", metavar="file.odm.yaml", nargs='?', type=open,
                        help="Microscope model instantiation file (*.odm.yaml)")

    options = parser.parse_args(args[1:])

    # Cannot use argparse's built-in version action: it doesn't support multiline
    if options.version:
        print(odemis.__fullname__ + " " + odemis.__version__ + "\n" +
              odemis.__copyright__ + "\n" +
              "Licensed under the " + odemis.__license__)
        return 0

    # Set up logging before everything else
    if options.loglev < 0:
        parser.error("log-level must be positive.")
    loglev_names = [logging.WARNING, logging.INFO, logging.DEBUG]
    # clamp to the most verbose supported level
    loglev = loglev_names[min(len(loglev_names) - 1, options.loglev)]

    # auto = {odemis.log if daemon, stderr otherwise}
    if options.logtarget == "auto":
        # default to SysLogHandler ?
        if options.daemon:
            # Rotate the log, with max 500Mb used
            handler = RotatingFileHandler("odemis.log", maxBytes=100 * (2 ** 20), backupCount=5)
        else:
            handler = logging.StreamHandler()
    elif options.logtarget == "stderr":
        handler = logging.StreamHandler()
    else:
        handler = RotatingFileHandler(options.logtarget, maxBytes=100 * (2 ** 20), backupCount=5)
    logging.getLogger().setLevel(loglev)
    handler.setFormatter(logging.Formatter('%(asctime)s (%(module)s) %(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)

    # Useful to debug cases of multiple conflicting installations
    logging.debug("Starting Odemis back-end (from %s)", __file__)

    if options.validate and (options.kill or options.check or options.daemon):
        logging.error("Impossible to validate a model and manage the daemon simultaneously")
        return 127

    # Daemon management
    # python-daemon is a fancy library but seems to do too many things for us.
    # We just need to contact the backend and see what happens
    status = get_backend_status()
    if options.kill:
        if status != BACKEND_RUNNING:
            logging.error("No running back-end to kill")
            return 127
        try:
            backend = model.getContainer(model.BACKEND_NAME)
            backend.terminate()
        except Exception:
            # Fixed: narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are not swallowed
            logging.error("Failed to stop the back-end")
            return 127
        return 0
    elif options.check:
        logging.info("Status of back-end is %s", status)
        return status_to_xtcode[status]

    # check if there is already a backend running
    if status == BACKEND_RUNNING:
        logging.error("Back-end already running, cannot start a new one")
        # Fixed: previously only logged and fell through, which would have
        # attempted to start a second back-end anyway
        return 127

    if options.model is None:
        logging.error("No microscope model instantiation file provided")
        return 127

    if options.debug:
        #cont_pol = BackendRunner.CONTAINER_DISABLE
        cont_pol = BackendRunner.CONTAINER_ALL_IN_ONE
    else:
        cont_pol = BackendRunner.CONTAINER_SEPARATED

    # let's become the back-end for real
    runner = BackendRunner(options.model, options.daemon, options.validate, cont_pol)
    return runner.run()
def kill_backend():
    """
    Ask the back-end container to terminate.
    Raises:
        IOError: if the back-end could not be stopped
    """
    try:
        model.getContainer(model.BACKEND_NAME).terminate()
    except Exception:
        raise IOError("Failed to stop the back-end")
def wait_backend_is_ready(self):
    """
    Block until the back-end is fully ready (when all the components are ready)
    Raises:
        IOError: If the back-end eventually fails to start
    """
    # Get a connection to the back-end
    end_time = time.time() + 5  # 5s max to start the backend
    backend = None
    # Poll until the back-end container answers and gives us the microscope
    while self._mic is None:
        try:
            backend = model.getContainer(model.BACKEND_NAME, validate=False)
            self._mic = backend.getRoot()
        except (IOError, CommunicationError) as exp:
            if (isinstance(exp, CommunicationError) and
                    "Permission denied" in str(exp)):
                raise  # No hope: we will never be allowed to connect
            if time.time() > end_time:
                logging.exception("Timeout waiting for back-end to start")
                self.show_popup(
                    "Odemis back-end failed to start",
                    ("For more information look at the log messages in %s "
                     "or type odemis-start in a terminal.") % self._config["LOGFILE"],
                    "dialog-warning")
                raise IOError("Back-end failed to start")
            else:
                logging.debug("Waiting a bit more for the backend to appear")
                time.sleep(1)

    # Follow the "ghosts" (components not yet instantiated) to know progress
    try:
        self._mic.ghosts.subscribe(self._on_ghosts, init=True)
    except (IOError, CommunicationError):
        self.show_popup(
            "Odemis back-end failed to start",
            ("For more information look at the log messages in %s "
             "or type odemis-start in a terminal.") % self._config["LOGFILE"],
            "dialog-warning")
        raise IOError("Back-end failed to fully instantiate")

    # create a window with the list of the components
    self._component_frame = self._create_component_frame(self._mic.name)

    # In theory Python raise KeyboardInterrupt on SIGINT, which is what we
    # need. But that doesn't happen if in a wait(), and in addition, when
    # there are several threads, only one of them receives the exception.
    # signal.signal(signal.SIGINT, self._on_sigint)

    # Check in background if the back-end is ready
    check_thread = threading.Thread(target=self._watch_backend_status,
                                    args=(backend, self._mic))
    check_thread.start()

    # Show status window until the backend is ready (or failed to start)
    ret = self._component_frame.ShowModal()
    # Blocking until the window is closed. It returns either:
    # * ID_CANCEL => the user doesn't want to start finally
    # * ID_EXIT => Error in the backend
    # * ID_OK => the backend is ready
    logging.debug("Window closed with: %d", ret)
    # TODO: detect Ctrl+C and interpret as pressing "Cancel"
    # except KeyboardInterrupt:
    #     # self._frame.Destroy()
    #     self._mic.ghosts.unsubscribe(self._on_ghosts)
    #     self._frame.EndModal(wx.ID_CANCEL)
    #     logging.info("Stopping the backend")
    #     backend.terminate()
    #     self._backend_done.set()
    #     raise

    # make sure check_thread and ghost listener stop
    self._backend_done.set()
    try:
        if ret != wx.ID_EXIT:
            self._mic.ghosts.unsubscribe(self._on_ghosts)
    except Exception:
        # Can happen if the backend failed
        pass

    # status = driver.get_backend_status()
    # if status == driver.BACKEND_RUNNING:
    if ret == wx.ID_OK:
        self.show_popup(
            "Odemis back-end successfully started",
            "" if self._nogui else "Graphical interface will now start.",
            "dialog-info")
    # elif status in (driver.BACKEND_DEAD, driver.BACKEND_STOPPED):
    elif ret == wx.ID_EXIT:
        self.show_popup(
            "Odemis back-end failed to start",
            ("For more information look at the log messages in %s "
             "or type odemis-start in a terminal.") % self._config["LOGFILE"],
            "dialog-warning")
        raise IOError("Back-end failed to fully instantiate")
    elif ret == wx.ID_CANCEL:
        logging.info("Stopping the backend")
        backend.terminate()
        self.show_popup("Odemis back-end start cancelled")
        raise ValueError("Back-end start cancelled by the user")
    else:
        logging.warning("Unexpected return code %d", ret)