def configure_main(self, config_name, action, contents):
    """Handle the main config-store entry for the simulation driver.

    Merges ``contents`` over the default configuration and, on the
    initial "NEW" action, performs the one-time global lock setup.
    Override patterns and the device scrape schedule are refreshed on
    every call.

    :param config_name: name of the config entry (callback signature)
    :param action: "NEW" on first load, "UPDATE" afterwards
    :param contents: configuration dictionary from the config store
    """
    merged = self.default_config.copy()
    merged.update(contents)

    if action == "NEW":
        try:
            # Global lock setup is only valid at startup; a bad value
            # here is fatal.
            configure_socket_lock()
            configure_publish_lock(10000)
        except ValueError as err:
            _log.error("ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}".format(err))
            _log.error("SIMULATION DRIVER SHUTTING DOWN")
            sys.exit(1)

    self.update_override_patterns()
    self.update_scrape_schedule(merged)
def configure_main(self, config_name, action, contents):
    """Process the simulation driver's main configuration entry.

    Overlays ``contents`` on the defaults.  The first time the config
    is seen (``action == "NEW"``) the socket and publish locks are
    configured; failure to do so shuts the driver down.  Afterwards the
    override patterns and scrape schedule are (re)applied.
    """
    cfg = self.default_config.copy()
    cfg.update(contents)

    first_load = (action == "NEW")
    if first_load:
        try:
            configure_socket_lock()
            configure_publish_lock(10000)
        except ValueError as error:
            # Startup-critical settings are invalid; abort the agent.
            _log.error(
                "ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}"
                .format(error))
            _log.error("SIMULATION DRIVER SHUTTING DOWN")
            sys.exit(1)

    self.update_override_patterns()
    self.update_scrape_schedule(cfg)
def configure_main(self, config_name, action, contents):
    """Callback for the master driver's main configuration.

    On the initial "NEW" action the startup-critical settings (socket
    and publish locks, scalability-test bookkeeping) are applied; a bad
    value there is fatal and shuts the agent down.  On later updates
    those settings only produce "restart required" notices.  The
    remaining settings (override patterns, scrape scheduling, publish
    topic types) are (re)applied on every call.

    :param config_name: name of the config entry (callback signature)
    :param action: "NEW" on first load, "UPDATE" afterwards
    :param contents: configuration dictionary from the config store
    """
    config = self.default_config.copy()
    config.update(contents)

    if action == "NEW":
        try:
            self.max_open_sockets = config["max_open_sockets"]
            if self.max_open_sockets is not None:
                max_open_sockets = int(self.max_open_sockets)
                configure_socket_lock(max_open_sockets)
                _log.info("maximum concurrently open sockets limited to "
                          + str(max_open_sockets))
            elif self.system_socket_limit is not None:
                # Leave 20% of the process file-descriptor limit free
                # for non-socket use.
                max_open_sockets = int(self.system_socket_limit * 0.8)
                _log.info("maximum concurrently open sockets limited to "
                          + str(max_open_sockets)
                          + " (derived from system limits)")
                configure_socket_lock(max_open_sockets)
            else:
                configure_socket_lock()
                _log.warn(
                    "No limit set on the maximum number of concurrently open sockets. "
                    "Consider setting max_open_sockets if you plan to work with 800+ modbus devices.")

            self.max_concurrent_publishes = config['max_concurrent_publishes']
            max_concurrent_publishes = int(self.max_concurrent_publishes)
            if max_concurrent_publishes < 1:
                _log.warn(
                    "No limit set on the maximum number of concurrent driver publishes. "
                    "Consider setting max_concurrent_publishes if you plan to work with many devices.")
            else:
                _log.info("maximum concurrent driver publishes limited to "
                          + str(max_concurrent_publishes))
                configure_publish_lock(max_concurrent_publishes)

            self.scalability_test = bool(config["scalability_test"])
            self.scalability_test_iterations = int(
                config["scalability_test_iterations"])
            if self.scalability_test:
                self.waiting_to_finish = set()
                self.test_iterations = 0
                self.test_results = []
                self.current_test_start = None
        except ValueError as e:
            _log.error(
                "ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}"
                .format(e))
            _log.error("MASTER DRIVER SHUTTING DOWN")
            sys.exit(1)
    else:
        # Startup-critical settings cannot be changed at runtime; just
        # tell the operator a restart is needed.
        if self.max_open_sockets != config["max_open_sockets"]:
            _log.info("The master driver must be restarted for changes to "
                      "the max_open_sockets setting to take effect")
        if self.max_concurrent_publishes != config["max_concurrent_publishes"]:
            _log.info("The master driver must be restarted for changes to "
                      "the max_concurrent_publishes setting to take effect")
        if self.scalability_test != bool(config["scalability_test"]):
            if not self.scalability_test:
                _log.info("The master driver must be restarted with "
                          "scalability_test set to true in order to run a test.")
            if self.scalability_test:
                _log.info("A scalability test may not be interrupted. "
                          "Restarting the driver is required to stop the test.")
        try:
            if self.scalability_test_iterations != int(
                    config["scalability_test_iterations"]) and self.scalability_test:
                _log.info("A scalability test must be restarted for the "
                          "scalability_test_iterations setting to take effect.")
        except ValueError:
            # A malformed iteration count only matters for the notice.
            pass

    # Update override patterns (only once; None marks "not loaded yet").
    if self._override_patterns is None:
        try:
            values = self.vip.config.get("override_patterns")
            values = jsonapi.loads(values)
            if isinstance(values, dict):
                self._override_patterns = set()
                for pattern, end_time in values.items():
                    # Check the end_time against the current time.
                    now = utils.get_aware_utc_now()
                    if end_time == "0.0":
                        # "0.0" marks an indefinite override.
                        self._set_override_on(pattern, 0.0, from_config_store=True)
                    else:
                        end_time = utils.parse_timestamp_string(end_time)
                        if end_time > now:
                            # Re-arm the override for its remaining duration.
                            delta = end_time - now
                            self._set_override_on(pattern, delta.total_seconds(),
                                                  from_config_store=True)
            else:
                self._override_patterns = set()
        except KeyError:
            # No "override_patterns" entry in the config store.
            self._override_patterns = set()
        except ValueError:
            _log.error("Override patterns is not set correctly in config store")
            self._override_patterns = set()

    # BUG FIX: on a malformed value, fall back to the currently active
    # interval so the setting really is "unchanged" — previously the
    # local stayed unbound and the comparison below raised NameError.
    try:
        driver_scrape_interval = float(config["driver_scrape_interval"])
    except ValueError as e:
        _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
        _log.error("Master driver scrape interval settings unchanged")
        driver_scrape_interval = self.driver_scrape_interval
        # TODO: set a health status for the agent

    try:
        group_offset_interval = float(config["group_offset_interval"])
    except ValueError as e:
        _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
        _log.error("Master driver group interval settings unchanged")
        group_offset_interval = self.group_offset_interval
        # TODO: set a health status for the agent

    if self.scalability_test and action == "UPDATE":
        _log.info("Running scalability test. "
                  "Settings may not be changed without restart.")
        return

    if (self.driver_scrape_interval != driver_scrape_interval or
            self.group_offset_interval != group_offset_interval):
        self.driver_scrape_interval = driver_scrape_interval
        self.group_offset_interval = group_offset_interval
        _log.info("Setting time delta between driver device scrapes to "
                  + str(driver_scrape_interval))
        # Reset all scrape schedules.
        self.freed_time_slots.clear()
        self.group_counts.clear()
        for driver in self.instances.itervalues():
            time_slot = self.group_counts[driver.group]
            driver.update_scrape_schedule(time_slot,
                                          self.driver_scrape_interval,
                                          driver.group,
                                          self.group_offset_interval)
            self.group_counts[driver.group] += 1

    self.publish_depth_first_all = bool(config["publish_depth_first_all"])
    self.publish_breadth_first_all = bool(config["publish_breadth_first_all"])
    self.publish_depth_first = bool(config["publish_depth_first"])
    self.publish_breadth_first = bool(config["publish_breadth_first"])

    # Update the publish settings on running devices.
    for driver in self.instances.itervalues():
        driver.update_publish_types(self.publish_depth_first_all,
                                    self.publish_breadth_first_all,
                                    self.publish_depth_first,
                                    self.publish_breadth_first)
def master_driver_agent(config_path, **kwargs):
    """Factory: build and return a MasterDriverAgent from a config file.

    Keyword arguments override values in the config file; each key that
    get_config() looks up is popped from ``kwargs`` so it is not also
    forwarded to the agent constructor.  The agent class itself is
    defined inside this function so that it closes over
    ``driver_config_list``.

    :param config_path: path passed to utils.load_config
    :param kwargs: per-key overrides plus extra Agent constructor args
    :returns: a constructed MasterDriverAgent instance
    """
    config = utils.load_config(config_path)

    def get_config(name, default=None):
        # kwargs win over the file; pop so the key is consumed.
        try:
            return kwargs.pop(name)
        except KeyError:
            return config.get(name, default)

    max_open_sockets = get_config('max_open_sockets', None)

    # Increase open files resource limit to max or 8192 if unlimited
    limit = None
    try:
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    except OSError:
        _log.exception('error getting open file limits')
    else:
        if soft != hard and soft != resource.RLIM_INFINITY:
            try:
                # Raise the soft limit to the hard limit (or 8192 if
                # the hard limit is unlimited).
                limit = 8192 if hard == resource.RLIM_INFINITY else hard
                resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))
            except OSError:
                _log.exception('error setting open file limits')
            else:
                _log.debug('open file resource limit increased from %d to %d',
                           soft, limit)
        if soft == hard:
            limit = soft

    if max_open_sockets is not None:
        # Explicit user-provided socket cap.
        configure_socket_lock(max_open_sockets)
        _log.info("maximum concurrently open sockets limited to " +
                  str(max_open_sockets))
    elif limit is not None:
        # Derive a cap from the file-descriptor limit, leaving headroom.
        max_open_sockets = int(limit * 0.8)
        _log.info("maximum concurrently open sockets limited to " +
                  str(max_open_sockets) + " (derived from system limits)")
        configure_socket_lock(max_open_sockets)
    else:
        configure_socket_lock()
        _log.warn(
            "No limit set on the maximum number of concurrently open sockets. "
            "Consider setting max_open_sockets if you plan to work with 800+ modbus devices."
        )

    #TODO: update the default after scalability testing.
    max_concurrent_publishes = get_config('max_concurrent_publishes', 10000)
    if max_concurrent_publishes < 1:
        _log.warn(
            "No limit set on the maximum number of concurrent driver publishes. "
            "Consider setting max_concurrent_publishes if you plan to work with many devices."
        )
    else:
        _log.info("maximum concurrent driver publishes limited to " +
                  str(max_concurrent_publishes))
        configure_publish_lock(max_concurrent_publishes)

    vip_identity = get_config('vip_identity', 'platform.driver')
    # pop the uuid based id so the fixed vip_identity below is used instead
    kwargs.pop('identity', None)

    driver_config_list = get_config('driver_config_list')

    class MasterDriverAgent(Agent):
        def __init__(self, **kwargs):
            super(MasterDriverAgent, self).__init__(**kwargs)
            # topic -> DriverAgent, filled in by device_startup_callback.
            self.instances = {}

        @Core.receiver('onstart')
        def starting(self, sender, **kwargs):
            # NOTE(review): env is built but never used below —
            # presumably left over from an older subprocess-launch
            # scheme; confirm before removing.
            env = os.environ.copy()
            env.pop('AGENT_UUID', None)
            for config_name in driver_config_list:
                _log.debug("Launching driver for config " + config_name)
                driver = DriverAgent(self, config_name)
                gevent.spawn(driver.core.run)
                #driver.core.stop to kill an agent.

        def device_startup_callback(self, topic, driver):
            """Register a driver under its (slash-stripped) topic."""
            _log.debug("Driver hooked up for " + topic)
            topic = topic.strip('/')
            self.instances[topic] = driver

        @RPC.export
        def get_point(self, path, point_name):
            # Raises KeyError if no driver is registered for path.
            return self.instances[path].get_point(point_name)

        @RPC.export
        def set_point(self, path, point_name, value):
            return self.instances[path].set_point(point_name, value)

        @RPC.export
        def heart_beat(self):
            """Toggle the heartbeat point on every registered device."""
            _log.debug("sending heartbeat")
            for device in self.instances.values():
                device.heart_beat()

    return MasterDriverAgent(identity=vip_identity, **kwargs)
def configure_main(self, config_name, action, contents):
    """Main configuration callback for the master driver.

    "NEW" applies the startup-critical settings (socket/publish locks,
    scalability-test state); a bad value there shuts the agent down.
    "UPDATE" only logs "restart required" notices for those settings.
    Override patterns, scrape scheduling, and publish topic types are
    (re)applied on every invocation.

    :param config_name: config entry name (callback signature)
    :param action: "NEW" on first load, "UPDATE" afterwards
    :param contents: configuration dictionary from the config store
    """
    config = self.default_config.copy()
    config.update(contents)

    if action == "NEW":
        try:
            self.max_open_sockets = config["max_open_sockets"]
            if self.max_open_sockets is not None:
                max_open_sockets = int(self.max_open_sockets)
                configure_socket_lock(max_open_sockets)
                _log.info("maximum concurrently open sockets limited to "
                          + str(max_open_sockets))
            elif self.system_socket_limit is not None:
                # Reserve 20% of the descriptor limit for non-socket use.
                max_open_sockets = int(self.system_socket_limit * 0.8)
                _log.info("maximum concurrently open sockets limited to "
                          + str(max_open_sockets)
                          + " (derived from system limits)")
                configure_socket_lock(max_open_sockets)
            else:
                configure_socket_lock()
                _log.warn(
                    "No limit set on the maximum number of concurrently open sockets. "
                    "Consider setting max_open_sockets if you plan to work with 800+ modbus devices.")

            self.max_concurrent_publishes = config['max_concurrent_publishes']
            max_concurrent_publishes = int(self.max_concurrent_publishes)
            if max_concurrent_publishes < 1:
                _log.warn(
                    "No limit set on the maximum number of concurrent driver publishes. "
                    "Consider setting max_concurrent_publishes if you plan to work with many devices.")
            else:
                _log.info("maximum concurrent driver publishes limited to "
                          + str(max_concurrent_publishes))
                configure_publish_lock(max_concurrent_publishes)

            self.scalability_test = bool(config["scalability_test"])
            self.scalability_test_iterations = int(
                config["scalability_test_iterations"])
            if self.scalability_test:
                self.waiting_to_finish = set()
                self.test_iterations = 0
                self.test_results = []
                self.current_test_start = None
        except ValueError as e:
            _log.error(
                "ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}"
                .format(e))
            _log.error("MASTER DRIVER SHUTTING DOWN")
            sys.exit(1)
    else:
        # These settings cannot change at runtime; warn on a mismatch.
        if self.max_open_sockets != config["max_open_sockets"]:
            _log.info("The master driver must be restarted for changes to "
                      "the max_open_sockets setting to take effect")
        if self.max_concurrent_publishes != config["max_concurrent_publishes"]:
            _log.info("The master driver must be restarted for changes to "
                      "the max_concurrent_publishes setting to take effect")
        if self.scalability_test != bool(config["scalability_test"]):
            if not self.scalability_test:
                _log.info("The master driver must be restarted with "
                          "scalability_test set to true in order to run a test.")
            if self.scalability_test:
                _log.info("A scalability test may not be interrupted. "
                          "Restarting the driver is required to stop the test.")
        try:
            if self.scalability_test_iterations != int(
                    config["scalability_test_iterations"]) and self.scalability_test:
                _log.info("A scalability test must be restarted for the "
                          "scalability_test_iterations setting to take effect.")
        except ValueError:
            pass

    # Update override patterns; None means they were never loaded.
    if self._override_patterns is None:
        try:
            values = self.vip.config.get("override_patterns")
            values = jsonapi.loads(values)
            if isinstance(values, dict):
                self._override_patterns = set()
                for pattern, end_time in values.items():
                    # Check the stored end_time against "now".
                    now = utils.get_aware_utc_now()
                    if end_time == "0.0":
                        # "0.0" encodes an indefinite override.
                        self._set_override_on(pattern, 0.0, from_config_store=True)
                    else:
                        end_time = utils.parse_timestamp_string(end_time)
                        if end_time > now:
                            # Still in the future: re-arm for what's left.
                            delta = end_time - now
                            self._set_override_on(pattern, delta.total_seconds(),
                                                  from_config_store=True)
            else:
                self._override_patterns = set()
        except KeyError:
            self._override_patterns = set()
        except ValueError:
            _log.error("Override patterns is not set correctly in config store")
            self._override_patterns = set()

    # BUG FIX: keep the active value on a malformed setting so the
    # comparison below cannot hit an unbound local (NameError).
    try:
        driver_scrape_interval = float(config["driver_scrape_interval"])
    except ValueError as e:
        _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
        _log.error("Master driver scrape interval settings unchanged")
        driver_scrape_interval = self.driver_scrape_interval
        # TODO: set a health status for the agent

    try:
        group_offset_interval = float(config["group_offset_interval"])
    except ValueError as e:
        _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
        _log.error("Master driver group interval settings unchanged")
        group_offset_interval = self.group_offset_interval
        # TODO: set a health status for the agent

    if self.scalability_test and action == "UPDATE":
        _log.info("Running scalability test. "
                  "Settings may not be changed without restart.")
        return

    if (self.driver_scrape_interval != driver_scrape_interval or
            self.group_offset_interval != group_offset_interval):
        self.driver_scrape_interval = driver_scrape_interval
        self.group_offset_interval = group_offset_interval
        _log.info("Setting time delta between driver device scrapes to "
                  + str(driver_scrape_interval))
        # Reset all scrape schedules.
        self.freed_time_slots.clear()
        self.group_counts.clear()
        for driver in self.instances.itervalues():
            time_slot = self.group_counts[driver.group]
            driver.update_scrape_schedule(time_slot,
                                          self.driver_scrape_interval,
                                          driver.group,
                                          self.group_offset_interval)
            self.group_counts[driver.group] += 1

    self.publish_depth_first_all = bool(config["publish_depth_first_all"])
    self.publish_breadth_first_all = bool(config["publish_breadth_first_all"])
    self.publish_depth_first = bool(config["publish_depth_first"])
    self.publish_breadth_first = bool(config["publish_breadth_first"])

    # Push the new publish settings to already-running devices.
    for driver in self.instances.itervalues():
        driver.update_publish_types(self.publish_depth_first_all,
                                    self.publish_breadth_first_all,
                                    self.publish_depth_first,
                                    self.publish_breadth_first)
def master_driver_agent(config_path, **kwargs):
    """Build a MasterDriverAgent from a config file plus kwargs overrides.

    Values supplied in ``kwargs`` take precedence over the config file;
    every key that is looked up is popped from ``kwargs`` so only the
    remaining keyword arguments reach the agent constructor.

    :param config_path: path handed to utils.load_config
    :returns: a constructed MasterDriverAgent
    """
    file_config = utils.load_config(config_path)

    def get_config(name, default=None):
        # Prefer an explicit kwargs override; consume it either way.
        try:
            return kwargs.pop(name)
        except KeyError:
            return file_config.get(name, default)

    max_open_sockets = get_config('max_open_sockets', None)

    # Increase open files resource limit to max or 8192 if unlimited
    limit = None
    try:
        soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
    except OSError:
        _log.exception('error getting open file limits')
    else:
        if soft_limit != hard_limit and soft_limit != resource.RLIM_INFINITY:
            # Target the hard limit, capped at 8192 when it is unlimited.
            limit = 8192 if hard_limit == resource.RLIM_INFINITY else hard_limit
            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard_limit))
            except OSError:
                _log.exception('error setting open file limits')
            else:
                _log.debug('open file resource limit increased from %d to %d',
                           soft_limit, limit)
        if soft_limit == hard_limit:
            limit = soft_limit

    if max_open_sockets is not None:
        configure_socket_lock(max_open_sockets)
        _log.info("maximum concurrently open sockets limited to " +
                  str(max_open_sockets))
    elif limit is not None:
        # No explicit cap: derive one from the descriptor limit,
        # keeping 20% headroom for non-socket descriptors.
        max_open_sockets = int(limit * 0.8)
        _log.info("maximum concurrently open sockets limited to " +
                  str(max_open_sockets) + " (derived from system limits)")
        configure_socket_lock(max_open_sockets)
    else:
        configure_socket_lock()
        _log.warn(
            "No limit set on the maximum number of concurrently open sockets. "
            "Consider setting max_open_sockets if you plan to work with 800+ modbus devices."
        )

    #TODO: update the default after scalability testing.
    max_concurrent_publishes = get_config('max_concurrent_publishes', 10000)
    if max_concurrent_publishes < 1:
        _log.warn(
            "No limit set on the maximum number of concurrent driver publishes. "
            "Consider setting max_concurrent_publishes if you plan to work with many devices."
        )
    else:
        _log.info("maximum concurrent driver publishes limited to " +
                  str(max_concurrent_publishes))
        configure_publish_lock(max_concurrent_publishes)

    driver_config_list = get_config('driver_config_list')
    scalability_test = get_config('scalability_test', False)
    scalability_test_iterations = get_config('scalability_test_iterations', 3)
    staggered_start = get_config('staggered_start', None)

    return MasterDriverAgent(driver_config_list,
                             scalability_test,
                             scalability_test_iterations,
                             staggered_start,
                             heartbeat_autostart=True,
                             **kwargs)
def master_driver_agent(config_path, **kwargs):
    """Factory: build and return a MasterDriverAgent from a config file.

    Keyword arguments take precedence over config-file values; every key
    that get_config() resolves is popped from ``kwargs`` so only the
    leftovers are forwarded to the agent constructor.  The agent class
    is defined inside this function so it closes over
    ``driver_config_list``.

    :param config_path: path passed to utils.load_config
    :returns: a constructed MasterDriverAgent instance
    """
    config = utils.load_config(config_path)

    def get_config(name, default=None):
        # kwargs override the file; pop so the key is consumed.
        try:
            return kwargs.pop(name)
        except KeyError:
            return config.get(name, default)

    max_open_sockets = get_config("max_open_sockets", None)

    # Increase open files resource limit to max or 8192 if unlimited
    limit = None
    try:
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    except OSError:
        _log.exception("error getting open file limits")
    else:
        if soft != hard and soft != resource.RLIM_INFINITY:
            try:
                # Raise soft limit to the hard limit (8192 if unlimited).
                limit = 8192 if hard == resource.RLIM_INFINITY else hard
                resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))
            except OSError:
                _log.exception("error setting open file limits")
            else:
                _log.debug("open file resource limit increased from %d to %d",
                           soft, limit)
        if soft == hard:
            limit = soft

    if max_open_sockets is not None:
        # Explicit user-provided socket cap.
        configure_socket_lock(max_open_sockets)
        _log.info("maximum concurrently open sockets limited to " +
                  str(max_open_sockets))
    elif limit is not None:
        # Derive a cap from the descriptor limit, with 20% headroom.
        max_open_sockets = int(limit * 0.8)
        _log.info(
            "maximum concurrently open sockets limited to " +
            str(max_open_sockets) + " (derived from system limits)"
        )
        configure_socket_lock(max_open_sockets)
    else:
        configure_socket_lock()
        _log.warn(
            "No limit set on the maximum number of concurrently open sockets. "
            "Consider setting max_open_sockets if you plan to work with 800+ modbus devices."
        )

    # TODO: update the default after scalability testing.
    max_concurrent_publishes = get_config("max_concurrent_publishes", 10000)
    if max_concurrent_publishes < 1:
        _log.warn(
            "No limit set on the maximum number of concurrent driver publishes. "
            "Consider setting max_concurrent_publishes if you plan to work with many devices."
        )
    else:
        _log.info("maximum concurrent driver publishes limited to " +
                  str(max_concurrent_publishes))
        configure_publish_lock(max_concurrent_publishes)

    vip_identity = get_config("vip_identity", "platform.driver")
    # pop the uuid based id so the fixed vip_identity is used instead
    kwargs.pop("identity", None)

    driver_config_list = get_config("driver_config_list")

    class MasterDriverAgent(Agent):
        def __init__(self, **kwargs):
            super(MasterDriverAgent, self).__init__(**kwargs)
            # topic -> DriverAgent, filled in by device_startup_callback.
            self.instances = {}

        @Core.receiver("onstart")
        def starting(self, sender, **kwargs):
            # NOTE(review): env is constructed but never used below —
            # presumably a leftover from an earlier subprocess-based
            # launch scheme; confirm before removing.
            env = os.environ.copy()
            env.pop("AGENT_UUID", None)
            for config_name in driver_config_list:
                _log.debug("Launching driver for config " + config_name)
                driver = DriverAgent(self, config_name)
                gevent.spawn(driver.core.run)
                # driver.core.stop to kill an agent.

        def device_startup_callback(self, topic, driver):
            """Register a driver under its (slash-stripped) topic."""
            _log.debug("Driver hooked up for " + topic)
            topic = topic.strip("/")
            self.instances[topic] = driver

        @RPC.export
        def get_point(self, path, point_name):
            # Raises KeyError when no driver is registered for path.
            return self.instances[path].get_point(point_name)

        @RPC.export
        def set_point(self, path, point_name, value):
            return self.instances[path].set_point(point_name, value)

        @RPC.export
        def heart_beat(self):
            """Toggle the heartbeat point on every registered device."""
            _log.debug("sending heartbeat")
            for device in self.instances.values():
                device.heart_beat()

    return MasterDriverAgent(identity=vip_identity, **kwargs)
def configure_main(self, config_name, action, contents):
    """Main configuration callback (early master driver variant).

    "NEW" applies startup-critical settings (socket/publish locks,
    scalability-test state) and is fatal on bad values; "UPDATE" only
    logs "restart required" notices for them.  The scrape interval is
    (re)applied on every call, rescheduling all running drivers when
    it changes.

    :param config_name: config entry name (callback signature)
    :param action: "NEW" on first load, "UPDATE" afterwards
    :param contents: configuration dictionary from the config store
    """
    config = self.default_config.copy()
    config.update(contents)
    if action == "NEW":
        try:
            self.max_open_sockets = config["max_open_sockets"]
            if self.max_open_sockets is not None:
                max_open_sockets = int(self.max_open_sockets)
                configure_socket_lock(max_open_sockets)
                _log.info("maximum concurrently open sockets limited to " + str(max_open_sockets))
            elif self.system_socket_limit is not None:
                # Reserve 20% of the descriptor limit for non-socket use.
                max_open_sockets = int(self.system_socket_limit * 0.8)
                _log.info("maximum concurrently open sockets limited to " + str(max_open_sockets) + " (derived from system limits)")
                configure_socket_lock(max_open_sockets)
            else:
                configure_socket_lock()
                _log.warn("No limit set on the maximum number of concurrently open sockets. "
                          "Consider setting max_open_sockets if you plan to work with 800+ modbus devices.")
            self.max_concurrent_publishes = config['max_concurrent_publishes']
            max_concurrent_publishes = int(self.max_concurrent_publishes)
            if max_concurrent_publishes < 1:
                _log.warn("No limit set on the maximum number of concurrent driver publishes. "
                          "Consider setting max_concurrent_publishes if you plan to work with many devices.")
            else:
                _log.info("maximum concurrent driver publishes limited to " + str(max_concurrent_publishes))
                configure_publish_lock(max_concurrent_publishes)
            self.scalability_test = bool(config["scalability_test"])
            self.scalability_test_iterations = int(config["scalability_test_iterations"])
            if self.scalability_test:
                # Fresh bookkeeping for a new scalability-test run.
                self.waiting_to_finish = set()
                self.test_iterations = 0
                self.test_results = []
                self.current_test_start = None
        except ValueError as e:
            # Startup-critical settings are invalid; abort the agent.
            _log.error("ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}".format(e))
            _log.error("MASTER DRIVER SHUTTING DOWN")
            sys.exit(1)
    else:
        # These settings cannot change at runtime; warn on a mismatch.
        if self.max_open_sockets != config["max_open_sockets"]:
            _log.info("The master driver must be restarted for changes to the max_open_sockets setting to take effect")
        if self.max_concurrent_publishes != config["max_concurrent_publishes"]:
            _log.info("The master driver must be restarted for changes to the max_concurrent_publishes setting to take effect")
        if self.scalability_test != bool(config["scalability_test"]):
            if not self.scalability_test:
                _log.info(
                    "The master driver must be restarted with scalability_test set to true in order to run a test.")
            if self.scalability_test:
                _log.info(
                    "A scalability test may not be interrupted. Restarting the driver is required to stop the test.")
        try:
            if self.scalability_test_iterations != int(config["scalability_test_iterations"]) and self.scalability_test:
                _log.info(
                    "A scalability test must be restarted for the scalability_test_iterations setting to take effect.")
        except ValueError:
            # Malformed iteration count only affects the notice above.
            pass
    try:
        driver_scrape_interval = float(config["driver_scrape_interval"])
    except ValueError as e:
        # Bad interval: keep the current schedule untouched.
        _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
        _log.error("Master driver settings unchanged")
        # TODO: set a health status for the agent
        return
    if self.driver_scrape_interval == driver_scrape_interval:
        #Setting unchanged.
        return
    if self.scalability_test and action == "UPDATE":
        _log.info("Running scalability test. Settings may not be changed without restart.")
        return
    self.driver_scrape_interval = driver_scrape_interval
    _log.info("Setting time delta between driver device scrapes to " + str(driver_scrape_interval))
    #Reset all scrape schedules
    self.freed_time_slots = []
    time_slot = 0
    # Hand out sequential time slots to running drivers.
    for driver in self.instances.itervalues():
        driver.update_scrape_schedule(time_slot, self.driver_scrape_interval)
        time_slot += 1
def configure_main(self, config_name, action, contents):
    """Main configuration callback (early master driver variant).

    On "NEW" the startup-critical settings (socket/publish locks and
    scalability-test state) are applied; any bad value there shuts the
    agent down.  On "UPDATE" those settings only produce "restart
    required" notices.  The scrape interval is applied on every call,
    rescheduling all running drivers when it changes.

    :param config_name: config entry name (callback signature)
    :param action: "NEW" on first load, "UPDATE" afterwards
    :param contents: configuration dictionary from the config store
    """
    config = self.default_config.copy()
    config.update(contents)
    if action == "NEW":
        try:
            self.max_open_sockets = config["max_open_sockets"]
            if self.max_open_sockets is not None:
                max_open_sockets = int(self.max_open_sockets)
                configure_socket_lock(max_open_sockets)
                _log.info("maximum concurrently open sockets limited to " +
                          str(max_open_sockets))
            elif self.system_socket_limit is not None:
                # Keep 20% of the descriptor limit free for other use.
                max_open_sockets = int(self.system_socket_limit * 0.8)
                _log.info("maximum concurrently open sockets limited to " +
                          str(max_open_sockets) +
                          " (derived from system limits)")
                configure_socket_lock(max_open_sockets)
            else:
                configure_socket_lock()
                _log.warn(
                    "No limit set on the maximum number of concurrently open sockets. "
                    "Consider setting max_open_sockets if you plan to work with 800+ modbus devices."
                )
            self.max_concurrent_publishes = config[
                'max_concurrent_publishes']
            max_concurrent_publishes = int(self.max_concurrent_publishes)
            if max_concurrent_publishes < 1:
                _log.warn(
                    "No limit set on the maximum number of concurrent driver publishes. "
                    "Consider setting max_concurrent_publishes if you plan to work with many devices."
                )
            else:
                _log.info(
                    "maximum concurrent driver publishes limited to " +
                    str(max_concurrent_publishes))
                configure_publish_lock(max_concurrent_publishes)
            self.scalability_test = bool(config["scalability_test"])
            self.scalability_test_iterations = int(
                config["scalability_test_iterations"])
            if self.scalability_test:
                # Fresh bookkeeping for a new scalability-test run.
                self.waiting_to_finish = set()
                self.test_iterations = 0
                self.test_results = []
                self.current_test_start = None
        except ValueError as e:
            # Startup-critical settings are invalid; abort the agent.
            _log.error(
                "ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}"
                .format(e))
            _log.error("MASTER DRIVER SHUTTING DOWN")
            sys.exit(1)
    else:
        # These settings cannot change at runtime; warn on a mismatch.
        if self.max_open_sockets != config["max_open_sockets"]:
            _log.info(
                "The master driver must be restarted for changes to the max_open_sockets setting to take effect"
            )
        if self.max_concurrent_publishes != config[
                "max_concurrent_publishes"]:
            _log.info(
                "The master driver must be restarted for changes to the max_concurrent_publishes setting to take effect"
            )
        if self.scalability_test != bool(config["scalability_test"]):
            if not self.scalability_test:
                _log.info(
                    "The master driver must be restarted with scalability_test set to true in order to run a test."
                )
            if self.scalability_test:
                _log.info(
                    "A scalability test may not be interrupted. Restarting the driver is required to stop the test."
                )
        try:
            if self.scalability_test_iterations != int(
                    config["scalability_test_iterations"]
            ) and self.scalability_test:
                _log.info(
                    "A scalability test must be restarted for the scalability_test_iterations setting to take effect."
                )
        except ValueError:
            # Malformed iteration count only affects the notice above.
            pass
    try:
        driver_scrape_interval = float(config["driver_scrape_interval"])
    except ValueError as e:
        # Bad interval: leave the current schedule untouched.
        _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
        _log.error("Master driver settings unchanged")
        # TODO: set a health status for the agent
        return
    if self.driver_scrape_interval == driver_scrape_interval:
        #Setting unchanged.
        return
    if self.scalability_test and action == "UPDATE":
        _log.info(
            "Running scalability test. "
            "Settings may not be changed without restart."
        )
        return
    self.driver_scrape_interval = driver_scrape_interval
    _log.info("Setting time delta between driver device scrapes to " +
              str(driver_scrape_interval))
    #Reset all scrape schedules
    self.freed_time_slots = []
    time_slot = 0
    # Hand out sequential time slots to running drivers.
    for driver in self.instances.itervalues():
        driver.update_scrape_schedule(time_slot,
                                      self.driver_scrape_interval)
        time_slot += 1