def __init__(self, site, building, device, subdevice, device_points, subdevice_points):
    """
    Device constructor.

    Builds the record topic for model updates and a token -> RPC point
    topic map covering both device-level and subdevice-level points.

    :param site:
    :param building:
    :param device:
    :param subdevice:
    :param device_points:
    :param subdevice_points:
    """
    self.device = device
    # No subdevice points means the subdevice segment is dropped entirely.
    if not subdevice_points:
        subdevice = ""
    segments = [
        "tnc", site, building, device, subdevice, "update_model"
    ]
    # Empty segments (e.g. blank subdevice) are removed before joining.
    self.record_topic = '/'.join([seg for seg in segments if seg != ""])
    point_topic_map = defaultdict()
    # Subdevice points are addressed underneath the subdevice path.
    for token, point in subdevice_points.items():
        point_topic_map[token] = topics.RPC_DEVICE_PATH(campus=site,
                                                        building=building,
                                                        unit=device,
                                                        path=subdevice,
                                                        point=point)
    # Device-level points use an empty path component.
    for token, point in device_points.items():
        point_topic_map[token] = topics.RPC_DEVICE_PATH(campus=site,
                                                        building=building,
                                                        unit=device,
                                                        path='',
                                                        point=point)
    self.input_data = point_topic_map
def light_agent(config_path, **kwargs):
    """Parses the lighting agent configuration and returns an instance of
    the agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    """
    try:
        config = utils.load_config(config_path)
    except Exception:
        # Fix: `StandardError` was removed in Python 3, so the original
        # handler raised NameError instead of falling back to defaults.
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    base_name = config.get("market_name", "electric")
    # One market per hour of the day: <base_name>_0 .. <base_name>_23.
    market_name = ['_'.join([base_name, str(i)]) for i in range(24)]
    agent_name = config.get('agent_name', "lighting")
    default_occ_lighting_level = config.get('default_dimming_level', 0.)
    min_occupied_lighting_level = config.get("min_occupied_lighting_level", 70.0)
    heartbeat_period = config.get('heartbeat_period', 3600)
    power_absnom = config.get('Pabsnom', 0.)
    non_responsive = config.get('non_responsive', False)
    # Topic on which the occupancy-schedule device publishes all its points.
    schedule_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                          building=config.get("building", ""),
                                          unit=config.get("schedule_device", ""),
                                          path=config.get("schedule_path", ""),
                                          point="all")
    schedule_point = config.get("schedule_point", "SupplyFanStatus")
    # Required key: KeyError here is intentional when the setpoint is missing.
    lighting_setpoint = config["lighting_level_stpt"]
    # RPC path used to actuate the lighting level setpoint.
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get("campus", ""),
                                           building=config.get("building", ""),
                                           unit=config.get("device", ""),
                                           path=config.get("path", ""),
                                           point=lighting_setpoint)
    actuator = config.get("actuator", "platform.actuator")
    verbose_logging = config.get('verbose_logging', True)
    return LightAgent(market_name, agent_name, min_occupied_lighting_level,
                      default_occ_lighting_level, power_absnom, non_responsive,
                      verbose_logging, base_rpc_path, schedule_topic,
                      schedule_point, actuator, heartbeat_period, **kwargs)
def send_autocorrect_command(self, point, value):
    """Send autocorrect command to the AHU/RTU to improve operational
    efficiency.

    Writes ``value`` to ``point`` on every device in ``self.publish_list``
    via the actuator agent; failures on one device do not stop the rest.
    """
    # Template topic: unit/point are filled in per device below.
    topic_template = topics.RPC_DEVICE_PATH(campus=self.campus,
                                            building=self.building,
                                            unit=None,
                                            path="",
                                            point=None)
    if not self.actuation_mode:
        # Actuation disabled: log what would have been written and bail.
        _log.debug("Actuation disabled: autocorrect point: {} -- value: {}".format(point, value))
        return
    for target in self.publish_list:
        target_topic = topic_template(unit=target, point=point)
        try:
            _log.info("Set point {} to {}".format(target_topic, value))
            self.actuation_vip.call("platform.actuator",
                                    "set_point",
                                    "rcx",
                                    target_topic,
                                    value).get(timeout=15)
        except RemoteError as err:
            # Best effort: warn and keep actuating the remaining devices.
            _log.warning("Failed to set {} to {}: {}".format(target_topic, value, str(err)))
            continue
def __init__(self, campus, building, device, sensor_conditions):
    """
    Sensor constructor.

    Builds the base device RPC topic and initializes per-sensor state
    from the configured sensor conditions.

    :param campus:
    :param building:
    :param device:
    :param sensor_conditions:
    """
    self.diagnostic_parameters = defaultdict()
    # Base RPC topic for the device; point is filled in later per sensor.
    device_path = topics.RPC_DEVICE_PATH(campus=campus,
                                         building=building,
                                         unit=device,
                                         path='',
                                         point=None)
    self.device_topic = device_path
    self.report_topic = {}
    self.sensors = {}
    self.evaluations = {}
    # Each configured condition registers one sensor against the device topic.
    for condition in sensor_conditions:
        self.init_sensors(device_path, condition)
def driven_agent(config_path, **kwargs):
    """Reads agent configuration and converts it to run driven agent.

    Factory: loads the config, builds device/subdevice topic maps, constructs
    the driven application instance, then defines and returns a DrivenAgent
    whose methods close over this function's local state.

    :param config_path: path to the agent configuration file
    :param kwargs: Any driver specific parameters
    :returns: a DrivenAgent instance (renamed DrivenLoggerAgent)
    """
    config = utils.load_config(config_path)
    arguments = config.get('arguments')
    # ACTIVE mode enables actuation in _process_results; anything else is passive.
    mode = True if config.get('mode', 'PASSIVE') == 'ACTIVE' else False
    multiple_devices = isinstance(config['device']['unit'], dict)
    campus_building_config = config['device']
    analysis_name = campus_building_config.get('analysis_name',
                                               'analysis_name')
    analysis_dict = {'analysis_name': analysis_name}
    arguments.update(analysis_dict)
    agent_id = config.get('agentid', None)
    # Scheduling/actuation identity falls back to the analysis name.
    actuator_id = agent_id if agent_id is not None else analysis_name
    campus_building = dict(
        (key, campus_building_config[key]) for key in ['campus', 'building'])
    analysis = deepcopy(campus_building)
    analysis.update(analysis_dict)
    device_config = config['device']['unit']
    command_devices = device_config.keys()
    device_topic_dict = {}
    device_topic_list = []
    subdevices_list = []
    vip_destination = config.get('vip_destination', None)
    from_file = config.get('from_file')
    # Map every device (and subdevice, when configured) publish topic to a
    # short tag used to track which data sets have arrived.
    for device_name in device_config:
        device_topic = topics.DEVICES_VALUE(
            campus=campus_building.get('campus'),
            building=campus_building.get('building'),
            unit=device_name,
            path='',
            point='all')
        device_topic_dict.update({device_topic: device_name})
        device_topic_list.append(device_name)
        if multiple_devices:
            for subdevice in device_config[device_name]['subdevices']:
                subdevices_list.append(subdevice)
                subdevice_topic = topics.DEVICES_VALUE(
                    campus=campus_building.get('campus'),
                    building=campus_building.get('building'),
                    unit=device_name,
                    path=subdevice,
                    point='all')
                subdevice_name = device_name + "/" + subdevice
                device_topic_dict.update({subdevice_topic: subdevice_name})
                device_topic_list.append(subdevice_name)
    # Template RPC path; unit/point get filled in at actuation time.
    base_actuator_path = topics.RPC_DEVICE_PATH(
        campus=campus_building.get('campus', ''),
        building=campus_building.get('building', ''),
        unit=None,
        path='',
        point=None)
    # Lock duration is in minutes (see td(minutes=...) in actuator_request).
    device_lock_duration = config.get('device_lock_duration', 1.25)
    conversion_map = config.get('conversion_map')
    map_names = {}
    # Conversion-map keys are lower-cased so lookups are case-insensitive.
    for key, value in conversion_map.items():
        map_names[key.lower() if isinstance(key, str) else key] = value
    application = config.get('application')
    validation_error = ''
    if not application:
        validation_error = 'Invalid application specified in config\n'
    if validation_error:
        _log.error(validation_error)
        raise ValueError(validation_error)
    config.update(config.get('arguments'))
    converter = ConversionMapper()
    output_file_prefix = config.get('output_file')
    #unittype_map = config.get('unittype_map', None)
    #assert unittype_map
    klass = _get_class(application)
    # This instances is used to call the applications run method when
    # data comes in on the message bus. It is constructed here
    # so that_process_results each time run is called the application
    # can keep it state.
    app_instance = klass(**arguments)

    class DrivenAgent(Agent):
        """Agent listens to message bus device and runs when data is
        published.
        """

        def __init__(self, **kwargs):
            """
            Initializes agent

            :param kwargs: Any driver specific parameters"""
            super(DrivenAgent, self).__init__(**kwargs)
            # master is where we copy from to get a poppable list of
            # subdevices that should be present before we run the analysis.
            self._master_devices = device_topic_list
            self._needed_devices = []
            self._device_values = {}
            self._initialize_devices()
            self.received_input_datetime = None
            self._kwargs = kwargs
            self._header_written = False
            self.file_creation_set = set()
            # Default to the local RPC endpoint; a remote agent is used
            # instead when a vip_destination is configured.
            self.actuation_vip = self.vip.rpc
            if vip_destination:
                self.agent = self.setup_remote_actuation(vip_destination)
                self.actuation_vip = self.agent.vip.rpc

        def _initialize_devices(self):
            # Reset the waiting list and accumulated values for a new cycle.
            self._needed_devices = deepcopy(self._master_devices)
            self._device_values = {}

        def setup_remote_actuation(self, vip_destination):
            # Spin up a secondary agent connected to the remote platform
            # and wait (up to 15s) for it to come online.
            event = gevent.event.Event()
            agent = Agent(address=vip_destination)
            gevent.spawn(agent.core.run, event)
            event.wait(timeout=15)
            return agent

        @Core.receiver('onstart')
        def starup(self, sender, **kwargs):
            """
            Starts up the agent and subscribes to device topics
            based on agent configuration.

            NOTE(review): method name looks like a typo for "startup";
            harmless since it is only invoked via the onstart receiver.

            :param sender:
            :param kwargs: Any driver specific parameters
            :type sender: str"""
            self._initialize_devices()
            for device_topic in device_topic_dict:
                _log.info('Subscribing to ' + device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=device_topic,
                                          callback=self.on_analysis_message)

        def _should_run_now(self):
            """
            Checks if messages from all the devices are received
                before running application

            :returns: True or False based on received messages.
            :rtype: boolean"""
            # Assumes the unit/all values will have values.
            if not len(self._device_values.keys()) > 0:
                return False
            return not len(self._needed_devices) > 0

        def on_analysis_message(self, peer, sender, bus, topic, headers,
                                message):
            """
            Subscribe to device data and assemble data set to pass
                to applications.

            :param peer:
            :param sender: device name
            :param bus:
            :param topic: device path topic
            :param headers: message headers
            :param message: message containing points and values dict
                    from device with point type
            :type peer: str
            :type sender: str
            :type bus: str
            :type topic: str
            :type headers: dict
            :type message: dict"""
            device_data = message[0]
            if isinstance(device_data, list):
                device_data = device_data[0]

            def aggregate_subdevice(device_data):
                # Tag each point with its device ("point&device") and fold it
                # into the shared value set; returns False when this device's
                # data was not expected (already received this cycle).
                tagged_device_data = {}
                device_tag = device_topic_dict[topic]
                if device_tag not in self._needed_devices:
                    return False
                for key, value in device_data.items():
                    device_data_tag = '&'.join([key, device_tag])
                    tagged_device_data[device_data_tag] = value
                self._device_values.update(tagged_device_data)
                self._needed_devices.remove(device_tag)
                return True

            device_needed = aggregate_subdevice(device_data)
            if not device_needed:
                # Duplicate publish implies a missed cycle: start over.
                _log.error("Warning device values already present, "
                           "reinitializing")
                self._initialize_devices()
            if self._should_run_now():
                field_names = {}
                for key, value in self._device_values.items():
                    field_names[key.lower() if isinstance(key, str
                                                          ) else key] = value
                if not converter.initialized and conversion_map is not None:
                    converter.setup_conversion_map(map_names, field_names)
                if from_file:
                    # Replaying from file: trust the publish's Date header.
                    _timestamp = parse(headers.get('Date'))
                    self.received_input_datetime = _timestamp
                else:
                    # NOTE(review): mixes naive local time (dt.now) for the
                    # app run with naive UTC (dt.utcnow) for the stored
                    # receive time — confirm this asymmetry is intended.
                    _timestamp = dt.now()
                    self.received_input_datetime = dt.utcnow()
                device_data = converter.process_row(field_names)
                results = app_instance.run(_timestamp, device_data)
                # results = app_instance.run(
                # dateutil.parser.parse(self._subdevice_values['Timestamp'],
                # fuzzy=True), self._subdevice_values)
                self._process_results(results)
                self._initialize_devices()
            else:
                _log.info("Still need {} before running.".format(
                    self._needed_devices))

        def _process_results(self, results):
            """
            Runs driven application with converted data. Calls appropriate
                methods to process commands, log and table_data in results.

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven"""
            _log.info('Processing Results!')
            # Starts True so actuation is skipped unless a schedule request
            # below succeeds (actuator_request returns False on success).
            actuator_error = True
            if mode:
                if results.devices:
                    actuator_error = self.actuator_request(results.devices)
                elif results.commands:
                    actuator_error = self.actuator_request(command_devices)
                if not actuator_error:
                    results = self.actuator_set(results)
            for value in results.log_messages:
                _log.info("LOG: {}".format(value))
            for key, value in results.table_output.items():
                _log.info("TABLE: {}->{}".format(key, value))
            if output_file_prefix is not None:
                results = self.create_file_output(results)
            if len(results.table_output.keys()):
                results = self.publish_analysis_results(results)
            return results

        def publish_analysis_results(self, results):
            """
            Publish table_data in analysis results to the message bus for
                capture by the data historian.

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven"""
            headers = {
                headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                headers_mod.DATE: str(self.received_input_datetime),
            }
            for app, analysis_table in results.table_output.items():
                try:
                    # Table keys are "name&timestamp"; fall back to the
                    # receive time when no timestamp is embedded.
                    name_timestamp = app.split('&')
                    _name = name_timestamp[0]
                    timestamp = name_timestamp[1]
                except:
                    _name = app
                    timestamp = str(self.received_input_datetime)
                headers = {
                    headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                    headers_mod.DATE: timestamp,
                }
                # The keys in this publish should look like the following
                # with the values being a dictionary of points off of these
                # base topics
                #
                # Schedule-Reset ACCx/data/interior_ahu/vav1600e
                # Schedule-Reset ACCx/data/interior_ahu/vav1534
                to_publish = defaultdict(list)
                for entry in analysis_table:
                    for key, value in entry.items():
                        for _device in command_devices:
                            analysis['unit'] = _device
                            analysis_topic = topics.ANALYSIS_VALUE(
                                point=key, **analysis)
                            datatype = 'float'
                            if isinstance(value, int):
                                datatype = 'int'
                            kbase = key[key.rfind('/') + 1:]
                            topic_without_point = analysis_topic[:
                                                                 analysis_topic
                                                                 .rfind('/')]
                            # Each base topic carries a [values, metadata]
                            # pair in the historian "all" publish format.
                            if not to_publish[topic_without_point]:
                                to_publish[topic_without_point] = [{}, {}]
                            to_publish[topic_without_point][0][kbase] = value
                            to_publish[topic_without_point][1][kbase] = {
                                'tz': 'US/Pacific',
                                'type': datatype,
                                'units': 'float',
                            }
                for equipment, _analysis in to_publish.items():
                    self.vip.pubsub.publish('pubsub', equipment, headers,
                                            _analysis)
                to_publish.clear()
            return results

        def create_file_output(self, results):
            """
            Create results/data files for testing and algorithm validation
            if table data is present in the results.

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven"""
            for key, value in results.table_output.items():
                name_timestamp = key.split('&')
                _name = name_timestamp[0]
                timestamp = name_timestamp[1]
                file_name = output_file_prefix + "-" + _name + ".csv"
                # First time a file is seen this run, (re)write the header.
                if file_name not in self.file_creation_set:
                    self._header_written = False
                self.file_creation_set.update([file_name])
                for row in value:
                    with open(file_name, 'a+') as file_to_write:
                        row.update({'Timestamp': timestamp})
                        _keys = row.keys()
                        file_output = csv.DictWriter(file_to_write, _keys)
                        if not self._header_written:
                            file_output.writeheader()
                            self._header_written = True
                        file_output.writerow(row)
                    # NOTE(review): redundant — the with-block already
                    # closed the file; close() on a closed file is a no-op.
                    file_to_write.close()
            return results

        def actuator_request(self, command_equip):
            """
            Calls the actuator's request_new_schedule method to get
                    device schedule

            :param command_equip: contains the names of the devices
                that will be scheduled with the ActuatorAgent.
            :type: dict or list
            :returns: Return result from request_new_schedule method
                and True or False for error in scheduling device.
            :rtype: boolean
            :Return Values:

                request_error = True/False

            warning:: Calling without previously scheduling a device and not
                within the time allotted will raise a LockError"""
            _now = dt.now()
            str_now = _now.strftime(DATE_FORMAT)
            _end = _now + td(minutes=device_lock_duration)
            str_end = _end.strftime(DATE_FORMAT)
            for device in command_equip:
                actuation_device = base_actuator_path(unit=device, point='')
                schedule_request = [[actuation_device, str_now, str_end]]
                try:
                    _log.info('Make Request {} for start {} and end {}'.format(
                        actuation_device, str_now, str_end))
                    result = self.actuation_vip.call(
                        'platform.actuator', 'request_new_schedule',
                        actuator_id, actuation_device, 'HIGH',
                        schedule_request).get(timeout=15)
                except RemoteError as ex:
                    _log.warning(
                        "Failed to schedule device {} (RemoteError): {}".
                        format(device, str(ex)))
                    request_error = True
                # NOTE(review): when the very first call raises RemoteError,
                # `result` is unbound here (NameError) — a `continue` in the
                # except clause appears to be missing. Only the last device's
                # outcome is returned; earlier results are overwritten.
                if result['result'] == 'FAILURE':
                    if result['info'] == 'TASK_ID_ALREADY_EXISTS':
                        _log.info('Task to schedule device already exists ' +
                                  device)
                        request_error = False
                    else:
                        _log.warn('Failed to schedule device (unavailable) ' +
                                  device)
                        request_error = True
                else:
                    request_error = False
            return request_error

        def actuator_set(self, results):
            """
            Calls the actuator's set_point method to set point on device

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven"""
            def make_actuator_set(device, point_value_dict):
                # Best-effort: a failed set on one point does not stop the
                # remaining points for this device.
                for point, new_value in point_value_dict.items():
                    point_path = base_actuator_path(unit=device, point=point)
                    try:
                        _log.info('Set point {} to {}'.format(
                            point_path, new_value))
                        result = self.actuation_vip.call(
                            'platform.actuator', 'set_point', actuator_id,
                            point_path, new_value).get(timeout=15)
                    except RemoteError as ex:
                        _log.warning("Failed to set {} to {}: {}".format(
                            point_path, new_value, str(ex)))
                        continue
            # Per-device point dicts first, then broadcast commands to every
            # configured device.
            for device, point_value_dict in results.devices.items():
                make_actuator_set(device, point_value_dict)
            for device in command_devices:
                make_actuator_set(device, results.commands)
            return results

    DrivenAgent.__name__ = 'DrivenLoggerAgent'
    return DrivenAgent(**kwargs)
def driven_agent(config_path, **kwargs):
    """
    Reads agent configuration and converts it to run driven agent.

    Factory: loads the config, builds device/subdevice topic maps, creates
    the driven application instance, and returns a DrivenAgent whose methods
    close over this function's local state (converter, topics, thresholds).

    :param config_path:
    :param kwargs:
    :return: a DrivenAgent instance (renamed DrivenLoggerAgent)
    """
    config = utils.load_config(config_path)
    arguments = config.get("arguments")
    # ACTIVE enables actuation in process_results; anything else is passive.
    actuation_mode = True if config.get("actuation_mode", "PASSIVE") == "ACTIVE" else False
    actuator_lock_required = config.get("require_actuator_lock", False)
    campus = config["device"].get("campus", "")
    building = config["device"].get("building", "")
    analysis_name = config.get("analysis_name", "analysis_name")
    publish_base = "/".join([analysis_name, campus, building])
    application_name = config.get("pretty_name", analysis_name)
    arguments.update({"analysis_name": analysis_name})
    device_config = config["device"]["unit"]
    multiple_devices = isinstance(device_config, dict)
    command_devices = list(device_config.keys())
    device_topic_dict = {}
    device_topic_list = []
    subdevices_list = []
    # Expected publish interval in seconds; used by find_reinitialize_time.
    interval = config.get("interval", 60)
    vip_destination = config.get("vip_destination", None)
    timezone = config.get("local_timezone", "US/Pacific")
    # Map every device (and subdevice, when configured) publish topic to a
    # short tag used to track which data sets have arrived.
    for device_name in device_config:
        device_topic = topics.DEVICES_VALUE(campus=campus,
                                            building=building,
                                            unit=device_name,
                                            path="",
                                            point="all")
        device_topic_dict.update({device_topic: device_name})
        device_topic_list.append(device_name)
        if multiple_devices:
            for subdevice in device_config[device_name]["subdevices"]:
                subdevices_list.append(subdevice)
                subdevice_topic = topics.DEVICES_VALUE(campus=campus,
                                                       building=building,
                                                       unit=device_name,
                                                       path=subdevice,
                                                       point="all")
                subdevice_name = device_name + "/" + subdevice
                device_topic_dict.update({subdevice_topic: subdevice_name})
                device_topic_list.append(subdevice_name)
    # Template RPC path; unit/point get filled in at actuation time.
    base_actuator_path = topics.RPC_DEVICE_PATH(campus=campus,
                                                building=building,
                                                unit=None,
                                                path="",
                                                point=None)
    # Lock duration is in minutes (see td(minutes=...) in actuator_request).
    device_lock_duration = config.get("device_lock_duration", 10.0)
    conversion_map = config.get("conversion_map")
    # Configured as a percentage; converted to a 0..1 fraction here.
    missing_data_threshold = config.get("missing_data_threshold", 15.0) / 100.0
    map_names = {}
    # Conversion-map keys are lower-cased so lookups are case-insensitive.
    for key, value in conversion_map.items():
        map_names[key.lower() if isinstance(key, str) else key] = value
    application = config.get("application")
    validation_error = ""
    if not application:
        validation_error = "Invalid application specified in config\n"
    if validation_error:
        _log.error(validation_error)
        raise ValueError(validation_error)
    converter = ConversionMapper()
    # output_file_prefix = config.get("output_file")
    klass = _get_class(application)
    # This instances is used to call the applications run method when
    # data comes in on the message bus. It is constructed here
    # so that_process_results each time run is called the application
    # can keep it state.
    # points = arguments.pop("point_mapping")
    app_instance = klass(**arguments)

    class DrivenAgent(Agent):
        """Agent listens to message bus device and runs when data is
        published.
        """

        def __init__(self, **kwargs):
            """
            Initializes agent

            :param kwargs: Any driver specific parameters"""
            super(DrivenAgent, self).__init__(**kwargs)
            # master is where we copy from to get a poppable list of
            # subdevices that should be present before we run the analysis.
            self.master_devices = device_topic_list
            self.needed_devices = []
            self.device_values = self.master_devices[:]
            self.initialize_devices()
            self.received_input_datetime = None
            self._header_written = False
            self.file_creation_set = set()
            # Default to the local RPC endpoint; a remote agent is used
            # instead when a vip_destination is configured.
            self.actuation_vip = self.vip.rpc
            self.initialize_time = None
            if vip_destination:
                self.agent = setup_remote_actuation(vip_destination)
                self.actuation_vip = self.agent.vip.rpc

        def initialize_devices(self):
            # Reset the waiting list and accumulated values for a new cycle.
            self.needed_devices = self.master_devices[:]
            self.device_values = {}

        @Core.receiver("onstart")
        def startup(self, sender, **kwargs):
            """
            Starts up the agent and subscribes to device topics
            based on agent configuration.

            :param sender:
            :param kwargs: Any driver specific parameters
            :type sender: str
            """
            for device in device_topic_dict:
                _log.info("Subscribing to " + device)
                self.vip.pubsub.subscribe(peer="pubsub",
                                          prefix=device,
                                          callback=self.on_analysis_message)

        def _should_run_now(self):
            """
            Checks if messages from all the devices are received
                before running application

            :returns: True or False based on received messages.
            :rtype: boolean
            """
            # Assumes the unit/all values will have values.
            if not self.device_values.keys():
                return False
            return not self.needed_devices

        def aggregate_subdevice(self, device_data, topic):
            """
            Aggregates device and subdevice data for application

            Tags each point with its device ("point&device") and folds it
            into the shared value set.

            :returns: True or False based on if device data is needed.
            :rtype: boolean"""
            tagged_device_data = {}
            device_tag = device_topic_dict[topic]
            _log.debug("Current device to aggregate: {}".format(device_tag))
            if device_tag not in self.needed_devices:
                return False
            for key, value in device_data.items():
                device_data_tag = "&".join([key, device_tag])
                tagged_device_data[device_data_tag] = value
            self.device_values.update(tagged_device_data)
            self.needed_devices.remove(device_tag)
            return True

        def on_analysis_message(self, peer, sender, bus, topic, headers, message):
            """
            Subscribe to device data and assemble data set to pass
                to applications.

            :param peer:
            :param sender: device name
            :param bus:
            :param topic: device path topic
            :param headers: message headers
            :param message: message containing points and values dict
                    from device with point type
            :type peer: str
            :type sender: str
            :type bus: str
            :type topic: str
            :type headers: dict
            :type message: dict
            """
            timestamp = parse(headers.get("Date"))
            missing_but_running = False
            # With multiple devices, wait until the next scrape-interval
            # boundary before accepting data, so a cycle starts aligned.
            if self.initialize_time is None and len(self.master_devices) > 1:
                self.initialize_time = find_reinitialize_time(timestamp)
            if self.initialize_time is not None and timestamp < self.initialize_time:
                if len(self.master_devices) > 1:
                    return
            to_zone = dateutil.tz.gettz(timezone)
            timestamp = timestamp.astimezone(to_zone)
            self.received_input_datetime = timestamp
            _log.debug("Current time of publish: {}".format(timestamp))
            device_data = message[0]
            if isinstance(device_data, list):
                device_data = device_data[0]
            device_needed = self.aggregate_subdevice(device_data, topic)
            if not device_needed:
                # A duplicate publish means some device missed a cycle; if
                # too much data is missing, reinitialize and start over,
                # otherwise run the diagnostic with what is available.
                fraction_missing = float(len(self.needed_devices)) / len(
                    self.master_devices)
                if fraction_missing > missing_data_threshold:
                    _log.error(
                        "Device values already present, reinitializing at publish: {}"
                        .format(timestamp))
                    self.initialize_devices()
                    device_needed = self.aggregate_subdevice(
                        device_data, topic)
                    return
                missing_but_running = True
                _log.warning(
                    "Device already present.  Using available data for diagnostic.: {}"
                    .format(timestamp))
                _log.warning(
                    "Device already present - topic: {}".format(topic))
                _log.warning("All devices: {}".format(self.master_devices))
                _log.warning("Needed devices: {}".format(self.needed_devices))
            if self._should_run_now() or missing_but_running:
                field_names = {}
                for point, data in self.device_values.items():
                    field_names[point] = data
                if not converter.initialized and conversion_map is not None:
                    converter.setup_conversion_map(map_names, field_names)
                device_data = converter.process_row(field_names)
                results = app_instance.run(timestamp, device_data)
                self.process_results(results)
                self.initialize_devices()
                if missing_but_running:
                    # Re-seed the new cycle with the publish that triggered
                    # this partial run.
                    device_needed = self.aggregate_subdevice(
                        device_data, topic)
            else:
                _log.info("Still need {} before running.".format(
                    self.needed_devices))

        def process_results(self, results):
            """
            Runs driven application with converted data. Calls appropriate
                methods to process commands, log and table_data in results.

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven
            """
            _log.info("Processing Results!")
            actuator_error = False
            if actuation_mode:
                # Device locks are only requested when configured; otherwise
                # set_point calls go out without a schedule.
                if results.devices and actuator_lock_required:
                    actuator_error = self.actuator_request(results.devices)
                elif results.commands and actuator_lock_required:
                    actuator_error = self.actuator_request(command_devices)
                if not actuator_error:
                    results = self.actuator_set(results)
            for log in results.log_messages:
                _log.info("LOG: {}".format(log))
            for key, value in results.table_output.items():
                _log.info("TABLE: {}->{}".format(key, value))
            #if output_file_prefix is not None:
            #    results = self.create_file_output(results)
            if len(results.table_output.keys()):
                results = self.publish_analysis_results(results)
            return results

        def publish_analysis_results(self, results):
            """
            Publish table_data in analysis results to the message bus for
                capture by the data historian.

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven
            """
            to_publish = defaultdict(dict)
            for app, analysis_table in list(results.table_output.items()):
                try:
                    # Table keys are "name&timestamp"; fall back to the
                    # receive time when no timestamp is embedded.
                    name_timestamp = app.split("&")
                    timestamp = name_timestamp[1]
                except:
                    timestamp = self.received_input_datetime
                timestamp = format_timestamp(timestamp)
                headers = {
                    headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                    headers_mod.DATE: timestamp,
                }
                for entry in analysis_table:
                    for point, result in list(entry.items()):
                        for device in command_devices:
                            publish_topic = "/".join(
                                [publish_base, device, point])
                            analysis_topic = topics.RECORD(
                                subtopic=publish_topic)
                            # NOTE(review): `value` is not defined in this
                            # scope (the enclosing loops bind app/entry/
                            # point/result) — this line looks like it would
                            # raise NameError, and `datatype` is never used.
                            datatype = str(type(value))
                            to_publish[analysis_topic] = result
                for result_topic, result in to_publish.items():
                    self.vip.pubsub.publish("pubsub", result_topic, headers,
                                            result)
                to_publish.clear()
            return results

        def create_file_output(self, results):
            """
            Create results/data files for testing and algorithm validation
            if table data is present in the results.

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven"""
            # tag makes each written file name unique per row batch.
            tag = 0
            for key, value in list(results.table_output.items()):
                for row in value:
                    name_timestamp = key.split("&")
                    _name = name_timestamp[0]
                    timestamp = name_timestamp[1]
                    file_name = _name + str(tag) + ".csv"
                    tag += 1
                    if file_name not in self.file_creation_set:
                        self._header_written = False
                    self.file_creation_set.update([file_name])
                    with open(file_name, "a+") as file_to_write:
                        row.update({"Timestamp": timestamp})
                        _keys = list(row.keys())
                        file_output = csv.DictWriter(file_to_write, _keys)
                        if not self._header_written:
                            file_output.writeheader()
                            self._header_written = True
                        file_output.writerow(row)
                        # NOTE(review): redundant — the with-block closes
                        # the file on exit.
                        file_to_write.close()
            return results

        def actuator_request(self, command_equip):
            """
            Calls the actuator"s request_new_schedule method to get
                    device schedule

            :param command_equip: contains the names of the devices
                that will be scheduled with the ActuatorAgent.
            :type: dict or list
            :returns: Return result from request_new_schedule method
                and True or False for error in scheduling device.
            :rtype: boolean
            :Return Values:

                request_error = True/False

            warning:: Calling without previously scheduling a device and not
                within the time allotted will raise a LockError"""
            _now = get_aware_utc_now()
            str_now = format_timestamp(_now)
            _end = _now + td(minutes=device_lock_duration)
            str_end = format_timestamp(_end)
            for device in command_equip:
                actuation_device = base_actuator_path(unit=device, point="")
                schedule_request = [[actuation_device, str_now, str_end]]
                try:
                    _log.info("Make Request {} for start {} and end {}".format(
                        actuation_device, str_now, str_end))
                    result = self.actuation_vip.call(
                        "platform.actuator", "request_new_schedule", "rcx",
                        actuation_device, "HIGH",
                        schedule_request).get(timeout=15)
                except RemoteError as ex:
                    _log.warning(
                        "Failed to schedule device {} (RemoteError): {}".
                        format(device, str(ex)))
                    request_error = True
                # NOTE(review): when the very first call raises RemoteError,
                # `result` is unbound here (NameError) — a `continue` in the
                # except clause appears to be missing. Only the last device's
                # outcome is returned; earlier results are overwritten.
                if result["result"] == "FAILURE":
                    if result["info"] == "TASK_ID_ALREADY_EXISTS":
                        _log.info("Task to schedule device already exists " +
                                  device)
                        request_error = False
                    else:
                        _log.warning(
                            "Failed to schedule device (unavailable) " +
                            device)
                        request_error = True
                else:
                    request_error = False
            return request_error

        def actuator_set(self, results):
            """
            Calls the actuator"s set_point method to set point on device

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven"""
            def make_actuator_set(device, point_value_dict):
                # Best-effort: a failed set on one point does not stop the
                # remaining points for this device.
                for point, new_value in point_value_dict.items():
                    point_path = base_actuator_path(unit=device, point=point)
                    try:
                        _log.info("Set point {} to {}".format(
                            point_path, new_value))
                        result = self.actuation_vip.call(
                            "platform.actuator", "set_point", "rcx",
                            point_path, new_value).get(timeout=15)
                    except RemoteError as ex:
                        _log.warning("Failed to set {} to {}: {}".format(
                            point_path, new_value, str(ex)))
                        continue
            # Per-device point dicts first, then broadcast commands to every
            # configured device.
            for device, point_value_dict in results.devices.items():
                make_actuator_set(device, point_value_dict)
            for device in command_devices:
                make_actuator_set(device, results.commands)
            return results

    def find_reinitialize_time(current_time):
        # Round current_time up to the next multiple of `interval` seconds
        # past midnight — the start of the next scrape interval.
        midnight = current_time.replace(hour=0, minute=0, second=0,
                                        microsecond=0)
        seconds_from_midnight = (current_time - midnight).total_seconds()
        offset = seconds_from_midnight % interval
        previous_in_seconds = seconds_from_midnight - offset
        next_in_seconds = previous_in_seconds + interval
        from_midnight = td(seconds=next_in_seconds)
        _log.debug("Start of next scrape interval: {}".format(midnight +
                                                              from_midnight))
        return midnight + from_midnight

    def setup_remote_actuation(vip_destination):
        # Spin up a secondary agent connected to the remote platform and
        # wait (up to 15s) for it to come online.
        event = gevent.event.Event()
        agent = Agent(address=vip_destination)
        gevent.spawn(agent.core.run, event)
        event.wait(timeout=15)
        return agent

    DrivenAgent.__name__ = "DrivenLoggerAgent"
    return DrivenAgent(**kwargs)
def vav_agent(config_path, **kwargs):
    """Parse the VAV market-agent configuration and return an instance of
    the agent created using that configuration.

    (Docstring corrected: the original said "Electric Meter Agent" /
    "MarketServiceAgent", but this factory builds a VAVAgent.)

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: VAV market agent
    :rtype: VAVAgent
    """
    try:
        config = utils.load_config(config_path)
    except StandardError:
        config = {}

    if not config:
        _log.info("Using defaults for starting configuration.")

    market_name = config.get('market_name')
    # Demand-curve polynomial coefficients (x*: flow model, c*: cost model).
    x0 = config.get('x0', 0)
    x1 = config.get('x1', 0)
    x2 = config.get('x2', 0)
    x3 = config.get('x3', 0)
    x4 = config.get('x4', 0)
    c0 = config.get('c0', 0)
    c1 = config.get('c1', 0)
    c2 = config.get('c2', 0)
    c3 = config.get('c3', 0)
    c4 = config.get('c4', 0)
    # Temperature and airflow operating bounds.
    tMinAdj = config.get('tMin', 0)
    tMaxAdj = config.get('tMax', 0)
    mDotMin = config.get('mDotMin', 0)
    mDotMax = config.get('mDotMax', 0)
    sim_flag = config.get('sim_flag', False)
    tIn = config.get('tIn', 0)
    nonResponsive = config.get('nonResponsive', False)
    agent_name = config.get('agent_name')
    actuator = config.get('actuator', 'platform.actuator')
    mode = config.get('mode')
    device_points = config.get("device_points")
    parent_device_points = config.get("parent_device_points")
    setpoint = config.get('setpoint')
    activate_topic = "/".join([config.get("building", agent_name), "actuate"])
    setpoint_mode = config.get("setpoint_mode", 0)
    # Parent device (e.g. AHU) publishes on its own topic ...
    parent_device_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                               building=config.get("building", ""),
                                               unit=config.get("parent_device", ""),
                                               path="",
                                               point="all")
    # ... while the VAV data/actuation topics nest under the parent device
    # (unit=parent_device, path=device).
    device_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                        building=config.get("building", ""),
                                        unit=config.get("parent_device", ""),
                                        path=config.get("device", ""),
                                        point="all")
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get("campus", ""),
                                           building=config.get("building", ""),
                                           unit=config.get("parent_device", ""),
                                           path=config.get("device", ""),
                                           point=setpoint)
    verbose_logging = config.get('verbose_logging', True)
    return VAVAgent(market_name, agent_name, x0, x1, x2, x3, x4,
                    c0, c1, c2, c3, c4,
                    tMinAdj, tMaxAdj, mDotMin, mDotMax, tIn,
                    nonResponsive, verbose_logging,
                    device_topic, device_points,
                    parent_device_topic, parent_device_points,
                    base_rpc_path, activate_topic, actuator, mode,
                    setpoint_mode, sim_flag, **kwargs)
def rtu_agent(config_path, **kwargs):
    """Parse the RTU market-agent configuration and return an instance of
    the agent created using that configuration.

    (Docstring corrected: the original said "Electric Meter Agent" /
    "MarketServiceAgent", but this factory builds an RTUAgent.)

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: RTU market agent
    :rtype: RTUAgent
    """
    try:
        config = utils.load_config(config_path)
    except StandardError:
        config = {}

    if not config:
        _log.info("Using defaults for starting configuration.")

    # One market per hour of the day: "<base>_0" ... "<base>_23".
    base_name = config.get('market_name', 'electric')
    market_name = []
    for i in range(24):
        market_name.append('_'.join([base_name, str(i)]))

    # RTU model coefficients.
    c1 = config.get('c1')
    c2 = config.get('c2')
    c3 = config.get('c3')
    c = config.get('c')
    heartbeat_period = config.get('heartbeat_period', 300)
    hvac_avail = config.get("occupancy_schedule")
    # Occupied temperature bounds; tMinUnoc is the unoccupied heating floor.
    tMinAdj = config.get('tMin', 0)
    tMaxAdj = config.get('tMax', 0)
    sim_flag = config.get('sim_flag', False)
    tIn = config.get('tIn', 0)
    Qrate = config.get("Qrate", 0)
    agent_name = config.get('agent_name')
    actuator = config.get('actuator', 'platform.actuator')
    mode = config.get('mode')
    device_points = config.get("device_points")
    setpoint = config.get('setpoint')
    activate_topic = "/".join([config.get("building", agent_name), "actuate"])
    setpoint_mode = config.get("setpoint_mode", 0)
    price_multiplier = config.get('price_multiplier', 2)
    default_min_price = config.get('default_min_price', 0.01)
    default_max_price = config.get('default_max_price', 0.1)
    tMinUnoc = config.get('tMinUnoc', 66.0)
    device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                        building=config.get('building', ''),
                                        unit=config.get('device', ''),
                                        path='',
                                        point='all')
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get("campus", ""),
                                           building=config.get("building", ""),
                                           unit=config.get("device", ""),
                                           path="",
                                           point=setpoint)
    verbose_logging = config.get('verbose_logging', True)
    return RTUAgent(market_name, agent_name, c1, c2, c3, c,
                    tMinAdj, tMaxAdj, tMinUnoc, tIn, Qrate,
                    verbose_logging, device_topic, hvac_avail,
                    device_points, base_rpc_path, activate_topic,
                    actuator, mode, setpoint_mode, sim_flag,
                    heartbeat_period, price_multiplier,
                    default_min_price, default_max_price, **kwargs)
def ilc_agent(config_path, **kwargs):
    '''Intelligent Load Curtailment (ILC) Application using
    Analytical Hierarchical Process (AHP).

    Builds the device clusters and topic maps from configuration, then
    returns an AHP agent instance that:
      - subscribes to curtailable device data and the building power meter,
      - sheds load (curtails devices in AHP score order) when the average
        building power exceeds the configured demand limit,
      - restores devices and releases actuator schedules when the
        curtailment window ends.

    BUG FIX: handle_agent_kill previously set ``self.kill_signal_recieved``
    to False on receipt of a kill signal; it now sets True so the data and
    power callbacks (which early-return on the flag) stop issuing actuation
    during the shutdown grace period.
    '''
    config = utils.load_config(config_path)
    location = {}
    location['campus'] = config.get('campus')
    location['building'] = config.get('building')
    cluster_configs = config['clusters']
    agent_id = config.get('agent_id')
    global mappers
    try:
        mappers = config['mappers']
    except KeyError:
        mappers = {}

    # Build the device clusters from the pairwise-criteria spreadsheets.
    clusters = Clusters()
    for cluster_config in cluster_configs:
        # NOTE: 'critieria_file_path' is the (misspelled) configuration key
        # expected by existing configs — do not "fix" it here.
        excel_file_name = cluster_config['critieria_file_path']
        cluster_config_file_name = cluster_config['device_file_path']
        cluster_priority = cluster_config['cluster_priority']
        crit_labels, criteria_arr = extract_criteria(excel_file_name,
                                                     'CriteriaMatrix')
        col_sums = calc_column_sums(criteria_arr)
        _, row_average = normalize_matrix(criteria_arr, col_sums)
        if not (validate_input(criteria_arr, col_sums, crit_labels,
                               CRITERIA_LABELSTRING, MATRIX_ROWSTRING)):
            _log.info('Inconsistent criteria matrix. Check configuration '
                      'in ' + excel_file_name)
            sys.exit()
        cluster_config = utils.load_config(cluster_config_file_name)
        device_cluster = DeviceCluster(cluster_priority, crit_labels,
                                       row_average, cluster_config)
        _log.debug('Crit Labels: ' + str(crit_labels))
        clusters.add_device_cluster(device_cluster)

    # Topic templates with unit/point left unbound; bound per device later.
    base_device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                             building=config.get('building', ''),
                                             unit=None,
                                             path='',
                                             point=None)
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get('campus', ''),
                                           building=config.get('building', ''),
                                           unit=None,
                                           path='',
                                           point=None)

    device_topic_list = []
    device_topic_map = {}
    all_devices = clusters.get_device_name_list()
    for device_name in all_devices:
        device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                            building=config.get('building', ''),
                                            unit=device_name,
                                            path='',
                                            point='all')
        device_topic_list.append(device_topic)
        device_topic_map[device_topic] = device_name

    power_token = config['power_meter']
    power_meter = power_token['device']
    power_pt = power_token['point']
    power_meter_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                             building=config.get('building', ''),
                                             unit=power_meter,
                                             path='',
                                             point='all')

    # Optional kill switch: a device point that forces the agent to stand down.
    kill_device_topic = None
    kill_token = config.get('kill_switch')
    if kill_token is not None:
        kill_device = kill_token['device']
        kill_pt = kill_token['point']
        kill_device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                                 building=config.get('building', ''),
                                                 unit=kill_device,
                                                 path='',
                                                 point='all')

    demand_limit = float(config['demand_limit'])
    curtail_time = td(minutes=config.get('curtailment_time', 15.0))
    average_building_power_window = td(minutes=config.get('average_building_power_window', 5.0))
    curtail_confirm = td(minutes=config.get('curtailment_confirm', 5.0))
    curtail_break = td(minutes=config.get('curtailment_break', 15.0))
    actuator_schedule_buffer = td(minutes=config.get('actuator_schedule_buffer', 5.0))
    reset_curtail_count_time = td(hours=config.get('reset_curtail_count_time', 6.0))
    longest_possible_curtail = len(clusters.devices) * curtail_time
    # Release stagger window in seconds (config value is minutes).
    stagger_release_time = config.get('curtailment_break', 15.0)*60.0
    stagger_release = config.get('stagger_release', False)
    minimum_stagger_window = config.get('minimum_stagger_window', 120)

    class AHP(Agent):
        def __init__(self, **kwargs):
            super(AHP, self).__init__(**kwargs)
            self.running_ahp = False
            self.row_average = None
            self.next_curtail_confirm = None
            self.curtail_end = None
            self.break_end = None
            self.reset_curtail_count_time = None
            self.kill_signal_recieved = False
            self.scheduled_devices = set()
            self.devices_curtailed = set()
            self.bldg_power = []

        @Core.receiver('onstart')
        def starting_base(self, sender, **kwargs):
            '''startup method:
             - Setup subscriptions to curtailable devices.
             - Setup subscription to building power meter.
             - Setup subscription to the optional kill switch.
            '''
            for device_topic in device_topic_list:
                _log.debug('Subscribing to '+device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=device_topic,
                                          callback=self.new_data)
            _log.debug('Subscribing to '+power_meter_topic)
            self.vip.pubsub.subscribe(peer='pubsub',
                                      prefix=power_meter_topic,
                                      callback=self.load_message_handler)
            if kill_device_topic is not None:
                _log.debug('Subscribing to '+kill_device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=kill_device_topic,
                                          callback=self.handle_agent_kill)

        def handle_agent_kill(self, peer, sender, bus, topic, headers, message):
            '''Locally implemented override for ILC application.

            When an override is detected the ILC application will return
            operations for all units to normal and exit.
            '''
            data = message[0]
            _log.info('Checking kill signal')
            kill_signal = bool(data[kill_pt])
            if kill_signal:
                _log.info('Kill signal received, shutting down')
                # BUG FIX: was False; must be True so new_data() and
                # load_message_handler() stop acting during the grace sleep.
                self.kill_signal_recieved = True
                gevent.sleep(8)
                self.end_curtail()
                sys.exit()

        def new_data(self, peer, sender, bus, topic, headers, message):
            '''Call back method for curtailable device data subscription.'''
            if self.kill_signal_recieved:
                return
            _log.info('Data Received for {}'.format(topic))
            # topic of form: devices/campus/building/device
            device_name = device_topic_map[topic]
            data = message[0]
            now = parser.parse(headers['Date'])
            clusters.get_device(device_name).ingest_data(now, data)

        def load_message_handler(self, peer, sender, bus, topic, headers, message):
            '''Call back method for building power meter.

            Calculates the average building demand over a configurable
            sliding window and manages the curtailment time and
            curtailment break times.
            '''
            if self.kill_signal_recieved:
                return
            _log.debug('Reading building power data.')
            current_power = float(message[0][power_pt])
            if current_power < 0:
                current_power = 0.0
            now = parser.parse(headers['Date'])
            self.bldg_power.append((now, current_power))
            # Drop the oldest sample once the window is exceeded.
            if self.bldg_power[-1][0] - self.bldg_power[0][0] > average_building_power_window:
                self.bldg_power.pop(0)
            average_power = sum(power[1] for power in self.bldg_power)/len(self.bldg_power)
            _log.debug('Reported time: '+str(now))
            _log.info('Current load: {}'.format(average_power))

            if self.reset_curtail_count_time is not None:
                if self.reset_curtail_count_time <= now:
                    _log.debug('Resetting curtail count')
                    clusters.reset_curtail_count()
            if self.running_ahp:
                if now >= self.curtail_end:
                    self.end_curtail()
                elif now >= self.next_curtail_confirm:
                    self.curtail_confirm(average_power, now)
                return
            elif self.break_end is not None and now < self.break_end:
                _log.debug('Skipping load check, still on curtailment break.')
                return
            self.check_load(average_power, now)

        def check_load(self, bldg_power, now):
            '''Check whole building power and if the value is above the
            demand limit (demand_limit) then initiate the ILC (AHP)
            sequence.
            '''
            _log.debug('Checking building load.')
            if bldg_power > demand_limit:
                # Message fixed: "exceeds limit or" -> "exceeds limit of".
                _log.info('Current load ({load}) exceeds limit of {limit}.'
                          .format(load=bldg_power, limit=demand_limit))
                score_order = clusters.get_score_order()
                if not score_order:
                    _log.info('All devices are off, nothing to curtail.')
                    return
                scored_devices = self.actuator_request(score_order)
                self.curtail(scored_devices, bldg_power, now)

        def curtail(self, scored_devices, bldg_power, now):
            '''Curtail loads by turning off device (or device components)'''
            need_curtailed = bldg_power - demand_limit
            est_curtailed = 0.0
            remaining_devices = scored_devices[:]
            for device in self.devices_curtailed:
                if device in remaining_devices:
                    remaining_devices.remove(device)
            if not self.running_ahp:
                _log.info('Starting AHP')
                self.running_ahp = True
            if not remaining_devices:
                _log.debug('Everything available has already been curtailed')
                return
            self.curtail_end = now + curtail_time
            self.break_end = now + curtail_break + curtail_time
            self.reset_curtail_count_time = self.curtail_end + reset_curtail_count_time
            self.next_curtail_confirm = now + curtail_confirm
            # Typo fixed: "Curtialing" -> "Curtailing".
            _log.info('Curtailing load.')
            for item in remaining_devices:
                device_name, command = item
                curtail = clusters.get_device(device_name).get_curtailment(command)
                curtail_pt = curtail['point']
                curtail_val = curtail['value']
                curtail_load = curtail['load']
                curtailed_point = base_rpc_path(unit=device_name, point=curtail_pt)
                # TODO: catch errors.
                _log.debug('Setting '+curtailed_point+' to '+str(curtail_val))
                try:
                    if self.kill_signal_recieved:
                        break
                    result = self.vip.rpc.call('platform.actuator', 'set_point',
                                               agent_id, curtailed_point,
                                               curtail_val).get(timeout=4)
                except RemoteError as ex:
                    _log.warning('Failed to set {} to {}: {}'
                                 .format(curtailed_point, curtail_val, str(ex)))
                    continue
                est_curtailed += curtail_load
                clusters.get_device(device_name).increment_curtail(command)
                self.devices_curtailed.add(item)
                # Stop once the estimated shed covers the overage.
                if est_curtailed >= need_curtailed:
                    break
            return

        def curtail_confirm(self, cur_pwr, now):
            '''Check if load shed has been met.

            If the demand goal is not met and there are additional
            devices to curtail then the ILC will shed additional load
            by curtailing more devices.
            '''
            if cur_pwr < demand_limit:
                _log.info('Curtail goal for building load met.')
            else:
                _log.info('Curtail goal for building load NOT met.')
                self.check_load(cur_pwr, now)

        def actuator_request(self, score_order):
            '''request access to devices.'''
            _now = dt.now()
            str_now = _now.strftime(DATE_FORMAT)
            _end = _now + longest_possible_curtail + actuator_schedule_buffer
            str_end = _end.strftime(DATE_FORMAT)
            ctrl_dev = []
            already_handled = dict((device, True) for device in self.scheduled_devices)
            for item in score_order:
                device, point = item
                _log.debug('Reserving device: ' + device)
                if device in already_handled:
                    if already_handled[device]:
                        _log.debug('Skipping reserve device (previously reserved): ' + device)
                        ctrl_dev.append(item)
                    continue
                curtailed_device = base_rpc_path(unit=device, point='')
                schedule_request = [[curtailed_device, str_now, str_end]]
                try:
                    if self.kill_signal_recieved:
                        break
                    result = self.vip.rpc.call(
                        'platform.actuator', 'request_new_schedule', agent_id,
                        device, 'HIGH', schedule_request).get(timeout=4)
                except RemoteError as ex:
                    _log.warning('Failed to schedule device {} (RemoteError): {}'
                                 .format(device, str(ex)))
                    continue
                if result['result'] == 'FAILURE':
                    # warn() is a deprecated alias; use warning() consistently.
                    _log.warning('Failed to schedule device (unavailable) ' + device)
                    already_handled[device] = False
                else:
                    already_handled[device] = True
                    self.scheduled_devices.add(device)
                    ctrl_dev.append(item)
            return ctrl_dev

        def end_curtail(self):
            self.running_ahp = False
            self.reset_devices()
            self.release_devices()

        def reset_devices(self):
            _log.info('Resetting devices')
            # Spread the release over the break so all devices do not
            # rebound at once; grow the group size until each group's
            # stagger is at least the configured minimum window.
            current_stagger = stagger_release_time/len(self.devices_curtailed)
            device_group_size = 1
            group_count = 0
            while current_stagger < minimum_stagger_window:
                device_group_size += 1
                current_stagger = current_stagger + current_stagger
                if device_group_size == len(self.devices_curtailed):
                    device_group_size += 1
                    break
            for item in self.devices_curtailed:
                group_count += 1
                if stagger_release and group_count == device_group_size:
                    gevent.sleep(current_stagger)
                device_name, command = item
                curtail = clusters.get_device(device_name).get_curtailment(command)
                curtail_pt = curtail['point']
                curtailed_point = base_rpc_path(unit=device_name, point=curtail_pt)
                try:
                    result = self.vip.rpc.call('platform.actuator', 'revert_point',
                                               agent_id,
                                               curtailed_point).get(timeout=10)
                    _log.debug('Reverted point: {}'.format(curtailed_point))
                except RemoteError as ex:
                    _log.warning('Failed to revert point {} (RemoteError): {}'
                                 .format(curtailed_point, str(ex)))
                    continue
            self.devices_curtailed = set()

        def release_devices(self):
            for device in self.scheduled_devices:
                result = self.vip.rpc.call(
                    'platform.actuator', 'request_cancel_schedule', agent_id,
                    device).get(timeout=10)
            self.scheduled_devices = set()

    return AHP(**kwargs)
def configure_main(self, config_name, action, contents):
    """This triggers configuration of the ProactiveDiagnostic via the
    VOLTTRON configuration store.

    :param config_name: canonical name is config
    :param action: on instantiation this is "NEW" or "UPDATE" if user
        uploads update config to store
    :param contents: configuration contents
    :return: None
    """
    LOG.debug("Update %s for %s", config_name, self.core.identity)
    config = self.default_config.copy()
    config.update(contents)
    # BUG FIX: the original guard was `if action == "NEW" or "UPDATE":`,
    # which is always true because the literal "UPDATE" is truthy.  Test
    # membership instead, and ignore other config-store actions (e.g.
    # "DELETE") rather than reconfiguring on them.
    if action not in ("NEW", "UPDATE"):
        LOG.debug("Ignoring configuration store action %s", action)
        return
    # The run schedule should be a cron string
    # https://volttron.readthedocs.io/en/develop/devguides/agent_development/Agent-Development-Cheatsheet.html
    # https://crontab.guru/
    self.run_schedule = config.get("run_schedule")
    # The campus, building, device parameters are used to build the
    # (devices/campus/building/device/all) subscription for device data
    # coming from master driver and the rpc to do actuation
    # (campus/building/device/point)
    campus = config.get("campus", "")
    building = config.get("building", "")
    device_list = config.get("device", [])
    self.revert_action = config.get("revert_action", "release")
    # Configure global diagnostic prerequisites.
    # Data mechanism is through subscription.
    # Evaluation is only done prior to running diagnostic.
    prerequisites = config.get("prerequisites", {})
    self.actuator = config.get("actuator_vip", "platform.actuator")
    self.remote_platform = config.get("remote_platform")
    self.base_rpc_path = []
    self.device_topics_list = []
    if not device_list:
        LOG.warning("Configuration ERROR: no device_list "
                    "configured for diagnostic!")
        LOG.warning("Check configuration and update "
                    "device_list!")
    for device in device_list:
        self.base_rpc_path.append(
            topics.RPC_DEVICE_PATH(campus=campus,
                                   building=building,
                                   unit=device,
                                   path="",
                                   point=None))
        self.device_topics_list.append(
            topics.DEVICES_VALUE(campus=campus,
                                 building=building,
                                 unit=device,
                                 path="",
                                 point="all"))
    diagnostics = config.get("diagnostics", [])
    if not diagnostics:
        # Message fix: the original adjacent string literals concatenated
        # to "...diagnosticsinformation..." (missing space).
        LOG.warning("Configuration ERROR diagnostics "
                    "information is not configured!")
        LOG.warning("Diagnostic cannot be performed, "
                    "Update configuration!")
    self.diagnostics = diagnostics
    self.diagnostics_container = []
    self.prerequisites_expr_list = []
    self.prerequisites_data_required = {}
    self.prerequisites_variables = None
    if prerequisites:
        self.initialize_prerequisites(prerequisites)
    else:
        LOG.debug("No diagnostic prerequisites configured!")
    self.starting_base()
def __init__(self, config_path, **kwargs):
    """ILC agent constructor: load configuration, build criteria and
    curtailment clusters, and derive all pub/sub and RPC topics.

    :param config_path: Path to the agent configuration file.
    :type config_path: str
    """
    super(ILCAgent, self).__init__(**kwargs)
    config = utils.load_config(config_path)
    campus = config.get("campus", "")
    building = config.get("building", "")
    # For dash board message publishes
    self.agent_id = config.get("agent_id", "Intelligent Load Control Agent")
    dashboard_topic = config.get("dashboard_topic")
    self.application_category = config.get("application_category", "Load Control")
    self.application_name = config.get("application_name", "Intelligent Load Control")
    self.ilc_start_topic = self.agent_id
    # --------------------------------------------------------------------------------
    # For Target agent updates...
    analysis_prefix_topic = config.get("analysis_prefix_topic", "record")
    self.target_agent_subscription = "{}/target_agent".format(analysis_prefix_topic)
    # --------------------------------------------------------------------------------
    self.update_base_topic = "/".join([analysis_prefix_topic, self.agent_id])
    # NOTE(review): the local `ilc_start_topic` is only assigned inside the
    # `if campus:` branch but is read unconditionally below — if campus is
    # empty this raises NameError.  Confirm campus is always configured.
    if campus:
        self.update_base_topic = "/".join([self.update_base_topic, campus])
        ilc_start_topic = "/".join([self.agent_id, campus])
        if dashboard_topic is not None:
            dashboard_topic = "/".join([dashboard_topic, self.agent_id, campus])
    if building:
        self.update_base_topic = "/".join([self.update_base_topic, building])
        ilc_start_topic = "/".join([ilc_start_topic, building])
        if dashboard_topic is not None:
            dashboard_topic = "/".join([dashboard_topic, building])
    self.ilc_topic = dashboard_topic if dashboard_topic is not None else self.update_base_topic
    self.ilc_start_topic = "/".join([ilc_start_topic, "ilc/start"])
    cluster_configs = config["clusters"]
    self.criteria = CriteriaContainer()
    self.curtailment = CurtailmentContainer()
    # Build one criteria cluster + one curtailment cluster per config entry.
    for cluster_config in cluster_configs:
        criteria_file_name = cluster_config["pairwise_criteria_file"]
        if criteria_file_name[0] == "~":
            criteria_file_name = os.path.expanduser(criteria_file_name)
        device_criteria_config = cluster_config["device_criteria_file"]
        device_curtailment_config = cluster_config["device_curtailment_file"]
        cluster_priority = cluster_config["cluster_priority"]
        cluster_actuator = cluster_config.get("cluster_actuator", "platform.actuator")
        criteria_labels, criteria_array = extract_criteria(criteria_file_name)
        col_sums = calc_column_sums(criteria_array)
        # NOTE(review): unlike the module-level ilc_agent factory, the
        # result is not tuple-unpacked here — verify this matches the
        # signature of the normalize_matrix in scope for this agent.
        row_average = normalize_matrix(criteria_array, col_sums)
        if not validate_input(criteria_array, col_sums):
            _log.debug("Inconsistent criteria matrix. Check configuration "
                       "in: {}" .format(criteria_file_name))
            sys.exit()
        if device_criteria_config[0] == "~":
            device_criteria_config = os.path.expanduser(device_criteria_config)
        criteria_config = utils.load_config(device_criteria_config)
        criteria_cluster = CriteriaCluster(cluster_priority, criteria_labels,
                                           row_average, criteria_config)
        self.criteria.add_criteria_cluster(criteria_cluster)
        if device_curtailment_config[0] == "~":
            device_curtailment_config = os.path.expanduser(device_curtailment_config)
        curtailment_config = utils.load_config(device_curtailment_config)
        curtailment_cluster = CurtailmentCluster(curtailment_config, cluster_actuator)
        self.curtailment.add_curtailment_cluster(curtailment_cluster)
    # Topic templates with unit/point left unbound; bound per device below.
    self.base_device_topic = topics.DEVICES_VALUE(campus=campus,
                                                  building=building,
                                                  unit=None,
                                                  path="",
                                                  point=None)
    self.base_rpc_path = topics.RPC_DEVICE_PATH(campus=campus,
                                                building=building,
                                                unit=None,
                                                path="",
                                                point=None)
    self.device_topic_list = []
    self.device_topic_map = {}
    all_devices = self.curtailment.get_device_name_list()
    for device_name in all_devices:
        # device_name is a sequence; element [0] is the unit name.
        device_topic = topics.DEVICES_VALUE(campus=campus,
                                            building=building,
                                            unit=device_name[0],
                                            path="",
                                            point="all")
        self.device_topic_list.append(device_topic)
        self.device_topic_map[device_topic] = device_name
    power_token = config["power_meter"]
    power_meter = power_token["device"]
    self.power_point = power_token["point"]
    # Optional symbolic formula to compute demand from meter points.
    demand_formula = power_token.get("demand_formula")
    self.calculate_demand = False
    if demand_formula is not None:
        self.calculate_demand = True
        try:
            demand_operation = parse_sympy(demand_formula["operation"])
            _log.debug("Demand calculation - expression: {}".format(demand_operation))
            self.demand_expr = parse_expr(parse_sympy(demand_operation))
            self.demand_args = parse_sympy(demand_formula["operation_args"])
            self.demand_points = symbols(self.demand_args)
        except (KeyError, ValueError):
            _log.debug("Missing 'operation_args' or 'operation' for setting demand formula!")
            self.calculate_demand = False
        except:
            # Bare except kept deliberately: any parse failure disables
            # demand calculation rather than aborting agent startup.
            _log.debug("Unexpected error when reading demand formula parameters!")
            self.calculate_demand = False
    self.power_meter_topic = topics.DEVICES_VALUE(campus=campus,
                                                  building=building,
                                                  unit=power_meter,
                                                  path="",
                                                  point="all")
    # Optional kill switch: device point that forces the agent to stand down.
    self.kill_device_topic = None
    kill_token = config.get("kill_switch")
    if kill_token is not None:
        kill_device = kill_token["device"]
        self.kill_pt = kill_token["point"]
        self.kill_device_topic = topics.DEVICES_VALUE(campus=campus,
                                                      building=building,
                                                      unit=kill_device,
                                                      path="",
                                                      point="all")
    demand_limit = config["demand_limit"]
    if isinstance(demand_limit, (int, float)):
        self.demand_limit = float(demand_limit)
    else:
        # Non-numeric limit (e.g. schedule-driven) leaves the static limit unset.
        self.demand_limit = None
    self.demand_schedule = config.get("demand_schedule")
    # Curtailment timing windows (all timedeltas).
    self.curtail_time = td(minutes=config.get("curtailment_time", 15))
    self.average_building_power_window = td(minutes=config.get("average_building_power_window", 15))
    self.curtail_confirm = td(minutes=config.get("curtailment_confirm", 5))
    self.curtail_break = td(minutes=config.get("curtailment_break", 15))
    self.actuator_schedule_buffer = td(minutes=config.get("actuator_schedule_buffer", 15)) + self.curtail_break
    self.reset_curtail_count_time = td(hours=config.get("reset_curtail_count_time", 6))
    self.longest_possible_curtail = len(all_devices) * self.curtail_time * 2
    maximum_time_without_release = config.get("maximum_time_without_release")
    self.maximum_time_without_release = td(minutes=maximum_time_without_release) if maximum_time_without_release is not None else None
    # NOTE(review): stored in minutes here (no *60), whereas the older
    # module-level ilc_agent converted to seconds — confirm the consumer's
    # expected unit before changing.
    self.stagger_release_time = float(config.get("curtailment_break", 15.0))
    self.stagger_release = config.get("stagger_release", False)
    self.stagger_off_time = config.get("stagger_off_time", True)
    # NOTE(review): need_actuator_schedule is read from config but unused here.
    need_actuator_schedule = config.get("need_actuator_schedule", False)
    # Runtime state.
    self.running_ahp = False
    self.next_curtail_confirm = None
    self.curtail_end = None
    self.break_end = None
    self.reset_curtail_count = None
    self.kill_signal_received = False
    self.scheduled_devices = set()
    self.devices_curtailed = []
    self.bldg_power = []
    self.device_group_size = None
    self.current_stagger = None
    self.next_release = None
    self.power_meta = None
    self.tasks = {}
    self.tz = None
    self.simulation_running = config.get("simulation_running", False)
def update_driver(self, config_name, action, contents):
    """Handle a single driver-device config entry: derive its topics,
    register it in the site/device topic maps, and (re)subscribe.

    :param config_name: config-store entry name; encodes the device topic.
    :param action: config-store action (unused here).
    :param contents: driver configuration contents (may carry point_mapping).
    """
    topic = self.derive_device_topic(config_name)
    # Expect "campus/building[/unit]".
    topic_split = topic.split('/', 2)
    if len(topic_split) > 1:
        campus = topic_split[0]
        building = topic_split[1]
    # NOTE(review): campus/building are unbound if the topic has no '/' —
    # presumably config names always include campus/building; confirm.
    if len(topic_split) > 2:
        unit = topic_split[2]
    else:
        unit = ""
    site_name = "/".join([campus, building])
    publish_base = "/".join([analysis_name, campus, building])
    command_devices = []
    site_device_topic_dict = {}
    device_topic_list = []
    subdevices_list = []
    # RPC path template with unit/point unbound; bound at actuation time.
    base_actuator_path = topics.RPC_DEVICE_PATH(campus=campus,
                                                building=building,
                                                unit=None,
                                                path="",
                                                point=None)
    site_dict = {
        'site_name': site_name,
        'publish_base': publish_base,
        'multiple_devices': False,
        'device_topic_dict': site_device_topic_dict,
        'device_topic_list': device_topic_list,
        'subdevices_list': subdevices_list,
        'command_devices': command_devices,
        'base_actuator_path': base_actuator_path
    }
    if 'point_mapping' in contents:
        site_dict['point_mapping'] = contents['point_mapping']
    if not unit:
        # lookup the subdevices from point_mapping
        for point in contents['point_mapping'].keys():
            # remove the point name to get the subdevice
            subdevice_name = point.rsplit('/', 1)[0]
            sd_split = subdevice_name.rsplit('/', 1)
            device_name = sd_split[0]
            subdevice = ''
            if len(sd_split) > 1:
                subdevice = sd_split[1]
            if subdevice not in subdevices_list:
                subdevices_list.append(subdevice)
                command_devices.append(subdevice)
                subdevice_topic = topics.DEVICES_VALUE(campus=campus,
                                                       building=building,
                                                       unit=device_name,
                                                       path=subdevice,
                                                       point="all")
                self.site_topic_dict.update({subdevice_topic: site_dict})
                self.device_topic_dict.update(
                    {subdevice_topic: subdevice_name})
                site_device_topic_dict.update(
                    {subdevice_topic: subdevice_name})
                device_topic_list.append(subdevice_name)
                _log.info("device_topic_list topic {} -> subdev {}".format(
                    subdevice_topic, subdevice_name))
    self.sites_config_list.append(site_dict)
    device_topic = topics.DEVICES_VALUE(campus=campus,
                                        building=building,
                                        unit=unit,
                                        path="",
                                        point="all")
    # Re-registering an already-known device: drop the old subscription first.
    if device_topic in self.device_topic_dict:
        self.unsubscribe_from_device(device_topic)
    self.site_topic_dict.update({device_topic: site_dict})
    if unit:
        self.device_topic_dict.update({device_topic: unit})
        site_device_topic_dict.update({device_topic: unit})
        device_topic_list.append(unit)
        command_devices.append(unit)
        # overrides the publishing unit topic, which is needed for split topics
        override_unit = self.derive_device_unit(config_name, contents)
        if override_unit:
            del command_devices[:]
            command_devices.append(override_unit)
        _log.info("device_topic_list topic {} -> device {}".format(
            device_topic, unit))
    self.initialize_device(site_dict)
    _log.info("-- Site config {}".format(site_dict))
    for dt in site_device_topic_dict.keys():
        self.subscribe_to_device(dt)
def configure_main(self, config_name, action, contents):
    """Full (re)configuration of the driven agent from the config store:
    read actuation/interval settings, then rebuild the per-site device
    topic maps and device subscriptions.

    :param config_name: config-store entry name.
    :param action: config-store action (not inspected; config is re-applied).
    :param contents: configuration contents merged over defaults.
    """
    config = self.default_config.copy()
    config.update(contents)
    _log.info("configure_main with {}".format(config))
    # Drop existing subscriptions before rebuilding from the new config.
    self.unsubscribe_from_all_devices()

    self.actuation_mode = True if config.get(
        "actuation_mode", "PASSIVE") == "ACTIVE" else False
    self.actuator_lock_required = config.get("require_actuator_lock", False)
    self.interval = config.get("interval", 60)
    self.vip_destination = config.get("vip_destination", None)
    self.timezone = config.get("local_timezone", "US/Pacific")
    self.device_lock_duration = config.get("device_lock_duration", 10.0)
    self.conversion_map = config.get("conversion_map")
    # Config value is a percentage; stored as a fraction.
    self.missing_data_threshold = config.get("missing_data_threshold", 50.0) / 100.0

    # Actuate through a remote platform when vip_destination is configured.
    self.actuation_vip = self.vip.rpc
    if self.vip_destination:
        self.agent = setup_remote_actuation(self.vip_destination)
        self.actuation_vip = self.agent.vip.rpc

    # Case-insensitive lookup of conversion-map entries (string keys only).
    self.map_names = {}
    if self.conversion_map:
        for key, value in self.conversion_map.items():
            self.map_names[key.lower(
            ) if isinstance(key, str) else key] = value

    _log.info("--- actuation_mode {}".format(self.actuation_mode))
    _log.info("--- require_actuator_lock {}".format(
        self.actuator_lock_required))
    _log.info("--- interval {}".format(self.interval))
    _log.info("--- vip_destination {}".format(self.vip_destination))
    _log.info("--- local_timezone {}".format(self.timezone))
    _log.info("--- device_lock_duration {}".format(
        self.device_lock_duration))
    _log.info("--- missing_data_threshold {}".format(
        self.missing_data_threshold))
    _log.info("--- conversion_map {}".format(self.conversion_map))
    _log.info("--- map_names {}".format(self.map_names))

    self.sites = config["device"]
    if not isinstance(self.sites, list):
        self.sites = [self.sites]
    self.sites_config_list = []
    self.site_topic_dict = {}
    self.device_topic_dict = {}
    for site in self.sites:
        campus = site.get("campus", "")
        building = site.get("building", "")
        site_name = "/".join([campus, building])
        publish_base = "/".join([analysis_name, campus, building])
        device_config = site["unit"]
        # A dict of units means each unit may carry subdevices.
        multiple_devices = isinstance(device_config, dict)
        # NOTE(review): this is a keys view (not a list) in Python 3 —
        # update_driver's counterpart uses a real list and `del [:]`;
        # confirm consumers only iterate it.
        command_devices = device_config.keys()
        site_device_topic_dict = {}
        device_topic_list = []
        subdevices_list = []
        base_actuator_path = topics.RPC_DEVICE_PATH(campus=campus,
                                                    building=building,
                                                    unit=None,
                                                    path="",
                                                    point=None)
        site_dict = {
            'site_name': site_name,
            'publish_base': publish_base,
            'multiple_devices': multiple_devices,
            'device_topic_dict': site_device_topic_dict,
            'device_topic_list': device_topic_list,
            'subdevices_list': subdevices_list,
            'command_devices': command_devices,
            'base_actuator_path': base_actuator_path
        }
        if 'point_mapping' in site:
            site_dict['point_mapping'] = site['point_mapping']
        self.sites_config_list.append(site_dict)
        for device_name in device_config:
            device_topic = topics.DEVICES_VALUE(campus=campus,
                                                building=building,
                                                unit=device_name,
                                                path="",
                                                point="all")
            self.site_topic_dict.update({device_topic: site_dict})
            self.device_topic_dict.update({device_topic: device_name})
            site_device_topic_dict.update({device_topic: device_name})
            device_topic_list.append(device_name)
            _log.info("device_topic_list topic {} -> device {}".format(
                device_topic, device_name))
            if multiple_devices:
                for subdevice in device_config[device_name][
                        "subdevices"]:
                    if subdevice not in subdevices_list:
                        subdevices_list.append(subdevice)
                    subdevice_topic = topics.DEVICES_VALUE(
                        campus=campus,
                        building=building,
                        unit=device_name,
                        path=subdevice,
                        point="all")
                    subdevice_name = device_name + "/" + subdevice
                    self.site_topic_dict.update(
                        {subdevice_topic: site_dict})
                    self.device_topic_dict.update(
                        {subdevice_topic: subdevice_name})
                    site_device_topic_dict.update(
                        {subdevice_topic: subdevice_name})
                    device_topic_list.append(subdevice_name)
                    _log.info(
                        "device_topic_list topic {} -> subdev {}".
                        format(subdevice_topic, subdevice_name))
        _log.info("-- Site config {}".format(site_dict))

    self.initialize_devices()
    self.subscribe_to_all_devices()
def vav_agent(config_path, **kwargs):
    """Build a VAVAgent from the configuration file at *config_path*.

    Reads the VAV model coefficients, market setup, and device topics
    from the configuration and constructs the agent.

    :param config_path: Path to a configuation file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    """
    try:
        config = utils.load_config(config_path)
    except StandardError:
        config = {}
    if not config:
        _log.info('Using defaults for starting configuration.')

    # One market per hour of the day, e.g. "air_0" .. "air_23".
    base_name = config.get('market_name', 'air')
    market_name = ['_'.join([base_name, str(hour)]) for hour in range(24)]

    # Location pieces shared by the topic templates below.
    campus = config.get('campus', '')
    building = config.get('building', '')
    parent_device = config.get('parent_device', '')
    device = config.get('device', '')

    # VAV model coefficients and operating limits.
    c1 = config.get('c1')
    c2 = config.get('c2')
    c3 = config.get('c3')
    tMinAdj = config.get('tMin', 0)
    tMaxAdj = config.get('tMax', 0)
    tMinUnoc = config.get('tMinUnoc', 19.0)
    mDotMin = config.get('mDotMin', 0)
    mDotMax = config.get('mDotMax', 0)
    sim_flag = config.get('sim_flag', False)
    tIn = config.get('tIn', 0)

    agent_name = config.get('agent_name')
    actuator = config.get('actuator', 'platform.actuator')
    mode = config.get('mode')
    device_points = config.get('device_points')
    parent_device_points = config.get('parent_device_points')
    setpoint = config.get('setpoint')
    # Falls back to the agent name when no building is configured.
    activate_topic = '/'.join([config.get('building', agent_name), 'actuate'])
    setpoint_mode = config.get('setpoint_mode', 0)

    # Market/pricing parameters.
    price_multiplier = config.get('price_multiplier', 2.0)
    default_min_price = config.get('default_min_price', 0.02)
    default_max_price = config.get('default_max_price', 0.04)
    heartbeat_period = config.get('heartbeat_period', 300)
    hvac_avail = config.get("hvac_occupancy_schedule", [1] * 24)

    parent_device_topic = topics.DEVICES_VALUE(campus=campus,
                                               building=building,
                                               unit=parent_device,
                                               path='',
                                               point='all')
    # NOTE(review): "unit" is the parent AHU and "path" is the VAV box,
    # mirroring the configuration layout used elsewhere in this file.
    device_topic = topics.DEVICES_VALUE(campus=campus,
                                        building=building,
                                        unit=parent_device,
                                        path=device,
                                        point='all')
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=campus,
                                           building=building,
                                           unit=parent_device,
                                           path=device,
                                           point=setpoint)

    verbose_logging = config.get('verbose_logging', True)
    tns_actuate = config.get('tns_actuate', 'tns/actuate')

    return VAVAgent(market_name, agent_name, c1, c2, c3, tMinAdj, tMaxAdj,
                    tMinUnoc, mDotMin, mDotMax, tIn, verbose_logging,
                    device_topic, hvac_avail, device_points,
                    parent_device_topic, parent_device_points, base_rpc_path,
                    activate_topic, actuator, mode, setpoint_mode, sim_flag,
                    heartbeat_period, tns_actuate, price_multiplier,
                    default_min_price, default_max_price, **kwargs)
def ilc_agent(config_path, **kwargs):
    '''Intelligent Load Curtailment (ILC) Application using
    Analytical Hierarchical Process (AHP).

    Parses the ILC configuration, builds the AHP criteria/device
    clusters, precomputes topics and timing windows, and returns an
    instantiated AHP agent that sheds building load whenever average
    demand exceeds the configured limit.

    :param config_path: Path to the ILC configuration file.
    :returns: instantiated AHP agent.
    '''
    config = utils.load_config(config_path)
    cluster_configs = config['clusters']
    agent_id = config.get('agent_id')
    # "mappers" is a module-level global consumed elsewhere in this module.
    global mappers
    try:
        mappers = config['mappers']
    except KeyError:
        mappers = {}

    # Build one DeviceCluster per configured cluster.  The pairwise
    # criteria matrix comes from an Excel sheet and must pass the AHP
    # consistency check before the application may start.
    clusters = Clusters()
    for cluster_config in cluster_configs:
        excel_file_name = cluster_config['critieria_file_path']
        cluster_config_file_name = cluster_config['device_file_path']
        cluster_priority = cluster_config['cluster_priority']
        crit_labels, criteria_arr = extract_criteria(excel_file_name,
                                                     'CriteriaMatrix')
        col_sums = calc_column_sums(criteria_arr)
        _, row_average = normalize_matrix(criteria_arr, col_sums)
        if not (validate_input(criteria_arr, col_sums, crit_labels,
                               CRITERIA_LABELSTRING, MATRIX_ROWSTRING)):
            _log.info('Inconsistent criteria matrix. Check configuration '
                      'in ' + excel_file_name)
            sys.exit()
        cluster_config = utils.load_config(cluster_config_file_name)
        device_cluster = DeviceCluster(cluster_priority, crit_labels,
                                       row_average, cluster_config)
        _log.debug('Crit Labels: ' + str(crit_labels))
        clusters.add_device_cluster(device_cluster)

    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get('campus', ''),
                                           building=config.get('building', ''),
                                           unit=None,
                                           path='',
                                           point=None)

    # Map each curtailable device's "all" topic back to its device name.
    device_topic_list = []
    device_topic_map = {}
    all_devices = clusters.get_device_name_list()
    for device_name in all_devices:
        device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                            building=config.get('building', ''),
                                            unit=device_name,
                                            path='',
                                            point='all')
        device_topic_list.append(device_topic)
        device_topic_map[device_topic] = device_name

    power_token = config['power_meter']
    power_meter = power_token['device']
    power_point = power_token['point']
    power_meter_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                             building=config.get('building', ''),
                                             unit=power_meter,
                                             path='',
                                             point='all')

    # Optional kill switch: a device point that, when true, shuts the
    # application down after reverting all curtailed devices.
    kill_device_topic = None
    kill_token = config.get('kill_switch')
    if kill_token is not None:
        kill_device = kill_token['device']
        kill_pt = kill_token['point']
        kill_device_topic = topics.DEVICES_VALUE(
            campus=config.get('campus', ''),
            building=config.get('building', ''),
            unit=kill_device,
            path='',
            point='all')

    # Timing windows for curtailment, confirmation, and release.
    demand_limit = float(config['demand_limit'])
    curtail_time = td(minutes=config.get('curtailment_time', 15.0))
    average_building_power_window = td(
        minutes=config.get('average_building_power_window', 5.0))
    curtail_confirm = td(minutes=config.get('curtailment_confirm', 5.0))
    curtail_break = td(minutes=config.get('curtailment_break', 15.0))
    actuator_schedule_buffer = td(
        minutes=config.get('actuator_schedule_buffer', 15.0)) + curtail_break
    reset_curtail_count_time = td(
        hours=config.get('reset_curtail_count_time', 6.0))
    longest_possible_curtail = len(clusters.devices) * curtail_time

    # Staggered release: spread device restoration across the break
    # period, keeping at least one confirmation window between groups.
    stagger_release_time = config.get('curtailment_break', 15.0) * 60.0
    stagger_release = config.get('stagger_release', False)
    minimum_stagger_window = int(curtail_confirm.total_seconds() + 2)
    _log.debug('Minimum stagger window: {}'.format(minimum_stagger_window))
    if stagger_release_time - minimum_stagger_window < minimum_stagger_window:
        stagger_release = False
    else:
        stagger_release_time = stagger_release_time - minimum_stagger_window

    class AHP(Agent):
        '''Agent implementing the ILC curtailment state machine.

        All configuration values above are captured by closure; instance
        attributes hold only runtime state.
        '''

        def __init__(self, **kwargs):
            super(AHP, self).__init__(**kwargs)
            self.running_ahp = False            # a curtailment cycle is active
            self.row_average = None
            self.next_curtail_confirm = None
            self.curtail_end = None
            self.break_end = None
            self.reset_curtail_count_time = None
            # NOTE: attribute name misspelling ("recieved") kept for
            # consistency with the rest of this module.
            self.kill_signal_recieved = False
            self.power_data_count = 0.0
            self.scheduled_devices = set()
            # Entries are [device_name, command, revert_value, revert_priority].
            self.devices_curtailed = []
            self.bldg_power = []                # list of (timestamp, kW) samples
            self.device_group_size = None
            self.average_power = None
            self.current_stagger = None
            self.next_release = None

        @Core.receiver('onstart')
        def starting_base(self, sender, **kwargs):
            '''startup method:
             - Setup subscriptions to curtailable devices.
             - Setup subscription to building power meter.
             - Setup subscription to the optional kill switch.
            '''
            for device_topic in device_topic_list:
                _log.debug('Subscribing to ' + device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=device_topic,
                                          callback=self.new_data)
            _log.debug('Subscribing to ' + power_meter_topic)
            self.vip.pubsub.subscribe(peer='pubsub',
                                      prefix=power_meter_topic,
                                      callback=self.load_message_handler)
            if kill_device_topic is not None:
                _log.debug('Subscribing to ' + kill_device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=kill_device_topic,
                                          callback=self.handle_agent_kill)

        def handle_agent_kill(self, peer, sender, bus, topic, headers,
                              message):
            '''Locally implemented override for ILC application.

            When an override is detected the ILC application will return
            operations for all units to normal and exit.
            '''
            data = message[0]
            _log.info('Checking kill signal')
            kill_signal = bool(data[kill_pt])
            _now = parser.parse(headers['Date'])
            if kill_signal:
                _log.info('Kill signal received, shutting down')
                # BUG FIX: this was previously set to False, which let
                # new_data/load_message_handler keep processing (and even
                # start new curtailments) during the grace sleep below.
                self.kill_signal_recieved = True
                gevent.sleep(8)
                self.end_curtail(_now)
                sys.exit()

        def new_data(self, peer, sender, bus, topic, headers, message):
            '''Call back method for curtailable device data subscription.'''
            if self.kill_signal_recieved:
                return
            _log.info('Data Received for {}'.format(topic))
            # topic of form: devices/campus/building/device
            device_name = device_topic_map[topic]
            data = message[0]
            now = parser.parse(headers['Date'])
            clusters.get_device(device_name).ingest_data(now, data)

        def load_message_handler(self, peer, sender, bus, topic, headers,
                                 message):
            '''Call back method for building power meter.

            Calculates the average building demand over a configurable
            time and manages the curtailment time and curtailment break
            times.
            '''
            if self.kill_signal_recieved:
                return
            _log.debug('Reading building power data.')
            current_power = float(message[0][power_point])
            current_time = parser.parse(headers['Date'])

            # Width of the rolling sample window; the small paddings keep
            # boundary comparisons from flapping at exact window edges.
            if self.bldg_power:
                current_average_window = (self.bldg_power[-1][0] -
                                          self.bldg_power[0][0]) + td(
                                              minutes=1.125)
            else:
                current_average_window = td(minutes=0.125)
            _log.debug('TIME VALUES {} : {}'.format(
                current_average_window, average_building_power_window))

            # Slide the window once it is full; grow it (and the sample
            # count used for the smoothing constant) while it is filling.
            if current_average_window >= average_building_power_window and current_power > 0:
                self.bldg_power.append((current_time, current_power))
                self.bldg_power.pop(0)
            elif current_power > 0:
                self.bldg_power.append((current_time, current_power))
                self.power_data_count += 1.0

            smoothing_constant = 2.2756 * self.power_data_count**(
                -0.718) if self.power_data_count > 0 else 1.0
            alpha_smoothing = 0.125
            window_power = 0
            # Exponentially weight the most recent samples (sorted by
            # timestamp, newest first).
            power_sort = list(self.bldg_power)
            power_sort.sort(reverse=True)
            for n in xrange(len(self.bldg_power)):
                window_power += power_sort[n][1] * smoothing_constant * (
                    1.0 - smoothing_constant)**n
            window_power = window_power if window_power > 0.0 else 0.0

            # Simple exponential smoothing of instantaneous power.
            if self.average_power is None:
                self.average_power = current_power
            self.average_power = self.average_power * (
                1 - alpha_smoothing) + current_power * alpha_smoothing

            norm_list = [float(i[1]) for i in self.bldg_power]
            normal_average_power = mean(norm_list) if norm_list else 0.0
            str_now = format_timestamp(current_time)
            _log.debug('Reported time: ' + str_now +
                       ' data count: {} / power array count {}'.format(
                           self.power_data_count, len(self.bldg_power)))
            _log.debug('Current instantaneous power: {}'.format(current_power))
            _log.debug('Current standard 30 minute average power: {}'.format(
                normal_average_power))
            _log.debug('Current simple smoothing load: {}'.format(
                self.average_power))
            _log.debug('Current smoothing {} and window load: {}'.format(
                smoothing_constant, window_power))

            # Append diagnostics to a fixed CSV log, creating it with a
            # header row on first use.
            _log_csv = [
                str_now, current_power, normal_average_power,
                self.average_power, smoothing_constant, window_power
            ]
            if not os.path.isfile('/home/volttron/power_log.csv'):
                _header = [
                    'ts', 'instantaneous power', 'Normal Average',
                    'Simple Exponential Smoothing', 'Smoothing Constant',
                    'Fifteen Minute Exponential Smoothing'
                ]
                myfile = open('/home/volttron/power_log.csv', 'wb')
                wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
                wr.writerow(_header)
                myfile.close()
            myfile = open('/home/volttron/power_log.csv', 'a+')
            wr = csv.writer(myfile, quoting=csv.QUOTE_NONE)
            wr.writerow(_log_csv)
            myfile.close()

            if self.reset_curtail_count_time is not None:
                if self.reset_curtail_count_time <= current_time:
                    _log.debug('Resetting curtail count')
                    clusters.reset_curtail_count()
            if self.running_ahp:
                _log.debug('Next confirm: {}'.format(
                    self.next_curtail_confirm))
                if current_time >= self.next_curtail_confirm:
                    # Method call; distinct from the closure timedelta of
                    # the same name used elsewhere.
                    self.curtail_confirm(self.average_power, current_time)
                    _log.debug(
                        'now: {} ------- Next Curtail Confirm: {}'.format(
                            current_time, self.next_curtail_confirm))
                if current_time >= self.curtail_end:
                    _log.debug('Running stagger tracking method')
                    self.end_curtail(current_time)
                return
            if self.break_end is not None and current_time < self.break_end:
                _log.debug('Break ends: {}'.format(self.break_end))
                return
            #if len(self.bldg_power) < 5:
            #    return
            self.check_load(self.average_power, current_time)

        def check_load(self, bldg_power, now):
            '''Check whole building power and if the value is above the
            the demand limit (demand_limit) then initiate the ILC (AHP)
            sequence.
            '''
            _log.debug('Checking building load.')
            if bldg_power > demand_limit:
                _log.info(
                    'Current load ({load}) exceeds limit of {limit}.'.format(
                        load=bldg_power, limit=demand_limit))
                self.device_group_size = None
                score_order = clusters.get_score_order()
                if not score_order:
                    _log.info('All devices are off, nothing to curtail.')
                    return
                scored_devices = self.actuator_request(score_order)
                self.curtail(scored_devices, bldg_power, now)

        def curtail(self, scored_devices, bldg_power, now):
            '''Curtail loads by turning off device (or device components)'''
            need_curtailed = bldg_power - demand_limit
            est_curtailed = 0.0
            # Skip devices already curtailed in this cycle.
            remaining_devices = scored_devices[:]
            for device in self.devices_curtailed:
                current_tuple = (device[0], device[1])
                if current_tuple in remaining_devices:
                    remaining_devices.remove(current_tuple)
            if not self.running_ahp:
                _log.info('Starting AHP')
                self.running_ahp = True
            if not remaining_devices:
                _log.debug('Everything available has already been curtailed')
                return
            self.break_end = now + curtail_time + curtail_break
            self.curtail_end = now + curtail_time
            self.reset_curtail_count_time = self.curtail_end + reset_curtail_count_time
            self.next_curtail_confirm = now + curtail_confirm
            _log.info('Curtailing load.')
            for item in remaining_devices:
                device_name, command = item
                curtail = clusters.get_device(device_name).get_curtailment(
                    command)
                curtail_pt = curtail['point']
                curtail_load = curtail['load']
                current_offset = curtail['offset']
                curtail_value = curtail['value']
                revert_priority = curtail['revert_priority']
                curtailed_point = base_rpc_path(unit=device_name,
                                                point=curtail_pt)
                # Read current value so it can be restored on release.
                value = self.vip.rpc.call('platform.actuator', 'get_point',
                                          curtailed_point).get(timeout=5)
                if current_offset is not None:
                    curtailed_value = value + curtail['offset']
                else:
                    curtailed_value = curtail_value
                # TODO: remove offset from curtailment manager
                _log.debug('Setting ' + curtailed_point + ' to ' +
                           str(curtailed_value))
                try:
                    if self.kill_signal_recieved:
                        break
                    result = self.vip.rpc.call('platform.actuator',
                                               'set_point', agent_id,
                                               curtailed_point,
                                               curtailed_value).get(timeout=5)
                    gevent.sleep(3)
                except RemoteError as ex:
                    _log.warning('Failed to set {} to {}: {}'.format(
                        curtailed_point, curtailed_value, str(ex)))
                    continue
                est_curtailed += curtail_load
                clusters.get_device(device_name).increment_curtail(command)
                self.devices_curtailed.append(
                    [device_name, command, value, revert_priority])
                # Stop once the estimated shed meets the required shed.
                if est_curtailed >= need_curtailed:
                    break
            return

        def curtail_confirm(self, cur_pwr, now):
            '''Check if load shed has been met.

            If the demand goal is not met and there are additional
            devices to curtail then the ILC will shed additional load by
            curtailing more devices.
            '''
            if cur_pwr < demand_limit:
                _log.info('Curtail goal for building load met.')
            else:
                _log.info('Curtail goal for building load NOT met.')
                self.check_load(cur_pwr, now)

        def actuator_request(self, score_order):
            '''request access to devices.'''
            _now = get_aware_utc_now()
            str_now = format_timestamp(_now)
            _end = _now + longest_possible_curtail + actuator_schedule_buffer
            str_end = format_timestamp(_end)
            ctrl_dev = []
            already_handled = dict(
                (device, True) for device in self.scheduled_devices)
            for item in score_order:
                device, point = item
                _log.debug('Reserving device: ' + device)
                if device in already_handled:
                    # True means a prior reservation succeeded; False
                    # means it failed and should not be retried here.
                    if already_handled[device]:
                        _log.debug(
                            'Skipping reserve device (previously reserved): '
                            + device)
                        ctrl_dev.append(item)
                    continue
                curtailed_device = base_rpc_path(unit=device, point='')
                schedule_request = [[curtailed_device, str_now, str_end]]
                try:
                    if self.kill_signal_recieved:
                        break
                    result = self.vip.rpc.call(
                        'platform.actuator', 'request_new_schedule',
                        agent_id, device, 'HIGH',
                        schedule_request).get(timeout=5)
                except RemoteError as ex:
                    _log.warning(
                        'Failed to schedule device {} (RemoteError): {}'.
                        format(device, str(ex)))
                    continue
                if result['result'] == 'FAILURE':
                    _log.warn('Failed to schedule device (unavailable) ' +
                              device)
                    already_handled[device] = False
                else:
                    already_handled[device] = True
                    self.scheduled_devices.add(device)
                    ctrl_dev.append(item)
            return ctrl_dev

        def end_curtail(self, _now):
            '''Release curtailed devices, optionally in staggered groups.'''
            _log.info('Stagger release: {}'.format(stagger_release))
            if stagger_release:
                _log.info('Stagger release enabled.')
                if self.device_group_size is None:
                    _log.debug('Stagger setup.')
                    self.next_curtail_confirm = _now + curtail_confirm
                    self.stagger_release_setup()
                    self.next_release = _now + td(
                        seconds=self.current_stagger)
                    self.reset_devices()
                if _now >= self.next_release:
                    _log.debug('Release group stagger.')
                    self.reset_devices()
                    self.next_release = _now + td(
                        seconds=self.current_stagger)
                    _log.debug('Next scheduled release: {}'.format(
                        self.next_release))
                if _now >= self.break_end:
                    _log.debug('Release all in contingency.')
                    self.reinit_stagger(reset_all=True)
                return
            self.device_group_size = len(self.devices_curtailed)
            self.reinit_stagger()

        def reset_devices(self, reset_all=False):
            '''Revert the next group of curtailed devices to normal.'''
            _log.info('Resetting Devices: {}'.format(self.devices_curtailed))
            current_devices_curtailed = deepcopy(self.devices_curtailed)
            index_counter = 0
            if reset_all:
                self.device_group_size = len(self.devices_curtailed)
            for item in xrange(self.device_group_size):
                if item >= len(self.devices_curtailed):
                    break
                device_name, command, revert_val, revert_priority = self.devices_curtailed[
                    item]
                curtail = clusters.get_device(device_name).get_curtailment(
                    command)
                curtail_pt = curtail['point']
                curtailed_point = base_rpc_path(unit=device_name,
                                                point=curtail_pt)
                revert_value = self.get_revert_value(device_name,
                                                     revert_priority,
                                                     revert_val)
                _log.debug('Returned revert value: {}'.format(revert_value))
                try:
                    if revert_value is not None:
                        result = self.vip.rpc.call(
                            'platform.actuator', 'set_point', agent_id,
                            curtailed_point, revert_value).get(timeout=5)
                        gevent.sleep(3)
                        _log.debug('Reverted point: {} --------- value: {}'.format(
                            curtailed_point, revert_value))
                    else:
                        result = self.vip.rpc.call(
                            'platform.actuator', 'revert_point', agent_id,
                            curtailed_point).get(timeout=5)
                        gevent.sleep(3)
                        _log.debug('Reverted point: {} - Result: {}'.format(
                            curtailed_point, result))
                    if current_devices_curtailed:
                        _log.debug('Removing from curtailed list: {} '.format(
                            self.devices_curtailed[item]))
                        # index_counter accounts for entries already popped
                        # from the working copy this pass.
                        _index = self.devices_curtailed.index(
                            self.devices_curtailed[item]) - index_counter
                        current_devices_curtailed.pop(_index)
                        _log.debug('Success!: {} '.format(
                            self.devices_curtailed[item]))
                        index_counter += 1
                except RemoteError as ex:
                    _log.warning(
                        'Failed to revert point {} (RemoteError): {}'.format(
                            curtailed_point, str(ex)))
                    continue
            self.devices_curtailed = current_devices_curtailed

        def get_revert_value(self, device_name, revert_priority, revert_val):
            '''Pick the revert value for a device curtailed multiple times.

            When the same device appears more than once in the curtailed
            list, release using the lowest-priority entry's saved value
            and push this entry's value/priority onto that entry.
            Returns None when the actuator's own revert should be used.
            '''
            current_device_list = []
            if revert_priority is None:
                return None
            for item in self.devices_curtailed:
                if item[0] == device_name:
                    current_device_list.append(item)
            if len(current_device_list) <= 1:
                return None
            index_value = min(current_device_list, key=lambda t: t[3])
            return_value = deepcopy(index_value[2])
            _log.debug('Calculated revert value: {}'.format(return_value))
            curtail_set_index = self.devices_curtailed.index(index_value)
            self.devices_curtailed[curtail_set_index][2] = revert_val
            self.devices_curtailed[curtail_set_index][3] = revert_priority
            return return_value

        def stagger_release_setup(self):
            '''Compute group size and interval for staggered release.'''
            _log.debug('Number of curtailed devices: {}'.format(
                len(self.devices_curtailed)))
            device_group_size = max(
                1,
                round(minimum_stagger_window * len(self.devices_curtailed) /
                      stagger_release_time))
            _log.debug(
                'MINIMUM: {} ------- STAGGER: {} ------------- NUMBER: {}'.
                format(minimum_stagger_window, stagger_release_time,
                       len(self.devices_curtailed)))
            self.device_group_size = int(device_group_size)
            current_release_count = int(stagger_release_time /
                                        minimum_stagger_window + 1)
            if current_release_count > self.device_group_size:
                self.current_stagger = (
                    current_release_count /
                    device_group_size) * minimum_stagger_window
            else:
                self.current_stagger = minimum_stagger_window
            _log.debug('Current stagger time: {}'.format(
                self.current_stagger))
            _log.debug('Current group size: {}'.format(
                self.device_group_size))

        def release_devices(self):
            '''Cancel all actuator schedules held by this agent.'''
            for device in self.scheduled_devices:
                result = self.vip.rpc.call('platform.actuator',
                                           'request_cancel_schedule',
                                           agent_id, device).get(timeout=10)
            self.scheduled_devices = set()

        def reinit_stagger(self, reset_all=False):
            '''Revert devices and reset all curtailment cycle state.'''
            # NOTE(review): reset_all is a bool so "is not None" is always
            # True; kept as-is since always reverting appears intentional.
            if reset_all is not None:
                self.reset_devices(reset_all=reset_all)
            self.devices_curtailed = []
            self.running_ahp = False
            self.device_group_size = None
            self.release_devices()

    return AHP(**kwargs)