def setup_device_list(self):
    """Setup the device subscriptions.

    Reads the campus/building/unit hierarchy from ``self.config`` and
    builds one subscription topic per unit and per subdevice
    (``self.device_list``), plus the matching publish prefixes
    (``self.publish_list``).
    """
    # get device, then the units underneath that
    self.analysis_name = self.config.get("analysis_name", "analysis_name")
    self.timezone = self.config.get("local_timezone", "US/Pacific")
    self.device = self.config.get("device", {})
    # FIX: default to "" so a config missing "campus"/"building" cannot
    # raise AttributeError when the topics are assembled below (the
    # original only assigned these attributes when the keys existed).
    self.campus = self.device.get("campus", "")
    self.building = self.device.get("building", "")
    if "unit" in self.device:
        # units will be a dictionary with subdevices
        self.units = self.device["unit"]
        for u in self.units:
            # building the connection string for each unit
            self.device_list.append(
                topics.DEVICES_VALUE(campus=self.campus,
                                     building=self.building,
                                     unit=u,
                                     path="",
                                     point="all"))
            self.publish_list.append(
                "/".join([self.campus, self.building, u]))
            # loop over subdevices and add them
            if "subdevices" in self.units[u]:
                for sd in self.units[u]["subdevices"]:
                    self.device_list.append(
                        topics.DEVICES_VALUE(campus=self.campus,
                                             building=self.building,
                                             unit=u,
                                             path=sd,
                                             point="all"))
                    self.publish_list.append("/".join(
                        [self.campus, self.building, u, sd]))
def setup_device_list(self):
    """Setup the device subscriptions.

    Validates the ``device`` section of the configuration, builds the
    subscription topics for every unit and VAV-zone subdevice, and
    records the master-device bookkeeping before calling
    ``initialize_devices``.  Stops the agent (and returns early) when
    required configuration is missing.
    """
    self.analysis_name = self.config.get("analysis_name", "AirsideAIRCx")
    self.actuation_mode = self.config.get("actuation_mode", "passive")
    self.timezone = self.config.get("local_timezone", "US/Pacific")
    self.interval = self.config.get("interval", 60)
    # Configured as a percentage; stored as a fraction.
    self.missing_data_threshold = self.config.get(
        "missing_data_threshold", 15.0) / 100.0
    self.device = self.config.get("device", {})
    if not self.device:
        _log.warning(
            "device parameters are not present in configuration file for {}"
            .format(self.core.identity))
        self.core.stop()
        # FIX: core.stop() only schedules shutdown; return so we do not
        # keep configuring with an invalid (empty) device section.
        return
    self.campus = self.device.get("campus", "")
    self.building = self.device.get("building", "")
    self.units = self.device.get("unit", {})
    if not self.units:
        _log.warning(
            "device unit parameters are not present in configuration file for {}"
            .format(self.core.identity))
        self.core.stop()
        return  # FIX: same as above — do not fall through after stop().
    has_zone_information = False
    for u in self.units:
        # building the connection string for each unit
        device_topic = topics.DEVICES_VALUE(campus=self.campus,
                                            building=self.building,
                                            unit=u,
                                            path="",
                                            point="all")
        self.device_list.append(device_topic)
        self.publish_list.append("/".join([self.campus, self.building, u]))
        self.device_topic_dict.update({device_topic: u})
        self.master_devices.append(u)
        # loop over subdevices and add them
        if "subdevices" in self.units[u]:
            for sd in self.units[u]["subdevices"]:
                has_zone_information = True
                subdevice_topic = topics.DEVICES_VALUE(
                    campus=self.campus,
                    building=self.building,
                    unit=u,
                    path=sd,
                    point="all")
                self.device_list.append(subdevice_topic)
                sd_string = u + "/" + sd
                self.master_devices.append(sd_string)
                self.device_topic_dict.update({subdevice_topic: sd_string})
    if not has_zone_information:
        _log.warning(
            "subdevice (VAV zone information) is missing from device unit configuration for {}"
            .format(self.core.identity))
        self.core.stop()
        return  # FIX: skip initialization once a stop has been requested.
    self.initialize_devices()
def on_subscribe(self):
    """Build device topics from the configured campus/building/units
    and subscribe to their "all" publications.

    :return: None
    """
    campus = self.device["campus"]
    building = self.device["building"]
    device_config = self.device["unit"]
    self.publish_topics = "/".join([self.analysis_name, campus, building])
    multiple_devices = isinstance(device_config, dict)
    self.command_devices = device_config.keys()
    try:
        for name in device_config:
            topic = topics.DEVICES_VALUE(campus=campus,
                                         building=building,
                                         unit=name,
                                         path="",
                                         point="all")
            self.device_topic_list.update({topic: name})
            self.device_name.append(name)
    except Exception as e:
        _log.error('Error configuring signal: {}'.format(e))
    try:
        for device in self.device_topic_list:
            _log.info("Subscribing to " + device)
            self.vip.pubsub.subscribe(peer="pubsub",
                                      prefix=device,
                                      callback=self.on_data)
    except Exception as e:
        _log.error('Error configuring signal: {}'.format(e))
        _log.error(
            "Missing {} data to execute the AIRx process".format(device))
def publish_data_or_heartbeat(self):
    """Publish the next CSV row of the source file as a device "all"
    message, or publish a heartbeat once the file is exhausted.

    FIX: replaced Python-2-only ``xrange`` and the ``print`` statement
    with ``enumerate`` and ``print()`` (works on both 2 and 3); the
    header/row pairing and the IndexError on a short row are unchanged.
    """
    published_data = {}
    now = datetime.datetime.now().isoformat(' ')
    if not self._src_file_handle.closed:
        line = self._src_file_handle.readline()
        line = line.strip()
        data = line.split(',')
        if line:
            # Create 'all' message keyed by the stored header names.
            for i, header in enumerate(self._headers):
                published_data[header] = data[i]
            all_data = json.dumps(published_data)
            print(all_data)
            # Pushing out the data
            self.publish(topics.DEVICES_VALUE(point='all', **rtu_path),
                         {HEADER_NAME_CONTENT_TYPE: MIME_PLAIN_TEXT,
                          HEADER_NAME_DATE: now},
                         all_data)
        else:
            # Blank line == end of data; close so the next call
            # switches to heartbeat mode.
            self._src_file_handle.close()
    else:
        # file is closed -> publish heartbeat
        self.publish('heartbeat/DataPublisher',
                     {
                         'AgentID': self._agent_id,
                         HEADER_NAME_CONTENT_TYPE: MIME_PLAIN_TEXT,
                         HEADER_NAME_DATE: now,
                     },
                     now)
def uncontrol_agent(config_path, **kwargs):
    """Parses the uncontrollable load agent configuration and returns an
    instance of the agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    """
    _log.debug("Starting the uncontrol agent")
    try:
        config = utils.load_config(config_path)
    # FIX: StandardError was removed in Python 3; Exception preserves
    # the "fall back to defaults on any load failure" intent.
    except Exception:
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    agent_name = config.get("agent_name", "uncontrol")
    base_name = config.get('market_name', 'electric')
    market_name = []
    q_uc = []
    # One market name and one fixed power value per hour of the day.
    for i in range(24):
        market_name.append('_'.join([base_name, str(i)]))
        q_uc.append(float(config.get("power_" + str(i), 0)))
    verbose_logging = config.get('verbose_logging', True)
    building_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                          building=config.get("building", ""),
                                          unit=None,
                                          path="",
                                          point="all")
    devices = config.get("devices")
    return UncontrolAgent(agent_name, market_name, verbose_logging, q_uc,
                          building_topic, devices, **kwargs)
def send_data(self):
    """Publish a fixed set of RTU temperatures plus the current damper
    position as a JSON 'all' message for the configured device."""
    payload = {
        'ReturnAirTemperature': 55,
        'OutsideAirTemperature': 50,
        'MixedAirTemperature': 45,
        'Damper': self.damper,
    }
    topic = topics.DEVICES_VALUE(point='all', **rtu_path)
    self.publish_ex(topic, {}, ('application/json', jsonapi.dumps(payload)))
def uncontrol_agent(config_path, **kwargs):
    """Parses the uncontrollable load agent configuration and returns an
    instance of the agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    """
    _log.debug("Starting the uncontrol agent")
    try:
        config = utils.load_config(config_path)
    # FIX: StandardError does not exist in Python 3.
    except Exception:
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    agent_name = config.get("agent_name", "uncontrol")
    base_name = config.get('market_name', 'electric')
    market_name = []
    q_uc = []
    price_multiplier = config.get('price_multiplier', 1.0)
    default_min_price = config.get('static_minimum_price', 0.01)
    default_max_price = config.get('static_maximum_price', 100.0)
    market_type = config.get("market_type", "tns")
    single_market_interval = config.get("single_market_interval", 15)
    # Real-time-pricing markets run one market; TNS runs one per hour.
    market_number = 24
    if market_type == "rtp":
        market_number = 1
    for i in range(market_number):
        market_name.append('_'.join([base_name, str(i)]))
    # Power profile is always 24 hourly values regardless of market type.
    for i in range(24):
        q_uc.append(float(config.get("power_" + str(i), 0)))
    verbose_logging = config.get('verbose_logging', True)
    building_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                          building=config.get("building", ""),
                                          unit=None,
                                          path="",
                                          point="all")
    devices = config.get("devices")
    static_price_flag = config.get('static_price_flag', False)
    record_topic = '/'.join(
        ["tnc", config.get("campus", ""), config.get("building", "")])
    sim_flag = config.get("sim_flag", False)
    return UncontrolAgent(agent_name, market_name, single_market_interval,
                          verbose_logging, q_uc, building_topic, devices,
                          price_multiplier, default_min_price,
                          default_max_price, sim_flag, record_topic,
                          static_price_flag, **kwargs)
def on_subscribe(self):
    """Setup the device subscriptions.

    ``self.device`` is either a dict (units mapping to subdevice lists)
    or a flat iterable of unit names; a topic is built for each entry
    and subscribed with ``time_scheduler_handler``.
    """
    # A dict means each device carries subdevices; otherwise it is a
    # plain list of device names.
    multiple_devices = isinstance(self.device, dict)
    if self.device:
        try:
            if multiple_devices:
                # create a device topic list for devices with subdevices
                for name in self.device:
                    for subdevices in self.device[name]:
                        topic = topics.DEVICES_VALUE(campus=self.campus,
                                                     building=self.building,
                                                     unit=name,
                                                     path=subdevices,
                                                     point="all")
                        self.device_topic_list.update({topic: name})
                        self.device_name.append(name)
            else:
                for name in self.device:
                    topic = topics.DEVICES_VALUE(campus=self.campus,
                                                 building=self.building,
                                                 unit=name,
                                                 path="",
                                                 point="all")
                    self.device_topic_list.update({topic: name})
                    self.device_name.append(name)
        except Exception as e:
            _log.error('Error configuring device topic {}'.format(e))
        try:
            # subscribe to each device with self.time_scheduler_handler
            for device in self.device_topic_list:
                _log.info("Subscribing to " + device)
                self.vip.pubsub.subscribe(
                    peer="pubsub",
                    prefix=device,
                    callback=self.time_scheduler_handler)
        except Exception as e:
            _log.error('Error configuring signal: {}'.format(e))
            _log.error(
                "Missing {} data to execute the AIRx process".format(device))
def ahu_agent(config_path, **kwargs):
    """Parses the ahu_agent configuration and returns an instance of the
    agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    """
    try:
        config = utils.load_config(config_path)
    # FIX: StandardError does not exist in Python 3.
    except Exception:
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    air_market_base_name = config.get("air_market_name", "air")
    electric_market_base_name = config.get("electric_market_name", "electric")
    air_market_name = []
    electric_market_name = []
    # One air and one electric market per hour of the day.
    for i in range(24):
        air_market_name.append('_'.join([air_market_base_name, str(i)]))
        electric_market_name.append('_'.join(
            [electric_market_base_name, str(i)]))
    agent_name = config.get('agent_name')
    # Fan power curve coefficients and chiller coefficient of performance.
    c0 = config.get('c0')
    c1 = config.get('c1')
    c2 = config.get('c2')
    c3 = config.get('c3')
    COP = config.get('COP')
    mDot = config.get('mDot', 0)
    CAV_flag = config.get('CAV_flag', 0)
    sim_flag = config.get('sim_flag', False)
    power_unit = config.get('power_unit', 'kW')
    device_points = config.get("device_points")
    device_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                        building=config.get("building", ""),
                                        unit=config.get("device", ""),
                                        path="",
                                        point="all")
    verbose_logging = config.get('verbose_logging', True)
    sat_setpoint = config.get('sat_setpoint', 12.8)
    economizer_limit = config.get('economizer_limit', 18.33)
    has_economizer = config.get('has_economizer', True)
    tset_avg = config.get('average_zone_setpoint', 22.8)
    min_oaf = config.get('minimum_oaf', 0.15)
    return AHUAgent(air_market_name, electric_market_name, agent_name, mDot,
                    CAV_flag, device_topic, c0, c1, c2, c3, COP, power_unit,
                    verbose_logging, device_points, sim_flag,
                    air_market_base_name, electric_market_base_name,
                    sat_setpoint, has_economizer, economizer_limit, tset_avg,
                    min_oaf, **kwargs)
def light_agent(config_path, **kwargs):
    """Parses the lighting agent configuration and returns an instance of
    the agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    :raises KeyError: if ``lighting_level_stpt`` is absent from the config
        (it is a required key with no sensible default).
    """
    try:
        config = utils.load_config(config_path)
    # FIX: StandardError does not exist in Python 3.
    except Exception:
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    base_name = config.get("market_name", "electric")
    market_name = []
    for i in range(24):
        market_name.append('_'.join([base_name, str(i)]))
    agent_name = config.get('agent_name', "lighting")
    default_occ_lighting_level = config.get('default_dimming_level', 0.)
    min_occupied_lighting_level = config.get("min_occupied_lighting_level",
                                             70.0)
    heartbeat_period = config.get('heartbeat_period', 3600)
    power_absnom = config.get('Pabsnom', 0.)
    non_responsive = config.get('non_responsive', False)
    # Occupancy schedule is driven off another device's fan status point.
    schedule_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                          building=config.get("building", ""),
                                          unit=config.get(
                                              "schedule_device", ""),
                                          path=config.get("schedule_path", ""),
                                          point="all")
    schedule_point = config.get("schedule_point", "SupplyFanStatus")
    # Intentionally a required key — raises KeyError when missing.
    lighting_setpoint = config["lighting_level_stpt"]
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get("campus", ""),
                                           building=config.get("building", ""),
                                           unit=config.get("device", ""),
                                           path=config.get("path", ""),
                                           point=lighting_setpoint)
    actuator = config.get("actuator", "platform.actuator")
    verbose_logging = config.get('verbose_logging', True)
    return LightAgent(market_name, agent_name, min_occupied_lighting_level,
                      default_occ_lighting_level, power_absnom,
                      non_responsive, verbose_logging, base_rpc_path,
                      schedule_topic, schedule_point, actuator,
                      heartbeat_period, **kwargs)
def configure(self, config_name, action, contents):
    """The main configuration callback.

    Reads the diagnostic configuration, builds the per-device topics,
    subscribes to them, and schedules the realtime diagnostics run.
    """
    _log.info('Received configuration {} signal: {}'.format(
        action, config_name))
    self.current_config = self.default_config.copy()
    self.current_config.update(contents)
    campus = self.current_config.get("campus", "")
    building = self.current_config.get("building", "")
    self.device_list = self.current_config.get("device", [])
    self.station_code = self.current_config.get("station_code", "")
    self.run_schedule = self.current_config.get("run_schedule", "")
    self.timezone = self.current_config.get("timezone", "")
    self.device_point_name = self.current_config.get(
        "device_point_name", [])
    self.weather_point_name = self.current_config.get(
        "weather_point_name", [])
    self.sensors_condition = self.current_config.get(
        "sensors_condition", {})
    # FIX: the original referenced self.campus/self.building, which are
    # never assigned in this method (only the locals above are) and
    # would raise AttributeError.
    self.publish_topics = "/".join(["Diagnosis", campus, building])
    self.headers = {
        "Accept": "application/json",
        "Accept-Language": "en-US"
    }
    try:
        for device_name in self.device_list:
            device_topic = topics.DEVICES_VALUE(campus=campus,
                                                building=building,
                                                unit=device_name,
                                                path="",
                                                point="all")
            self.device_topic_list.update({device_topic: device_name})
            self.device_name.append(device_name)
    except Exception as e:
        _log.error('Error configuring signal: {}'.format(e))
    for device in self.device_topic_list:
        _log.info("Subscribing to " + device)
        try:
            self.vip.pubsub.subscribe(peer="pubsub",
                                      prefix=device,
                                      callback=self.on_data)
        except Exception as e:
            _log.error('Error configuring signal: {}'.format(e))
    self.core.schedule(cron(self.run_schedule),
                       self.run_diagnostics_realtime)
def configure(self, config_name, action, contents):
    """The main configuration callback.

    Loads schedule/diagnostic parameters, builds the per-unit device
    topics, and schedules the run: every minute on weekends and US
    holidays, otherwise on the configured cron schedule.
    """
    _log.info('Received configuration {} signal: {}'.format(
        action, config_name))
    self.current_config = self.default_config.copy()
    self.current_config.update(contents)
    self.analysis_name = self.current_config.get("analysis_name")
    self.schedule_time = self.current_config.get("schedule_time")
    self.device = self.current_config.get("device")
    self.mht = self.current_config.get("mht")
    self.excess_operation = self.current_config.get("excess_operation")
    self.interval = self.current_config.get("interval")
    self.timezone = self.current_config.get("timezone")
    self.condition_list = self.current_config.get("condition_list", {})
    self.device_true_time = 0
    campus = self.device["campus"]
    building = self.device["building"]
    device_config = self.device["unit"]
    self.publish_topics = "/".join([self.analysis_name, campus, building])
    multiple_devices = isinstance(device_config, dict)
    self.command_devices = device_config.keys()
    try:
        for device_name in device_config:
            device_topic = topics.DEVICES_VALUE(campus=campus,
                                                building=building,
                                                unit=device_name,
                                                path="",
                                                point="all")
            self.device_topic_list.update({device_topic: device_name})
            self.device_name.append(device_name)
    except Exception as e:
        _log.error('Error configuring signal: {}'.format(e))
    date_today = datetime.utcnow().astimezone(
        dateutil.tz.gettz(self.timezone))
    _log.debug(date_today)  # replaced stray debug print with logging
    # FIX: the original condition was `weekday() == 5 and 6`, which
    # parses as `(weekday() == 5) and 6` and never matched Sunday.
    # Also generalized the hard-coded holidays year (2020) to the
    # current year so the holiday check keeps working.
    if (date_today in holidays.US(years=[date_today.year])
            or date_today.weekday() in (5, 6)):
        schedule_time = "* * * * *"
        self.core.schedule(cron(schedule_time), self.run_schedule)
    else:
        self.core.schedule(cron(self.schedule_time), self.run_schedule)
def remove_driver(self, config_name, action, contents):
    """Tear down the subscription and topic bookkeeping for a device
    whose driver configuration has been removed."""
    topic = self.derive_device_topic(config_name)
    parts = topic.split('/', 2)
    # A topic without at least campus/building has nothing to remove.
    if len(parts) < 2:
        return
    campus = parts[0]
    building = parts[1]
    unit = parts[2] if len(parts) > 2 else ""
    device_topic = topics.DEVICES_VALUE(campus=campus,
                                        building=building,
                                        unit=unit,
                                        path="",
                                        point="all")
    self.site_topic_dict.pop(device_topic, None)
    self.device_topic_dict.pop(device_topic, None)
    self.unsubscribe_from_device(device_topic)
def meter_agent(config_path, **kwargs):
    """Parses the Electric Meter Agent configuration and returns an
    instance of the agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    """
    _log.debug("Starting MeterAgent")
    try:
        config = utils.load_config(config_path)
    # FIX: StandardError does not exist in Python 3.
    except Exception:
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    agent_name = config.get("agent_name", "meter")
    market_name = config.get('market_name', 'electric')
    price = config.get('price', 55)
    price_file = config.get('price_file', None)
    if price_file is not None:
        # FIX: use a context manager so the file handle is closed even
        # if readlines() raises (the original leaked on error).
        with open(price_file, 'r') as f:
            prices = f.readlines()
    else:
        prices = None
    demand_limit = config.get('demand_limit', False)
    demand_limit_threshold = config.get("demand_limit_threshold", None)
    verbose_logging = config.get('verbose_logging', True)
    building_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                          building=config.get("building", ""),
                                          unit=None,
                                          path="",
                                          point="all")
    devices = config.get("devices")
    return MeterAgent(agent_name, market_name, price, prices,
                      verbose_logging, demand_limit, demand_limit_threshold,
                      building_topic, devices, **kwargs)
def ahu_agent(config_path, **kwargs):
    """Parses the ahu_agent configuration and returns an instance of the
    agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    """
    try:
        config = utils.load_config(config_path)
    # FIX: StandardError does not exist in Python 3.
    except Exception:
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    air_market_name = config.get('air_market_name', 'air')
    electric_market_name = config.get('electric_market_name', 'electric')
    agent_name = config.get('agent_name')
    # Fan power curve coefficients and chiller coefficient of performance.
    c0 = config.get('c0')
    c1 = config.get('c1')
    c2 = config.get('c2')
    c3 = config.get('c3')
    COP = config.get('COP')
    sim_flag = config.get('sim_flag', False)
    device_points = config.get("device_points")
    device_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                        building=config.get("building", ""),
                                        unit=config.get("device", ""),
                                        path="",
                                        point="all")
    verbose_logging = config.get('verbose_logging', True)
    return AHUAgent(air_market_name, electric_market_name, agent_name,
                    device_topic, c0, c1, c2, c3, COP, verbose_logging,
                    device_points, sim_flag, **kwargs)
def rtu_agent(config_path, **kwargs):
    """Parses the RTU agent configuration and returns an instance of the
    agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Market Service Agent
    :rtype: MarketServiceAgent
    """
    try:
        config = utils.load_config(config_path)
    # FIX: StandardError does not exist in Python 3.
    except Exception:
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    base_name = config.get('market_name', 'electric')
    market_name = []
    # One electric market per hour of the day.
    for i in range(24):
        market_name.append('_'.join([base_name, str(i)]))
    # Thermal model coefficients.
    c1 = config.get('c1')
    c2 = config.get('c2')
    c3 = config.get('c3')
    c = config.get('c')
    heartbeat_period = config.get('heartbeat_period', 300)
    hvac_avail = config.get("occupancy_schedule")
    tMinAdj = config.get('tMin', 0)
    tMaxAdj = config.get('tMax', 0)
    sim_flag = config.get('sim_flag', False)
    tIn = config.get('tIn', 0)
    Qrate = config.get("Qrate", 0)
    agent_name = config.get('agent_name')
    actuator = config.get('actuator', 'platform.actuator')
    mode = config.get('mode')
    device_points = config.get("device_points")
    setpoint = config.get('setpoint')
    # NOTE(review): falls back to agent_name when "building" is absent —
    # looks intentional but worth confirming against callers.
    activate_topic = "/".join([config.get("building", agent_name),
                               "actuate"])
    setpoint_mode = config.get("setpoint_mode", 0)
    price_multiplier = config.get('price_multiplier', 2)
    default_min_price = config.get('default_min_price', 0.01)
    default_max_price = config.get('default_max_price', 0.1)
    tMinUnoc = config.get('tMinUnoc', 66.0)
    device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                        building=config.get('building', ''),
                                        unit=config.get('device', ''),
                                        path='',
                                        point='all')
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get("campus", ""),
                                           building=config.get("building", ""),
                                           unit=config.get("device", ""),
                                           path="",
                                           point=setpoint)
    verbose_logging = config.get('verbose_logging', True)
    return RTUAgent(market_name, agent_name, c1, c2, c3, c, tMinAdj,
                    tMaxAdj, tMinUnoc, tIn, Qrate, verbose_logging,
                    device_topic, hvac_avail, device_points, base_rpc_path,
                    activate_topic, actuator, mode, setpoint_mode, sim_flag,
                    heartbeat_period, price_multiplier, default_min_price,
                    default_max_price, **kwargs)
class Agent(PublishMixin, BaseAgent):
    '''Agent listens to message bus device and runs when data is published.

    Python 2 driven-application agent: feeds each device "all" message to
    the module-level ``app_instance``, writes table results to
    ``output_file`` / the message bus, and (when ``mode`` is set) pushes
    the application's commands to the device through the actuator agent.
    Relies on module globals: ``output_file``, ``device``, ``config``,
    ``agent_id``, ``actuator_id``, ``mode``, ``app_instance``.
    '''

    def __init__(self, **kwargs):
        super(Agent, self).__init__(**kwargs)
        self._update_event = None
        self._update_event_time = None
        # Pending command point names; None means no schedule in flight.
        self.keys = None
        self._device_states = {}
        self._kwargs = kwargs
        # Latest point->value commands produced by the application.
        self.commands = {}
        self.current_point = None
        self.current_key = None
        self.received_input_datetime = None
        if output_file != None:
            # Truncate any previous output file so this run starts fresh.
            with open(output_file, 'w') as writer:
                writer.close()
        self._header_written = False

    @matching.match_exact(topics.DEVICES_VALUE(point='all', **device))
    def on_received_message(self, topic, headers, message, matched):
        '''Subscribe to device data and convert data to correct type
        for the driven application.
        '''
        _log.debug("Message received")
        _log.debug("MESSAGE: " + jsonapi.dumps(message[0]))
        _log.debug("TOPIC: " + topic)
        data = jsonapi.loads(message[0])
        #TODO: grab the time from the header if it's there or use now if not
        self.received_input_datetime = datetime.utcnow()
        results = app_instance.run(self.received_input_datetime, data)
        self._process_results(results)

    def _process_results(self, results):
        '''Run driven application with converted data and write the app
        results to a file or database.

        ``results`` carries ``commands`` (point->value), ``log_messages``
        and ``table_output`` (table name -> list of row dicts).
        '''
        _log.debug('Processing Results!')
        for key, value in results.commands.iteritems():
            _log.debug("COMMAND: {}->{}".format(key, value))
        for value in results.log_messages:
            _log.debug("LOG: {}".format(value))
        for key, value in results.table_output.iteritems():
            _log.debug("TABLE: {}->{}".format(key, value))
        # publish to output file if available.
        if output_file != None:
            if len(results.table_output.keys()) > 0:
                for _, v in results.table_output.items():
                    fname = output_file  # +"-"+k+".csv"
                    for r in v:
                        with open(fname, 'a+') as f:
                            keys = r.keys()
                            fout = csv.DictWriter(f, keys)
                            # CSV header is written once per agent run.
                            if not self._header_written:
                                fout.writeheader()
                                self._header_written = True
                            # if not header_written:
                            #    fout.writerow(keys)
                            fout.writerow(r)
                            f.close()
        # publish to message bus.
        if len(results.table_output.keys()) > 0:
            now = utils.format_timestamp(self.received_input_datetime)
            headers = {
                headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                headers_mod.DATE: now,
                headers_mod.TIMESTAMP: now
            }
            for _, v in results.table_output.items():
                for r in v:
                    for key, value in r.iteritems():
                        # Booleans are published as 0/1.
                        if isinstance(value, bool):
                            value = int(value)
                        topic = topics.ANALYSIS_VALUE(
                            point=key, **
                            config['device'])  #.replace('{analysis}', key)
                        #print "publishing {}->{}".format(topic, value)
                        self.publish_json(topic, headers, value)
        # Only actuate when a mode is configured.
        if results.commands and mode:
            self.commands = results.commands
            if self.keys is None:
                self.keys = self.commands.keys()
            self.schedule_task()

    def schedule_task(self):
        '''Schedule access to modify device controls.

        Requests a 300-second LOW-priority actuator reservation for the
        configured device; the response arrives in schedule_result().
        '''
        _log.debug('Schedule Device Access')
        headers = {
            'type': 'NEW_SCHEDULE',
            'requesterID': agent_id,
            'taskID': actuator_id,
            'priority': 'LOW'
        }
        start = datetime.now()
        end = start + td(seconds=300)
        start = str(start)
        end = str(end)
        _log.debug("{campus}/{building}/{unit}".format(**device))
        self.publish_json(
            topics.ACTUATOR_SCHEDULE_REQUEST(), headers,
            [["{campus}/{building}/{unit}".format(**device), start, end]])

    def command_equip(self):
        '''Execute commands on configured device.

        Sends the first pending command; the remaining keys are drained
        one at a time from on_set_result()/on_set_error().
        '''
        self.current_key = self.keys[0]
        value = self.commands[self.current_key]
        headers = {
            'Content-Type': 'text/plain',
            'requesterID': agent_id,
        }
        self.publish(topics.ACTUATOR_SET(point=self.current_key, **device),
                     headers, str(value))

    @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
    @matching.match_exact(topics.ACTUATOR_SCHEDULE_RESULT())
    def schedule_result(self, topic, headers, message, match):
        '''Actuator schedule response (FAILURE, SUCCESS); on success,
        start issuing the pending commands.'''
        print 'Actuator Response'
        msg = jsonapi.loads(message[0])
        msg = msg['result']
        _log.debug('Schedule Device ACCESS')
        if self.keys:
            if msg == "SUCCESS":
                self.command_equip()
            elif msg == "FAILURE":
                print 'auto correction failed'
                _log.debug('Auto-correction of device failed.')

    @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
    @matching.match_glob(topics.ACTUATOR_VALUE(point='*', **device))
    def on_set_result(self, topic, headers, message, match):
        '''Setting of point on device was successful; send the next
        pending command or release the device reservation.'''
        print('Set Success: {point} - {value}'.format(
            point=self.current_key,
            value=str(self.commands[self.current_key])))
        _log.debug('set_point({}, {})'.format(
            self.current_key, self.commands[self.current_key]))
        self.keys.remove(self.current_key)
        if self.keys:
            self.command_equip()
        else:
            print 'Done with Commands - Release device lock.'
            headers = {
                'type': 'CANCEL_SCHEDULE',
                'requesterID': agent_id,
                'taskID': actuator_id
            }
            self.publish_json(topics.ACTUATOR_SCHEDULE_REQUEST(), headers,
                              {})
            self.keys = None

    @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
    @matching.match_glob(topics.ACTUATOR_ERROR(point='*', **device))
    def on_set_error(self, topic, headers, message, match):
        '''Setting of point on device failed, log failure message and
        continue with the remaining commands (or release the lock).'''
        print 'Set ERROR'
        msg = jsonapi.loads(message[0])
        msg = msg['type']
        _log.debug('Actuator Error: ({}, {}, {})'.format(
            msg, self.current_key, self.commands[self.current_key]))
        self.keys.remove(self.current_key)
        if self.keys:
            self.command_equip()
        else:
            headers = {
                'type': 'CANCEL_SCHEDULE',
                'requesterID': agent_id,
                'taskID': actuator_id
            }
            self.publish_json(topics.ACTUATOR_SCHEDULE_REQUEST(), headers,
                              {})
            self.keys = None
class Agent(PublishMixin, BaseAgent):
    '''AFDD economizer diagnostic agent.

    Ingests RTU data either from a CSV file or from the message bus,
    aggregates it (hourly at most), runs six outdoor-air-economizer (OAE)
    diagnostics plus an energy-impact estimate, and writes the results via
    result_writer().  Missing/invalid samples are encoded as -99 throughout.
    '''

    def __init__(self, **kwargs):
        '''Input and initialize user configurable parameters.'''
        super(Agent, self).__init__(**kwargs)
        self.agent_id = config_data.get('agentid')
        self.aggregate_data = int(config_data["aggregate_data"])
        self.matemp_missing = int(config_data["matemp_missing"])
        # Valid sensor ranges used by the sensor diagnostic (OAE1).
        self.mat_low = float(config_data["mat_low"])
        self.mat_high = float(config_data["mat_high"])
        self.oat_low = float(config_data["oat_low"])
        self.oat_high = float(config_data["oat_high"])
        self.rat_low = float(config_data["rat_low"])
        self.rat_high = float(config_data["rat_high"])
        self.high_limit = float(config_data["high_limit"])
        self.oae2_damper_threshold = float(
            config_data["oae2_damper_threshold"])
        self.oae2_oaf_threshold = float(config_data["oae2_oaf_threshold"])
        # economizer_type: 0 -> differential dry-bulb, 1 -> high-limit
        # (inferred from how it is compared in the diagnostics; confirm).
        self.economizer_type = int(config_data["economizer_type"])
        self.damper_minimum = float(config_data["damper_minimum"])
        self.minimum_oa = float(config_data["minimum_oa"])
        self.oae4_oaf_threshold = float(config_data["oae4_oaf_threshold"])
        self.oae5_oaf_threshold = float(config_data["oae5_oaf_threshold"])
        self.eer = float(config_data["EER"])
        tonnage = float(config_data["tonnage"])
        # Rule-of-thumb supply airflow: 300 cfm per ton of cooling.
        self.cfm = 300 * tonnage
        self.csv_input = int(config_data["csv_input"])
        self.timestamp_name = config_data.get('timestamp_name')
        self.input_file = config_data.get('input_file', 'CONFIG_ERROR')
        # Point-name mappings used to pull columns/fields from the data.
        self.oat_name = config_data.get('oat_point_name')
        self.rat_name = config_data.get('rat_point_name')
        self.mat_name = config_data.get('mat_point_name')
        self.sampling_rate = config_data.get('sampling_rate')
        self.fan_status_name = config_data.get('fan_status_point_name')
        self.cool_cmd_name = config_data.get('cool_cmd_name')
        self.heat_cmd_name = config_data.get('heat_cmd_name')
        self.damper_name = config_data.get('damper_point_name')
        self.mat_missing = config_data.get('mixed_air_sensor_missing')
        self.temp_deadband = config_data.get('temp_deadband')
        self.damper_deadband = config_data.get('damper_deadband')
        # NOTE(review): datahandler() references self.coolcmd1_name and
        # self.heat_cmd1_name, which are never assigned here — only
        # self.cool_cmd_name / self.heat_cmd_name are.  Looks like a latent
        # AttributeError on the message-bus path; confirm against callers.
        sunday = config_data.get('Sunday')
        monday = config_data.get('Monday')
        tuesday = config_data.get('Tuesday')
        wednesday = config_data.get('Wednesday')
        thursday = config_data.get('Thursday')
        friday = config_data.get('Friday')
        saturday = config_data.get('Saturday')
        # Occupancy schedule keyed by weekday index used in
        # schedule_diagnostic(); each value is presumably a (start, end)
        # hour pair — confirm against configuration.
        self.schedule_dict = dict({
            0: sunday,
            1: monday,
            2: tuesday,
            3: wednesday,
            4: thursday,
            5: friday,
            6: saturday
        })
        # *_raw lists buffer incoming samples; the unsuffixed lists hold the
        # aggregated series consumed by the diagnostics.
        self.oaf_raw = []
        self.timestamp_raw = []
        self.matemp_raw = []
        self.oatemp_raw = []
        self.ratemp_raw = []
        self.compressor_raw = []
        self.heating_raw = []
        self.damper_raw = []
        self.fan_status_raw = []
        self.oaf = []
        self.timestamp = []
        self.matemp = []
        self.oatemp = []
        self.ratemp = []
        self.compressor = []
        self.heating = []
        self.damper = []
        self.fan_status = []
        self.run_aggregate = None
        # Column names passed to read_oae_pandas() when reading CSV input.
        self.names = [
            config_data.get('oat_point_name'),
            config_data.get('mat_point_name'),
            config_data.get('dat_point_name'),
            config_data.get('rat_point_name'),
            config_data.get('damper_point_name'),
            config_data.get('cool_cmd_name'),
            config_data.get('fan_status_point_name'),
            config_data.get('heat_cmd_name')
        ]
        self.file = config_data.get('input_file')

    def setup(self):
        '''Enter location for the data file if using text csv.

        Entry can be through file entry window using TKinter or
        through configuration file as input_file.
        '''
        try:
            super(Agent, self).setup()
            _log.info('Running')
            if self.csv_input:
                self.file_path = open_file()
                if self.file_path == '':
                    _log.info('No csv file not found ...')
                    return
                if (self.file_path == 'File Selected is not a csv'
                        or not self.file_path.endswith('.csv')):
                    _log.info('File must be in CSV format.')
                    return
                if self.input_file == "CONFIG_ERROR":
                    _log.info(
                        'Check configuration file and add input_file parameter '
                        'as file path to data file')
                    return
                # Fall back to the configured file when no path was chosen.
                if self.file_path is None:
                    self.file_path = self.file
                self.bldg_data = read_oae_pandas(self.file_path, self.names)
                self.process_data()
        except Exception:
            _log.exception('Error on data input, could not data file...')

    def process_data(self):
        '''Aggregate the data based on compressor status, heating status,
        and supply-fan status where one hour is the largest aggregated
        interval.
        '''
        _log.info('Processing data')
        timestamp = []
        # Select the data source: CSV columns or the raw message-bus buffers.
        if self.csv_input:
            timestamp_ = self.bldg_data[self.timestamp_name].tolist()
            matemp = self.bldg_data[self.mat_name].tolist()
            oatemp = self.bldg_data[self.oat_name].tolist()
            ratemp = self.bldg_data[self.rat_name].tolist()
            compressor = self.bldg_data[self.cool_cmd_name].tolist()
            heating = self.bldg_data[self.heat_cmd_name].tolist()
            damper = self.bldg_data[self.damper_name].tolist()
            fan_status = self.bldg_data[self.fan_status_name].tolist()
        else:
            timestamp_ = self.timestamp_raw
            matemp = self.matemp_raw
            oatemp = self.oatemp_raw
            ratemp = self.ratemp_raw
            compressor = self.compressor_raw
            heating = self.heating_raw
            damper = self.damper_raw
            fan_status = self.fan_status_raw
        for item in timestamp_:
            timestamp.append(dateutil.parser.parse(item, fuzzy=True))
        if self.aggregate_data:
            # Accumulate samples until an hour boundary, a compressor/heating
            # state change, or a sampling gap, then emit the mean of the
            # accumulated window (zeros are dropped before averaging).
            temp_damper = []
            temp_mat = []
            temp_oat = []
            temp_rat = []
            for points in xrange(0, len(timestamp) - 1):
                temp_damper.append(damper[points])
                temp_oat.append(oatemp[points])
                temp_mat.append(matemp[points])
                temp_rat.append(ratemp[points])
                if timestamp[points].hour != timestamp[points + 1].hour:
                    # Hour rollover: stamp the aggregate at the top of the
                    # next hour.
                    self.timestamp.append(
                        (timestamp[points] +
                         datetime.timedelta(hours=1)).replace(minute=0))
                    temp_oat[:] = (value for value in temp_oat if value != 0)
                    temp_rat[:] = (value for value in temp_rat if value != 0)
                    temp_mat[:] = (value for value in temp_mat if value != 0)
                    self.damper.append(numpy.mean(temp_damper))
                    self.oatemp.append(numpy.mean(temp_oat))
                    self.matemp.append(numpy.mean(temp_mat))
                    self.ratemp.append(numpy.mean(temp_rat))
                    self.compressor.append(compressor[points])
                    self.fan_status.append(fan_status[points])
                    self.heating.append(heating[points])
                    temp_damper = []
                    temp_mat = []
                    temp_oat = []
                    temp_rat = []
                elif (compressor[points + 1] != compressor[points]
                      or heating[points + 1] != heating[points]
                      or ((timestamp[points + 1] - timestamp[points] >
                           datetime.timedelta(minutes=self.sampling_rate)))):
                    # Operating-state change or data gap: close the window at
                    # the current sample's timestamp.
                    self.timestamp.append(timestamp[points])
                    temp_oat[:] = (value for value in temp_oat if value != 0)
                    temp_rat[:] = (value for value in temp_rat if value != 0)
                    temp_mat[:] = (value for value in temp_mat if value != 0)
                    self.damper.append(numpy.mean(temp_damper))
                    self.oatemp.append(numpy.mean(temp_oat))
                    self.matemp.append(numpy.mean(temp_mat))
                    self.ratemp.append(numpy.mean(temp_rat))
                    self.compressor.append(compressor[points])
                    self.fan_status.append(fan_status[points])
                    self.heating.append(heating[points])
                    temp_damper = []
                    temp_mat = []
                    temp_oat = []
                    temp_rat = []
                if (points == len(timestamp) - 2 and not temp_oat):
                    # Tail case: the final sample starts a fresh window, so
                    # emit it on its own.
                    temp_damper.append(damper[points + 1])
                    temp_oat.append(oatemp[points + 1])
                    temp_mat.append(matemp[points + 1])
                    temp_rat.append(ratemp[points + 1])
                    self.timestamp.append(timestamp[points + 1])
                    temp_oat[:] = (value for value in temp_oat if value != 0)
                    temp_rat[:] = (value for value in temp_rat if value != 0)
                    temp_mat[:] = (value for value in temp_mat if value != 0)
                    self.damper.append(numpy.mean(temp_damper))
                    self.oatemp.append(numpy.mean(temp_oat))
                    self.matemp.append(numpy.mean(temp_mat))
                    self.ratemp.append(numpy.mean(temp_rat))
                    self.compressor.append(compressor[points + 1])
                    self.fan_status.append(fan_status[points + 1])
                    self.heating.append(heating[points + 1])
                    temp_damper = []
                    temp_mat = []
                    temp_oat = []
                    temp_rat = []
        else:
            # No aggregation requested: use the series as-is.
            self.matemp = matemp
            self.oatemp = oatemp
            self.ratemp = ratemp
            self.compressor = compressor
            self.heating = heating
            self.damper = damper
            self.fan_status = fan_status
        # Clear the raw buffers now that they have been consumed.
        self.oaf_raw = []
        self.timestamp_raw = []
        self.matemp_raw = []
        self.oatemp_raw = []
        self.ratemp_raw = []
        self.compressor_raw = []
        self.heating_raw = []
        self.damper_raw = []
        self.fan_status_raw = []
        self.newdata = len(self.timestamp)

        def check_nan(data):
            '''check for any nan values in data.'''
            # NaNs are replaced in place with the -99 missing-data sentinel.
            length = len(data)
            for x in xrange(0, length):
                if math.isnan(data[x]):
                    data[x] = -99
            return data

        self.matemp = check_nan(self.matemp)
        self.oatemp = check_nan(self.oatemp)
        self.ratemp = check_nan(self.ratemp)
        self.compressor = check_nan(self.compressor)
        self.heating = check_nan(self.heating)
        self.damper = check_nan(self.damper)
        self.fan_status = check_nan(self.fan_status)
        self.oaf = self.calculate_oaf()
        #self.output_aggregate()
        _log.info('Performing Diagnostic')
        oae_1 = self.sensor_diagnostic()
        oae_2 = self.economizer_diagnostic1()
        oae_3 = self.economizer_diagnostic2()
        oae_4 = self.excess_oa_intake()
        oae_5 = self.insufficient_ventilation()
        oae_6 = self.schedule_diagnostic()
        energy_impact = self.calculate_energy_impact(oae_2, oae_3, oae_4)
        contents = [
            self.timestamp, oae_1, oae_2, oae_3, oae_4, oae_5, oae_6,
            energy_impact, self.oaf
        ]
        result_writer(contents)

    def output_aggregate(self):
        '''output_aggregate writes the results of the data aggregation to file
        for inspection.
        '''
        # Output lands next to this module as Aggregate_Data(<date>).csv.
        file_path = inspect.getfile(inspect.currentframe())
        out_dir = os.path.dirname(os.path.realpath(file_path))
        now = datetime.date.today()
        file_path = os.path.join(out_dir,
                                 "Aggregate_Data({ts}).csv".format(ts=now))
        ofile = open(file_path, 'wb')
        x = [
            self.timestamp, self.oatemp, self.matemp, self.ratemp,
            self.damper, self.compressor, self.heating, self.fan_status
        ]
        outs = csv.writer(ofile, dialect='excel')
        # DictWriter is used only to emit the header row; the data rows are
        # written positionally with the plain writer above.
        writer = csv.DictWriter(ofile,
                                fieldnames=[
                                    "Timestamp", "OutsideAirTemp",
                                    "MixedAirTemp", "ReturnAirTemp", "Damper",
                                    "CompressorStatus", "Heating", "FanStatus"
                                ],
                                delimiter=',')
        writer.writeheader()
        for row in itertools.izip_longest(*x):
            outs.writerow(row)
        ofile.close()

    def calculate_oaf(self):
        '''Create OAF vector for data set.

        OAF = (MAT - RAT) / (OAT - RAT); computed only when all three
        temperatures are present, |OAT - RAT| > 4.0, and the fan is on.
        Otherwise the -99 sentinel is appended.
        '''
        for points in xrange(0, self.newdata):
            if (self.matemp[points] != -99 and self.oatemp[points] != -99
                    and self.ratemp[points] != -99
                    and math.fabs(self.oatemp[points] -
                                  self.ratemp[points]) > 4.0
                    and int(self.fan_status[points]) == 1):
                self.oaf.append(
                    (self.matemp[points] - self.ratemp[points]) /
                    (self.oatemp[points] - self.ratemp[points]))
            else:
                self.oaf.append(int(-99))
        return self.oaf

    def sensor_diagnostic(self):
        '''OAE1: temperature-sensor diagnostic.

        Returns one code per aggregated point: 2x fault codes for sensor
        problems, 20 for no fault, 27 for missing data, 29 for unit off.
        '''
        oae1_result = []
        for points in xrange(0, self.newdata):
            if self.fan_status[points] != -99:
                if int(self.fan_status[points]):
                    if (self.matemp[points] != -99
                            and self.ratemp[points] != -99
                            and self.oatemp[points] != -99):
                        if ((int(self.matemp_missing)
                             and int(self.compressor[points])
                             or int(self.heating[points]))):
                            # MAT unreliable while conditioning is active.
                            oae1_result.append(22)
                        elif (self.matemp[points] < self.mat_low
                              or self.matemp[points] > self.mat_high):
                            # Temperature sensor problem detected (fault).
                            oae1_result.append(23)
                        elif (self.ratemp[points] < self.rat_low
                              or self.ratemp[points] > self.rat_high):
                            # Temperature sensor problem detected (fault).
                            oae1_result.append(24)
                        elif (self.oatemp[points] < self.oat_low
                              or self.oatemp[points] > self.oat_high):
                            # Temperature sensor problem detected (fault).
                            oae1_result.append(25)
                        elif ((self.matemp[points] > self.ratemp[points] and
                               self.matemp[points] > self.oatemp[points]) or
                              (self.matemp[points] < self.ratemp[points] and
                               self.matemp[points] < self.oatemp[points])):
                            # MAT outside the OAT..RAT envelope:
                            # temperature sensor problem detected (fault).
                            oae1_result.append(21)
                        else:
                            # No faults detected.
                            oae1_result.append(20)
                    else:
                        # Missing required data for diagnostic (No fault).
                        oae1_result.append(27)
                else:
                    # Unit is off (No Fault).
                    oae1_result.append(29)
            else:
                # Missing required data for diagnostic (No fault).
                oae1_result.append(27)
        return oae1_result

    def economizer_diagnostic1(self):
        '''OAE2: economizing-when-it-should diagnostic (codes 30-39).'''
        oae2_result = []
        for points in xrange(0, self.newdata):
            if self.fan_status[points] != -99:
                if self.fan_status[points]:
                    if (self.ratemp[points] != -99
                            and self.oatemp[points] != -99
                            and self.compressor[points] != -99
                            and self.damper[points] != -99):
                        # Conditions favorable for economizing (differential
                        # or high-limit strategy, per economizer_type).
                        if ((self.ratemp[points] - self.oatemp[points] >
                             self.temp_deadband
                             and self.economizer_type == 0.0) or
                            (self.high_limit - self.oatemp[points] >
                             self.temp_deadband
                             and self.economizer_type == 1.0)):
                            if ((100.0 - self.damper[points]) <
                                    self.oae2_damper_threshold):
                                if math.fabs(
                                        self.oatemp[points] -
                                        self.ratemp[points]
                                ) > 5.0 and not self.matemp_missing:
                                    if (1.0 - self.oaf[points] <
                                            self.oae2_oaf_threshold
                                            and self.oaf[points] > 0
                                            and self.oaf[points] < 1.25):
                                        # No fault detected.
                                        oae2_result.append(30)
                                    elif (1.0 - self.oaf[points] >
                                          self.oae2_oaf_threshold
                                          and self.oaf[points] > 0
                                          and self.oaf[points] < 1.25):
                                        # OAF is too low (Fault).
                                        oae2_result.append(32)
                                    else:
                                        # OAF resulted in unexpected value (No fault).
                                        oae2_result.append(38)
                                elif not ((self.heating[points]
                                           and self.compressor[points])
                                          and math.fabs(self.oatemp[points] -
                                                        self.ratemp[points]) >
                                          5.0 and self.matemp_missing):
                                    if (1.0 - self.oaf[points] <
                                            self.oae2_oaf_threshold
                                            and self.oaf[points] > 0
                                            and self.oaf[points] < 1.25):
                                        oae2_result.append(30)
                                    elif (1.0 - self.oaf[points] >
                                          self.oae2_oaf_threshold
                                          and self.oaf[points] > 0
                                          and self.oaf[points] < 1.25):
                                        # OAF is too low when unit is economizing (Fault).
                                        oae2_result.append(32)
                                    else:
                                        oae2_result.append(38)
                                else:
                                    oae2_result.append(36)
                            else:
                                # Damper is not open when conditions are favorable for economizing (Fault).
                                oae2_result.append(33)
                        else:
                            oae2_result.append(31)
                    else:
                        #Missing data (No fault).
                        oae2_result.append(37)
                else:
                    # Supply fan is off (No fault).
                    oae2_result.append(39)
            else:
                oae2_result.append(37)
        return oae2_result

    def economizer_diagnostic2(self):
        '''OAE3: not-economizing-when-it-should-not diagnostic (codes 40-49).'''
        oae3_result = []
        for points in xrange(0, self.newdata):
            if self.fan_status[points] != -99:
                if self.fan_status[points]:
                    if (self.compressor[points] != -99
                            and self.ratemp[points] != -99
                            and self.oatemp[points] != -99
                            and self.damper[points] != -99):
                        # Conditions are NOT favorable for economizing.
                        if ((self.oatemp[points] - self.ratemp[points] >
                             self.temp_deadband
                             and self.economizer_type == 0.0) or
                            (self.oatemp[points] - self.high_limit >
                             self.temp_deadband
                             and self.economizer_type == 1.0)):
                            if (self.compressor[points]):
                                if self.damper[points] <= self.damper_minimum:
                                    # No fault detected.
                                    oae3_result.append(40)
                                else:
                                    # Damper should be at minimum
                                    # for ventilation(Fault).
                                    oae3_result.append(41)
                            else:
                                # Conditions are favorable for economizing
                                oae3_result.append(43)
                        else:
                            if self.damper[points] <= self.damper_minimum:
                                # Damper should be at minimum
                                # for ventilation(Fault).
                                oae3_result.append(41)
                            else:
                                # Missing Data (No fault).
                                oae3_result.append(47)
                    else:
                        # Supply fan is off (No fault).
                        oae3_result.append(49)
                else:
                    # Missing data (No fault).
                    oae3_result.append(47)
        return oae3_result

    def excess_oa_intake(self):
        '''OAE4: excess outdoor-air intake diagnostic (codes 50-59).'''
        oae4_result = []
        for points in xrange(0, self.newdata):
            if self.fan_status[points] != -99:
                if self.fan_status[points]:
                    if (self.compressor[points] != -99
                            and self.oatemp[points] != -99
                            and self.ratemp[points] != -99
                            and self.damper[points] != -99):
                        # Non-economizing conditions only; otherwise the unit
                        # may legitimately be bringing in extra OA.
                        if ((self.oatemp[points] - self.ratemp[points] >
                             self.temp_deadband
                             and self.economizer_type == 0.0) or
                            (self.oatemp[points] - self.high_limit >
                             self.temp_deadband
                             and self.economizer_type == 1.0)):
                            if self.damper[points] <= self.damper_minimum:
                                if (not self.matemp_missing
                                        and math.fabs(self.oatemp[points] -
                                                      self.ratemp[points]) >
                                        5.0):
                                    if ((self.oaf[points] - self.minimum_oa)
                                            < self.oae4_oaf_threshold
                                            and self.oaf[points] > 0
                                            and self.oaf[points] < 1.25):
                                        # No Fault detected.
                                        oae4_result.append(50)
                                    elif ((self.oaf[points] -
                                           self.minimum_oa) >
                                          self.oae4_oaf_threshold
                                          and self.oaf[points] > 0
                                          and self.oaf[points] < 1.25):
                                        # Excess OA intake (Fault).
                                        oae4_result.append(51)
                                    else:
                                        # OAF calculation resulted in unexpected value (No fault).
                                        oae4_result.append(58)
                                elif (not int(self.heating[points])
                                      and not int(self.compressor[points])
                                      and math.fabs(self.oatemp[points] -
                                                    self.ratemp[points]) > 5.0
                                      and self.matemp_missing):
                                    if (self.oaf[points] - self.minimum_oa <
                                            self.oae4_oaf_threshold
                                            and self.oaf[points] > 0
                                            and self.oaf[points] < 1.25):
                                        # No fault detected.
                                        oae4_result.append(50)
                                    elif ((self.oaf[points] -
                                           self.minimum_oa) >
                                          self.oae4_oaf_threshold
                                          and self.oaf[points] > 0
                                          and self.oaf[points] < 1.25):
                                        # The unit is bringing in excess OA (Fault).
                                        oae4_result.append(51)
                                    else:
                                        # OAF calculation resulted in unexpected value (No Fault).
                                        oae4_result.append(58)
                                else:
                                    # Conditions are not favorable for OAF calculation (No Fault).
                                    oae4_result.append(52)
                            else:
                                # Damper is not at minimum (Fault).
                                oae4_result.append(53)
                        else:
                            # Unit may be economizing (No fault).
                            oae4_result.append(56)
                    else:
                        # Missing data (No fault).
                        oae4_result.append(57)
                else:
                    # Supply fan is off (No Fault).
                    oae4_result.append(59)
            else:
                # Missing data (No fault).
                oae4_result.append(57)
        return oae4_result

    def insufficient_ventilation(self):
        '''OAE5: insufficient ventilation diagnostic (codes 60-69).'''
        oae5_result = []
        for points in xrange(0, self.newdata):
            if self.fan_status[points] != -99:
                if int(self.fan_status[points]):
                    if (self.compressor[points] != -99
                            and self.oatemp[points] != -99
                            and self.ratemp[points] != -99
                            and self.damper[points] != -99):
                        if (self.damper_minimum - self.damper[points] <=
                                self.damper_deadband):
                            # NOTE(review): the parenthesis placement makes
                            # this fabs() of a boolean comparison
                            # (fabs(a - b > 5.0)), not fabs(a - b) > 5.0 as
                            # in the sibling diagnostics — likely a bug;
                            # confirm intent before relying on this branch.
                            if (math.fabs(self.oatemp[points] -
                                          self.ratemp[points] > 5.0)
                                    and not self.matemp_missing):
                                if ((self.minimum_oa - self.oaf[points]) >
                                        self.oae5_oaf_threshold
                                        and self.oaf[points] > 0
                                        and self.oaf[points] < 1.25):
                                    # Unit is bringing in insufficient OA (Fault)
                                    oae5_result.append(61)
                                elif ((self.minimum_oa - self.oaf[points]) <
                                      self.oae5_oaf_threshold
                                      and self.oaf[points] > 0
                                      and self.oaf[points] < 1.25):
                                    # No problem detected.
                                    oae5_result.append(60)
                                else:
                                    # Unexpected result for OAF calculation
                                    # (No Fault)
                                    oae5_result.append(68)
                            elif (math.fabs(self.oatemp[points] -
                                            self.ratemp[points]) > 5.0
                                  and self.matemp_missing
                                  and not int(self.compressor[points])
                                  and int(self.heating[points])):
                                if ((self.minimum_oa - self.oaf[points]) >
                                        self.oae5_oaf_threshold
                                        and self.oaf[points] > 0
                                        and self.oaf[points] < 1.25):
                                    oae5_result.append(
                                        61)  # Insufficient OA (Fault)
                                elif ((self.minimum_oa - self.oaf[points]) <
                                      self.oae5_oaf_threshold
                                      and self.oaf[points] > 0
                                      and self.oaf[points] < 1.25):
                                    oae5_result.append(60)  # No Fault
                                else:
                                    # Unexpected result for OAF calculation
                                    # (No Fault).
                                    oae5_result.append(68)
                            else:
                                # Conditions are not favorable for OAF calculation (No Fault).
                                oae5_result.append(62)
                        else:
                            # Damper is significantly below the minimum
                            # damper set point (Fault)
                            oae5_result.append(63)
                    else:
                        # Missing required data (No fault)
                        oae5_result.append(67)
                else:
                    # Unit is off (No fault).
                    oae5_result.append(69)
            else:
                oae5_result.append(67)  # Missing data (No Fault)
        return oae5_result

    def schedule_diagnostic(self):
        '''OAE6: operating-schedule diagnostic (codes 70/71/77).

        Flags 71 when the unit runs (fan or compressor on) outside the
        configured occupancy hours for that weekday; 70 otherwise; 77 when
        the required status data is missing.
        '''
        oae6_result = []
        for points in xrange(0, self.newdata):
            if (self.fan_status[points] != -99
                    and self.compressor[points] != -99):
                if (int(self.fan_status[points])
                        or int(self.compressor[points])):
                    day = self.timestamp[points].weekday()
                    sched = self.schedule_dict[day]
                    start = int(sched[0])
                    end = int(sched[1])
                    if (self.timestamp[points].hour < start
                            or self.timestamp[points].hour > end):
                        oae6_result.append(71)
                    else:
                        oae6_result.append(70)
                else:
                    oae6_result.append(70)
            else:
                oae6_result.append(77)
        return oae6_result

    def calculate_energy_impact(self, oae_2, oae_3, oae_4):
        '''Estimate energy impact (kWh-equivalent) of OAE2/3/4 faults.

        Uses the 1.08 * cfm * dT / (1000 * EER) sensible-load approximation.
        Only computed when the mixed-air sensor is present.
        '''
        energy_impact = []
        # NOTE(review): month_abbr is built but never used in this method.
        month_abbr = {k: v for k, v in enumerate(calendar.month_abbr)}
        if not self.matemp_missing:
            for points in xrange(0, self.newdata):
                if oae_2[points] == 32 or oae_2[points] == 33:
                    energy_impact.append(
                        1.08 * self.cfm *
                        (self.matemp[points] - self.oatemp[points]) /
                        (1000 * self.eer))
                elif (oae_3[points] == 41 or oae_4[points] == 51
                      or oae_4[points] == 53
                      and self.oatemp[points] > self.matemp[points]):
                    ei = 1.08 * self.cfm / (1000 * self.eer)
                    ei = ei * (self.matemp[points] -
                               (self.oatemp[points] * self.minimum_oa +
                                self.ratemp[points] * (1 - self.minimum_oa)))
                    energy_impact.append(ei)
                # NOTE(review): this elif has the same condition as the one
                # above, so it is unreachable — likely a copy/paste error;
                # confirm the intended guard (e.g. OAT < MAT).
                elif (oae_3[points] == 41 or oae_4[points] == 51
                      or oae_4[points] == 53
                      and self.oatemp[points] > self.matemp[points]):
                    ei = (1.08 *
                          (self.oatemp[points] * self.minimum_oa +
                           self.ratemp[points] * (1 - self.minimum_oa)) -
                          self.cfm * (self.matemp[points]) /
                          (1000 * self.eer))
                    energy_impact.append(ei)
                else:
                    energy_impact.append(0)
                # Clamp negative impacts to zero.
                if energy_impact[points] < 0:
                    energy_impact[points] = 0
        return energy_impact

    @matching.match_exact(topics.DEVICES_VALUE(point='all', **rtu_path))
    def datahandler(self, topic, header, message, match):
        '''Subscribes to data and assembles raw data arrays.

        data_handler subscribes to a device or simulated device on the
        message bus and assembles the array (lists) of data to be
        aggregated for analysis.
        '''
        data = jsonapi.loads(message[0])
        _log.info('Getting Data from message bus')
        publisher_id = header.get('AgentID', 0)
        if ((self.run_aggregate is False or self.run_aggregate is None)
                and publisher_id != 'publisher'):
            # Real-time data: schedule process_data() to run at midnight.
            _log.info('Real-time device data.')
            self.run_aggregate = True
            event_time = (datetime.datetime.now().replace(
                hour=0, minute=0, second=0) + datetime.timedelta(days=1))
            event = sched.Event(self.process_data)
            self.schedule(event_time, event)
            self.oaf_raw = []
            self.timestamp_raw = []
            self.matemp_raw = []
            self.oatemp_raw = []
            self.ratemp_raw = []
            self.compressor_raw = []
            self.heating_raw = []
            self.damper_raw = []
            self.fan_status_raw = []
        elif publisher_id == 'publisher':
            # Simulated data carries its own timestamps; a day rollover in
            # the simulated clock triggers processing immediately.
            _log.info('Simulated device data.')
            if self.run_aggregate is None:
                self.prev_time = dateutil.parser.parse(
                    data[self.timestamp_name])
                self.run_aggregate = True
            time = dateutil.parser.parse(data[self.timestamp_name],
                                         fuzzy=True)
            time_delta = time - self.prev_time
            time_check = time + time_delta
            self.timestamp_raw.append(time)
            self.fan_status_raw.append(data[self.fan_status_name])
            # NOTE(review): coolcmd1_name / heat_cmd1_name are not assigned
            # in __init__ (see note there); confirm these attribute names.
            self.compressor_raw.append(data[self.coolcmd1_name])
            self.heating_raw.append(data[self.heat_cmd1_name])
            self.damper_raw.append(data[self.damper_name])
            self.oatemp_raw.append(data[self.oat_name])
            self.ratemp_raw.append(data[self.rat_name])
            self.matemp_raw.append(data[self.mat_name])
            if time.day < time_check.day:
                self.timestamp_raw.append(time_check)
                self.process_data()
                self.oaf_raw = []
                self.timestamp_raw = []
                self.oatemp_raw = []
                self.ratemp_raw = []
                self.compressor_raw = []
                self.heating_raw = []
                self.damper_raw = []
                self.fan_status_raw = []
            self.prev_time = time
        if publisher_id != 'publisher':
            # Real-time data is stamped with the local wall clock.
            self.timestamp_raw.append(datetime.datetime.now())
            self.fan_status_raw.append(data[self.fan_status_name])
            self.compressor_raw.append(data[self.coolcmd1_name])
            self.heating_raw.append(data[self.heat_cmd1_name])
            self.damper_raw.append(data[self.damper_name])
            self.oatemp_raw.append(data[self.oat_name])
            self.ratemp_raw.append(data[self.rat_name])
            self.matemp_raw.append(data[self.mat_name])
def driven_agent(config_path, **kwargs):
    """Driven harness for deployment of OpenEIS applications in VOLTTRON.

    Loads the agent configuration, builds device/subdevice topic maps, and
    returns a DrivenAgent instance (a closure over this configuration) that
    collects device data from the bus, runs the configured application, and
    publishes/writes its results.
    """
    config = utils.load_config(config_path)
    arguments = config.get('arguments')
    # 'ACTIVE' mode enables actuation; anything else is passive monitoring.
    mode = True if config.get('mode', 'PASSIVE') == 'ACTIVE' else False
    multiple_devices = isinstance(config['device']['unit'], dict)
    campus_building_config = config['device']
    analysis_name = campus_building_config.get('analysis_name',
                                               'analysis_name')
    analysis_dict = {'analysis_name': analysis_name}
    arguments.update(analysis_dict)
    agent_id = config.get('agentid', None)
    # Fall back to the analysis name when no explicit agent id is configured.
    agent_id = actuator_id = agent_id if agent_id is not None else analysis_name
    campus_building = dict(
        (key, campus_building_config[key]) for key in ['campus', 'building'])
    analysis = deepcopy(campus_building)
    analysis.update(analysis_dict)
    device_config = config['device']['unit']
    command_devices = device_config.keys()
    device_topic_dict = {}
    device_topic_list = []
    subdevices_list = []
    from_file = config.get('from_file')
    # Build topic -> tag maps for every unit and (optionally) its subdevices.
    for device_name in device_config:
        device_topic = topics.DEVICES_VALUE(
            campus=campus_building.get('campus'),
            building=campus_building.get('building'),
            unit=device_name,
            path='',
            point='all')
        device_topic_dict.update({device_topic: device_name})
        device_topic_list.append(device_name)
        if multiple_devices:
            for subdevice in device_config[device_name]['subdevices']:
                subdevices_list.append(subdevice)
                subdevice_topic = topics.DEVICES_VALUE(
                    campus=campus_building.get('campus'),
                    building=campus_building.get('building'),
                    unit=device_name,
                    path=subdevice,
                    point='all')
                subdevice_name = device_name + "/" + subdevice
                device_topic_dict.update({subdevice_topic: subdevice_name})
                device_topic_list.append(subdevice_name)
    # Template topic used to build actuator write paths per device/point.
    base_actuator_path = topics.ACTUATOR_WRITE(
        campus=campus_building.get('campus', ''),
        building=campus_building.get('building', ''),
        unit=None,
        path='',
        point=None)
    conversion_map = config.get('conversion_map')
    map_names = {}
    for key, value in conversion_map.items():
        map_names[key.lower() if isinstance(key, str) else key] = value
    application = config.get('application')
    validation_error = ''
    if not application:
        validation_error = 'Invalid application specified in config\n'
    if validation_error:
        _log.error(validation_error)
        raise ValueError(validation_error)
    config.update(config.get('arguments'))
    converter = ConversionMapper()
    output_file_prefix = config.get('output_file')
    #unittype_map = config.get('unittype_map', None)
    #assert unittype_map
    klass = _get_class(application)
    # This instances is used to call the applications run method when
    # data comes in on the message bus.  It is constructed here
    # so that_process_results each time run is called the application
    # can keep it state.
    app_instance = klass(**arguments)

    class DrivenAgent(Agent):
        '''Agent listens to message bus device and runs when data is published.
        '''

        def __init__(self, **kwargs):
            super(DrivenAgent, self).__init__(**kwargs)
            # master is where we copy from to get a poppable list of
            # subdevices that should be present before we run the analysis.
            self._master_devices = device_topic_list
            self._needed_devices = []
            self._device_values = {}
            self._initialize_devices()
            self.received_input_datetime = None
            self._kwargs = kwargs
            self._header_written = False
            self.file_creation_set = set()

        def _initialize_devices(self):
            # Reset the "still waiting for" list and accumulated values for
            # the next analysis round.
            self._needed_devices = deepcopy(self._master_devices)
            self._device_values = {}

        @Core.receiver('onstart')
        def starup(self, sender, **kwargs):
            # Subscribe to every configured device/subdevice "all" topic.
            self._initialize_devices()
            for device_topic in device_topic_dict:
                _log.debug('Subscribing to ' + device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=device_topic,
                                          callback=self.on_analysis_message)

        def _should_run_now(self):
            # Assumes the unit/all values will have values.
            if not len(self._device_values.keys()) > 0:
                return False
            return not len(self._needed_devices) > 0

        def on_analysis_message(self, peer, sender, bus, topic, headers,
                                message):
            """Subscribe to device data and assemble data set to pass
            to applications.
            """
            device_data = message[0]
            if isinstance(device_data, list):
                device_data = device_data[0]

            def aggregate_subdevice(device_data):
                # Tag each point as "<point>&<device>" and mark this device
                # as received; returns False if it was not expected.
                tagged_device_data = {}
                device_tag = device_topic_dict[topic]
                if device_tag not in self._needed_devices:
                    return False
                for key, value in device_data.items():
                    device_data_tag = '&'.join([key, device_tag])
                    tagged_device_data[device_data_tag] = value
                self._device_values.update(tagged_device_data)
                self._needed_devices.remove(device_tag)
                return True

            device_needed = aggregate_subdevice(device_data)
            if not device_needed:
                _log.error("Warning device values already present, "
                           "reinitializing")
            if self._should_run_now():
                # All devices reported: normalize keys, convert units, and
                # run the application.
                field_names = {}
                for k, v in self._device_values.items():
                    field_names[k.lower() if isinstance(k, str) else k] = v
                if not converter.initialized and conversion_map is not None:
                    converter.setup_conversion_map(map_names, field_names)
                if from_file:
                    # File playback: use the timestamp from the headers.
                    _timestamp = parse(headers.get('Date'))
                    self.received_input_datetime = _timestamp
                else:
                    _timestamp = dt.now()
                    self.received_input_datetime = dt.utcnow()
                device_data = converter.process_row(field_names)
                results = app_instance.run(_timestamp, device_data)
                # results = app_instance.run(
                # dateutil.parser.parse(self._subdevice_values['Timestamp'],
                #                       fuzzy=True), self._subdevice_values)
                self._process_results(results)
                self._initialize_devices()
            else:
                _log.info("Still need {} before running.".format(
                    self._needed_devices))

        def _process_results(self, results):
            """Run driven application with converted data and write the app
            results to a file or database.
            """
            _log.debug('Processing Results!')
            for device, point_value_dict in results.devices.items():
                for key, value in point_value_dict.items():
                    _log.debug("COMMAND TABLE: {}->{}".format(key, value))
                    if mode:
                        _log.debug('ACTUATE ON DEVICE.')
                        # NOTE(review): actuator_request() currently has its
                        # 'return results, False' commented out, so it
                        # returns None and this unpack would raise — confirm
                        # before enabling ACTIVE mode.
                        results, actuator_error = self.actuator_request(
                            results)
                        if not actuator_error:
                            self.actuator_set(results)
            for key, value in results.commands.iteritems():
                _log.debug("COMMAND TABLE: {}->{}".format(key, value))
                if mode:
                    _log.debug('ACTUATE ON DEVICE.')
                    results, actuator_error = self.actuator_request(results)
                    if not actuator_error:
                        self.actuator_set(results)
            for value in results.log_messages:
                _log.debug("LOG: {}".format(value))
            for key, value in results.table_output.items():
                _log.debug("TABLE: {}->{}".format(key, value))
            if output_file_prefix is not None:
                results = self.create_file_output(results)
            if len(results.table_output.keys()):
                results = self.publish_analysis_results(results)
            return results

        def publish_analysis_results(self, results):
            """publish analysis results to the message bus for
            capture by the data historian
            """
            headers = {
                headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                headers_mod.DATE: str(self.received_input_datetime),
            }
            for app, analysis_table in results.table_output.items():
                # Table keys are "<name>&<timestamp>"; fall back to the
                # received time when the key has no timestamp part.
                try:
                    name_timestamp = app.split('&')
                    _name = name_timestamp[0]
                    timestamp = name_timestamp[1]
                except:
                    _name = app
                    timestamp = str(self.received_input_datetime)
                headers = {
                    headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                    headers_mod.DATE: timestamp,
                }
                for entry in analysis_table:
                    for key, value in entry.items():
                        for _device in command_devices:
                            analysis['unit'] = _device
                            analysis_topic = topics.ANALYSIS_VALUE(point=key,
                                                                   **analysis)
                            datatype = 'float'
                            if isinstance(value, int):
                                datatype = 'int'
                            kbase = key[key.rfind('/') + 1:]
                            # Historian-style [value, metadata] payload.
                            message = [{
                                kbase: value
                            }, {
                                kbase: {
                                    'tz': 'US/Pacific',
                                    'type': datatype,
                                    'units': 'float',
                                }
                            }]
                            self.vip.pubsub.publish('pubsub', analysis_topic,
                                                    headers, message)
            return results

        def create_file_output(self, results):
            """Create results/data files for testing and algorithm validation."""
            for key, value in results.table_output.items():
                name_timestamp = key.split('&')
                _name = name_timestamp[0]
                timestamp = name_timestamp[1]
                file_name = output_file_prefix + "-" + _name + ".csv"
                # Write the header once per newly created file.
                if file_name not in self.file_creation_set:
                    self._header_written = False
                self.file_creation_set.update([file_name])
                for row in value:
                    with open(file_name, 'a+') as file_to_write:
                        row.update({'Timestamp': timestamp})
                        _keys = row.keys()
                        file_output = csv.DictWriter(file_to_write, _keys)
                        if not self._header_written:
                            file_output.writeheader()
                            self._header_written = True
                        file_output.writerow(row)
                    file_to_write.close()
            return results

        def actuator_request(self, results):
            """Make actuaor request for modification of device set points."""
            _now = dt.now()
            str_now = _now.strftime(DATE_FORMAT)
            _end = _now + td(minutes=1)
            str_end = _end.strftime(DATE_FORMAT)
            for _device in command_devices:
                actuation_device = base_actuator_path(unit=_device, point='')
                schedule_request = [[actuation_device, str_now, str_end]]
                #
                # try:
                #     result = self.vip.rpc.call('platform.actuator',
                #                                'request_new_schedule',
                #                                agent_id, _device, 'HIGH',
                #                                schedule_request).get(timeout=4)
                # except RemoteError as ex:
                #     _log.warning("Failed to schedule device {} (RemoteError): {}".format(_device, str(ex)))
                #     request_error = True
                #
                # if result['result'] == 'FAILURE':
                #     _log.warn('Failed to schedule device (unavailable) ' + _device)
                #     request_error = True
                # else:
                #     request_error = False
                # _log.debug('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
                # _log.debug(str(result))
                # _log.debug(str_now)
                # _log.debug(str_end)
                headers = {
                    'type': 'NEW_SCHEDULE',
                    'requesterID': agent_id,
                    'taskID': actuator_id,
                    'priority': 'HIGH'
                }
                device_path = "{campus}/{building}/".format(**campus_building)
                self.vip.pubsub.publish(
                    peer='pubsub',
                    topic=topics.ACTUATOR_SCHEDULE_REQUEST(),
                    headers=headers,
                    message=[[device_path + _device, str_now, str_end]])
            # NOTE(review): the return below is commented out, so this method
            # returns None while _process_results() expects a 2-tuple.
            # return results, False

        def actuator_set(self, results):
            """Set point on device."""
            # First apply per-device point values from results.devices...
            for device, point_value_dict in results.devices.items():
                for point, value in point_value_dict.items():
                    point_path = base_actuator_path(unit=device, point=point)
                    try:
                        # result = self.vip.rpc.call('platform.actuator', 'set_point',
                        #                            agent_id, point_path,
                        #                            new_value).get(timeout=4)
                        headers = {
                            'Content-Type': 'text/plain',
                            'requesterID': agent_id,
                        }
                        self.vip.pubsub.publish(peer="pubsub",
                                                topic=topics.ACTUATOR_SET(
                                                    point=point,
                                                    unit=device,
                                                    **campus_building),
                                                headers=headers,
                                                message=str(value))
                        _log.debug("Set point {} to {}".format(
                            point_path, value))
                    except RemoteError as ex:
                        _log.warning("Failed to set {} to {}: {}".format(
                            point_path, value, str(ex)))
                        continue
            # ...then broadcast results.commands to every configured device.
            for _device in command_devices:
                for point, new_value in results.commands.items():
                    point_path = base_actuator_path(unit=_device, point=point)
                    try:
                        # result = self.vip.rpc.call('platform.actuator', 'set_point',
                        #                            agent_id, point_path,
                        #                            new_value).get(timeout=4)
                        headers = {
                            'Content-Type': 'text/plain',
                            'requesterID': agent_id,
                        }
                        self.vip.pubsub.publish(peer="pubsub",
                                                topic=topics.ACTUATOR_SET(
                                                    point=point,
                                                    unit=_device,
                                                    **campus_building),
                                                headers=headers,
                                                message=str(new_value))
                        _log.debug("Set point {} to {}".format(
                            point_path, new_value))
                    except RemoteError as ex:
                        _log.warning("Failed to set {} to {}: {}".format(
                            point_path, new_value, str(ex)))
                        continue

    DrivenAgent.__name__ = 'DrivenLoggerAgent'
    return DrivenAgent(**kwargs)
class Agent(PublishMixin, BaseAgent):
    """Runs an AFDD algorithm in a worker thread once the actuator lock is held.

    Acquires the actuator lock at setup; when the lock is granted, spawns
    `algo` (default: the module-level `afdd`) on a daemon thread with a
    push socket back to the platform. Device data and set-point results
    are handed between the pubsub callbacks and the worker thread via
    two WaitQueues.
    """

    def __init__(self, **kwargs):
        super(Agent, self).__init__(**kwargs)
        self.lock_acquired = False
        self.thread = None
        # Cross-thread handoff queues: incoming device data and
        # set-point confirmation/error results.
        self.data_queue = multithreading.WaitQueue()
        self.value_queue = multithreading.WaitQueue()

    def setup(self):
        """Request the actuator lock for the configured RTU."""
        super(Agent, self).setup()
        headers = {
            'Content-Type': 'text/plain',
            'requesterID': agent_id,
        }
        self.publish(topics.ACTUATOR_LOCK_ACQUIRE(**rtu_path), headers)

    def start(self, algo=None):
        """Start `algo` (default: module-level `afdd`) on a daemon thread."""
        if algo is None:
            algo = afdd

        def run():
            # Each worker gets its own PUSH socket, closed when algo returns.
            sock = messaging.Socket(zmq.PUSH)
            sock.connect(publish_address)
            with contextlib.closing(sock):
                algo(self, sock)

        self.thread = threading.Thread(target=run)
        self.thread.daemon = True
        self.thread.start()

    @matching.match_exact(topics.ACTUATOR_LOCK_RESULT(**rtu_path))
    def on_lock_result(self, topic, headers, message, match):
        """Track lock ownership; kick off the algorithm on a fresh grant."""
        msg = jsonapi.loads(message[0])
        holding_lock = self.lock_acquired
        if headers['requesterID'] == agent_id:
            self.lock_acquired = msg == 'SUCCESS'
        elif msg == 'SUCCESS':
            # Someone else acquired the lock, so we no longer hold it.
            self.lock_acquired = False
        # Only start when transitioning from not-holding to holding.
        if self.lock_acquired and not holding_lock:
            self.start()

    @matching.match_exact(topics.DEVICES_VALUE(point='all', **rtu_path))
    def on_new_data(self, topic, headers, message, match):
        """Forward each all-points device publish to the worker thread."""
        data = jsonapi.loads(message[0])
        self.data_queue.notify_all(data)

    @matching.match_glob(topics.ACTUATOR_VALUE(point='*', **rtu_path))
    def on_set_result(self, topic, headers, message, match):
        """Report a successful set of point `match.group(1)`."""
        self.value_queue.notify_all((match.group(1), True))

    @matching.match_glob(topics.ACTUATOR_ERROR(point='*', **rtu_path))
    def on_set_error(self, topic, headers, message, match):
        """Report a failed set of point `match.group(1)`."""
        self.value_queue.notify_all((match.group(1), False))

    def get_new_data(self, timeout=None):
        """Block (up to `timeout`) until the next device publish arrives."""
        _log.debug('get_new_data({})'.format(timeout))
        return self.data_queue.wait(timeout)

    def set_point(self, sock, point_name, value, timeout=None):
        """Send a set-point over `sock` and wait for its result.

        Returns the (point, ok) tuple from the value queue, or None on
        timeout. The send happens while holding the queue's condition so
        a result published between send and wait cannot be missed.
        """
        _log.debug('set_point({}, {}, {})'.format(point_name, value, timeout))
        headers = {
            'Content-Type': 'text/plain',
            'requesterID': agent_id,
        }
        with self.value_queue.condition:
            sock.send_message(topics.ACTUATOR_SET(point=point_name, **rtu_path),
                              headers, str(value), flags=zmq.NOBLOCK)
            try:
                # _wait is used (not wait) because the condition is
                # already held by this thread.
                return self.value_queue._wait(timeout)
            except multithreading.Timeout:
                return None
class Agent(PublishMixin, BaseAgent):
    """Demand-response agent for a packaged rooftop unit (Python 2).

    Listens for openADR events, schedules a pre-cool ramp before the
    event, curtails setpoints/fan speeds during it, then steps setpoints
    back and restores defaults. State machine:
    STARTUP -> IDLE -> PRECOOL -> DR_EVENT -> RESTORE -> CLEANUP -> IDLE.
    """

    def __init__(self, **kwargs):
        super(Agent, self).__init__(**kwargs)
        # "Normal operation" values captured from device data while IDLE;
        # used to restore the unit when an event ends or is cancelled.
        self.default_firststage_fanspeed = 0.0
        self.default_secondstage_fanspeed = 0.0
        self.default_damperstpt = 0.0
        self.default_coolingstpt = 0.0
        self.default_heatingstpt = 65.0
        self.current_spacetemp = 72.0
        self.state = 'STARTUP'
        self.e_start_msg = None
        # Callbacks invoked by the actuator lock/error/value subscriptions;
        # methods below swap these in and out as the state changes.
        self.lock_handler = None
        self.error_handler = None
        self.actuator_handler = None
        # event start datetime -> sched.Event for pending pre-cool starts.
        self.all_scheduled_events = {}
        # sched.Events belonging to the event currently in progress.
        self.currently_running_dr_event_handlers = []
        self.headers = {
            headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
            'requesterID': agent_id
        }

    @matching.match_exact(topics.ACTUATOR_LOCK_RESULT(**rtu_path))
    def _on_lock_result(self, topic, headers, message, match):
        """Dispatch our actuator lock result to the active lock/error handler."""
        msg = jsonapi.loads(message[0])
        if headers['requesterID'] == agent_id:
            if msg == 'SUCCESS' and self.lock_handler is not None:
                self.lock_handler()
            if msg == 'FAILURE' and self.error_handler is not None:
                self.error_handler(msg)

    @matching.match_glob(topics.ACTUATOR_ERROR(point='*', **rtu_path))
    def _on_error_result(self, topic, headers, message, match):
        """Forward actuator set-point errors for our requests to error_handler."""
        if headers.get('requesterID', '') == agent_id:
            if self.error_handler is not None:
                self.error_handler(match, jsonapi.loads(message[0]))

    @matching.match_glob(topics.ACTUATOR_VALUE(point='*', **rtu_path))
    def _on_actuator_result(self, topic, headers, message, match):
        """Forward confirmed set-point values for our requests to actuator_handler."""
        msg = jsonapi.loads(message[0])
        print 'Actuator Results:', match, msg
        if headers['requesterID'] == agent_id:
            if self.actuator_handler is not None:
                self.actuator_handler(match, jsonapi.loads(message[0]))

    @matching.match_exact(topics.DEVICES_VALUE(point='all', **rtu_path))
    def _on_new_data(self, topic, headers, message, match):
        """Track zone conditions and capture 'normal' setpoints while idle."""
        data = jsonapi.loads(message[0])
        # self.current_spacetemp = float(data["ZoneTemp"])
        # NOTE(review): hardcoded space temperature (live reading commented
        # out above) — looks like a test/debug leftover; confirm before use.
        self.current_spacetemp = 76
        droveride = bool(int(data["CoolCall2"]))
        occupied = bool(int(data["Occupied"]))
        # A second-stage cooling call during an active event is treated as
        # a user override and cancels the event.
        if droveride and self.state not in ('IDLE', 'CLEANUP', 'STARTUP'):
            print 'User Override Initiated'
            self.cancel_event()
        if not occupied and self.state in ('DR_EVENT', 'RESTORE'):
            self.cancel_event()
        if self.state == 'IDLE' or self.state == 'STARTUP':
            #self.default_coolingstpt = float(data["CoolingStPt"])
            #self.default_heatingstpt = float(data["HeatingStPt"])
            # NOTE(review): defaults hardcoded; live setpoint reads are
            # commented out above — confirm intended.
            self.default_coolingstpt = 75.0
            self.default_heatingstpt = 65.0
            self.default_firststage_fanspeed = float(
                data["CoolSupplyFanSpeed1"])
            self.default_secondstage_fanspeed = float(
                data["CoolSupplyFanSpeed2"])
            self.default_damperstpt = float(data["ESMDamperMinPosition"])
        if self.state == 'STARTUP':
            self.state = 'IDLE'

    @matching.match_exact(topics.OPENADR_EVENT())
    def _on_dr_event(self, topic, headers, message, match):
        """Handle openADR events: schedule, update, or cancel a DR event."""
        if self.state == 'STARTUP':
            print "DR event ignored because of startup."
            return
        msg = jsonapi.loads(message[0])
        print('EVENT Received')
        print(msg)
        e_id = msg['id']
        e_status = msg['status']
        e_start = msg['start']
        e_start = datetime.datetime.strptime(e_start, datefmt)
        today = datetime.datetime.now().date()
        #e_start_day = e_start.date()
        #e_end = e_start.replace(hour=cpp_end_hour, minute =0, second = 0)
        current_datetime = datetime.datetime.now()
        # NOTE(review): 2-minute event duration appears to be a testing
        # value (the hour-based end is commented out above).
        e_end = e_start + datetime.timedelta(minutes=2)
        if current_datetime > e_end:
            print 'Too Late Event is Over'
            return
        if e_status == 'cancelled':
            if e_start in self.all_scheduled_events:
                print 'Event Cancelled'
                self.all_scheduled_events[e_start].cancel()
                del self.all_scheduled_events[e_start]
            if e_start.date() == today and (self.state == 'PRECOOL'
                                            or self.state == 'DR_EVENT'):
                self.cancel_event()
            return
        #TODO: change this to UTC later
        #utc_now = datetime.datetime.utcnow()
        # Stale event for a past day: drop any scheduled handler.
        if today > e_start.date():
            if e_start in self.all_scheduled_events:
                self.all_scheduled_events[e_start].cancel()
                del self.all_scheduled_events[e_start]
            return
        # At most one event per day: replace a same-day event whose start
        # time changed; ignore an exact duplicate.
        for item in self.all_scheduled_events.keys():
            if e_start.date() == item.date():
                if e_start.time() != item.time():
                    print "Updating Event"
                    self.all_scheduled_events[item].cancel()
                    del self.all_scheduled_events[item]
                    if e_start.date() == today and (self.state == 'PRECOOL'
                                                    or self.state == 'DR_EVENT'):
                        self.update_running_event()
                        self.state = 'IDLE'
                    break
                elif e_start.time() == item.time():
                    print "same event"
                    return
        #if e_id in self.all_scheduled_dr_events and update is None:
        #    if e_id == self.currently_running_msg:
        #        return
        #return
        #Minutes used for testing
        #event_start = e_start - datetime.timedelta(hours = max_precool_hours)
        # NOTE(review): timedelta uses minutes (hours commented out above);
        # testing shortcut per the comment — confirm before deployment.
        event_start = e_start - datetime.timedelta(
            minutes=max_precool_hours)
        event = sched.Event(self.pre_cool_get_lock, args=[e_start, e_end])
        self.schedule(event_start, event)
        self.all_scheduled_events[e_start] = event

    def pre_cool_get_lock(self, e_start, e_end):
        """Begin pre-cooling: acquire the actuator lock, then build the schedule."""
        now = datetime.datetime.now()
        day = now.weekday()
        # `Schedule` is the weekly occupancy table; skip unoccupied days.
        if not Schedule[day]:
            print "Unoccupied today"
            return
        self.state = 'PRECOOL'
        #e_end = e_start.replace(hour=cpp_end_hour, minute =0, second = 0)
        #e_end = e_start + datetime.timedelta(minutes=2)
        e_start_unix = time.mktime(e_start.timetuple())
        e_end_unix = time.mktime(e_end.timetuple())

        def run_schedule_builder():
            # Invoked by _on_lock_result once the lock is granted.
            #current_time = time.mktime(current_time.timetuple())
            self.schedule_builder(
                e_start_unix, e_end_unix,
                current_spacetemp=77.0,
                pre_csp=csp_pre,
                building_thermal_constant=building_thermal_constant,
                normal_coolingstpt=76.0,
                timestep_length=timestep_length,
                dr_csp=csp_cpp)
            self.lock_handler = None

        self.lock_handler = run_schedule_builder
        headers = {
            headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
            'requesterID': agent_id
        }
        self.publish(topics.ACTUATOR_LOCK_ACQUIRE(**rtu_path), headers)

    def modify_temp_set_point(self, csp, hsp):
        """Publish new cooling (csp) and heating (hsp) setpoints; retry on lock."""
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperChangeOverSetPoint',
                                **rtu_path),
            self.headers, str(csp))
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperMinPosition',
                                **rtu_path),
            self.headers, str(hsp))

        def backup_run():
            # Re-run this set if the lock is (re)acquired later.
            self.modify_temp_set_point(csp, hsp)
            self.lock_handler = None

        self.lock_handler = backup_run

    def start_dr_event(self):
        """Enter DR_EVENT: raise cooling setpoint, cut fan speeds, set damper."""
        self.state = 'DR_EVENT'
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperChangeOverSetPoint',
                                **rtu_path),
            self.headers, str(csp_cpp))
        new_fan_speed = self.default_firststage_fanspeed - (
            self.default_firststage_fanspeed * fan_reduction)
        new_fan_speed = max(new_fan_speed, 0)
        self.publish(
            topics.ACTUATOR_SET(point='CoolSupplyFanSpeed1', **rtu_path),
            self.headers, str(new_fan_speed))
        # NOTE(review): second-stage reduction subtracts a fraction of the
        # FIRST-stage speed — possibly should be default_secondstage_fanspeed;
        # confirm intent.
        new_fan_speed = self.default_secondstage_fanspeed - (
            self.default_firststage_fanspeed * fan_reduction)
        new_fan_speed = max(new_fan_speed, 0)
        self.publish(
            topics.ACTUATOR_SET(point='CoolSupplyFanSpeed2', **rtu_path),
            self.headers, str(new_fan_speed))
        self.publish(
            topics.ACTUATOR_SET(point='ESMDamperMinPosition', **rtu_path),
            self.headers, str(damper_cpp))

        def backup_run():
            self.start_dr_event()
            self.lock_handler = None

        self.lock_handler = backup_run

    def start_restore_event(self, csp, hsp):
        """Enter RESTORE: given setpoints, default fan speeds and damper."""
        self.state = 'RESTORE'
        print 'Restore: Begin restoring normal operations'
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperChangeOverSetPoint',
                                **rtu_path),
            self.headers, str(csp))
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperMinPosition',
                                **rtu_path),
            self.headers, str(hsp))  #heating
        self.publish(
            topics.ACTUATOR_SET(point='CoolSupplyFanSpeed1', **rtu_path),
            self.headers, str(self.default_firststage_fanspeed))
        self.publish(
            topics.ACTUATOR_SET(point='CoolSupplyFanSpeed2', **rtu_path),
            self.headers, str(self.default_secondstage_fanspeed))
        self.publish(
            topics.ACTUATOR_SET(point='ESMDamperMinPosition', **rtu_path),
            self.headers, str(self.default_damperstpt))

        def backup_run():
            self.start_restore_event(csp, hsp)
            self.lock_handler = None

        self.lock_handler = backup_run

    def update_running_event(self):
        """Reset all points to defaults and drop pending handlers (no CLEANUP)."""
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperChangeOverSetPoint',
                                **rtu_path),
            self.headers, str(self.default_coolingstpt))
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperMinPosition',
                                **rtu_path),
            self.headers, str(self.default_heatingstpt))
        self.publish(
            topics.ACTUATOR_SET(point='CoolSupplyFanSpeed1', **rtu_path),
            self.headers, str(self.default_firststage_fanspeed))
        self.publish(
            topics.ACTUATOR_SET(point='CoolSupplyFanSpeed2', **rtu_path),
            self.headers, str(self.default_secondstage_fanspeed))
        self.publish(
            topics.ACTUATOR_SET(point='ESMDamperMinPosition', **rtu_path),
            self.headers, str(self.default_damperstpt))
        for event in self.currently_running_dr_event_handlers:
            event.cancel()
        self.currently_running_dr_event_handlers = []

    def cancel_event(self):
        """Abort the event: restore defaults, verify each write, release lock."""
        self.state = 'CLEANUP'
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperChangeOverSetPoint',
                                **rtu_path),
            self.headers, str(self.default_coolingstpt))
        self.publish(
            topics.ACTUATOR_SET(point='StandardDamperMinPosition',
                                **rtu_path),
            self.headers, str(self.default_heatingstpt))
        self.publish(
            topics.ACTUATOR_SET(point='CoolSupplyFanSpeed1', **rtu_path),
            self.headers, str(self.default_firststage_fanspeed))
        self.publish(
            topics.ACTUATOR_SET(point='CoolSupplyFanSpeed2', **rtu_path),
            self.headers, str(self.default_secondstage_fanspeed))
        self.publish(
            topics.ACTUATOR_SET(point='ESMDamperMinPosition', **rtu_path),
            self.headers, str(self.default_damperstpt))
        for event in self.currently_running_dr_event_handlers:
            event.cancel()
        self.currently_running_dr_event_handlers = []

        def backup_run():
            self.cancel_event()
            self.lock_handler = None

        self.lock_handler = backup_run
        # Points we expect the actuator to confirm before we go IDLE.
        expected_values = {
            'StandardDamperChangeOverSetPoint': self.default_coolingstpt,
            'StandardDamperMinPosition': self.default_heatingstpt,
            'CoolSupplyFanSpeed1': self.default_firststage_fanspeed,
            'CoolSupplyFanSpeed2': self.default_secondstage_fanspeed,
            'ESMDamperMinPosition': self.default_damperstpt
        }
        EPSILON = 0.5  #allowed difference from expected value

        def result_handler(point, value):
            # Invoked by _on_actuator_result for each confirmed point;
            # when every expected point has reported, release the lock.
            #print "actuator point being handled:", point, value
            expected_value = expected_values.pop(point, None)
            if expected_value is not None:
                diff = abs(expected_value - value)
                if diff > EPSILON:
                    # NOTE(review): extra positional arg with no %-style
                    # placeholder in the format string — the point name will
                    # not be rendered by the logging module; confirm.
                    _log.debug("Did not get back expected value for", point)
            if not expected_values:
                self.actuator_handler = None
                self.lock_handler = None
                self.state = 'IDLE'
                headers = {
                    headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                    'requesterID': agent_id
                }
                self.publish(topics.ACTUATOR_LOCK_RELEASE(**rtu_path),
                             headers)

        self.actuator_handler = result_handler

    def schedule_builder(self, start_time, end_time, current_spacetemp,
                         pre_csp, building_thermal_constant,
                         normal_coolingstpt, timestep_length, dr_csp):
        """Schedule all timed actions for one DR event.

        Builds a stepped pre-cool ramp down to `pre_csp` before
        `start_time`, the DR event itself, then a stepped restore ramp
        from `dr_csp` back toward `normal_coolingstpt` after `end_time`,
        finishing with a cleanup event. Times are unix timestamps;
        ramp sizing derives from `building_thermal_constant` (deg/hour).
        """
        print 'Scheduling all DR actions'
        pre_hsp = pre_csp - 5.0
        current_time = time.time()
        # Seconds needed to drift from current temp down to the pre-cool
        # setpoint at the building's thermal rate.
        ideal_cooling_window = int(
            ((current_spacetemp - pre_csp) / building_thermal_constant)
            * 3600)
        ideal_precool_start_time = start_time - ideal_cooling_window
        max_cooling_window = start_time - current_time
        # Clamp to the time actually remaining before the event.
        cooling_window = ideal_cooling_window if ideal_cooling_window < max_cooling_window else max_cooling_window
        precool_start_time = start_time - cooling_window
        if (max_cooling_window > 0):
            print "Schedule Pre Cooling"
            num_cooling_timesteps = int(
                math.ceil(float(cooling_window) / float(timestep_length)))
            cooling_step_delta = (normal_coolingstpt -
                                  pre_csp) / num_cooling_timesteps
            # Steps are scheduled backwards from start_time; the step
            # closest to the event carries the lowest setpoint.
            for step_index in range(1, num_cooling_timesteps + 1):
                event_time = start_time - (step_index * timestep_length)
                csp = pre_csp + ((step_index - 1) * cooling_step_delta)
                print 'Precool step:', datetime.datetime.fromtimestamp(
                    event_time), csp
                event = sched.Event(self.modify_temp_set_point,
                                    args=[csp, pre_hsp])
                self.schedule(event_time, event)
                self.currently_running_dr_event_handlers.append(event)
        else:
            print "Too late to pre-cool!"
        # Restore ramp sizing: time to drift from dr_csp back to normal.
        restore_window = int(
            ((dr_csp - normal_coolingstpt) / building_thermal_constant)
            * 3600)
        restore_start_time = end_time
        num_restore_timesteps = int(
            math.ceil(float(restore_window) / float(timestep_length)))
        restore_step_delta = (dr_csp -
                              normal_coolingstpt) / num_restore_timesteps
        print 'Schedule DR Event:', datetime.datetime.fromtimestamp(
            start_time), dr_csp
        event = sched.Event(self.start_dr_event)
        self.schedule(start_time, event)
        self.currently_running_dr_event_handlers.append(event)
        print 'Schedule Restore Event:', datetime.datetime.fromtimestamp(
            end_time), dr_csp - restore_step_delta
        event = sched.Event(
            self.start_restore_event,
            args=[dr_csp - restore_step_delta, self.default_heatingstpt])
        self.schedule(end_time, event)
        self.currently_running_dr_event_handlers.append(event)
        # Remaining restore steps walk the setpoint back down after end_time.
        for step_index in range(1, num_restore_timesteps):
            event_time = end_time + (step_index * timestep_length)
            csp = dr_csp - ((step_index + 1) * restore_step_delta)
            print 'Restore step:', datetime.datetime.fromtimestamp(
                event_time), csp
            event = sched.Event(self.modify_temp_set_point,
                                args=[csp, self.default_heatingstpt])
            self.schedule(event_time, event)
            self.currently_running_dr_event_handlers.append(event)
        event_time = end_time + (num_restore_timesteps * timestep_length)
        print 'Schedule Cleanup Event:', datetime.datetime.fromtimestamp(
            event_time)
        event = sched.Event(self.cancel_event)
        self.schedule(event_time, event)
        self.currently_running_dr_event_handlers.append(event)
def driven_agent(config_path, **kwargs):
    """
    Reads agent configuration and converts it to run driven agent.

    Builds device/subdevice pubsub topics from the "device" section,
    instantiates the configured application class, and returns a
    DrivenAgent instance that collects device publishes, runs the
    application when a full data set is assembled, and optionally
    actuates results.

    :param config_path: path to the agent configuration file
    :param kwargs: passed through to the Agent base class
    :return: constructed DrivenAgent (renamed DrivenLoggerAgent)
    """
    config = utils.load_config(config_path)
    arguments = config.get("arguments")
    # Actuation is only performed when explicitly set to "ACTIVE".
    actuation_mode = True if config.get("actuation_mode", "PASSIVE") == "ACTIVE" else False
    actuator_lock_required = config.get("require_actuator_lock", False)
    campus = config["device"].get("campus", "")
    building = config["device"].get("building", "")
    analysis_name = config.get("analysis_name", "analysis_name")
    publish_base = "/".join([analysis_name, campus, building])
    application_name = config.get("pretty_name", analysis_name)
    arguments.update({"analysis_name": analysis_name})
    device_config = config["device"]["unit"]
    multiple_devices = isinstance(device_config, dict)
    command_devices = list(device_config.keys())
    device_topic_dict = {}
    device_topic_list = []
    subdevices_list = []
    interval = config.get("interval", 60)
    vip_destination = config.get("vip_destination", None)
    timezone = config.get("local_timezone", "US/Pacific")
    # One "all points" topic per unit, plus one per subdevice (VAV zone).
    for device_name in device_config:
        device_topic = topics.DEVICES_VALUE(campus=campus,
                                            building=building,
                                            unit=device_name,
                                            path="",
                                            point="all")
        device_topic_dict.update({device_topic: device_name})
        device_topic_list.append(device_name)
        if multiple_devices:
            for subdevice in device_config[device_name]["subdevices"]:
                subdevices_list.append(subdevice)
                subdevice_topic = topics.DEVICES_VALUE(campus=campus,
                                                       building=building,
                                                       unit=device_name,
                                                       path=subdevice,
                                                       point="all")
                subdevice_name = device_name + "/" + subdevice
                device_topic_dict.update({subdevice_topic: subdevice_name})
                device_topic_list.append(subdevice_name)
    base_actuator_path = topics.RPC_DEVICE_PATH(campus=campus,
                                                building=building,
                                                unit=None,
                                                path="",
                                                point=None)
    device_lock_duration = config.get("device_lock_duration", 10.0)
    conversion_map = config.get("conversion_map")
    # Configured as a percentage; used below as a fraction.
    missing_data_threshold = config.get("missing_data_threshold", 15.0) / 100.0
    # Case-insensitive point-name lookup for the conversion map.
    map_names = {}
    for key, value in conversion_map.items():
        map_names[key.lower() if isinstance(key, str) else key] = value
    application = config.get("application")
    validation_error = ""
    if not application:
        validation_error = "Invalid application specified in config\n"
    if validation_error:
        _log.error(validation_error)
        raise ValueError(validation_error)
    converter = ConversionMapper()
    # output_file_prefix = config.get("output_file")
    klass = _get_class(application)
    # This instances is used to call the applications run method when
    # data comes in on the message bus. It is constructed here
    # so that_process_results each time run is called the application
    # can keep it state.
    # points = arguments.pop("point_mapping")
    app_instance = klass(**arguments)

    class DrivenAgent(Agent):
        """Agent listens to message bus device and runs when data is published.
        """

        def __init__(self, **kwargs):
            """
            Initializes agent
            :param kwargs: Any driver specific parameters"""
            super(DrivenAgent, self).__init__(**kwargs)
            # master is where we copy from to get a poppable list of
            # subdevices that should be present before we run the analysis.
            self.master_devices = device_topic_list
            self.needed_devices = []
            self.device_values = self.master_devices[:]
            self.initialize_devices()
            self.received_input_datetime = None
            self._header_written = False
            self.file_creation_set = set()
            # Default to local RPC; replaced by a remote agent's RPC when
            # vip_destination is configured.
            self.actuation_vip = self.vip.rpc
            self.initialize_time = None
            if vip_destination:
                self.agent = setup_remote_actuation(vip_destination)
                self.actuation_vip = self.agent.vip.rpc

        def initialize_devices(self):
            """Reset the needed-device list and the collected point values."""
            self.needed_devices = self.master_devices[:]
            self.device_values = {}

        @Core.receiver("onstart")
        def startup(self, sender, **kwargs):
            """
            Starts up the agent and subscribes to device topics
            based on agent configuration.
            :param sender:
            :param kwargs: Any driver specific parameters
            :type sender: str
            """
            for device in device_topic_dict:
                _log.info("Subscribing to " + device)
                self.vip.pubsub.subscribe(peer="pubsub",
                                          prefix=device,
                                          callback=self.on_analysis_message)

        def _should_run_now(self):
            """
            Checks if messages from all the devices are received
                before running application
            :returns: True or False based on received messages.
            :rtype: boolean
            """
            # Assumes the unit/all values will have values.
            if not self.device_values.keys():
                return False
            return not self.needed_devices

        def aggregate_subdevice(self, device_data, topic):
            """
            Aggregates device and subdevice data for application
            :returns: True or False based on if device data is needed.
            :rtype: boolean"""
            tagged_device_data = {}
            device_tag = device_topic_dict[topic]
            _log.debug("Current device to aggregate: {}".format(device_tag))
            if device_tag not in self.needed_devices:
                # Already have this device's data for the current interval.
                return False
            for key, value in device_data.items():
                # Point names are tagged with their device ("point&unit/zone").
                device_data_tag = "&".join([key, device_tag])
                tagged_device_data[device_data_tag] = value
            self.device_values.update(tagged_device_data)
            self.needed_devices.remove(device_tag)
            return True

        def on_analysis_message(self, peer, sender, bus, topic, headers,
                                message):
            """
            Subscribe to device data and assemble data set to pass
                to applications.
            :param peer:
            :param sender: device name
            :param bus:
            :param topic: device path topic
            :param headers: message headers
            :param message: message containing points and values dict
                    from device with point type
            :type peer: str
            :type sender: str
            :type bus: str
            :type topic: str
            :type headers: dict
            :type message: dict
            """
            timestamp = parse(headers.get("Date"))
            missing_but_running = False
            # With multiple devices, wait until the next scrape-interval
            # boundary so all devices line up on the same interval.
            if self.initialize_time is None and len(self.master_devices) > 1:
                self.initialize_time = find_reinitialize_time(timestamp)
            if self.initialize_time is not None and timestamp < self.initialize_time:
                if len(self.master_devices) > 1:
                    return
            to_zone = dateutil.tz.gettz(timezone)
            timestamp = timestamp.astimezone(to_zone)
            self.received_input_datetime = timestamp
            _log.debug("Current time of publish: {}".format(timestamp))
            device_data = message[0]
            if isinstance(device_data, list):
                device_data = device_data[0]
            device_needed = self.aggregate_subdevice(device_data, topic)
            if not device_needed:
                # A repeat publish before the set completed: either too much
                # is missing (reinitialize) or run with what we have.
                fraction_missing = float(len(self.needed_devices)) / len(
                    self.master_devices)
                if fraction_missing > missing_data_threshold:
                    _log.error(
                        "Device values already present, reinitializing at publish: {}"
                        .format(timestamp))
                    self.initialize_devices()
                    device_needed = self.aggregate_subdevice(
                        device_data, topic)
                    return
                missing_but_running = True
                _log.warning(
                    "Device already present. Using available data for diagnostic.: {}"
                    .format(timestamp))
                _log.warning(
                    "Device already present - topic: {}".format(topic))
                _log.warning("All devices: {}".format(self.master_devices))
                _log.warning("Needed devices: {}".format(self.needed_devices))
            if self._should_run_now() or missing_but_running:
                field_names = {}
                for point, data in self.device_values.items():
                    field_names[point] = data
                if not converter.initialized and conversion_map is not None:
                    converter.setup_conversion_map(map_names, field_names)
                device_data = converter.process_row(field_names)
                results = app_instance.run(timestamp, device_data)
                self.process_results(results)
                self.initialize_devices()
                if missing_but_running:
                    # Re-seed the fresh interval with the publish that
                    # triggered this early run.
                    device_needed = self.aggregate_subdevice(
                        device_data, topic)
            else:
                _log.info("Still need {} before running.".format(
                    self.needed_devices))

        def process_results(self, results):
            """
            Runs driven application with converted data. Calls appropriate
                methods to process commands, log and table_data in results.
            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven
            """
            _log.info("Processing Results!")
            actuator_error = False
            if actuation_mode:
                # NOTE(review): results.devices is a dict but actuator_request
                # iterates it as a device list (keys) — works, but confirm
                # intended vs. passing list(results.devices).
                if results.devices and actuator_lock_required:
                    actuator_error = self.actuator_request(results.devices)
                elif results.commands and actuator_lock_required:
                    actuator_error = self.actuator_request(command_devices)
                if not actuator_error:
                    results = self.actuator_set(results)
            for log in results.log_messages:
                _log.info("LOG: {}".format(log))
            for key, value in results.table_output.items():
                _log.info("TABLE: {}->{}".format(key, value))
            #if output_file_prefix is not None:
            #    results = self.create_file_output(results)
            if len(results.table_output.keys()):
                results = self.publish_analysis_results(results)
            return results

        def publish_analysis_results(self, results):
            """
            Publish table_data in analysis results to the message bus for
                capture by the data historian.
            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven
            """
            to_publish = defaultdict(dict)
            for app, analysis_table in list(results.table_output.items()):
                try:
                    # Keys look like "<name>&<timestamp>".
                    name_timestamp = app.split("&")
                    timestamp = name_timestamp[1]
                except:
                    # NOTE(review): bare except — narrows poorly; an
                    # IndexError is the expected failure here.
                    timestamp = self.received_input_datetime
                timestamp = format_timestamp(timestamp)
                headers = {
                    headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                    headers_mod.DATE: timestamp,
                }
                for entry in analysis_table:
                    for point, result in list(entry.items()):
                        for device in command_devices:
                            publish_topic = "/".join(
                                [publish_base, device, point])
                            analysis_topic = topics.RECORD(
                                subtopic=publish_topic)
                            # NOTE(review): `value` is not defined in this
                            # scope (NameError risk) and `datatype` is unused
                            # — looks like dead/leftover code; confirm.
                            datatype = str(type(value))
                            to_publish[analysis_topic] = result
                for result_topic, result in to_publish.items():
                    self.vip.pubsub.publish("pubsub", result_topic, headers,
                                            result)
                to_publish.clear()
            return results

        def create_file_output(self, results):
            """
            Create results/data files for testing and algorithm validation
            if table data is present in the results.
            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven"""
            tag = 0
            for key, value in list(results.table_output.items()):
                for row in value:
                    name_timestamp = key.split("&")
                    _name = name_timestamp[0]
                    timestamp = name_timestamp[1]
                    # NOTE(review): `tag` increments per row, so every row
                    # lands in its own file — confirm that is intended.
                    file_name = _name + str(tag) + ".csv"
                    tag += 1
                    if file_name not in self.file_creation_set:
                        self._header_written = False
                    self.file_creation_set.update([file_name])
                    with open(file_name, "a+") as file_to_write:
                        row.update({"Timestamp": timestamp})
                        _keys = list(row.keys())
                        file_output = csv.DictWriter(file_to_write, _keys)
                        if not self._header_written:
                            file_output.writeheader()
                            self._header_written = True
                        file_output.writerow(row)
                    # NOTE(review): redundant — the `with` already closed it.
                    file_to_write.close()
            return results

        def actuator_request(self, command_equip):
            """
            Calls the actuator"s request_new_schedule method to get
                    device schedule
            :param command_equip: contains the names of the devices
                that will be scheduled with the ActuatorAgent.
            :type: dict or list
            :returns: Return result from request_new_schedule method
                and True or False for error in scheduling device.
            :rtype: boolean
            :Return Values:
                request_error = True/False

            warning:: Calling without previously scheduling a device and not
            within the time allotted will raise a LockError"""
            _now = get_aware_utc_now()
            str_now = format_timestamp(_now)
            _end = _now + td(minutes=device_lock_duration)
            str_end = format_timestamp(_end)
            for device in command_equip:
                actuation_device = base_actuator_path(unit=device, point="")
                schedule_request = [[actuation_device, str_now, str_end]]
                try:
                    _log.info("Make Request {} for start {} and end {}".format(
                        actuation_device, str_now, str_end))
                    result = self.actuation_vip.call(
                        "platform.actuator", "request_new_schedule", "rcx",
                        actuation_device, "HIGH",
                        schedule_request).get(timeout=15)
                except RemoteError as ex:
                    _log.warning(
                        "Failed to schedule device {} (RemoteError): {}".
                        format(device, str(ex)))
                    request_error = True
                # NOTE(review): if RemoteError was raised, `result` is unbound
                # here and the access below raises — likely needs a
                # continue/return in the except branch; confirm.
                if result["result"] == "FAILURE":
                    if result["info"] == "TASK_ID_ALREADY_EXISTS":
                        _log.info("Task to schedule device already exists " +
                                  device)
                        request_error = False
                    else:
                        _log.warning(
                            "Failed to schedule device (unavailable) " +
                            device)
                        request_error = True
                else:
                    request_error = False
            # Returns the status of the LAST device only.
            return request_error

        def actuator_set(self, results):
            """
            Calls the actuator"s set_point method to set point on device

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven"""

            def make_actuator_set(device, point_value_dict):
                # Best effort: log failures and keep setting other points.
                for point, new_value in point_value_dict.items():
                    point_path = base_actuator_path(unit=device, point=point)
                    try:
                        _log.info("Set point {} to {}".format(
                            point_path, new_value))
                        result = self.actuation_vip.call(
                            "platform.actuator", "set_point", "rcx",
                            point_path, new_value).get(timeout=15)
                    except RemoteError as ex:
                        _log.warning("Failed to set {} to {}: {}".format(
                            point_path, new_value, str(ex)))
                        continue

            for device, point_value_dict in results.devices.items():
                make_actuator_set(device, point_value_dict)
            for device in command_devices:
                make_actuator_set(device, results.commands)
            return results

    def find_reinitialize_time(current_time):
        """Return the start of the next scrape interval after current_time."""
        midnight = current_time.replace(hour=0,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
        seconds_from_midnight = (current_time - midnight).total_seconds()
        offset = seconds_from_midnight % interval
        previous_in_seconds = seconds_from_midnight - offset
        next_in_seconds = previous_in_seconds + interval
        from_midnight = td(seconds=next_in_seconds)
        _log.debug("Start of next scrape interval: {}".format(midnight +
                                                              from_midnight))
        return midnight + from_midnight

    def setup_remote_actuation(vip_destination):
        """Spin up a secondary VIP agent connected to a remote platform."""
        event = gevent.event.Event()
        agent = Agent(address=vip_destination)
        gevent.spawn(agent.core.run, event)
        event.wait(timeout=15)
        return agent

    DrivenAgent.__name__ = "DrivenLoggerAgent"
    return DrivenAgent(**kwargs)
def vav_agent(config_path, **kwargs):
    """Parse the VAV agent configuration and return an agent instance.

    Reads market-participation coefficients (x0..x4, c0..c4), airflow and
    temperature limits, device/topic information, and actuation settings
    from the configuration file, then constructs a :class:`VAVAgent`.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: configured VAV market agent
    :rtype: VAVAgent
    """
    try:
        config = utils.load_config(config_path)
    except Exception:
        # BUGFIX: was `except StandardError`, which does not exist on
        # Python 3 (raises NameError instead of falling back to defaults).
        config = {}
    if not config:
        _log.info("Using defaults for starting configuration.")
    market_name = config.get('market_name')
    # Polynomial coefficients for the demand-curve / flexibility model.
    x0 = config.get('x0', 0)
    x1 = config.get('x1', 0)
    x2 = config.get('x2', 0)
    x3 = config.get('x3', 0)
    x4 = config.get('x4', 0)
    c0 = config.get('c0', 0)
    c1 = config.get('c1', 0)
    c2 = config.get('c2', 0)
    c3 = config.get('c3', 0)
    c4 = config.get('c4', 0)
    # Zone temperature and airflow limits.
    tMinAdj = config.get('tMin', 0)
    tMaxAdj = config.get('tMax', 0)
    mDotMin = config.get('mDotMin', 0)
    mDotMax = config.get('mDotMax', 0)
    sim_flag = config.get('sim_flag', False)
    tIn = config.get('tIn', 0)
    nonResponsive = config.get('nonResponsive', False)
    agent_name = config.get('agent_name')
    actuator = config.get('actuator', 'platform.actuator')
    mode = config.get('mode')
    device_points = config.get("device_points")
    parent_device_points = config.get("parent_device_points")
    setpoint = config.get('setpoint')
    activate_topic = "/".join([config.get("building", agent_name), "actuate"])
    setpoint_mode = config.get("setpoint_mode", 0)
    # Parent device (e.g. AHU) publishes at the unit level; the VAV zone
    # is a subdevice path under the same parent unit.
    parent_device_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                               building=config.get("building", ""),
                                               unit=config.get("parent_device", ""),
                                               path="",
                                               point="all")
    device_topic = topics.DEVICES_VALUE(campus=config.get("campus", ""),
                                        building=config.get("building", ""),
                                        unit=config.get("parent_device", ""),
                                        path=config.get("device", ""),
                                        point="all")
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get("campus", ""),
                                           building=config.get("building", ""),
                                           unit=config.get("parent_device", ""),
                                           path=config.get("device", ""),
                                           point=setpoint)
    verbose_logging = config.get('verbose_logging', True)
    return VAVAgent(market_name, agent_name, x0, x1, x2, x3, x4, c0, c1, c2,
                    c3, c4, tMinAdj, tMaxAdj, mDotMin, mDotMax, tIn,
                    nonResponsive, verbose_logging, device_topic,
                    device_points, parent_device_topic, parent_device_points,
                    base_rpc_path, activate_topic, actuator, mode,
                    setpoint_mode, sim_flag, **kwargs)
def __init__(self, config_path, **kwargs):
    """Load the ILC configuration file and initialize all agent state.

    Builds criteria/curtailment clusters from the per-cluster config files,
    derives the pubsub topics for curtailable devices, the power meter and
    the optional kill switch, and converts the timing parameters to
    timedeltas.

    :param config_path: Path to the main ILC configuration file.
    :type config_path: str
    """
    super(ILCAgent, self).__init__(**kwargs)
    config = utils.load_config(config_path)
    campus = config.get("campus", "")
    building = config.get("building", "")
    # For dash board message publishes
    self.agent_id = config.get("agent_id", "Intelligent Load Control Agent")
    dashboard_topic = config.get("dashboard_topic")
    self.application_category = config.get("application_category", "Load Control")
    self.application_name = config.get("application_name", "Intelligent Load Control")
    self.ilc_start_topic = self.agent_id
    # --------------------------------------------------------------------------------
    # For Target agent updates...
    analysis_prefix_topic = config.get("analysis_prefix_topic", "record")
    self.target_agent_subscription = "{}/target_agent".format(analysis_prefix_topic)
    # --------------------------------------------------------------------------------
    self.update_base_topic = "/".join([analysis_prefix_topic, self.agent_id])
    # NOTE(review): the local ilc_start_topic is only bound inside the campus
    # branch; an empty campus with a non-empty building (or both empty) would
    # raise NameError below — verify campus is always configured.
    if campus:
        self.update_base_topic = "/".join([self.update_base_topic, campus])
        ilc_start_topic = "/".join([self.agent_id, campus])
        if dashboard_topic is not None:
            dashboard_topic = "/".join([dashboard_topic, self.agent_id, campus])
    if building:
        self.update_base_topic = "/".join([self.update_base_topic, building])
        ilc_start_topic = "/".join([ilc_start_topic, building])
        if dashboard_topic is not None:
            dashboard_topic = "/".join([dashboard_topic, building])
    self.ilc_topic = dashboard_topic if dashboard_topic is not None else self.update_base_topic
    self.ilc_start_topic = "/".join([ilc_start_topic, "ilc/start"])
    cluster_configs = config["clusters"]
    self.criteria = CriteriaContainer()
    self.curtailment = CurtailmentContainer()
    # Each cluster supplies a pairwise-criteria (AHP) matrix plus per-device
    # criteria and curtailment configuration files.
    for cluster_config in cluster_configs:
        criteria_file_name = cluster_config["pairwise_criteria_file"]
        if criteria_file_name[0] == "~":
            criteria_file_name = os.path.expanduser(criteria_file_name)
        device_criteria_config = cluster_config["device_criteria_file"]
        device_curtailment_config = cluster_config["device_curtailment_file"]
        cluster_priority = cluster_config["cluster_priority"]
        cluster_actuator = cluster_config.get("cluster_actuator", "platform.actuator")
        criteria_labels, criteria_array = extract_criteria(criteria_file_name)
        col_sums = calc_column_sums(criteria_array)
        row_average = normalize_matrix(criteria_array, col_sums)
        # An inconsistent AHP matrix invalidates the prioritization; abort.
        if not validate_input(criteria_array, col_sums):
            _log.debug("Inconsistent criteria matrix. Check configuration "
                       "in: {}" .format(criteria_file_name))
            sys.exit()
        if device_criteria_config[0] == "~":
            device_criteria_config = os.path.expanduser(device_criteria_config)
        criteria_config = utils.load_config(device_criteria_config)
        criteria_cluster = CriteriaCluster(cluster_priority, criteria_labels,
                                           row_average, criteria_config)
        self.criteria.add_criteria_cluster(criteria_cluster)
        if device_curtailment_config[0] == "~":
            device_curtailment_config = os.path.expanduser(device_curtailment_config)
        curtailment_config = utils.load_config(device_curtailment_config)
        curtailment_cluster = CurtailmentCluster(curtailment_config, cluster_actuator)
        self.curtailment.add_curtailment_cluster(curtailment_cluster)
    self.base_device_topic = topics.DEVICES_VALUE(campus=campus,
                                                  building=building,
                                                  unit=None,
                                                  path="",
                                                  point=None)
    self.base_rpc_path = topics.RPC_DEVICE_PATH(campus=campus,
                                                building=building,
                                                unit=None,
                                                path="",
                                                point=None)
    self.device_topic_list = []
    self.device_topic_map = {}
    all_devices = self.curtailment.get_device_name_list()
    # Subscribe-to topics for every curtailable device; map topic -> name.
    for device_name in all_devices:
        device_topic = topics.DEVICES_VALUE(campus=campus,
                                            building=building,
                                            unit=device_name[0],
                                            path="",
                                            point="all")
        self.device_topic_list.append(device_topic)
        self.device_topic_map[device_topic] = device_name
    power_token = config["power_meter"]
    power_meter = power_token["device"]
    self.power_point = power_token["point"]
    # Optional sympy expression for computing demand from meter points.
    demand_formula = power_token.get("demand_formula")
    self.calculate_demand = False
    if demand_formula is not None:
        self.calculate_demand = True
        try:
            demand_operation = parse_sympy(demand_formula["operation"])
            _log.debug("Demand calculation - expression: {}".format(demand_operation))
            self.demand_expr = parse_expr(parse_sympy(demand_operation))
            self.demand_args = parse_sympy(demand_formula["operation_args"])
            self.demand_points = symbols(self.demand_args)
        except (KeyError, ValueError):
            _log.debug("Missing 'operation_args' or 'operation' for setting demand formula!")
            self.calculate_demand = False
        except:  # NOTE(review): bare except — deliberately best-effort, disables demand calc
            _log.debug("Unexpected error when reading demand formula parameters!")
            self.calculate_demand = False
    self.power_meter_topic = topics.DEVICES_VALUE(campus=campus,
                                                  building=building,
                                                  unit=power_meter,
                                                  path="",
                                                  point="all")
    self.kill_device_topic = None
    kill_token = config.get("kill_switch")
    if kill_token is not None:
        kill_device = kill_token["device"]
        self.kill_pt = kill_token["point"]
        self.kill_device_topic = topics.DEVICES_VALUE(campus=campus,
                                                      building=building,
                                                      unit=kill_device,
                                                      path="",
                                                      point="all")
    demand_limit = config["demand_limit"]
    # A non-numeric demand_limit means the limit is supplied at runtime.
    if isinstance(demand_limit, (int, float)):
        self.demand_limit = float(demand_limit)
    else:
        self.demand_limit = None
    self.demand_schedule = config.get("demand_schedule")
    # Timing parameters (td == datetime.timedelta).
    self.curtail_time = td(minutes=config.get("curtailment_time", 15))
    self.average_building_power_window = td(minutes=config.get("average_building_power_window", 15))
    self.curtail_confirm = td(minutes=config.get("curtailment_confirm", 5))
    self.curtail_break = td(minutes=config.get("curtailment_break", 15))
    self.actuator_schedule_buffer = td(minutes=config.get("actuator_schedule_buffer", 15)) + self.curtail_break
    self.reset_curtail_count_time = td(hours=config.get("reset_curtail_count_time", 6))
    self.longest_possible_curtail = len(all_devices) * self.curtail_time * 2
    maximum_time_without_release = config.get("maximum_time_without_release")
    self.maximum_time_without_release = td(minutes=maximum_time_without_release) if maximum_time_without_release is not None else None
    # NOTE(review): stagger_release_time reuses curtailment_break (minutes)
    # without converting to seconds — confirm downstream units.
    self.stagger_release_time = float(config.get("curtailment_break", 15.0))
    self.stagger_release = config.get("stagger_release", False)
    self.stagger_off_time = config.get("stagger_off_time", True)
    # NOTE(review): need_actuator_schedule is read but never used or stored.
    need_actuator_schedule = config.get("need_actuator_schedule", False)
    # Runtime state, reset on each curtailment cycle.
    self.running_ahp = False
    self.next_curtail_confirm = None
    self.curtail_end = None
    self.break_end = None
    self.reset_curtail_count = None
    self.kill_signal_received = False
    self.scheduled_devices = set()
    self.devices_curtailed = []
    self.bldg_power = []
    self.device_group_size = None
    self.current_stagger = None
    self.next_release = None
    self.power_meta = None
    self.tasks = {}
    self.tz = None
    self.simulation_running = config.get("simulation_running", False)
class Agent(PublishMixin, BaseAgent):
    """AFDD driver agent.

    Schedules a recurring diagnostic window with the actuator agent, runs
    the AFDD algorithm in a greenlet, and bridges pubsub device/weather
    data into the running diagnostic via wait queues.
    """

    def __init__(self, **kwargs):
        super(Agent, self).__init__(**kwargs)
        self.lock_timer = None
        self.lock_acquired = False
        self.tasklet = None
        # Queues hand pubsub callbacks' data over to the diagnostic greenlet.
        self.data_queue = green.WaitQueue(self.timer)
        self.value_queue = green.WaitQueue(self.timer)
        self.weather_data_queue = green.WaitQueue(self.timer)
        self.last_run_time = None
        self.is_running = False
        self.remaining_time = None
        self.task_id = agent_id
        self.retry_schedule = None
        self.start = None
        self.end = None

    def setup(self):
        super(Agent, self).setup()
        self.scheduled_task()

    def startrun(self, algo=None):
        """Launch the diagnostic (default: full AFDD run) in a greenlet."""
        _log.debug('start diagnostic')
        if algo is None:
            algo = afdd.AFDD(self, config_path).run_all
        self.tasklet = greenlet.greenlet(algo)
        self.is_running = True
        self.last_run_time = datetime.datetime.now()
        self.tasklet.switch()

    def scheduled_task(self):
        '''Schedule re-occuring diagnostics.

        Requests a LOW_PREEMPT actuator schedule for a 2.5 h window and
        re-arms itself for the next run; if the remaining window today is
        shorter than the minimum run time, the run is pushed to tomorrow.
        '''
        _log.debug('Schedule Dx')
        headers = {
            'type': 'NEW_SCHEDULE',
            'requesterID': agent_id,
            'taskID': agent_id,
            'priority': 'LOW_PREEMPT'
        }
        min_run_hour = math.floor(min_run_window / 3600)
        min_run_minute = int((min_run_window / 3600 - min_run_hour) * 60)
        self.start = datetime.datetime.now().replace(hour=start_hour,
                                                     minute=start_minute)
        self.end = self.start + datetime.timedelta(hours=2, minutes=30)
        run_start = self.end - datetime.datetime.now()
        required_diagnostic_time = datetime.timedelta(
            hours=min_run_hour, minutes=min_run_minute)
        if run_start < required_diagnostic_time:
            # Not enough of today's window left; schedule for tomorrow.
            self.start = self.start + datetime.timedelta(days=1)
            self.end = self.start + datetime.timedelta(hours=2, minutes=30)
            sched_time = datetime.datetime.now() + datetime.timedelta(
                days=day_run_interval + 1)
            sched_time = sched_time.replace(hour=0, minute=1)
        else:
            sched_time = datetime.datetime.now() + datetime.timedelta(
                days=day_run_interval)
        self.start = str(self.start)
        self.end = str(self.end)
        # Retry the schedule request every 60 s until acknowledged
        # (cancelled in schedule_result).
        self.task_timer = self.periodic_timer(
            60, self.publish_json, topics.ACTUATOR_SCHEDULE_REQUEST(),
            headers, [[
                "{campus}/{building}/{unit}".format(**rtu_path),
                self.start, self.end
            ]])
        event = sched.Event(self.scheduled_task)
        self.next = self.schedule(sched_time, event)

    @matching.match_headers({
        headers_mod.REQUESTER_ID: agent_id,
        'type': 'CANCEL_SCHEDULE'
    })
    @matching.match_exact(topics.ACTUATOR_SCHEDULE_RESULT())
    # BUG FIX: pubsub callbacks are invoked with (topic, headers, message,
    # match); the original signature took only self and raised TypeError
    # when triggered. Defaults keep any direct self.preempt() calls working.
    def preempt(self, topic=None, headers=None, message=None, match=None):
        if self.is_running:
            self.cancel_greenlet()

    @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
    @matching.match_exact(topics.ACTUATOR_SCHEDULE_ANNOUNCE(**rtu_path))
    def on_schedule(self, topic, headers, message, match):
        """Start/stop the diagnostic based on the announced schedule window."""
        msg = jsonapi.loads(message[0])
        now = datetime.datetime.now()
        self.remaining_time = headers.get('window', 0)
        if self.task_id == headers.get('taskID', ''):
            if self.remaining_time < termination_window:
                if self.is_running:
                    self.cancel_greenlet()
            elif (self.remaining_time > min_run_window and
                  (self.last_run_time is None or
                   (now - self.last_run_time) >
                   datetime.timedelta(hours=23, minutes=50))):
                self.startrun()

    @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
    @matching.match_exact(topics.ACTUATOR_SCHEDULE_RESULT())
    def schedule_result(self, topic, headers, message, match):
        """Stop the retry timer once the actuator acknowledges the schedule."""
        msg = jsonapi.loads(message[0])
        _log.debug('Actuator response received')
        self.task_timer.cancel()

    @matching.match_exact(topics.DEVICES_VALUE(point='all', **rtu_path))
    def on_new_data(self, topic, headers, message, match):
        """Forward device data to the diagnostic; honor user override."""
        data = jsonapi.loads(message[0])
        # Check override status
        if int(data["VoltronPBStatus"]) == 1:
            if self.is_running:
                _log.debug("User override is initiated...")
                headers = {
                    'Content-Type': 'text/plain',
                    'requesterID': agent_id,
                }
                self.publish(
                    topics.ACTUATOR_SET(point="VoltronFlag", **rtu_path),
                    headers, str(0.0))
                self.cancel_greenlet()
        else:
            self.data_queue.notify_all(data)

    @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
    @matching.match_glob(topics.ACTUATOR_VALUE(point='*', **rtu_path))
    def on_set_result(self, topic, headers, message, match):
        self.value_queue.notify_all((match.group(1), True))

    @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
    @matching.match_glob(topics.ACTUATOR_ERROR(point='*', **rtu_path))
    def on_set_error(self, topic, headers, message, match):
        self.value_queue.notify_all((match.group(1), False))

    def cancel_greenlet(self):
        # kill all tasks currently in the queue
        self.data_queue.kill_all()
        self.value_queue.kill_all()
        # kill current tasklet
        self.tasklet.throw()
        self.is_running = False

    def sleep(self, timeout):
        _log.debug('wait for steady state({})'.format(timeout))
        green.sleep(timeout, self.timer)

    def get_new_data(self, timeout=None):
        _log.debug('get_new_data({})'.format(timeout))
        return self.data_queue.wait(timeout)

    def command_equip(self, point_name, value, timeout=None):
        """Set an actuator point and wait for the set result (True on timeout)."""
        _log.debug('set_point({}, {}, {})'.format(point_name, value, timeout))
        headers = {
            'Content-Type': 'text/plain',
            'requesterID': agent_id,
        }
        self.publish(topics.ACTUATOR_SET(point=point_name, **rtu_path),
                     headers, str(value))
        try:
            return self.value_queue.wait(timeout)
        except green.Timeout:
            return True

    def weather_request(self, timeout=None):
        """Request outdoor temperature for zip_code; 'INCONCLUSIVE' on timeout."""
        _log.debug('weather request for {}'.format(zip_code))
        headers = {'Content-Type': 'text/plain', 'requesterID': agent_id}
        msg = {'zipcode': str(zip_code)}
        self.publish_json('weather/request', headers, msg)
        try:
            return self.weather_data_queue.wait(timeout)
        except green.Timeout:
            return 'INCONCLUSIVE'

    # BUG FIX: the '@' was missing on match_headers, so the line was a bare
    # no-op expression and the headers filter was never applied.
    @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
    @matching.match_exact('weather/response/temperature/temp_f')
    def weather_response(self, topic, headers, message, match):
        data = float(jsonapi.loads(message[0]))
        print(data)
        self.weather_data_queue.notify_all(data)
def ilc_agent(config_path, **kwargs):
    '''Intelligent Load Curtailment (ILC) Application using
    Analytical Hierarchical Process (AHP).

    Parses the ILC configuration, builds the device clusters and pubsub
    topics, and returns the AHP agent instance.

    :param config_path: Path to the ILC configuration file.
    :type config_path: str
    '''
    config = utils.load_config(config_path)
    location = {}
    location['campus'] = config.get('campus')
    location['building'] = config.get('building')
    cluster_configs = config['clusters']
    agent_id = config.get('agent_id')
    global mappers
    try:
        mappers = config['mappers']
    except KeyError:
        mappers = {}
    clusters = Clusters()
    # Build one DeviceCluster per configured cluster; each supplies an AHP
    # pairwise-criteria spreadsheet and a device configuration file.
    for cluster_config in cluster_configs:
        excel_file_name = cluster_config['critieria_file_path']
        cluster_config_file_name = cluster_config['device_file_path']
        cluster_priority = cluster_config['cluster_priority']
        crit_labels, criteria_arr = extract_criteria(excel_file_name,
                                                     'CriteriaMatrix')
        col_sums = calc_column_sums(criteria_arr)
        _, row_average = normalize_matrix(criteria_arr, col_sums)
        # An inconsistent AHP matrix invalidates prioritization; abort.
        if not (validate_input(criteria_arr, col_sums, crit_labels,
                               CRITERIA_LABELSTRING, MATRIX_ROWSTRING)):
            _log.info('Inconsistent criteria matrix. Check configuration '
                      'in ' + excel_file_name)
            sys.exit()
        cluster_config = utils.load_config(cluster_config_file_name)
        device_cluster = DeviceCluster(cluster_priority, crit_labels,
                                       row_average, cluster_config)
        _log.debug('Crit Labels: ' + str(crit_labels))
        clusters.add_device_cluster(device_cluster)
    base_device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                             building=config.get('building', ''),
                                             unit=None,
                                             path='',
                                             point=None)
    base_rpc_path = topics.RPC_DEVICE_PATH(campus=config.get('campus', ''),
                                           building=config.get('building', ''),
                                           unit=None,
                                           path='',
                                           point=None)
    device_topic_list = []
    device_topic_map = {}
    all_devices = clusters.get_device_name_list()
    for device_name in all_devices:
        device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                            building=config.get('building', ''),
                                            unit=device_name,
                                            path='',
                                            point='all')
        device_topic_list.append(device_topic)
        device_topic_map[device_topic] = device_name
    power_token = config['power_meter']
    power_meter = power_token['device']
    power_pt = power_token['point']
    power_meter_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                             building=config.get('building', ''),
                                             unit=power_meter,
                                             path='',
                                             point='all')
    kill_device_topic = None
    kill_token = config.get('kill_switch')
    if kill_token is not None:
        kill_device = kill_token['device']
        kill_pt = kill_token['point']
        kill_device_topic = topics.DEVICES_VALUE(campus=config.get('campus', ''),
                                                 building=config.get('building', ''),
                                                 unit=kill_device,
                                                 path='',
                                                 point='all')
    demand_limit = float(config['demand_limit'])
    # Timing parameters (td == datetime.timedelta).
    curtail_time = td(minutes=config.get('curtailment_time', 15.0))
    average_building_power_window = td(minutes=config.get('average_building_power_window', 5.0))
    curtail_confirm = td(minutes=config.get('curtailment_confirm', 5.0))
    curtail_break = td(minutes=config.get('curtailment_break', 15.0))
    actuator_schedule_buffer = td(minutes=config.get('actuator_schedule_buffer', 5.0))
    reset_curtail_count_time = td(hours=config.get('reset_curtail_count_time', 6.0))
    longest_possible_curtail = len(clusters.devices) * curtail_time
    stagger_release_time = config.get('curtailment_break', 15.0)*60.0
    stagger_release = config.get('stagger_release', False)
    minimum_stagger_window = config.get('minimum_stagger_window', 120)

    class AHP(Agent):
        def __init__(self, **kwargs):
            super(AHP, self).__init__(**kwargs)
            self.running_ahp = False
            self.row_average = None
            self.next_curtail_confirm = None
            self.curtail_end = None
            self.break_end = None
            self.reset_curtail_count_time = None
            self.kill_signal_recieved = False
            self.scheduled_devices = set()
            self.devices_curtailed = set()
            self.bldg_power = []

        @Core.receiver('onstart')
        def starting_base(self, sender, **kwargs):
            '''startup method:
             - Extract Criteria Matrix from excel file.
             - Setup subscriptions to curtailable devices.
             - Setup subscription to building power meter.
            '''
            for device_topic in device_topic_list:
                _log.debug('Subscribing to '+device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=device_topic,
                                          callback=self.new_data)
            _log.debug('Subscribing to '+power_meter_topic)
            self.vip.pubsub.subscribe(peer='pubsub',
                                      prefix=power_meter_topic,
                                      callback=self.load_message_handler)
            if kill_device_topic is not None:
                _log.debug('Subscribing to '+kill_device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=kill_device_topic,
                                          callback=self.handle_agent_kill)

        def handle_agent_kill(self, peer, sender, bus, topic, headers, message):
            '''
            Locally implemented override for ILC application.

            When an override is detected the ILC application will return
            operations for all units to normal.
            '''
            data = message[0]
            _log.info('Checking kill signal')
            kill_signal = bool(data[kill_pt])
            if kill_signal:
                _log.info('Kill signal received, shutting down')
                # BUG FIX: was set to False, which left new_data and
                # load_message_handler running during shutdown; the guard
                # checks in those callbacks test this flag.
                self.kill_signal_recieved = True
                gevent.sleep(8)
                self.end_curtail()
                sys.exit()

        def new_data(self, peer, sender, bus, topic, headers, message):
            '''Call back method for curtailable device data subscription.'''
            if self.kill_signal_recieved:
                return
            _log.info('Data Received for {}'.format(topic))
            # topic of form: devices/campus/building/device
            device_name = device_topic_map[topic]
            data = message[0]
            now = parser.parse(headers['Date'])
            clusters.get_device(device_name).ingest_data(now, data)

        def load_message_handler(self, peer, sender, bus, topic, headers, message):
            '''Call back method for building power meter. Calculates the
            average building demand over a configurable time and manages the
            curtailment time and curtailment break times.
            '''
            if self.kill_signal_recieved:
                return
            _log.debug('Reading building power data.')
            current_power = float(message[0][power_pt])
            if current_power < 0:
                current_power = 0.0
            now = parser.parse(headers['Date'])
            # Maintain a sliding window of (time, power) samples.
            self.bldg_power.append((now, current_power))
            if self.bldg_power[-1][0] - self.bldg_power[0][0] > average_building_power_window:
                self.bldg_power.pop(0)
            average_power = sum(power[1] for power in self.bldg_power)/len(self.bldg_power)
            _log.debug('Reported time: '+str(now))
            _log.info('Current load: {}'.format(average_power))
            if self.reset_curtail_count_time is not None:
                if self.reset_curtail_count_time <= now:
                    _log.debug('Resetting curtail count')
                    clusters.reset_curtail_count()
            if self.running_ahp:
                if now >= self.curtail_end:
                    self.end_curtail()
                elif now >= self.next_curtail_confirm:
                    self.curtail_confirm(average_power, now)
                return
            elif self.break_end is not None and now < self.break_end:
                _log.debug('Skipping load check, still on curtailment break.')
                return
            self.check_load(average_power, now)

        def check_load(self, bldg_power, now):
            '''Check whole building power and if the value is above the
            the demand limit (demand_limit) then initiate the ILC (AHP)
            sequence.
            '''
            _log.debug('Checking building load.')
            if bldg_power > demand_limit:
                _log.info('Current load ({load}) exceeds limit of {limit}.'
                          .format(load=bldg_power, limit=demand_limit))
                score_order = clusters.get_score_order()
                if not score_order:
                    _log.info('All devices are off, nothing to curtail.')
                    return
                scored_devices = self.actuator_request(score_order)
                self.curtail(scored_devices, bldg_power, now)

        def curtail(self, scored_devices, bldg_power, now):
            '''Curtail loads by turning off device (or device components)'''
            need_curtailed = bldg_power - demand_limit
            est_curtailed = 0.0
            remaining_devices = scored_devices[:]
            for device in self.devices_curtailed:
                if device in remaining_devices:
                    remaining_devices.remove(device)
            if not self.running_ahp:
                _log.info('Starting AHP')
                self.running_ahp = True
            if not remaining_devices:
                _log.debug('Everything available has already been curtailed')
                return
            self.curtail_end = now + curtail_time
            self.break_end = now + curtail_break + curtail_time
            self.reset_curtail_count_time = self.curtail_end + reset_curtail_count_time
            self.next_curtail_confirm = now + curtail_confirm
            _log.info('Curtailing load.')
            for item in remaining_devices:
                device_name, command = item
                curtail = clusters.get_device(device_name).get_curtailment(command)
                curtail_pt = curtail['point']
                curtail_val = curtail['value']
                curtail_load = curtail['load']
                curtailed_point = base_rpc_path(unit=device_name, point=curtail_pt)
                # TODO: catch errors.
                _log.debug('Setting '+curtailed_point+' to '+str(curtail_val))
                try:
                    if self.kill_signal_recieved:
                        break
                    result = self.vip.rpc.call('platform.actuator', 'set_point',
                                               agent_id, curtailed_point,
                                               curtail_val).get(timeout=4)
                except RemoteError as ex:
                    _log.warning('Failed to set {} to {}: {}'
                                 .format(curtailed_point, curtail_val, str(ex)))
                    continue
                est_curtailed += curtail_load
                clusters.get_device(device_name).increment_curtail(command)
                self.devices_curtailed.add(item)
                if est_curtailed >= need_curtailed:
                    break
            return

        def curtail_confirm(self, cur_pwr, now):
            '''Check if load shed has been met.  If the demand goal is not
            met and there are additional devices to curtail then the ILC will
            shed additional load by curtailing more devices.
            '''
            if cur_pwr < demand_limit:
                _log.info('Curtail goal for building load met.')
            else:
                _log.info('Curtail goal for building load NOT met.')
                self.check_load(cur_pwr, now)

        def actuator_request(self, score_order):
            '''request access to devices.'''
            _now = dt.now()
            str_now = _now.strftime(DATE_FORMAT)
            _end = _now + longest_possible_curtail + actuator_schedule_buffer
            str_end = _end.strftime(DATE_FORMAT)
            ctrl_dev = []
            already_handled = dict((device, True)
                                   for device in self.scheduled_devices)
            for item in score_order:
                device, point = item
                _log.debug('Reserving device: ' + device)
                if device in already_handled:
                    if already_handled[device]:
                        _log.debug('Skipping reserve device (previously reserved): ' + device)
                        ctrl_dev.append(item)
                    continue
                curtailed_device = base_rpc_path(unit=device, point='')
                schedule_request = [[curtailed_device, str_now, str_end]]
                try:
                    if self.kill_signal_recieved:
                        break
                    result = self.vip.rpc.call(
                        'platform.actuator', 'request_new_schedule', agent_id,
                        device, 'HIGH', schedule_request).get(timeout=4)
                except RemoteError as ex:
                    _log.warning('Failed to schedule device {} (RemoteError): {}'
                                 .format(device, str(ex)))
                    continue
                if result['result'] == 'FAILURE':
                    # _log.warn is a deprecated alias of warning.
                    _log.warning('Failed to schedule device (unavailable) ' + device)
                    already_handled[device] = False
                else:
                    already_handled[device] = True
                    self.scheduled_devices.add(device)
                    ctrl_dev.append(item)
            return ctrl_dev

        def end_curtail(self):
            self.running_ahp = False
            self.reset_devices()
            self.release_devices()

        def reset_devices(self):
            '''Revert every curtailed point, optionally staggering the
            releases to avoid a demand spike on restore.
            '''
            _log.info('Resetting devices')
            # BUG FIX: guard against ZeroDivisionError when nothing was
            # curtailed.
            if not self.devices_curtailed:
                return
            current_stagger = stagger_release_time/len(self.devices_curtailed)
            device_group_size = 1
            group_count = 0
            while current_stagger < minimum_stagger_window:
                device_group_size += 1
                current_stagger = current_stagger + current_stagger
                if device_group_size == len(self.devices_curtailed):
                    device_group_size += 1
                    break
            for item in self.devices_curtailed:
                group_count += 1
                if stagger_release and group_count == device_group_size:
                    gevent.sleep(current_stagger)
                device_name, command = item
                curtail = clusters.get_device(device_name).get_curtailment(command)
                curtail_pt = curtail['point']
                curtailed_point = base_rpc_path(unit=device_name, point=curtail_pt)
                try:
                    result = self.vip.rpc.call('platform.actuator', 'revert_point',
                                               agent_id,
                                               curtailed_point).get(timeout=10)
                    _log.debug('Reverted point: {}'.format(curtailed_point))
                except RemoteError as ex:
                    _log.warning('Failed to revert point {} (RemoteError): {}'
                                 .format(curtailed_point, str(ex)))
                    continue
            self.devices_curtailed = set()

        def release_devices(self):
            for device in self.scheduled_devices:
                result = self.vip.rpc.call(
                    'platform.actuator', 'request_cancel_schedule', agent_id,
                    device).get(timeout=10)
            self.scheduled_devices = set()

    return AHP(**kwargs)
class Agent(PublishMixin, BaseAgent): """Class agent""" def __init__(self, **kwargs): super(Agent, self).__init__(**kwargs) self.normal_firststage_fanspeed = config.get( 'normal_firststage_fanspeed', 75.0) self.normal_secondstage_fanspeed = config.get( 'normal_secondstage_fanspeed', 90.0) self.normal_damper_stpt = config.get('normal_damper_stpt', 5.0) self.normal_coolingstpt = config.get('normal_coolingstpt', 74.0) self.normal_heatingstpt = config.get('normal_heatingstpt', 67.0) self.smap_path = config.get('smap_path') self.default_cooling_stage_differential = 0.5 self.current_spacetemp = 0.0 self.building_thermal_constant = config.get( 'building_thermal_constant', 4.0) self.timestep_length = config.get('timestep_length', 900) self.csp_cpp = config.get('csp_cpp', 80.0) self.csp_pre = config.get('csp_pre', 67.0) self.restore_window = int( ((self.csp_cpp - self.normal_coolingstpt) / self.building_thermal_constant) * 3600) self.state = 'STARTUP' self.e_start_msg = None self.error_handler = None self.actuator_handler = None self.pre_cool_idle = None self.e_start = None self.e_end = None self.pre_stored_spacetemp = None self.device_schedule = {} self.all_scheduled_events = {} self.currently_running_dr_event_handlers = [] self.headers = { headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON, 'requesterID': agent_id } utils.setup_logging() self._log = logging.getLogger(__name__) @matching.match_headers({headers_mod.REQUESTER_ID: agent_id}) @matching.match_exact(topics.ACTUATOR_SCHEDULE_RESULT()) def schedule_result(self, topic, headers, message, match): msg = jsonapi.loads(message[0]) self._log.info('Schedule Request Acknowledged') self.task_timer.cancel() task_id = headers.get('taskID', 0) response_type = headers.get('type', 0) schedule_start = self.device_schedule[task_id]["schedule_start"] event_start = schedule_start + datetime.timedelta(minutes=1) schedule_end = self.device_schedule[task_id]["schedule_end"] e_start = self.device_schedule[task_id]["event_start"] 
e_end = self.device_schedule[task_id]["event_end"] if response_type == 'NEW_SCHEDULE' and self.error_handler == None: if msg.get('result', 0) == 'SUCCESS': event = sched.Event(self.pre_cool_setup, args=[e_start, e_end]) self.schedule(event_start, event) self.all_scheduled_events[e_start] = event elif msg.get('result', 0) == 'FAILURE' and schedule_start < schedule_end: schedule_start = schedule_start + datetime.timedelta( minutes=10) headers = { 'type': 'NEW_SCHEDULE', 'requesterID': agent_id, 'taskID': task_id, 'priority': 'High' } self.task_timer = self.periodic_timer( 20, self.publish_json, topics.ACTUATOR_SCHEDULE_REQUEST(), headers, [[ "{campus}/{building}/{unit}".format(**rtu_path), str(schedule_start), schedule_end ]]) elif schedule_start >= schedule_end: return if self.error_handler is not None: self.error_handler() @matching.match_headers({headers_mod.REQUESTER_ID: agent_id}) @matching.match_glob(topics.ACTUATOR_ERROR(point='*', **rtu_path)) def _on_error_result(self, topic, headers, message, match): """ERROR result""" point = match.group(1) msg = jsonapi.loads(message[0]) point = match.group(1) today = datetime.datetime.now().date() for key, schedule in self.device_schedule.items(): if schedule["date"] == today: schedule_start = schedule["schedule_start"] schedule_end = schedule["schedule_end"] task_id = key break self._log.info('Error Results: ' + str(point) + ' ' + str(msg)) if msg.get('type', 0) == 'LockError': headers = { 'type': 'NEW_SCHEDULE', 'requesterID': agent_id, 'taskID': task_id, 'priority': 'HIGH' } self.task_timer = self.periodic_timer( 20, self.publish_json, topics.ACTUATOR_SCHEDULE_REQUEST(), headers, [[ "{campus}/{building}/{unit}".format(**rtu_path), str(schedule_start), str(schedule_end) ]]) elif self.error_handler is not None: self._log.info('Running error handler') self.error_handler() @matching.match_exact(topics.DEVICES_VALUE(point='all', **rtu_path)) def _on_new_data(self, topic, headers, message, match): """watching for new 
data""" data = jsonapi.loads(message[0]) self.current_spacetemp = float(data[space_temp]) dr_override = bool(int(data[override_command])) occupied = bool(int(data[occupied_status])) if dr_override and self.state not in ('IDLE', 'CLEANUP', 'STARTUP'): self._log.info('User Override Initiated') self.cancel_event(cancel_type='OVERRIDE') if not occupied and self.state in ('DR_EVENT', 'RESTORE'): self.cancel_event() if self.state == 'STARTUP': self._log.info('Finished Startup') self.state = 'IDLE' @matching.match_exact(topics.OPENADR_EVENT()) def _on_dr_event(self, topic, headers, message, match): if self.state == 'STARTUP': self._log.info('DR event ignored because of startup.') return """handle openADR events""" msg = jsonapi.loads(message[0]) self._log.info('EVENT Received: ' + str(msg)) e_id = msg['id'] e_status = msg['status'] e_start = msg['start_at'] task_id = msg['id'] #e_start = datetime.datetime.strptime(e_start,datefmt) today = datetime.datetime.now().date() e_end = msg['end_at'] e_end = parser.parse(e_end, fuzzy=True) e_start = parser.parse(e_start, fuzzy=True) dr_date = e_start.date() current_datetime = datetime.datetime.now() if current_datetime > e_end: self._log.info('Too Late Event is Over') return if e_status == 'cancelled': if e_start in self.all_scheduled_events: self._log.info('Event Cancelled') self.all_scheduled_events[e_start].cancel() del self.all_scheduled_events[e_start] if e_start.date() == today and (self.state == 'PRECOOL' or self.state == 'DR_EVENT'): self.cancel_event() return if today > e_start.date(): if e_start in self.all_scheduled_events: self.all_scheduled_events[e_start].cancel() del self.all_scheduled_events[e_start] return for item in self.all_scheduled_events: if e_start.date() == item.date(): if e_start.time() != item.time(): self._log.info('Updating Event') self.all_scheduled_events[item].cancel() del self.all_scheduled_events[item] if e_start.date() == today and (self.state == 'PRECOOL' or self.state == 'DR_EVENT'): 
self.cancel_event(cancel_type='UPDATING') break elif e_start.time() == item.time(): self._log.info("same event") return #Don't schedule an event if we are currently in OVERRIDE state. if e_start.date() == today and (self.state == 'OVERRIDE'): return schedule_start = e_start - datetime.timedelta( hours=max_precool_hours) schedule_end = e_end + datetime.timedelta( seconds=self.restore_window) schedule_end = schedule_end + datetime.timedelta(minutes=10) self.device_schedule[task_id] = { "date": dr_date, "schedule_start": schedule_start, "schedule_end": schedule_end, "event_start": e_start, "event_end": e_end } headers = { 'type': 'NEW_SCHEDULE', 'requesterID': agent_id, 'taskID': task_id, 'priority': 'HIGH' } self.task_timer = self.periodic_timer( 20, self.publish_json, topics.ACTUATOR_SCHEDULE_REQUEST(), headers, [[ "{campus}/{building}/{unit}".format(**rtu_path), str(schedule_start), str(schedule_end) ]]) def pre_cool_setup(self, e_start, e_end): if self.state == 'OVERRIDE': self._log.info("Override today") return if self.pre_cool_idle == False: return now = datetime.datetime.now() day = now.weekday() if not schedule[day]: self._log.info("Unoccupied today") return if self.state == 'PRECOOL' and self.pre_cool_idle == True: for event in self.currently_running_dr_event_handlers: event.cancel() self.currently_running_dr_event_handlers = [] self.state = 'PRECOOL' e_start_unix = time.mktime(e_start.timetuple()) e_end_unix = time.mktime(e_end.timetuple()) event_start = now + datetime.timedelta(minutes=15) event = sched.Event(self.pre_cool_setup, args=[e_start, e_end]) self.schedule(event_start, event) self.all_scheduled_events[e_start] = event self.schedule_builder(e_start_unix, e_end_unix) def modify_temp_set_point(self, csp, hsp): self.publish(topics.ACTUATOR_SET(point=volttron_flag, **rtu_path), self.headers, str(3.0)) self.publish( topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path), self.headers, str(self.normal_damper_stpt)) self.publish( 
topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path),
        self.headers, str(self.default_cooling_stage_differential))
    # (continuation of the preceding method, whose `def` line is above this
    # chunk: re-apply the requested cooling/heating set points.)
    self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path),
                 self.headers, str(csp))
    self.publish(topics.ACTUATOR_SET(point=heating_stpt, **rtu_path),
                 self.headers, str(hsp))
    if self.pre_cool_idle == True:
        self.pre_cool_idle = False

    # Register a retry closure so a failed actuation can be re-attempted.
    def backup_run():
        self.modify_temp_set_point(csp, hsp)
        self.error_handler = None
    self.error_handler = backup_run

def start_dr_event(self):
    """Enter the DR_EVENT state.

    Flags the RTU (volttron_flag = 3.0), raises the cooling set point to
    the CPP value, reduces both supply-fan stage speeds, clamps the
    minimum damper position, widens the cooling stage differential, and
    records the event start (reading 1.0) to sMAP.
    """
    self.state = 'DR_EVENT'
    self.publish(topics.ACTUATOR_SET(point=volttron_flag, **rtu_path),
                 self.headers, str(3.0))
    self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path),
                 self.headers, str(self.csp_cpp))
    new_fan_speed = self.normal_firststage_fanspeed - (
        self.normal_firststage_fanspeed * fan_reduction)
    new_fan_speed = max(new_fan_speed, 0)  # never command a negative speed
    self.publish(
        topics.ACTUATOR_SET(point=cooling_fan_sp1, **rtu_path),
        self.headers, str(new_fan_speed))
    # NOTE(review): the reduction below is computed from the FIRST-stage
    # fan speed but applied to the second stage — looks like it should be
    # `self.normal_secondstage_fanspeed * fan_reduction`; confirm intent.
    new_fan_speed = self.normal_secondstage_fanspeed - (
        self.normal_firststage_fanspeed * fan_reduction)
    new_fan_speed = max(new_fan_speed, 0)
    self.publish(
        topics.ACTUATOR_SET(point=cooling_fan_sp2, **rtu_path),
        self.headers, str(new_fan_speed))
    self.publish(
        topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path),
        self.headers, str(damper_cpp))
    self.publish(
        topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path),
        self.headers, str(cooling_stage_differential))
    mytime = int(time.time())
    content = {
        "Demand Response Event": {
            "Readings": [[mytime, 1.0]],
            "Units": "TU",
            "data_type": "double"
        }
    }
    self.publish(self.smap_path, self.headers, jsonapi.dumps(content))

    # Retry closure: re-run the whole DR-event actuation on error.
    def backup_run():
        self.start_dr_event()
        self.error_handler = None
    self.error_handler = backup_run

def start_restore_event(self, csp, hsp):
    """Enter the RESTORE state: step set points and fan speeds back
    toward normal operation after a DR event ends.

    :param csp: cooling set point for the first restore step
    :param hsp: heating set point for the first restore step
    """
    self.state = 'RESTORE'
    self._log.info('Restore: Begin restoring normal operations')
    self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path),
                 self.headers, str(csp))
    self.publish(topics.ACTUATOR_SET(point=heating_stpt, **rtu_path),
                 self.headers, str(hsp))  # heating
    self.publish(
        topics.ACTUATOR_SET(point=cooling_fan_sp1, **rtu_path),
        self.headers, str(self.normal_firststage_fanspeed))
    self.publish(
        topics.ACTUATOR_SET(point=cooling_fan_sp2, **rtu_path),
        self.headers, str(self.normal_secondstage_fanspeed))
    self.publish(
        topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path),
        self.headers, str(self.normal_damper_stpt))
    self.publish(
        topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path),
        self.headers, str(self.default_cooling_stage_differential))

    # Retry closure: re-apply the restore set points on error.
    def backup_run():
        self.start_restore_event(csp, hsp)
        self.error_handler = None
    self.error_handler = backup_run

def cancel_event(self, cancel_type='NORMAL'):
    """Restore all normal set points and tear down scheduled DR events.

    :param cancel_type: 'NORMAL' (cleanup after event), 'OVERRIDE'
        (occupant/manual override; stays in OVERRIDE state until the
        next midnight reset), or 'UPDATING' (silent re-configuration:
        no sMAP log entry and no actuator verification handler).
    """
    if cancel_type == 'OVERRIDE':
        self.state = 'OVERRIDE'
        smap_input = 3.0
    elif cancel_type != 'UPDATING':
        self.state = 'CLEANUP'
        smap_input = 2.0
    # NOTE: when cancel_type == 'UPDATING', smap_input is never assigned,
    # but it is also only read inside the `!= 'UPDATING'` guard below.
    self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path),
                 self.headers, str(self.normal_coolingstpt))
    self.publish(topics.ACTUATOR_SET(point=heating_stpt, **rtu_path),
                 self.headers, str(self.normal_heatingstpt))
    self.publish(
        topics.ACTUATOR_SET(point=cooling_fan_sp1, **rtu_path),
        self.headers, str(self.normal_firststage_fanspeed))
    self.publish(
        topics.ACTUATOR_SET(point=cooling_fan_sp2, **rtu_path),
        self.headers, str(self.normal_secondstage_fanspeed))
    self.publish(
        topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path),
        self.headers, str(self.normal_damper_stpt))
    self.publish(
        topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path),
        self.headers, str(self.default_cooling_stage_differential))
    self.publish(topics.ACTUATOR_SET(point=volttron_flag, **rtu_path),
                 self.headers, str(0))
    # Cancel every event still pending from schedule_builder().
    for event in self.currently_running_dr_event_handlers:
        event.cancel()
    if cancel_type != 'UPDATING':
        mytime = int(time.time())
        content = {
            "Demand Response Event": {
                "Readings": [[mytime, smap_input]],
                "Units": "TU",
                "data_type": "double"
            }
        }
        self.publish(self.smap_path, self.headers, jsonapi.dumps(content))
    self.device_schedule = {}
    self.all_scheduled_events = {}
    self.currently_running_dr_event_handlers = []

    # Retry closure: re-run the cancellation on error.
    def backup_run():
        self.cancel_event()
        self.error_handler = None
    self.error_handler = backup_run

    # Values we expect to read back from the device once the restore
    # writes have taken effect; popped one by one in result_handler.
    expected_values = {
        cooling_stpt: self.normal_coolingstpt,
        heating_stpt: self.normal_heatingstpt,
        cooling_fan_sp1: self.normal_firststage_fanspeed,
        cooling_fan_sp2: self.normal_secondstage_fanspeed,
        min_damper_stpt: self.normal_damper_stpt,
        cooling_stage_diff: self.default_cooling_stage_differential
    }
    EPSILON = 0.5  # allowed difference from expected value

    def result_handler(point, value):
        # Verify each read-back; once all points have been seen,
        # clear the handlers and settle into the final state.
        # print "actuator point being handled:", point, value
        expected_value = expected_values.pop(point, None)
        if expected_value is not None:
            diff = abs(expected_value - value)
            if diff > EPSILON:
                self._log.info(
                    "Did not get back expected value for: " + str(point))
        if not expected_values:
            self.actuator_handler = None
            self.error_handler = None
            self.state = 'IDLE' if not cancel_type == 'OVERRIDE' else 'OVERRIDE'

    if cancel_type != 'UPDATING':
        self.actuator_handler = result_handler
    else:
        self.actuator_handler = None
    if cancel_type == 'OVERRIDE':
        # Stay in OVERRIDE until the next midnight, then return to IDLE.
        def on_reset():
            self.error_handler = None
            self.state = 'IDLE'
        today = datetime.datetime.now()
        reset_time = today + datetime.timedelta(days=1)
        reset_time = reset_time.replace(hour=0, minute=0, second=0)
        event = sched.Event(on_reset)
        self.schedule(reset_time, event)

def schedule_builder(self, start_time, end_time):
    """schedule all events for a DR event.

    Schedules (epoch seconds): a pre-cool ramp before `start_time`,
    the DR event itself at `start_time`, a stepped restore ramp after
    `end_time`, and a final cleanup (cancel_event) once the restore
    window has elapsed. Does nothing if `end_time` is already past.
    """
    current_time = time.time()
    if current_time > end_time:
        return
    self._log.info('Scheduling all DR actions')
    pre_hsp = self.csp_pre - 5.0
    # Seconds of pre-cooling needed to drift from the current space
    # temperature down to the pre-cool set point.
    ideal_cooling_window = int(
        ((self.current_spacetemp - self.csp_pre) /
         self.building_thermal_constant) * 3600)
    ideal_precool_start_time = start_time - ideal_cooling_window
    max_cooling_window = start_time - current_time
    cooling_window = ideal_cooling_window if ideal_cooling_window < max_cooling_window else max_cooling_window
    precool_start_time = start_time - cooling_window
    # NOTE(review): ideal_precool_start_time and precool_start_time are
    # computed but never used below.
    pre_cool_step = 0
    if (max_cooling_window > 0):
        self._log.info('Schedule Pre Cooling')
        num_cooling_timesteps = int(
            math.ceil(float(cooling_window) / float(self.timestep_length)))
        # NOTE(review): this division happens BEFORE the <= 0 guard just
        # below, so a zero timestep count raises ZeroDivisionError here.
        cooling_step_delta = (self.normal_coolingstpt -
                              self.csp_pre) / num_cooling_timesteps
        if num_cooling_timesteps <= 0:
            num_cooling_timesteps = 1
        for step_index in range(1, num_cooling_timesteps):
            # First step is scheduled two timesteps early; the rest
            # march backwards one timestep at a time from start_time.
            if step_index == 1:
                pre_cool_step = 2 * self.timestep_length
            else:
                pre_cool_step += self.timestep_length
            event_time = start_time - pre_cool_step
            csp = self.csp_pre + ((step_index - 1) * cooling_step_delta)
            self._log.info(
                'Precool step: ' +
                str(datetime.datetime.fromtimestamp(event_time)) +
                ' CSP: ' + str(csp))
            event = sched.Event(self.modify_temp_set_point,
                                args=[csp, pre_hsp])
            self.schedule(event_time, event)
            self.currently_running_dr_event_handlers.append(event)
    else:
        self._log.info('Too late to pre-cool!')
    restore_start_time = end_time
    num_restore_timesteps = int(
        math.ceil(float(self.restore_window) / float(self.timestep_length)))
    restore_step_delta = (self.csp_pre -
                          self.normal_coolingstpt) / num_restore_timesteps
    self._log.info('Schedule DR Event: ' +
                   str(datetime.datetime.fromtimestamp(start_time)) +
                   ' CSP: ' + str(self.csp_cpp))
    event = sched.Event(self.start_dr_event)
    self.schedule(start_time, event)
    self.currently_running_dr_event_handlers.append(event)
    self._log.info('Schedule Restore Event: ' +
                   str(datetime.datetime.fromtimestamp(end_time)) +
                   ' CSP: ' + str(self.csp_pre - restore_step_delta))
    event = sched.Event(self.start_restore_event,
                        args=[
                            self.csp_pre - restore_step_delta,
                            self.normal_heatingstpt
                        ])
    self.schedule(end_time, event)
    self.currently_running_dr_event_handlers.append(event)
    # Step the cooling set point back down over the restore window.
    for step_index in range(1, num_restore_timesteps):
        event_time = end_time + (step_index * self.timestep_length)
        csp = self.csp_pre - ((step_index + 1) * restore_step_delta)
        self._log.info(
            'Restore step: ' +
            str(datetime.datetime.fromtimestamp(event_time)) +
            ' CSP: ' + str(csp))
        event = sched.Event(self.modify_temp_set_point,
                            args=[csp, self.normal_heatingstpt])
        self.schedule(event_time, event)
        self.currently_running_dr_event_handlers.append(event)
    event_time = end_time + (num_restore_timesteps * self.timestep_length)
    self._log.info('Schedule Cleanup Event: ' +
                   str(datetime.datetime.fromtimestamp(event_time)))
    event = sched.Event(self.cancel_event)
    self.schedule(event_time, event)
    self.currently_running_dr_event_handlers.append(event)
def configure_main(self, config_name, action, contents):
    """This triggers configuration of the ProactiveDiagnostic via the
    VOLTTRON configuration store.

    :param config_name: canonical name is config
    :param action: on instantiation this is "NEW" or "UPDATE" if user
        uploads update config to store
    :param contents: configuration contents
    :return: None
    """
    LOG.debug("Update %s for %s", config_name, self.core.identity)
    config = self.default_config.copy()
    config.update(contents)
    # BUG FIX: the original condition was `action == "NEW" or "UPDATE"`,
    # which is always truthy because the bare string "UPDATE" is evaluated
    # on its own.  Test membership instead so only the intended config-store
    # actions trigger reconfiguration.
    if action in ("NEW", "UPDATE"):
        # The run schedule should be a cron string
        # https://volttron.readthedocs.io/en/develop/devguides/agent_development/Agent-Development-Cheatsheet.html
        # https://crontab.guru/
        self.run_schedule = config.get("run_schedule")
        # The campus, building, device parameters are used to build the
        # (devices/campus/building/device/all) subscription for device data
        # coming from master driver and the rpc to do actuation
        # (campus/building/device/point)
        campus = config.get("campus", "")
        building = config.get("building", "")
        device_list = config.get("device", [])
        self.revert_action = config.get("revert_action", "release")
        # Configure global diagnostic prerequisites.
        # Data mechanism is through subscription.
        # Evaluation is only done prior to running diagnostic.
        prerequisites = config.get("prerequisites", {})
        self.actuator = config.get("actuator_vip", "platform.actuator")
        self.remote_platform = config.get("remote_platform")
        self.base_rpc_path = []
        self.device_topics_list = []
        if not device_list:
            LOG.warning("Configuration ERROR: no device_list "
                        "configured for diagnostic!")
            LOG.warning("Check configuration and update "
                        "device_list!")
        # Build one actuation RPC path and one "all points" subscription
        # topic per configured device.
        for device in device_list:
            self.base_rpc_path.append(
                topics.RPC_DEVICE_PATH(campus=campus,
                                       building=building,
                                       unit=device,
                                       path="",
                                       point=None))
            self.device_topics_list.append(
                topics.DEVICES_VALUE(campus=campus,
                                     building=building,
                                     unit=device,
                                     path="",
                                     point="all"))
        diagnostics = config.get("diagnostics", [])
        if not diagnostics:
            LOG.warning("Configuration ERROR diagnostics"
                        "information is not configured!")
            LOG.warning("Diagnostic cannot be performed, "
                        "Update configuration!")
        self.diagnostics = diagnostics
        self.diagnostics_container = []
        self.prerequisites_expr_list = []
        self.prerequisites_data_required = {}
        self.prerequisites_variables = None
        if prerequisites:
            self.initialize_prerequisites(prerequisites)
        else:
            LOG.debug("No diagnostic prerequisites configured!")
        self.starting_base()
def driven_agent(config_path, **kwargs):
    """Reads agent configuration and converts it to run driven agent.

    Builds the device/subdevice subscription topics from the config,
    validates the configured application class, instantiates it, and
    returns a DrivenAgent wired to feed it device data.
    :param kwargs: Any driver specific parameters"""
    config = utils.load_config(config_path)
    arguments = config.get('arguments')
    # "ACTIVE" mode enables actuation; anything else is observe-only.
    mode = True if config.get('mode', 'PASSIVE') == 'ACTIVE' else False
    multiple_devices = isinstance(config['device']['unit'], dict)
    campus_building_config = config['device']
    analysis_name = campus_building_config.get('analysis_name',
                                               'analysis_name')
    analysis_dict = {'analysis_name': analysis_name}
    arguments.update(analysis_dict)
    agent_id = config.get('agentid', None)
    actuator_id = agent_id if agent_id is not None else analysis_name
    campus_building = dict(
        (key, campus_building_config[key]) for key in ['campus', 'building'])
    analysis = deepcopy(campus_building)
    analysis.update(analysis_dict)
    device_config = config['device']['unit']
    command_devices = device_config.keys()
    device_topic_dict = {}  # maps full pubsub topic -> short device tag
    device_topic_list = []  # short tags: "unit" or "unit/subdevice"
    subdevices_list = []
    vip_destination = config.get('vip_destination', None)
    from_file = config.get('from_file')
    for device_name in device_config:
        device_topic = topics.DEVICES_VALUE(
            campus=campus_building.get('campus'),
            building=campus_building.get('building'),
            unit=device_name,
            path='',
            point='all')
        device_topic_dict.update({device_topic: device_name})
        device_topic_list.append(device_name)
        if multiple_devices:
            for subdevice in device_config[device_name]['subdevices']:
                subdevices_list.append(subdevice)
                subdevice_topic = topics.DEVICES_VALUE(
                    campus=campus_building.get('campus'),
                    building=campus_building.get('building'),
                    unit=device_name,
                    path=subdevice,
                    point='all')
                subdevice_name = device_name + "/" + subdevice
                device_topic_dict.update({subdevice_topic: subdevice_name})
                device_topic_list.append(subdevice_name)
    # Template path for actuator RPC calls; unit/point filled in later.
    base_actuator_path = topics.RPC_DEVICE_PATH(
        campus=campus_building.get('campus', ''),
        building=campus_building.get('building', ''),
        unit=None,
        path='',
        point=None)
    device_lock_duration = config.get('device_lock_duration', 1.25)
    conversion_map = config.get('conversion_map')
    map_names = {}
    for key, value in conversion_map.items():
        map_names[key.lower() if isinstance(key, str) else key] = value
    application = config.get('application')
    validation_error = ''
    if not application:
        validation_error = 'Invalid application specified in config\n'
    if validation_error:
        _log.error(validation_error)
        raise ValueError(validation_error)
    config.update(config.get('arguments'))
    converter = ConversionMapper()
    output_file_prefix = config.get('output_file')
    #unittype_map = config.get('unittype_map', None)
    #assert unittype_map
    klass = _get_class(application)
    # This instances is used to call the applications run method when
    # data comes in on the message bus. It is constructed here
    # so that_process_results each time run is called the application
    # can keep it state.
    app_instance = klass(**arguments)

    class DrivenAgent(Agent):
        """Agent listens to message bus device and runs when data is
        published.
        """

        def __init__(self, **kwargs):
            """
            Initializes agent
            :param kwargs: Any driver specific parameters"""
            super(DrivenAgent, self).__init__(**kwargs)
            # master is where we copy from to get a poppable list of
            # subdevices that should be present before we run the analysis.
            self._master_devices = device_topic_list
            self._needed_devices = []
            self._device_values = {}
            self._initialize_devices()
            self.received_input_datetime = None
            self._kwargs = kwargs
            self._header_written = False
            self.file_creation_set = set()
            # Actuation goes through the local VIP unless a remote
            # destination was configured.
            self.actuation_vip = self.vip.rpc
            if vip_destination:
                self.agent = self.setup_remote_actuation(vip_destination)
                self.actuation_vip = self.agent.vip.rpc

        def _initialize_devices(self):
            # Reset the "still waiting for data" list and the collected
            # values before each analysis cycle.
            self._needed_devices = deepcopy(self._master_devices)
            self._device_values = {}

        def setup_remote_actuation(self, vip_destination):
            # Spin up a secondary agent connected to a remote platform
            # for actuation RPCs.
            event = gevent.event.Event()
            agent = Agent(address=vip_destination)
            gevent.spawn(agent.core.run, event)
            event.wait(timeout=15)
            return agent

        @Core.receiver('onstart')
        def starup(self, sender, **kwargs):
            """
            Starts up the agent and subscribes to device topics
            based on agent configuration.
            :param sender:
            :param kwargs: Any driver specific parameters
            :type sender: str"""
            self._initialize_devices()
            for device_topic in device_topic_dict:
                _log.info('Subscribing to ' + device_topic)
                self.vip.pubsub.subscribe(peer='pubsub',
                                          prefix=device_topic,
                                          callback=self.on_analysis_message)

        def _should_run_now(self):
            """
            Checks if messages from all the devices are received
                before running application
            :returns: True or False based on received messages.
            :rtype: boolean"""
            # Assumes the unit/all values will have values.
            if not len(self._device_values.keys()) > 0:
                return False
            return not len(self._needed_devices) > 0

        def on_analysis_message(self, peer, sender, bus, topic, headers,
                                message):
            """
            Subscribe to device data and assemble data set to pass
                to applications.
            :param peer:
            :param sender: device name
            :param bus:
            :param topic: device path topic
            :param headers: message headers
            :param message: message containing points and values dict
                    from device with point type
            :type peer: str
            :type sender: str
            :type bus: str
            :type topic: str
            :type headers: dict
            :type message: dict"""
            device_data = message[0]
            if isinstance(device_data, list):
                device_data = device_data[0]

            def aggregate_subdevice(device_data):
                # Tag every point as "point&device" and mark this device
                # as received; returns False if it was not expected.
                tagged_device_data = {}
                device_tag = device_topic_dict[topic]
                if device_tag not in self._needed_devices:
                    return False
                for key, value in device_data.items():
                    device_data_tag = '&'.join([key, device_tag])
                    tagged_device_data[device_data_tag] = value
                self._device_values.update(tagged_device_data)
                self._needed_devices.remove(device_tag)
                return True

            device_needed = aggregate_subdevice(device_data)
            if not device_needed:
                _log.error("Warning device values already present, "
                           "reinitializing")
                self._initialize_devices()
            if self._should_run_now():
                field_names = {}
                for key, value in self._device_values.items():
                    field_names[key.lower() if isinstance(key, str
                                                          ) else key] = value
                if not converter.initialized and conversion_map is not None:
                    converter.setup_conversion_map(map_names, field_names)
                if from_file:
                    _timestamp = parse(headers.get('Date'))
                    self.received_input_datetime = _timestamp
                else:
                    # NOTE(review): local-time dt.now() is passed to the
                    # application while utcnow() is recorded here — mixing
                    # naive local and UTC timestamps; confirm intended.
                    _timestamp = dt.now()
                    self.received_input_datetime = dt.utcnow()
                device_data = converter.process_row(field_names)
                results = app_instance.run(_timestamp, device_data)
                # results = app_instance.run(
                # dateutil.parser.parse(self._subdevice_values['Timestamp'],
                # fuzzy=True), self._subdevice_values)
                self._process_results(results)
                self._initialize_devices()
            else:
                _log.info("Still need {} before running.".format(
                    self._needed_devices))

        def _process_results(self, results):
            """
            Runs driven application with converted data. Calls appropriate
                methods to process commands, log and table_data in results.
            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven"""
            _log.info('Processing Results!')
            actuator_error = True
            if mode:
                if results.devices:
                    actuator_error = self.actuator_request(results.devices)
                elif results.commands:
                    actuator_error = self.actuator_request(command_devices)
                if not actuator_error:
                    results = self.actuator_set(results)
            for value in results.log_messages:
                _log.info("LOG: {}".format(value))
            for key, value in results.table_output.items():
                _log.info("TABLE: {}->{}".format(key, value))
            if output_file_prefix is not None:
                results = self.create_file_output(results)
            if len(results.table_output.keys()):
                results = self.publish_analysis_results(results)
            return results

        def publish_analysis_results(self, results):
            """
            Publish table_data in analysis results to the message bus for
                capture by the data historian.
            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven"""
            headers = {
                headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                headers_mod.DATE: str(self.received_input_datetime),
            }
            for app, analysis_table in results.table_output.items():
                # Result keys are "name&timestamp"; fall back to the
                # receive time if no timestamp is embedded.
                try:
                    name_timestamp = app.split('&')
                    _name = name_timestamp[0]
                    timestamp = name_timestamp[1]
                except:
                    _name = app
                    timestamp = str(self.received_input_datetime)
                headers = {
                    headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                    headers_mod.DATE: timestamp,
                }
                # The keys in this publish should look like the following
                # with the values being a dictionary of points off of these
                # base topics
                #
                # Schedule-Reset ACCx/data/interior_ahu/vav1600e
                # Schedule-Reset ACCx/data/interior_ahu/vav1534
                to_publish = defaultdict(list)
                for entry in analysis_table:
                    for key, value in entry.items():
                        for _device in command_devices:
                            analysis['unit'] = _device
                            analysis_topic = topics.ANALYSIS_VALUE(
                                point=key, **analysis)
                            datatype = 'float'
                            if isinstance(value, int):
                                datatype = 'int'
                            kbase = key[key.rfind('/') + 1:]
                            topic_without_point = analysis_topic[:
                                                                 analysis_topic
                                                                 .rfind('/')]
                            if not to_publish[topic_without_point]:
                                to_publish[topic_without_point] = [{}, {}]
                            to_publish[topic_without_point][0][kbase] = value
                            to_publish[topic_without_point][1][kbase] = {
                                'tz': 'US/Pacific',
                                'type': datatype,
                                'units': 'float',
                            }
                for equipment, _analysis in to_publish.items():
                    self.vip.pubsub.publish('pubsub', equipment, headers,
                                            _analysis)
                to_publish.clear()
            return results

        def create_file_output(self, results):
            """
            Create results/data files for testing and algorithm validation
            if table data is present in the results.
            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven"""
            for key, value in results.table_output.items():
                name_timestamp = key.split('&')
                _name = name_timestamp[0]
                timestamp = name_timestamp[1]
                file_name = output_file_prefix + "-" + _name + ".csv"
                if file_name not in self.file_creation_set:
                    self._header_written = False
                self.file_creation_set.update([file_name])
                for row in value:
                    with open(file_name, 'a+') as file_to_write:
                        row.update({'Timestamp': timestamp})
                        _keys = row.keys()
                        file_output = csv.DictWriter(file_to_write, _keys)
                        if not self._header_written:
                            file_output.writeheader()
                            self._header_written = True
                        file_output.writerow(row)
                    # NOTE(review): redundant — the `with` block already
                    # closed the file.
                    file_to_write.close()
            return results

        def actuator_request(self, command_equip):
            """
            Calls the actuator's request_new_schedule method to get
                    device schedule
            :param command_equip: contains the names of the devices
                that will be scheduled with the ActuatorAgent.
            :type: dict or list
            :returns: Return result from request_new_schedule method
                and True or False for error in scheduling device.
            :rtype: boolean
            :Return Values:
                request_error = True/False

            warning:: Calling without previously scheduling a device and
                     not within the time allotted will raise a LockError"""
            _now = dt.now()
            str_now = _now.strftime(DATE_FORMAT)
            _end = _now + td(minutes=device_lock_duration)
            str_end = _end.strftime(DATE_FORMAT)
            for device in command_equip:
                actuation_device = base_actuator_path(unit=device, point='')
                schedule_request = [[actuation_device, str_now, str_end]]
                try:
                    _log.info('Make Request {} for start {} and end {}'.format(
                        actuation_device, str_now, str_end))
                    result = self.actuation_vip.call(
                        'platform.actuator', 'request_new_schedule',
                        actuator_id, actuation_device, 'HIGH',
                        schedule_request).get(timeout=15)
                except RemoteError as ex:
                    _log.warning(
                        "Failed to schedule device {} (RemoteError): {}".
                        format(device, str(ex)))
                    request_error = True
                # NOTE(review): if the RemoteError branch ran, `result` is
                # unbound (or stale from a previous iteration) here — this
                # check should probably be skipped after the exception.
                if result['result'] == 'FAILURE':
                    if result['info'] == 'TASK_ID_ALREADY_EXISTS':
                        _log.info('Task to schedule device already exists ' +
                                  device)
                        request_error = False
                    else:
                        _log.warn('Failed to schedule device (unavailable) ' +
                                  device)
                        request_error = True
                else:
                    request_error = False
            # Only the LAST device's outcome is returned.
            return request_error

        def actuator_set(self, results):
            """
            Calls the actuator's set_point method to set point on device
            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven"""

            def make_actuator_set(device, point_value_dict):
                # Write each point via actuator RPC; log and skip on error.
                for point, new_value in point_value_dict.items():
                    point_path = base_actuator_path(unit=device, point=point)
                    try:
                        _log.info('Set point {} to {}'.format(
                            point_path, new_value))
                        result = self.actuation_vip.call(
                            'platform.actuator', 'set_point', actuator_id,
                            point_path, new_value).get(timeout=15)
                    except RemoteError as ex:
                        _log.warning("Failed to set {} to {}: {}".format(
                            point_path, new_value, str(ex)))
                        continue

            for device, point_value_dict in results.devices.items():
                make_actuator_set(device, point_value_dict)
            for device in command_devices:
                make_actuator_set(device, results.commands)
            return results

    DrivenAgent.__name__ = 'DrivenLoggerAgent'
    return DrivenAgent(**kwargs)
def __init__(self, config_path, **kwargs):
    """Load configuration and wire up the transactive ILC coordinator.

    Reads the agent configuration file, builds the curtailment device
    clusters, constructs the device and power-meter subscription topics,
    initializes pricing/demand state, and joins the electric market as
    a buyer.

    :param config_path: path to the agent configuration file
    :param kwargs: passed through to the base agent
    """
    super(TransactiveIlcCoordinator, self).__init__(**kwargs)
    config = utils.load_config(config_path)

    campus = config.get("campus", "")
    building = config.get("building", "")
    logging_topic = config.get("logging_topic", "record")
    self.target_topic = '/'.join(
        ['record', 'target_agent', campus, building, 'goal'])
    self.logging_topic = '/'.join(
        [logging_topic, campus, building, "TCILC"])

    # Each cluster entry points at its own cluster-configuration file.
    self.clusters = ClusterContainer()
    for cluster_spec in config["clusters"]:
        cluster_file = cluster_spec["device_cluster_file"]
        load_type = cluster_spec.get("load_type", "discreet")
        if cluster_file[0] == "~":
            cluster_file = os.path.expanduser(cluster_file)
        cluster = DeviceClusters(utils.load_config(cluster_file), load_type)
        self.clusters.add_curtailment_cluster(cluster)

    self.device_topic_list = []
    self.device_topic_map = {}
    self.static_price_flag = config.get('static_price_flag', False)
    self.default_min_price = config.get('static_minimum_price', 0.01)
    self.default_max_price = config.get('static_maximum_price', 0.1)
    occupancy_schedule = config.get("occupancy_schedule", False)
    self.occupancy_schedule = init_schedule(occupancy_schedule)

    # One "all points" subscription topic per controllable device.
    for device_name in self.clusters.get_device_name_list():
        device_topic = topics.DEVICES_VALUE(campus=campus,
                                            building=building,
                                            unit=device_name,
                                            path="",
                                            point="all")
        self.device_topic_list.append(device_topic)
        self.device_topic_map[device_topic] = device_name

    # Whole-building power meter subscription.
    power_token = config["power_meter"]
    self.power_point = power_token["point"]
    self.power_meter_topic = topics.DEVICES_VALUE(campus=campus,
                                                  building=building,
                                                  unit=power_token["device"],
                                                  path="",
                                                  point="all")

    # Demand/pricing state, populated as data and market messages arrive.
    self.current_time = None
    self.demand_limit = None
    self.bldg_power = []
    self.avg_power = 0.
    self.last_demand_update = None
    self.demand_curve = None
    self.power_prices = None
    self.power_min = None
    self.power_max = None
    self.current_price = None
    self.average_building_power_window = td(
        minutes=config.get("average_building_power_window", 15))
    self.minimum_update_time = td(
        minutes=config.get("minimum_update_time", 5))
    self.market_name = config.get("market", "electric_0")
    self.tz = config.get("timezone", "US/Pacific")
    # self.prices = power_prices
    self.oat_predictions = []
    self.comfort_to_dollar = config.get('comfort_to_dollar', 1.0)
    self.prices_from = config.get("prices_from", 'pubsub')
    self.prices_topic = config.get("price_topic", "prices")
    self.prices_file = config.get("price_file")

    # Participate in the electric market as a buyer.
    self.join_market(self.market_name, BUYER, None, self.offer_callback,
                     None, self.price_callback, self.error_callback)