class EventSender(object):

    @Inject
    def __init__(self, cloud_api_client=INJECTED):
        # type: (CloudAPIClient) -> None
        self._queue = deque()  # type: deque
        self._stopped = True
        self._cloud_client = cloud_api_client
        self._event_enabled_cache = {}  # type: Dict[int, bool]
        self._events_queue = deque()  # type: deque
        self._events_thread = DaemonThread(name='eventsender',
                                           target=self._send_events_loop,
                                           interval=0.1, delay=0.2)

    def start(self):
        # type: () -> None
        self._events_thread.start()

    def stop(self):
        # type: () -> None
        self._events_thread.stop()

    def enqueue_event(self, event):
        if Config.get_entry('cloud_enabled', False) is False:
            return
        if event.type == GatewayEvent.Types.CONFIG_CHANGE:
            if event.data.get('type') == 'input':
                self._event_enabled_cache = {}
        if self._should_send_event(event):
            event.data['timestamp'] = time.time()
            self._queue.appendleft(event)

    def _should_send_event(self, event):
        if event.type != GatewayEvent.Types.INPUT_CHANGE:
            return True
        input_id = event.data['id']
        enabled = self._event_enabled_cache.get(input_id)
        if enabled is not None:
            return enabled
        self._event_enabled_cache = InputController.load_inputs_event_enabled()
        return self._event_enabled_cache.get(input_id, False)

    def _send_events_loop(self):
        # type: () -> None
        try:
            if not self._batch_send_events():
                raise DaemonThreadWait
        except APIException as ex:
            logger.error('Error sending events to the cloud: {}'.format(str(ex)))

    def _batch_send_events(self):
        events = []
        while len(events) < 25:
            try:
                events.append(self._queue.pop())
            except IndexError:
                break
        if len(events) > 0:
            self._cloud_client.send_events(events)
            return True
        return False
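
# Illustrative sketch (not part of the gateway code): the queue draining pattern used by
# EventSender._batch_send_events above. Up to 25 events are popped per cycle; popping from the
# right while enqueue_event appends on the left keeps FIFO ordering. The `send` callable below is
# a hypothetical stand-in for CloudAPIClient.send_events.
from collections import deque


def drain_batch(queue, send, batch_size=25):
    # type: (deque, callable, int) -> bool
    """ Pops at most `batch_size` items and sends them; returns whether anything was sent. """
    batch = []
    while len(batch) < batch_size:
        try:
            batch.append(queue.pop())
        except IndexError:
            break  # Queue is empty
    if batch:
        send(batch)
        return True
    return False


# Example: a full queue is drained oldest-first in batches of at most 25.
_queue = deque()
for i in range(60):
    _queue.appendleft({'id': i})
while drain_batch(_queue, send=lambda events: None):
    pass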
class ThermostatControllerMaster(ThermostatController): @Inject def __init__(self, output_controller=INJECTED, master_controller=INJECTED, pubsub=INJECTED): # type: (OutputController, MasterClassicController, PubSub) -> None super(ThermostatControllerMaster, self).__init__(output_controller) self._master_controller = master_controller # classic only self._pubsub = pubsub self._monitor_thread = DaemonThread(name='thermostatctl', target=self._monitor, interval=30, delay=10) self._thermostat_status = ThermostatStatusMaster( on_thermostat_change=self._thermostat_changed, on_thermostat_group_change=self._thermostat_group_changed) self._thermostats_original_interval = 30 self._thermostats_interval = self._thermostats_original_interval self._thermostats_last_updated = 0.0 self._thermostats_restore = 0 self._thermostats_config = {} # type: Dict[int, ThermostatDTO] self._pubsub.subscribe_master_events(PubSub.MasterTopics.EEPROM, self._handle_master_event) def start(self): # type: () -> None self._monitor_thread.start() def stop(self): # type: () -> None self._monitor_thread.stop() def _handle_master_event(self, master_event): # type: (MasterEvent) -> None if master_event.type == MasterEvent.Types.EEPROM_CHANGE: self.invalidate_cache(THERMOSTATS) def _thermostat_changed(self, thermostat_id, status): # type: (int, Dict[str,Any]) -> None """ Executed by the Thermostat Status tracker when an output changed state """ location = { 'room_id': Toolbox.denonify(self._thermostats_config[thermostat_id].room, 255) } gateway_event = GatewayEvent( GatewayEvent.Types.THERMOSTAT_CHANGE, { 'id': thermostat_id, 'status': { 'preset': status['preset'], 'current_setpoint': status['current_setpoint'], 'actual_temperature': status['actual_temperature'], 'output_0': status['output_0'], 'output_1': status['output_1'] }, 'location': location }) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE, gateway_event) def _thermostat_group_changed(self, status): # type: (Dict[str,Any]) -> None gateway_event = GatewayEvent( GatewayEvent.Types.THERMOSTAT_GROUP_CHANGE, { 'id': 0, 'status': { 'state': status['state'], 'mode': status['mode'] }, 'location': {} }) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE, gateway_event) @staticmethod def check_basic_action(ret_dict): """ Checks if the response is 'OK', throws a ValueError otherwise. """ if ret_dict['resp'] != 'OK': raise ValueError('Basic action did not return OK.') def increase_interval(self, object_type, interval, window): """ Increases a certain interval to a new setting for a given amount of time """ if object_type == THERMOSTATS: self._thermostats_interval = interval self._thermostats_restore = time.time() + window def invalidate_cache(self, object_type=None): """ Triggered when an external service knows certain settings might be changed in the background. 
For example: maintenance mode or module discovery """ if object_type is None or object_type == THERMOSTATS: self._thermostats_last_updated = 0 ################################ # New API ################################ def get_current_preset(self, thermostat_number): raise NotImplementedError() def set_current_preset(self, thermostat_number, preset_type): raise NotImplementedError() ################################ # Legacy API ################################ def load_heating_thermostat(self, thermostat_id): # type: (int) -> ThermostatDTO return self._master_controller.load_heating_thermostat(thermostat_id) def load_heating_thermostats(self): # type: () -> List[ThermostatDTO] return self._master_controller.load_heating_thermostats() def save_heating_thermostats( self, thermostats ): # type: (List[Tuple[ThermostatDTO, List[str]]]) -> None self._master_controller.save_heating_thermostats(thermostats) self.invalidate_cache(THERMOSTATS) def load_cooling_thermostat(self, thermostat_id): # type: (int) -> ThermostatDTO return self._master_controller.load_cooling_thermostat(thermostat_id) def load_cooling_thermostats(self): # type: () -> List[ThermostatDTO] return self._master_controller.load_cooling_thermostats() def save_cooling_thermostats( self, thermostats ): # type: (List[Tuple[ThermostatDTO, List[str]]]) -> None self._master_controller.save_cooling_thermostats(thermostats) self.invalidate_cache(THERMOSTATS) def load_cooling_pump_group(self, pump_group_id): # type: (int) -> PumpGroupDTO return self._master_controller.load_cooling_pump_group(pump_group_id) def load_cooling_pump_groups(self): # type: () -> List[PumpGroupDTO] return self._master_controller.load_cooling_pump_groups() def save_cooling_pump_groups( self, pump_groups ): # type: (List[Tuple[PumpGroupDTO, List[str]]]) -> None self._master_controller.save_cooling_pump_groups(pump_groups) def load_global_rtd10(self): # type: () -> GlobalRTD10DTO return self._master_controller.load_global_rtd10() def save_global_rtd10( self, global_rtd10): # type: (Tuple[GlobalRTD10DTO, List[str]]) -> None self._master_controller.save_global_rtd10(global_rtd10) def load_heating_rtd10(self, rtd10_id): # type: (int) -> RTD10DTO return self._master_controller.load_heating_rtd10(rtd10_id) def load_heating_rtd10s(self): # type: () -> List[RTD10DTO] return self._master_controller.load_heating_rtd10s() def save_heating_rtd10s( self, rtd10s): # type: (List[Tuple[RTD10DTO, List[str]]]) -> None self._master_controller.save_heating_rtd10s(rtd10s) def load_cooling_rtd10(self, rtd10_id): # type: (int) -> RTD10DTO return self._master_controller.load_cooling_rtd10(rtd10_id) def load_cooling_rtd10s(self): # type: () -> List[RTD10DTO] return self._master_controller.load_cooling_rtd10s() def save_cooling_rtd10s( self, rtd10s): # type: (List[Tuple[RTD10DTO, List[str]]]) -> None self._master_controller.save_cooling_rtd10s(rtd10s) def load_thermostat_group(self): # type: () -> ThermostatGroupDTO return self._master_controller.load_thermostat_group() def save_thermostat_group(self, thermostat_group): # type: (Tuple[ThermostatGroupDTO, List[str]]) -> None self._master_controller.save_thermostat_group(thermostat_group) self.invalidate_cache(THERMOSTATS) def load_heating_pump_group(self, pump_group_id): # type: (int) -> PumpGroupDTO return self._master_controller.load_heating_pump_group(pump_group_id) def load_heating_pump_groups(self): # type: () -> List[PumpGroupDTO] return self._master_controller.load_heating_pump_groups() def save_heating_pump_groups( self, pump_groups ): 
# type: (List[Tuple[PumpGroupDTO, List[str]]]) -> None self._master_controller.save_heating_pump_groups(pump_groups) def set_thermostat_mode(self, thermostat_on, cooling_mode=False, cooling_on=False, automatic=None, setpoint=None): # type: (bool, bool, bool, Optional[bool], Optional[int]) -> None """ Set the mode of the thermostats. """ _ = thermostat_on # Still accept `thermostat_on` for backwards compatibility # Figure out whether the system should be on or off set_on = False if cooling_mode is True and cooling_on is True: set_on = True if cooling_mode is False: # Heating means threshold based thermostat_group = self.load_thermostat_group() outside_sensor = Toolbox.denonify( thermostat_group.outside_sensor_id, 255) current_temperatures = self._master_controller.get_sensors_temperature( )[:32] if len(current_temperatures) < 32: current_temperatures += [None ] * (32 - len(current_temperatures)) if len(current_temperatures) > outside_sensor: current_temperature = current_temperatures[outside_sensor] set_on = thermostat_group.threshold_temperature > current_temperature else: set_on = True # Calculate and set the global mode mode = 0 mode |= (1 if set_on is True else 0) << 7 mode |= 1 << 6 # multi-tenant mode mode |= (1 if cooling_mode else 0) << 4 if automatic is not None: mode |= (1 if automatic else 0) << 3 self._master_controller.set_thermostat_mode(mode) # Caclulate and set the cooling/heating mode cooling_heating_mode = 0 if cooling_mode is True: cooling_heating_mode = 1 if cooling_on is False else 2 self._master_controller.set_thermostat_cooling_heating( cooling_heating_mode) # Then, set manual/auto if automatic is not None: action_number = 1 if automatic is True else 0 self._master_controller.set_thermostat_automatic(action_number) # If manual, set the setpoint if appropriate if automatic is False and setpoint is not None and 3 <= setpoint <= 5: self._master_controller.set_thermostat_all_setpoints(setpoint) self.invalidate_cache(THERMOSTATS) self.increase_interval(THERMOSTATS, interval=2, window=10) def set_per_thermostat_mode(self, thermostat_id, automatic, setpoint): # type: (int, bool, int) -> None """ Set the setpoint/mode for a certain thermostat. """ if thermostat_id < 0 or thermostat_id > 31: raise ValueError('Thermostat_id not in [0, 31]: %d' % thermostat_id) if setpoint < 0 or setpoint > 5: raise ValueError('Setpoint not in [0, 5]: %d' % setpoint) if automatic: self._master_controller.set_thermostat_tenant_auto(thermostat_id) else: self._master_controller.set_thermostat_tenant_manual(thermostat_id) self._master_controller.set_thermostat_setpoint( thermostat_id, setpoint) self.invalidate_cache(THERMOSTATS) self.increase_interval(THERMOSTATS, interval=2, window=10) def set_airco_status(self, thermostat_id, airco_on): # type: (int, bool) -> None """ Set the mode of the airco attached to a given thermostat. """ if thermostat_id < 0 or thermostat_id > 31: raise ValueError( 'Thermostat id not in [0, 31]: {0}'.format(thermostat_id)) self._master_controller.set_airco_status(thermostat_id, airco_on) def load_airco_status(self): # type: () -> ThermostatAircoStatusDTO """ Get the mode of the airco attached to a all thermostats. """ return self._master_controller.load_airco_status() @staticmethod def __check_thermostat(thermostat): """ :raises ValueError if thermostat not in range [0, 32]. 
""" if thermostat not in range(0, 32): raise ValueError('Thermostat not in [0,32]: %d' % thermostat) def set_current_setpoint(self, thermostat_number, temperature=None, heating_temperature=None, cooling_temperature=None): # type: (int, Optional[float], Optional[float], Optional[float]) -> None """ Set the current setpoint of a thermostat. """ if temperature is None: temperature = heating_temperature if temperature is None: temperature = cooling_temperature self.__check_thermostat(thermostat_number) self._master_controller.write_thermostat_setpoint( thermostat_number, temperature) self.invalidate_cache(THERMOSTATS) self.increase_interval(THERMOSTATS, interval=2, window=10) def _monitor(self): # type: () -> None """ Monitors certain system states to detect changes without events """ try: # Refresh if required if self._thermostats_last_updated + self._thermostats_interval < time.time( ): self._refresh_thermostats() # Restore interval if required if self._thermostats_restore < time.time(): self._thermostats_interval = self._thermostats_original_interval except CommunicationTimedOutException: logger.error( 'Got communication timeout during thermostat monitoring, waiting 10 seconds.' ) raise DaemonThreadWait def _refresh_thermostats(self): # type: () -> None """ Get basic information about all thermostats and pushes it in to the Thermostat Status tracker """ def get_automatic_setpoint(_mode): _automatic = bool(_mode & 1 << 3) return _automatic, 0 if _automatic else (_mode & 0b00000111) try: thermostat_info = self._master_controller.get_thermostats() thermostat_mode = self._master_controller.get_thermostat_modes() aircos = self._master_controller.load_airco_status() except CommunicationFailure: return status = { state.id: state for state in self._output_controller.get_output_statuses() } # type: Dict[int,OutputStateDTO] mode = thermostat_info['mode'] thermostats_on = bool(mode & 1 << 7) cooling = bool(mode & 1 << 4) automatic, setpoint = get_automatic_setpoint(thermostat_mode['mode0']) try: if cooling: self._thermostats_config = { thermostat.id: thermostat for thermostat in self.load_cooling_thermostats() } else: self._thermostats_config = { thermostat.id: thermostat for thermostat in self.load_heating_thermostats() } except CommunicationFailure: return thermostats = [] for thermostat_id in range(32): thermostat_dto = self._thermostats_config[ thermostat_id] # type: ThermostatDTO if thermostat_dto.in_use: t_mode = thermostat_mode['mode{0}'.format(thermostat_id)] t_automatic, t_setpoint = get_automatic_setpoint(t_mode) thermostat = { 'id': thermostat_id, 'act': thermostat_info['tmp{0}'.format( thermostat_id)].get_temperature(), 'csetp': thermostat_info['setp{0}'.format( thermostat_id)].get_temperature(), 'outside': thermostat_info['outside'].get_temperature(), 'mode': t_mode, 'automatic': t_automatic, 'setpoint': t_setpoint, 'name': thermostat_dto.name, 'sensor_nr': thermostat_dto.sensor, 'airco': 1 if aircos.status[thermostat_id] else 0 } for output in [0, 1]: output_id = getattr(thermostat_dto, 'output{0}'.format(output)) output_state_dto = status.get(output_id) if output_id is not None and output_state_dto is not None and output_state_dto.status: thermostat['output{0}'.format( output)] = output_state_dto.dimmer else: thermostat['output{0}'.format(output)] = 0 thermostats.append(thermostat) self._thermostat_status.full_update({ 'thermostats_on': thermostats_on, 'automatic': automatic, 'setpoint': setpoint, 'cooling': cooling, 'status': thermostats }) self._thermostats_last_updated = time.time() 
    def get_thermostat_status(self):
        # type: () -> ThermostatGroupStatusDTO
        """ Returns thermostat information """
        self._refresh_thermostats()  # Always return the latest information
        master_status = self._thermostat_status.get_thermostats()
        return ThermostatGroupStatusDTO(
            id=0,
            on=master_status['thermostats_on'],
            automatic=master_status['automatic'],
            setpoint=master_status['setpoint'],
            cooling=master_status['cooling'],
            statusses=[ThermostatStatusDTO(id=thermostat['id'],
                                           actual_temperature=thermostat['act'],
                                           setpoint_temperature=thermostat['csetp'],
                                           outside_temperature=thermostat['outside'],
                                           mode=thermostat['mode'],
                                           automatic=thermostat['automatic'],
                                           setpoint=thermostat['setpoint'],
                                           name=thermostat['name'],
                                           sensor_id=thermostat['sensor_nr'],
                                           airco=thermostat['airco'],
                                           output_0_level=thermostat['output0'],
                                           output_1_level=thermostat['output1'])
                       for thermostat in master_status['status']])
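
# Illustrative sketch (not part of the gateway code): how the global thermostat mode byte used in
# set_thermostat_mode and _refresh_thermostats above is composed and decoded. Bit 7 = on,
# bit 6 = multi-tenant, bit 4 = cooling, bit 3 = automatic, bits 0-2 = setpoint (only meaningful
# when the mode is manual).


def encode_thermostat_mode(on, cooling, automatic, setpoint=0):
    # type: (bool, bool, bool, int) -> int
    mode = 0
    mode |= (1 if on else 0) << 7
    mode |= 1 << 6  # multi-tenant mode
    mode |= (1 if cooling else 0) << 4
    mode |= (1 if automatic else 0) << 3
    mode |= setpoint & 0b00000111
    return mode


def decode_thermostat_mode(mode):
    # type: (int) -> dict
    automatic = bool(mode & (1 << 3))
    return {'on': bool(mode & (1 << 7)),
            'cooling': bool(mode & (1 << 4)),
            'automatic': automatic,
            'setpoint': 0 if automatic else (mode & 0b00000111)}


assert decode_thermostat_mode(encode_thermostat_mode(on=True, cooling=False, automatic=False, setpoint=5)) == \
    {'on': True, 'cooling': False, 'automatic': False, 'setpoint': 5}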
class MasterHeartbeat(object):
    """ Monitors the status of the master communication. """

    @Inject
    def __init__(self, master_communicator=INJECTED):
        # type: (MasterCommunicator) -> None
        self._master_communicator = master_communicator
        self._failures = -1  # Start "offline"
        self._backoff = 60
        self._last_restart = 0.0
        self._min_threshold = 2
        self._thread = DaemonThread(name='masterheartbeat',
                                    target=self._heartbeat,
                                    interval=30, delay=5)

    def start(self):
        # type: () -> None
        logger.info('Starting master heartbeat')
        self._thread.start()

    def stop(self):
        # type: () -> None
        self._thread.stop()

    def is_online(self):
        # type: () -> bool
        if self._failures == -1:
            self._thread.request_single_run()
            time.sleep(2)
        return self._failures == 0

    def set_offline(self):
        # type: () -> None
        self._failures += 1

    def get_communicator_health(self):
        # type: () -> HEALTH
        if self._failures > self._min_threshold:
            stats = self._check_stats()
            if stats is None:
                return CommunicationStatus.UNSTABLE
            elif stats:
                return CommunicationStatus.SUCCESS
            else:
                return CommunicationStatus.FAILURE
        else:
            return CommunicationStatus.SUCCESS

    def _heartbeat(self):
        # type: () -> None
        if self._failures > self._min_threshold and self._last_restart < time.time() - self._backoff:
            logger.error('Master heartbeat failure, restarting communication')
            try:
                self._master_communicator.stop()
            finally:
                self._master_communicator.start()
            self._last_restart = time.time()
            self._backoff = self._backoff * 2
        try:
            self._master_communicator.do_command(master_api.status())
            if self._failures > 0:
                logger.info('Master heartbeat recovered after %s failures', self._failures)
            self._failures = 0
        except CommunicationTimedOutException:
            self._failures += 1
            logger.error('Master heartbeat %s failures', self._failures)
            raise DaemonThreadWait()
        except Exception:
            logger.error('Master heartbeat unhandled exception')
            raise

    def _check_stats(self):
        # type: () -> Optional[bool]
        """ Analyzes communication statistics to decide whether recent timeouts are significant. """
        stats = self._master_communicator.get_communication_statistics()
        calls_timedout = [call for call in stats['calls_timedout']]
        calls_succeeded = [call for call in stats['calls_succeeded']]
        all_calls = sorted(calls_timedout + calls_succeeded)
        if len(calls_timedout) == 0:
            # There are no timeouts at all
            return True
        elif len(all_calls) <= 10:
            # Not enough calls made to have a decent view on what's going on
            logger.warning('Observed master communication failures, but not enough calls')
            return None
        elif not any(t in calls_timedout for t in all_calls[-10:]):
            # The last 10 calls are successful
            logger.warning('Observed master communication failures, but recent calls recovered')
            return None
        calls_last_x_minutes = [t for t in all_calls if t > time.time() - 180]
        if len(calls_last_x_minutes) <= 5:
            # Not enough recent calls
            logger.warning('Observed master communication failures, but not recent enough')
            return None
        ratio = len([t for t in calls_last_x_minutes if t in calls_timedout]) / float(len(calls_last_x_minutes))
        if ratio < 0.25:
            # Less than 25% of the calls fail, let's assume everything is just "fine"
            logger.warning('Observed master communication failures, but there\'s only a failure ratio of {:.2f}%'.format(ratio * 100))
            return None
        else:
            return False
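
# Illustrative sketch (not part of the gateway code): the decision logic MasterHeartbeat._check_stats
# applies. Given timestamps of timed-out and succeeded calls it returns True (healthy), False
# (unhealthy) or None (inconclusive), mirroring the thresholds above: at least 10 calls overall,
# at least one timeout in the last 10 calls, at least 5 calls in the last 3 minutes, and a 25%
# failure ratio. The sample timestamps are hypothetical.
import time


def classify_communication(calls_timedout, calls_succeeded, now=None):
    # type: (list, list, float) -> Optional[bool]
    now = time.time() if now is None else now
    all_calls = sorted(calls_timedout + calls_succeeded)
    if not calls_timedout:
        return True
    if len(all_calls) <= 10:
        return None  # Not enough calls to judge
    if not any(t in calls_timedout for t in all_calls[-10:]):
        return None  # Recent calls recovered
    recent = [t for t in all_calls if t > now - 180]
    if len(recent) <= 5:
        return None  # Not enough recent calls
    ratio = len([t for t in recent if t in calls_timedout]) / float(len(recent))
    return None if ratio < 0.25 else False


# Example: 10 recent calls of which 3 timed out -> 30% failure ratio -> unhealthy.
_now = 1000000.0
assert classify_communication([_now - i for i in (5, 15, 25, 200, 300, 400)],
                              [_now - i for i in (10, 20, 30, 40, 50, 60, 70)], now=_now) is False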
class TimeKeeper(object):
    """ The TimeKeeper keeps track of time and sets the day or night mode on the power modules. """

    def __init__(self, power_communicator, power_controller, period):
        # type: (Any, Any, int) -> None
        self.__power_communicator = power_communicator
        self.__power_controller = power_controller
        self.__period = period
        self.__mode = {}  # type: Dict[str, List[int]]
        self.__thread = None  # type: Optional[DaemonThread]
        self.__stop = False

    def start(self):
        # type: () -> None
        """ Start the background thread of the TimeKeeper. """
        if self.__thread is None:
            logger.info("Starting TimeKeeper")
            self.__stop = False
            self.__thread = DaemonThread(name='timekeeper',
                                         target=self.__run,
                                         interval=self.__period)
            self.__thread.start()
        else:
            raise Exception("TimeKeeper thread already running.")

    def stop(self):
        # type: () -> None
        """ Stop the background thread in the TimeKeeper. """
        if self.__thread is not None:
            self.__thread.stop()
            self.__thread = None
        else:
            raise Exception("TimeKeeper thread not running.")

    def __run(self):
        # type: () -> None
        """ One run of the background thread. """
        date = datetime.now()
        for module in self.__power_controller.get_power_modules().values():
            version = module['version']
            if version == power_api.P1_CONCENTRATOR:
                continue
            daynight = []
            for i in range(power_api.NUM_PORTS[version]):
                if self.is_day_time(module['times%d' % i], date):
                    daynight.append(power_api.DAY)
                else:
                    daynight.append(power_api.NIGHT)
            self.__set_mode(version, module['address'], daynight)

    @staticmethod
    def is_day_time(_times, date):
        # type: (Optional[str], datetime) -> bool
        """ Check if a date is in day time. """
        if _times is None:
            times = [0 for _ in range(14)]  # type: List[int]
        else:
            times = [int(t.replace(":", "")) for t in _times.split(",")]
        day_of_week = date.weekday()  # 0 = Monday, 6 = Sunday
        current_time = date.hour * 100 + date.minute
        start = times[day_of_week * 2]
        stop = times[day_of_week * 2 + 1]
        return stop > current_time >= start

    def __set_mode(self, version, address, bytes):
        # type: (int, str, List[int]) -> None
        """ Set the power modules mode. """
        if address not in self.__mode or self.__mode[address] != bytes:
            logger.info("Setting day/night mode to " + str(bytes))
            self.__power_communicator.do_command(address, power_api.set_day_night(version), *bytes)
            self.__mode[address] = bytes
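
# Illustrative sketch (not part of the gateway code): the `times` string format consumed by
# TimeKeeper.is_day_time above. The string holds 14 comma-separated HH:MM values, a
# (day start, day stop) pair per weekday starting on Monday; times are compared as HHMM integers.
# The schedule below is hypothetical.
from datetime import datetime


def parse_day_window(times, when):
    # type: (str, datetime) -> bool
    values = [int(t.replace(':', '')) for t in times.split(',')]
    start = values[when.weekday() * 2]
    stop = values[when.weekday() * 2 + 1]
    current = when.hour * 100 + when.minute
    return stop > current >= start


# Day time on weekdays from 07:00 to 22:00, never in the weekend.
schedule = '07:00,22:00,' * 5 + '00:00,00:00,00:00,00:00'
assert parse_day_window(schedule, datetime(2021, 3, 1, 12, 0)) is True   # Monday noon
assert parse_day_window(schedule, datetime(2021, 3, 6, 12, 0)) is False  # Saturday noon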
class OutputController(BaseController): SYNC_STRUCTURES = [SyncStructure(Output, 'output')] @Inject def __init__(self, master_controller=INJECTED): # type: (MasterController) -> None super(OutputController, self).__init__(master_controller) self._cache = OutputStateCache() self._sync_state_thread = None # type: Optional[DaemonThread] self._pubsub.subscribe_master_events(PubSub.MasterTopics.OUTPUT, self._handle_master_event) def start(self): # type: () -> None super(OutputController, self).start() self._sync_state_thread = DaemonThread(name='outputsyncstate', target=self._sync_state, interval=600, delay=10) self._sync_state_thread.start() def stop(self): # type: () -> None super(OutputController, self).stop() if self._sync_state_thread: self._sync_state_thread.stop() self._sync_state_thread = None def _handle_master_event(self, master_event): # type: (MasterEvent) -> None super(OutputController, self)._handle_master_event(master_event) if master_event.type == MasterEvent.Types.MODULE_DISCOVERY: if self._sync_state_thread: self._sync_state_thread.request_single_run() if master_event.type == MasterEvent.Types.OUTPUT_STATUS: self._handle_output_status(master_event.data['state']) if master_event.type == MasterEvent.Types.EXECUTE_GATEWAY_API: if master_event.data['type'] == MasterEvent.APITypes.SET_LIGHTS: action = master_event.data['data'][ 'action'] # type: Literal['ON', 'OFF', 'TOGGLE'] floor_id = master_event.data['data'][ 'floor_id'] # type: Optional[int] self.set_all_lights(action=action, floor_id=floor_id) def _handle_output_status(self, state_dto): # type: (OutputStateDTO) -> None changed, output_dto = self._cache.handle_change(state_dto) if changed and output_dto is not None: self._publish_output_change(output_dto) def _sync_state(self): try: self.load_outputs() for state_dto in self._master_controller.load_output_status(): _, output_dto = self._cache.handle_change(state_dto) if output_dto is not None: # Always send events on the background sync self._publish_output_change(output_dto) except CommunicationTimedOutException: logger.error( 'Got communication timeout during synchronization, waiting 10 seconds.' 
) raise DaemonThreadWait except CommunicationFailure: # This is an expected situation raise DaemonThreadWait def _publish_output_change(self, output_dto): # type: (OutputDTO) -> None event_status = { 'on': output_dto.state.status, 'locked': output_dto.state.locked } if output_dto.module_type in ['d', 'D']: event_status['value'] = output_dto.state.dimmer event_data = { 'id': output_dto.id, 'status': event_status, 'location': { 'room_id': Toolbox.denonify(output_dto.room, 255) } } gateway_event = GatewayEvent(GatewayEvent.Types.OUTPUT_CHANGE, event_data) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE, gateway_event) def get_output_status(self, output_id): # type: (int) -> OutputStateDTO # TODO also support plugins output_state_dto = self._cache.get_state().get(output_id) if output_state_dto is None: raise ValueError( 'Output with id {} does not exist'.format(output_id)) return output_state_dto def get_output_statuses(self): # type: () -> List[OutputStateDTO] # TODO also support plugins return list(self._cache.get_state().values()) def load_output(self, output_id): # type: (int) -> OutputDTO output = Output.select(Room) \ .join_from(Output, Room, join_type=JOIN.LEFT_OUTER) \ .where(Output.number == output_id) \ .get() # type: Output # TODO: Load dict output_dto = self._master_controller.load_output(output_id=output_id) output_dto.room = output.room.number if output.room is not None else None return output_dto def load_outputs(self): # type: () -> List[OutputDTO] output_dtos = [] for output in list( Output.select(Output, Room).join_from( Output, Room, join_type=JOIN.LEFT_OUTER)): # TODO: Load dicts output_dto = self._master_controller.load_output( output_id=output.number) output_dto.room = output.room.number if output.room is not None else None output_dtos.append(output_dto) self._cache.update_outputs(output_dtos) return output_dtos def save_outputs(self, outputs): # type: (List[OutputDTO]) -> None outputs_to_save = [] for output_dto in outputs: output = Output.get_or_none(number=output_dto.id) # type: Output if output is None: logger.info('Ignored saving non-existing Output {0}'.format( output_dto.id)) if 'room' in output_dto.loaded_fields: if output_dto.room is None: output.room = None elif 0 <= output_dto.room <= 100: output.room, _ = Room.get_or_create(number=output_dto.room) output.save() outputs_to_save.append(output_dto) self._master_controller.save_outputs(outputs_to_save) def set_all_lights( self, action, floor_id=None ): # type: (Literal['ON', 'OFF', 'TOGGLE'], Optional[int]) -> None # TODO: Also include other sources (e.g. plugins) once implemented if floor_id is None: self._master_controller.set_all_lights(action=action) return # TODO: Filter on output type "light" once available query = Output.select(Output.number) \ .join_from(Output, Room, join_type=JOIN.INNER) \ .join_from(Room, Floor, join_type=JOIN.INNER) \ .where(Floor.number == floor_id) output_ids = [output['number'] for output in query.dicts()] # It is unknown whether `floor` is known to the Master implementation. 
        # So pass both the floor_id and the list of Output ids to the MasterController
        self._master_controller.set_all_lights(action=action,
                                               floor_id=floor_id,
                                               output_ids=output_ids)

    def set_output_status(self, output_id, is_on, dimmer=None, timer=None):
        # type: (int, bool, Optional[int], Optional[int]) -> None
        self._master_controller.set_output(output_id=output_id,
                                           state=is_on,
                                           dimmer=dimmer,
                                           timer=timer)

    # Global (led) feedback

    def load_global_feedback(self, global_feedback_id):
        # type: (int) -> GlobalFeedbackDTO
        return self._master_controller.load_global_feedback(global_feedback_id=global_feedback_id)

    def load_global_feedbacks(self):
        # type: () -> List[GlobalFeedbackDTO]
        return self._master_controller.load_global_feedbacks()

    def save_global_feedbacks(self, global_feedbacks):
        # type: (List[GlobalFeedbackDTO]) -> None
        self._master_controller.save_global_feedbacks(global_feedbacks)
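
# Illustrative sketch (not part of the gateway code): the shape of the OUTPUT_CHANGE payload built
# by OutputController._publish_output_change above. The 'value' (dimmer level) key is only present
# for dimmer modules (module_type 'd'/'D'); a missing room is assumed to be substituted by the
# sentinel 255, mirroring Toolbox.denonify(output_dto.room, 255). All values are hypothetical.


def build_output_change_payload(output_id, on, locked, room, module_type, dimmer=None):
    # type: (int, bool, bool, Optional[int], str, Optional[int]) -> dict
    status = {'on': on, 'locked': locked}
    if module_type in ['d', 'D']:
        status['value'] = dimmer
    return {'id': output_id,
            'status': status,
            'location': {'room_id': 255 if room is None else room}}


assert build_output_change_payload(5, True, False, None, 'd', dimmer=80) == \
    {'id': 5, 'status': {'on': True, 'locked': False, 'value': 80}, 'location': {'room_id': 255}}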
class BaseController(object): SYNC_STRUCTURES = None # type: Optional[List[SyncStructure]] @Inject def __init__(self, master_controller, maintenance_controller=INJECTED, pubsub=INJECTED, sync_interval=900): # type: (MasterController, MaintenanceController, PubSub, float) -> None self._master_controller = master_controller self._maintenance_controller = maintenance_controller self._pubsub = pubsub self._sync_orm_thread = None # type: Optional[DaemonThread] self._sync_orm_interval = sync_interval self._sync_dirty = True # Always sync after restart. self._sync_running = False self._pubsub.subscribe_master_events(PubSub.MasterTopics.EEPROM, self._handle_master_event) self._pubsub.subscribe_master_events(PubSub.MasterTopics.MODULE, self._handle_master_event) def _handle_master_event(self, master_event): # type: (MasterEvent) -> None if master_event.type in [ MasterEvent.Types.EEPROM_CHANGE, MasterEvent.Types.MODULE_DISCOVERY ]: self._sync_dirty = True self.request_sync_orm() def start(self): self._sync_orm_thread = DaemonThread(name='{0}sync'.format( self.__class__.__name__.lower()[:10]), target=self._sync_orm, interval=self._sync_orm_interval, delay=300) self._sync_orm_thread.start() def stop(self): if self._sync_orm_thread is not None: self._sync_orm_thread.stop() def request_sync_orm(self): if self._sync_orm_thread is not None: self._sync_orm_thread.request_single_run() def run_sync_orm(self): self._sync_orm() def _sync_orm(self): # type: () -> bool if self.SYNC_STRUCTURES is None: return False if self._sync_running: for structure in self.SYNC_STRUCTURES: orm_model = structure.orm_model logger.info('ORM sync ({0}): Already running'.format( orm_model.__name__)) return False self._sync_running = True try: for structure in self.SYNC_STRUCTURES: orm_model = structure.orm_model try: name = structure.name skip = structure.skip start = time.time() logger.info('ORM sync ({0})'.format(orm_model.__name__)) ids = [] for dto in getattr(self._master_controller, 'load_{0}s'.format(name))(): if skip is not None and skip(dto): continue id_ = dto.id ids.append(id_) if not orm_model.select().where( orm_model.number == id_).exists(): orm_model.create(number=id_) orm_model.delete().where( orm_model.number.not_in(ids)).execute() duration = time.time() - start logger.info( 'ORM sync ({0}): completed after {1:.1f}s'.format( orm_model.__name__, duration)) except CommunicationTimedOutException as ex: logger.error('ORM sync ({0}): Failed: {1}'.format( orm_model.__name__, ex)) except Exception: logger.exception('ORM sync ({0}): Failed'.format( orm_model.__name__)) if self._sync_dirty: type_name = orm_model.__name__.lower() gateway_event = GatewayEvent( GatewayEvent.Types.CONFIG_CHANGE, {'type': type_name}) self._pubsub.publish_gateway_event( PubSub.GatewayTopics.CONFIG, gateway_event) self._sync_dirty = False finally: self._sync_running = False return True
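
# Illustrative sketch (not part of the gateway code): the reconciliation step performed per
# SyncStructure in BaseController._sync_orm above, expressed with plain sets instead of peewee
# queries. Ids reported by the master (minus skipped ones) are created when missing, and stale
# rows are deleted.


def reconcile(master_ids, orm_ids, skip=None):
    # type: (list, set, callable) -> tuple
    wanted = {id_ for id_ in master_ids if skip is None or not skip(id_)}
    to_create = wanted - orm_ids
    to_delete = orm_ids - wanted
    return to_create, to_delete


# Example: id 7 disappeared from the master and id 9 is new.
create, delete = reconcile(master_ids=[1, 2, 9], orm_ids={1, 2, 7})
assert create == {9} and delete == {7}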
class FrontpanelClassicController(FrontpanelController): IOCTL_I2C_SLAVE = 0x0703 BOARD_TYPE = Hardware.get_board_type() ACTION_BUTTON_GPIO = 38 if BOARD_TYPE == Hardware.BoardType.BB else 26 BUTTON = FrontpanelController.Buttons.ACTION AUTH_MODE_LEDS = [ FrontpanelController.Leds.ALIVE, FrontpanelController.Leds.CLOUD, FrontpanelController.Leds.VPN, FrontpanelController.Leds.COMMUNICATION_1, FrontpanelController.Leds.COMMUNICATION_2 ] if not BOARD_TYPE == Hardware.BoardType.BB: GPIO_LED_CONFIG = { FrontpanelController.Leds.POWER: 60, FrontpanelController.Leds.STATUS_RED: 48 } I2C_LED_CONFIG = { FrontpanelController.Leds.COMMUNICATION_1: 64, FrontpanelController.Leds.COMMUNICATION_2: 128, FrontpanelController.Leds.VPN: 16, FrontpanelController.Leds.ALIVE: 1, FrontpanelController.Leds.CLOUD: 4 } else: GPIO_LED_CONFIG = { FrontpanelController.Leds.POWER: 75, FrontpanelController.Leds.STATUS_RED: 60, FrontpanelController.Leds.ALIVE: 49 } I2C_LED_CONFIG = { FrontpanelController.Leds.COMMUNICATION_1: 64, FrontpanelController.Leds.COMMUNICATION_2: 128, FrontpanelController.Leds.VPN: 16, FrontpanelController.Leds.CLOUD: 4 } I2C_DEVICE = '/dev/i2c-2' if BOARD_TYPE == Hardware.BoardType.BB else '/dev/i2c-1' ALL_LEDS = [ FrontpanelController.Leds.POWER, FrontpanelController.Leds.STATUS_RED, FrontpanelController.Leds.COMMUNICATION_1, FrontpanelController.Leds.COMMUNICATION_2, FrontpanelController.Leds.VPN, FrontpanelController.Leds.ALIVE, FrontpanelController.Leds.CLOUD ] BLINK_SEQUENCE = { FrontpanelController.LedStates.OFF: [], FrontpanelController.LedStates.BLINKING_25: [0], FrontpanelController.LedStates.BLINKING_50: [0, 1], FrontpanelController.LedStates.BLINKING_75: [0, 1, 2], FrontpanelController.LedStates.SOLID: [0, 1, 2, 3] } @Inject def __init__(self, leds_i2c_address=INJECTED): # type: (int) -> None super(FrontpanelClassicController, self).__init__() self._leds_i2c_address = leds_i2c_address self._button_states = {} # type: Dict[str, bool] self._poll_button_thread = None self._write_leds_thread = None self._enabled_leds = {} # type: Dict[str, str] self._previous_leds = {} # type: Dict[str, bool] self._last_i2c_led_code = None # type: Optional[int] self._button_pressed_since = None # type: Optional[float] self._button_released = False self._blink_counter = 0 def _poll_button(self): # Check new state with open( '/sys/class/gpio/gpio{0}/value'.format( FrontpanelClassicController.ACTION_BUTTON_GPIO), 'r') as fh_inp: line = fh_inp.read() button_pressed = int(line) == 0 self._button_states[ FrontpanelClassicController.BUTTON] = button_pressed # Check for authorized mode if not button_pressed: self._button_released = True if self._authorized_mode: if time.time() > self._authorized_mode_timeout or ( button_pressed and self._button_released): self._authorized_mode = False else: if button_pressed: self._button_released = False if self._button_pressed_since is None: self._button_pressed_since = time.time() if time.time( ) - self._button_pressed_since > FrontpanelController.AUTH_MODE_PRESS_DURATION: self._authorized_mode = True self._authorized_mode_timeout = time.time( ) + FrontpanelController.AUTH_MODE_TIMEOUT self._button_pressed_since = None else: self._button_pressed_since = None def start(self): super(FrontpanelClassicController, self).start() # Enable power led self._enabled_leds[FrontpanelController.Leds. 
POWER] = FrontpanelController.LedStates.SOLID # Start polling/writing threads self._poll_button_thread = DaemonThread(name='buttonpoller', target=self._poll_button, interval=0.25) self._poll_button_thread.start() self._write_leds_thread = DaemonThread(name='ledwriter', target=self._write_leds, interval=0.25) self._write_leds_thread.start() def stop(self): super(FrontpanelClassicController, self).stop() if self._poll_button_thread is not None: self._poll_button_thread.stop() if self._write_leds_thread is not None: self._write_leds_thread.stop() def _report_carrier(self, carrier): # type: (bool) -> None state = FrontpanelController.LedStates.OFF if carrier else FrontpanelController.LedStates.SOLID self._enabled_leds[FrontpanelController.Leds.STATUS_RED] = state def _report_connectivity(self, connectivity): # type: (bool) -> None pass # No support for connectivity def _report_network_activity(self, activity): # type: (bool) -> None state = FrontpanelController.LedStates.BLINKING_50 if activity else FrontpanelController.LedStates.OFF self._enabled_leds[FrontpanelController.Leds.ALIVE] = state def _report_serial_activity(self, serial_port, activity): # type: (str, Optional[bool]) -> None led = { FrontpanelController.SerialPorts.ENERGY: FrontpanelController.Leds.COMMUNICATION_1, FrontpanelController.SerialPorts.MASTER_API: FrontpanelController.Leds.COMMUNICATION_2 }.get(serial_port) if led is None: return state = FrontpanelController.LedStates.BLINKING_50 if activity else FrontpanelController.LedStates.OFF self._enabled_leds[led] = state def _report_cloud_reachable(self, reachable): # type: (bool) -> None state = FrontpanelController.LedStates.SOLID if reachable else FrontpanelController.LedStates.OFF self._enabled_leds[FrontpanelController.Leds.CLOUD] = state def _report_vpn_open(self, vpn_open): # type: (bool) -> None state = FrontpanelController.LedStates.SOLID if vpn_open else FrontpanelController.LedStates.OFF self._enabled_leds[FrontpanelController.Leds.VPN] = state def _write_leds(self): # Override for indicate if self._indicate: self._enabled_leds[ FrontpanelController.Leds. 
STATUS_RED] = FrontpanelController.LedStates.BLINKING_25 # Map blinking states current_leds = self._map_states() # Drive I2C leds try: code = 0x0 for led in FrontpanelClassicController.I2C_LED_CONFIG: if current_leds.get(led, False) is True: code |= FrontpanelClassicController.I2C_LED_CONFIG[led] if self._authorized_mode: # Light all leds in authorized mode for led in FrontpanelClassicController.AUTH_MODE_LEDS: code |= FrontpanelClassicController.I2C_LED_CONFIG.get( led, 0x0) code = (~code) & 0xFF # Push code if needed if code != self._last_i2c_led_code: self._last_i2c_led_code = code with open(FrontpanelClassicController.I2C_DEVICE, 'r+', 1) as i2c: fcntl.ioctl(i2c, FrontpanelClassicController.IOCTL_I2C_SLAVE, self._leds_i2c_address) i2c.write(chr(code)) except Exception as ex: logger.error('Error while writing to i2c: {0}'.format(ex)) # Drive GPIO leds try: for led in FrontpanelClassicController.GPIO_LED_CONFIG: on = current_leds.get(led, False) if self._previous_leds.get(led) != on: self._previous_leds[led] = on try: gpio = FrontpanelClassicController.GPIO_LED_CONFIG[led] with open('/sys/class/gpio/gpio{0}/value'.format(gpio), 'w') as fh_s: fh_s.write('1' if on else '0') except IOError: pass # The GPIO doesn't exist or is read only except Exception as ex: logger.error('Error while writing to GPIO: {0}'.format(ex)) def _map_states(self): # type: () -> Dict[str, bool] current_leds = {} # type: Dict[str, bool] for led in FrontpanelClassicController.ALL_LEDS: requested_state = self._enabled_leds.get( led, FrontpanelController.LedStates.OFF) if requested_state == FrontpanelController.LedStates.OFF: current_leds[led] = False else: current_leds[ led] = self._blink_counter in FrontpanelClassicController.BLINK_SEQUENCE[ requested_state] self._blink_counter += 1 if self._blink_counter >= 4: self._blink_counter = 0 return current_leds
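
# Illustrative sketch (not part of the gateway code): how _map_states/_write_leds above turn a
# requested LED state into the inverted I2C byte. A 4-step blink counter gives 25/50/75/100% duty
# cycles, the bit per lit LED is OR-ed into a code, and the code is inverted before it is written.
# The bit values are the ones from I2C_LED_CONFIG; the LED names are shortened for the example.
BLINK_STEPS = {'OFF': [], 'BLINKING_25': [0], 'BLINKING_50': [0, 1],
               'BLINKING_75': [0, 1, 2], 'SOLID': [0, 1, 2, 3]}
I2C_BITS = {'COMMUNICATION_1': 64, 'COMMUNICATION_2': 128, 'VPN': 16, 'ALIVE': 1, 'CLOUD': 4}


def i2c_code(requested, blink_counter):
    # type: (dict, int) -> int
    code = 0x0
    for led, bit in I2C_BITS.items():
        if blink_counter in BLINK_STEPS.get(requested.get(led, 'OFF'), []):
            code |= bit
    return (~code) & 0xFF  # The byte is written inverted


# CLOUD solid and ALIVE blinking at 50%: both lit on step 0, only CLOUD on step 2.
assert i2c_code({'CLOUD': 'SOLID', 'ALIVE': 'BLINKING_50'}, blink_counter=0) == (~(4 | 1)) & 0xFF
assert i2c_code({'CLOUD': 'SOLID', 'ALIVE': 'BLINKING_50'}, blink_counter=2) == (~4) & 0xFF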
class Watchdog(object): """ The watchdog monitors various internal threads """ @Inject def __init__(self, power_communicator=INJECTED, master_controller=INJECTED): # type: (Optional[PowerCommunicator], MasterController) -> None self._master_controller = master_controller self._power_communicator = power_communicator self._watchdog_thread = None # type: Optional[DaemonThread] self.start_time = 0.0 def start(self): # type: () -> None if self._watchdog_thread is None: self.start_time = time.time() self._watchdog_thread = DaemonThread(name='watchdog', target=self._watch, interval=60, delay=10) self._watchdog_thread.start() def stop(self): # type: () -> None if self._watchdog_thread is not None: self._watchdog_thread.stop() self._watchdog_thread = None def _watch(self): # type: () -> None self._controller_health('master', self._master_controller, self._master_controller.cold_reset) if self._power_communicator: self._controller_health('energy', self._power_communicator, self._master_controller.power_cycle_bus) def _controller_health(self, name, controller, device_reset): # type: (str, Union[PowerCommunicator,MasterController], Callable[[],None]) -> None status = controller.get_communicator_health() if status == CommunicationStatus.SUCCESS: Config.remove_entry('communication_recovery_{0}'.format(name)) # Cleanup legacy Config.remove_entry('communication_recovery') elif status == CommunicationStatus.UNSTABLE: logger.warning('Observed unstable communication for %s', name) else: reset_action = self._get_reset_action(name, controller) if reset_action is not None: device_reset() if reset_action == 'service': time.sleep(15) os._exit(1) def _get_reset_action(self, name, controller): # type: (str, Union[MasterController,PowerCommunicator]) -> Optional[str] recovery_data_key = 'communication_recovery_{0}'.format(name) recovery_data = Config.get_entry( recovery_data_key, None) # type: Optional[Dict[str, Any]] if recovery_data is None: # Make mypy happy recovery_data = {} stats = controller.get_communication_statistics() calls_timedout = [call for call in stats['calls_timedout']] calls_succeeded = [call for call in stats['calls_succeeded']] service_restart = None device_reset = None backoff = 300 max_attempts = 3 last_device_reset = recovery_data.get('device_reset') last_service_restart = recovery_data.get('service_restart') if len(recovery_data) == 0: device_reset = 'communication_errors' else: backoff = 0 if last_device_reset is None else last_device_reset.get( 'backoff', backoff) if last_device_reset is None or last_device_reset[ 'time'] < time.time() - backoff: device_reset = 'communication_errors' backoff = min(1200, backoff * 2) else: if last_service_restart is None: service_restart = 'communication_errors' else: backoff = last_service_restart.get('backoff', backoff) if last_service_restart['time'] < time.time() - backoff: service_restart = 'communication_errors' backoff = min(1200, backoff * 2) if service_restart is not None or device_reset is not None: # Log debug information try: debug_buffer = controller.get_debug_buffer() action = device_reset or service_restart debug_data = { 'type': 'communication_recovery', 'info': { 'controller': name, 'action': action }, 'data': { 'buffer': debug_buffer, 'calls': { 'timedout': calls_timedout, 'succeeded': calls_succeeded } } } with open( '/tmp/debug_{0}_{1}.json'.format( name, int(time.time())), 'w') as recovery_file: recovery_file.write( json.dumps(debug_data, indent=4, sort_keys=True)) check_output( "ls -tp /tmp/ | grep 'debug_{0}_.*json' | tail -n +10 | 
while read file; do rm -r /tmp/$file; done" .format(name), shell=True) except Exception as ex: logger.exception('Could not store debug file: {0}'.format(ex)) if service_restart is not None: last_service_restart = last_service_restart or {} attempts = last_service_restart.get('attempts', 0) if attempts < max_attempts: logger.critical( 'Major issues in communication with {0}. Restarting service...' .format(name)) recovery_data['service_restart'] = { 'reason': service_restart, 'time': time.time(), 'attempts': attempts + 1, 'backoff': backoff } Config.set_entry(recovery_data_key, recovery_data) return 'service' else: logger.critical( 'Unable to recover issues in communication with {0}'. format(name)) if device_reset is not None: last_device_reset = last_device_reset or {} attempts = last_device_reset.get('attempts', 0) if attempts < max_attempts: logger.critical( 'Major issues in communication with {0}. Resetting {0}'. format(name)) recovery_data['device_reset'] = { 'reason': device_reset, 'time': time.time(), 'attempts': attempts + 1, 'backoff': backoff } Config.set_entry(recovery_data_key, recovery_data) return 'device' else: logger.critical( 'Unable to recover issues in communication with {0}'. format(name)) return None
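
# Illustrative sketch (not part of the gateway code): the backoff bookkeeping Watchdog._get_reset_action
# above keeps per controller, simplified. A device reset is attempted first; while its backoff
# (doubling, capped at 1200s) is still running, the watchdog escalates to a service restart, each
# capped at 3 attempts. Field names mirror the recovery_data dict above; the numbers are hypothetical.
import time


def next_action(recovery_data, now=None, max_attempts=3, default_backoff=300):
    # type: (dict, float, int, int) -> Optional[str]
    now = time.time() if now is None else now
    last_reset = recovery_data.get('device_reset')
    if last_reset is None or last_reset['time'] < now - last_reset.get('backoff', default_backoff):
        attempts = 0 if last_reset is None else last_reset.get('attempts', 0)
        if attempts < max_attempts:
            recovery_data['device_reset'] = {'time': now,
                                             'attempts': attempts + 1,
                                             'backoff': min(1200, (last_reset or {}).get('backoff', default_backoff) * 2)}
            return 'device'
    last_restart = recovery_data.get('service_restart')
    if last_restart is None or last_restart['time'] < now - last_restart.get('backoff', default_backoff):
        attempts = 0 if last_restart is None else last_restart.get('attempts', 0)
        if attempts < max_attempts:
            recovery_data['service_restart'] = {'time': now, 'attempts': attempts + 1}
            return 'service'
    return None  # Still backing off or out of attempts


# Example: a fresh failure triggers a device reset; an immediate second failure escalates.
data = {}
assert next_action(data, now=1000.0) == 'device'
assert next_action(data, now=1010.0) == 'service'  # Device reset still backing off -> escalate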
class MetricsController(object): """ The Metrics Controller collects all metrics and pushses them to all subscribers """ @Inject def __init__(self, plugin_controller=INJECTED, metrics_collector=INJECTED, metrics_cache_controller=INJECTED, gateway_uuid=INJECTED): # type: (PluginController, MetricsCollector, MetricsCacheController, str) -> None self._plugin_controller = plugin_controller self._metrics_collector = metrics_collector self._metrics_cache_controller = metrics_cache_controller self._persist_counters = {} # type: Dict self._buffer_counters = {} # type: Dict self.definitions = {} # type: Dict self._definition_filters = {'source': {}, 'metric_type': {}} # type: Dict self._metrics_cache = {} # type: Dict self._collector_plugins = None # type: Optional[DaemonThread] self._collector_openmotics = None # type: Optional[DaemonThread] self._internal_stats = None self._distributor_plugins = None # type: Optional[DaemonThread] self._distributor_openmotics = None # type: Optional[DaemonThread] self.metrics_queue_plugins = deque() # type: deque self.metrics_queue_openmotics = deque() # type: deque self.inbound_rates = {'total': 0} self.outbound_rates = {'total': 0} self._openmotics_receivers = [] # type: List self._cloud_cache = {} # type: Dict self._cloud_queue = [] # type: List self._cloud_buffer = [] # type: List self._cloud_buffer_length = 0 self._load_cloud_buffer() self._cloud_last_send = time.time() self._cloud_last_try = time.time() self._cloud_retry_interval = None # type: Optional[int] self._gateway_uuid = gateway_uuid self._throttled_down = False self.cloud_stats = {'queue': 0, 'buffer': self._cloud_buffer_length, 'time_ago_send': 0, 'time_ago_try': 0} # Metrics generated by the Metrics_Controller_ are also defined in the collector. Trying to get them in one place. 
for definition in self._metrics_collector.get_definitions(): self.definitions.setdefault('OpenMotics', {})[definition['type']] = definition settings = MetricsController._parse_definition(definition) self._persist_counters.setdefault('OpenMotics', {})[definition['type']] = settings['persist'] self._buffer_counters.setdefault('OpenMotics', {})[definition['type']] = settings['buffer'] def start(self): self._refresh_cloud_interval() self._collector_plugins = DaemonThread(name='metricplugincoll', target=self._collect_plugins, interval=1) self._collector_plugins.start() self._collector_openmotics = DaemonThread(name='metricplugindist', target=self._collect_openmotics, interval=1) self._collector_openmotics.start() self._distributor_plugins = DaemonThread(name='metricplugindist', target=self._distribute_plugins, interval=0, delay=0.1) self._distributor_plugins.start() self._distributor_openmotics = DaemonThread(name='metricomdist', target=self._distribute_openmotics, interval=0, delay=0.1) self._distributor_openmotics.start() def stop(self): # type: () -> None if self._collector_plugins is not None: self._collector_plugins.stop() if self._collector_openmotics is not None: self._collector_openmotics.stop() if self._distributor_plugins is not None: self._distributor_plugins.stop() if self._distributor_openmotics is not None: self._distributor_openmotics.stop() def set_cloud_interval(self, metric_type, interval, save=True): logger.info('Setting cloud interval {0}_{1}'.format(metric_type, interval)) self._metrics_collector.set_cloud_interval(metric_type, interval) if save: Config.set_entry('cloud_metrics_interval|{0}'.format(metric_type), interval) def _refresh_cloud_interval(self): for metric_type in self._metrics_collector.intervals: interval = Config.get_entry('cloud_metrics_interval|{0}'.format(metric_type), 300) self.set_cloud_interval(metric_type, interval, save=False) self._throttled_down = False def add_receiver(self, receiver): self._openmotics_receivers.append(receiver) def get_filter(self, filter_type, metric_filter): if metric_filter in self._definition_filters[filter_type]: return self._definition_filters[filter_type][metric_filter] if filter_type == 'source': results = [] re_filter = None if metric_filter is None else re.compile(metric_filter) for source in self.definitions.keys(): if re_filter is None or re_filter.match(source): results.append(source) results = set(results) self._definition_filters['source'][metric_filter] = results return results if filter_type == 'metric_type': results = [] re_filter = None if metric_filter is None else re.compile(metric_filter) for source in self.definitions.keys(): for metric_type in self.definitions.get(source, []): if re_filter is None or re_filter.match(metric_type): results.append(metric_type) results = set(results) self._definition_filters['metric_type'][metric_filter] = results return results def set_plugin_definitions(self, definitions): # { # "type": "energy", # "tags": ["device", "id"], # "metrics": [{"name": "power", # "description": "Total energy consumed (in kWh)", # "type": "counter", # "unit": "kWh"}] # } required_keys = {'type': six.string_types, 'metrics': list, 'tags': list} metrics_keys = {'name': six.string_types, 'description': six.string_types, 'type': six.string_types, 'unit': six.string_types} expected_plugins = [] for plugin, plugin_definitions in six.iteritems(definitions): log = self._plugin_controller.get_logger(plugin) for definition in plugin_definitions: definition_ok = True for key, key_type in required_keys.items(): 
if key not in definition: log('Definitions should contain keys: {0}'.format(', '.join(list(required_keys.keys())))) definition_ok = False break if not isinstance(definition[key], key_type): log('Definitions key {0} should be of type {1}'.format(key, key_type)) definition_ok = False break if key == 'metrics': for metric_definition in definition[key]: if definition_ok is False: break if not isinstance(metric_definition, dict): log('Metric definitions should be dictionaries') definition_ok = False break for mkey, mkey_type in metrics_keys.items(): if mkey not in metric_definition: log('Metric definitions should contain keys: {0}'.format(', '.join(list(metrics_keys.keys())))) definition_ok = False break if not isinstance(metric_definition[mkey], mkey_type): log('Metric definitions key {0} should be of type {1}'.format(mkey, mkey_type)) definition_ok = False break if definition_ok is False: break if definition_ok is True: expected_plugins.append(plugin) self.definitions.setdefault(plugin, {})[definition['type']] = definition settings = MetricsController._parse_definition(definition) self._persist_counters.setdefault(plugin, {})[definition['type']] = settings['persist'] self._buffer_counters.setdefault(plugin, {})[definition['type']] = settings['buffer'] for source in self.definitions.keys(): # Remove plugins from the self.definitions dict that are not found anymore if source != 'OpenMotics' and source not in expected_plugins: self.definitions.pop(source, None) self._persist_counters.pop(source, None) self._buffer_counters.pop(source, None) self._definition_filters['source'] = {} self._definition_filters['metric_type'] = {} def _load_cloud_buffer(self): oldest_queue_timestamp = min([time.time()] + [metric[0]['timestamp'] for metric in self._cloud_queue]) self._cloud_buffer = [[metric] for metric in self._metrics_cache_controller.load_buffer(before=oldest_queue_timestamp)] self._cloud_buffer_length = len(self._cloud_buffer) @staticmethod def _parse_definition(definition): settings = {'persist': {}, 'buffer': {}} for metric in definition['metrics']: if metric['type'] == 'counter': for policy in metric.get('policies', []): setting = True if isinstance(policy, dict): setting = {'key': policy['key'], 'matches': policy['matches']} policy = policy['policy'] # Backwards compatibility if policy == 'buffered': policy = 'buffer' if policy == 'persistent': policy = 'persist' settings[policy][metric['name']] = setting return settings def _needs_upload_to_cloud(self, metric): metric_type = metric['type'] metric_source = metric['source'] # get definition for metric source and type, getting the definitions for a metric_source is case sensitive! 
definition = self.definitions.get(metric_source, {}).get(metric_type) if definition is None: return False if Config.get_entry('cloud_enabled', False) is False: return False if metric_source == 'OpenMotics': if Config.get_entry('cloud_metrics_enabled|{0}'.format(metric_type), True) is False: return False # filter openmotics metrics that are not listed in cloud_metrics_types metric_types = Config.get_entry('cloud_metrics_types', []) if metric_type not in metric_types: return False else: # filter 3rd party (plugin) metrics that are not listed in cloud_metrics_sources metric_sources = Config.get_entry('cloud_metrics_sources', []) # make sure to get the lowercase metric_source if metric_source.lower() not in metric_sources: return False return True def receiver(self, metric): # type: (Dict[str,Any]) -> None """ Collects all metrics made available by the MetricsCollector and the plugins. These metrics are cached locally for configurable (and optional) pushing metrics to the Cloud. > example_definition = {"type": "energy", > "tags": ["device", "id"], > "metrics": [{"name": "power", > "description": "Total energy consumed (in kWh)", > "type": "counter", > "unit": "kWh"}]} > example_metric = {"source": "OpenMotics", > "type": "energy", > "timestamp": 1497677091, > "tags": {"device": "OpenMotics energy ID1", > "id": "E7.3"}, > "values": {"power": 1234}} """ metric_type = metric['type'] metric_source = metric['source'] if not self._needs_upload_to_cloud(metric): return if metric_source == 'OpenMotics': # round off timestamps for openmotics metrics modulo_interval = Config.get_entry('cloud_metrics_interval|{0}'.format(metric_type), 900) timestamp = int(metric['timestamp'] - metric['timestamp'] % modulo_interval) else: timestamp = int(metric['timestamp']) cloud_batch_size = Config.get_entry('cloud_metrics_batch_size', 0) cloud_min_interval = Config.get_entry('cloud_metrics_min_interval', None) # type: Optional[int] if cloud_min_interval is not None: self._cloud_retry_interval = cloud_min_interval endpoint = Config.get_entry('cloud_endpoint', None) # type: Optional[str] if endpoint is None: return metrics_endpoint = '{0}/{1}?uuid={2}'.format( endpoint if endpoint.startswith('http') else 'https://{0}'.format(endpoint), Config.get_entry('cloud_endpoint_metrics', ''), self._gateway_uuid ) counters_to_buffer = self._buffer_counters.get(metric_source, {}).get(metric_type, {}) definition = self.definitions.get(metric_source, {}).get(metric_type) identifier = '|'.join(['{0}={1}'.format(tag, metric['tags'][tag]) for tag in sorted(definition['tags'])]) # Check if the metric needs to be send entry = self._cloud_cache.setdefault(metric_source, {}).setdefault(metric_type, {}).setdefault(identifier, {}) include_this_metric = False if 'timestamp' not in entry: include_this_metric = True else: old_timestamp = entry['timestamp'] if old_timestamp < timestamp: include_this_metric = True # Add metrics to the send queue if they need to be send if include_this_metric is True: entry['timestamp'] = timestamp self._cloud_queue.append([metric]) self._cloud_queue = self._cloud_queue[-5000:] # 5k metrics buffer # Check timings/rates now = time.time() time_ago_send = int(now - self._cloud_last_send) time_ago_try = int(now - self._cloud_last_try) outstanding_data_length = len(self._cloud_buffer) + len(self._cloud_queue) send = False if outstanding_data_length > 0: # There must be outstanding data # Last send was successful, but the buffer length > batch size send |= outstanding_data_length >= cloud_batch_size and time_ago_send == 
time_ago_try if cloud_min_interval is not None: # Last send was successful, but it has been too long ago send |= time_ago_send > cloud_min_interval and time_ago_send == time_ago_try if self._cloud_retry_interval is not None: # Last send was unsuccessful, and it has been a while send |= time_ago_send > time_ago_try > self._cloud_retry_interval self.cloud_stats['queue'] = len(self._cloud_queue) self.cloud_stats['buffer'] = self._cloud_buffer_length self.cloud_stats['time_ago_send'] = time_ago_send self.cloud_stats['time_ago_try'] = time_ago_try if send is True: self._cloud_last_try = now try: # Try to send the metrics request = requests.post(metrics_endpoint, data={'metrics': json.dumps(self._cloud_buffer + self._cloud_queue)}, timeout=30.0) return_data = json.loads(request.text) if return_data.get('success', False) is False: raise RuntimeError('{0}'.format(return_data.get('error'))) # If successful; clear buffers if self._metrics_cache_controller.clear_buffer(metric['timestamp']) > 0: self._load_cloud_buffer() self._cloud_queue = [] self._cloud_last_send = now self._cloud_retry_interval = cloud_min_interval if self._throttled_down: self._refresh_cloud_interval() except Exception as ex: logger.error('Error sending metrics to Cloud: {0}'.format(ex)) if time_ago_send > 60 * 60: # Decrease metrics rate, but at least every 2 hours # Decrease cloud try interval, but at least every hour if time_ago_send < 6 * 60 * 60: self._cloud_retry_interval = 15 * 60 new_interval = 30 * 60 elif time_ago_send < 24 * 60 * 60: self._cloud_retry_interval = 30 * 60 new_interval = 60 * 60 else: self._cloud_retry_interval = 60 * 60 new_interval = 2 * 60 * 60 self._throttled_down = True metric_types = Config.get_entry('cloud_metrics_types', []) # type: List[str] for mtype in metric_types: self.set_cloud_interval(mtype, new_interval, save=False) # Buffer metrics if appropriate time_ago_send = int(now - self._cloud_last_send) time_ago_try = int(now - self._cloud_last_try) if time_ago_send > time_ago_try and include_this_metric is True and len(counters_to_buffer) > 0: cache_data = {} for counter, match_setting in six.iteritems(counters_to_buffer): if match_setting is not True: if metric['tags'][match_setting['key']] not in match_setting['matches']: continue cache_data[counter] = metric['values'][counter] if self._metrics_cache_controller.buffer_counter(metric_source, metric_type, metric['tags'], cache_data, metric['timestamp']): self._cloud_buffer_length += 1 if self._metrics_cache_controller.clear_buffer(time.time() - 365 * 24 * 60 * 60) > 0: self._load_cloud_buffer() def _put(self, metric): rate_key = '{0}.{1}'.format(metric['source'].lower(), metric['type'].lower()) if rate_key not in self.inbound_rates: self.inbound_rates[rate_key] = 0 self.inbound_rates[rate_key] += 1 self.inbound_rates['total'] += 1 self._transform_counters(metric) # Convert counters to "ever increasing counters" # No need to make a deep copy; openmotics doesn't alter the object, and for the plugins the metric gets (de)serialized self.metrics_queue_plugins.appendleft(metric) self.metrics_queue_openmotics.appendleft(metric) def _transform_counters(self, metric): # TODO: The 'persist' policy should be a part of the PulseCounterController source = metric['source'] mtype = metric['type'] for counter, match_setting in six.iteritems(self._persist_counters.get(source, {}).get(mtype, {})): if counter not in metric['values']: continue if match_setting is not True: if metric['tags'][match_setting['key']] not in match_setting['matches']: continue counter_type 
= type(metric['values'][counter]) counter_value = self._metrics_cache_controller.process_counter(source=source, mtype=mtype, tags=metric['tags'], name=counter, value=metric['values'][counter], timestamp=metric['timestamp']) metric['values'][counter] = counter_type(counter_value) def _collect_plugins(self): """ > example_definition = {"type": "energy", > "tags": ["device", "id"], > "metrics": [{"name": "power", > "description": "Total energy consumed (in kWh)", > "type": "counter", > "unit": "kWh"}]} > example_metric = {"source": "OpenMotics", > "type": "energy", > "timestamp": 1497677091, > "tags": {"device": "OpenMotics energy ID1", > "id": 0}, > "values": {"power": 1234}} """ start = time.time() for metric in self._plugin_controller.collect_metrics(): # Validation, part 1 source = metric['source'] log = self._plugin_controller.get_logger(source) required_keys = {'type': six.string_types, 'timestamp': (float, int), 'values': dict, 'tags': dict} metric_ok = True for key, key_type in required_keys.items(): if key not in metric: log('Metric should contain keys {0}'.format(', '.join(list(required_keys.keys())))) metric_ok = False break if not isinstance(metric[key], key_type): log('Metric key {0} should be of type {1}'.format(key, key_type)) metric_ok = False break if metric_ok is False: continue # Get metric definition definition = self.definitions.get(metric['source'], {}).get(metric['type']) if definition is None: continue # Validate metric based on definition if len(metric['tags']) == 0: log('At least one metric tag should be defined') metric_ok = False else: for tag_name, tag_value in six.iteritems(metric['tags']): # tags are optional but should be in the definition if tag_name not in definition['tags']: log('Metric tag {0} should be defined'.format(tag_name)) metric_ok = False if tag_value is None: log('Metric tag {0} should not be None'.format(tag_name)) metric_ok = False metric_values = set(metric['values'].keys()) if len(metric_values) == 0: log('Metric should have at least one value') metric_ok = False unknown_metrics = metric_values - set([mdef['name'] for mdef in definition['metrics']]) if len(unknown_metrics) > 0: log('Metric contains unknown values: {0}'.format(', '.join(unknown_metrics))) metric_ok = False if metric_ok is False: continue self._put(metric) def _collect_openmotics(self): # type: () -> None start = time.time() for metric in self._metrics_collector.collect_metrics(): self._put(metric) def _distribute_plugins(self): try: metrics = [] try: while len(metrics) < 250: metrics.append(self.metrics_queue_plugins.pop()) except IndexError: pass if metrics: rates = self._plugin_controller.distribute_metrics(metrics) for key, rate in six.iteritems(rates): if key not in self.outbound_rates: self.outbound_rates[key] = 0 self.outbound_rates[key] += rate else: raise DaemonThreadWait() except DaemonThreadWait: raise except Exception as ex: raise MetricsDistributeFailed('Error distributing metrics to plugins: {0}'.format(ex)) def _distribute_openmotics(self): # type: () -> None try: metric = self.metrics_queue_openmotics.pop() for receiver in self._openmotics_receivers: try: receiver(metric) except Exception as ex: logger.exception('error distributing metrics') raise MetricsDistributeFailed('Error distributing metrics to internal receivers: {0}'.format(ex)) rate_key = '{0}.{1}'.format(metric['source'].lower(), metric['type'].lower()) if rate_key not in self.outbound_rates: self.outbound_rates[rate_key] = 0 self.outbound_rates[rate_key] += 1 self.outbound_rates['total'] += 1 except 
IndexError: raise DaemonThreadWait() def event_receiver(self, event, payload): if event == OMBusEvents.METRICS_INTERVAL_CHANGE: for metric_type, interval in six.iteritems(payload): self.set_cloud_interval(metric_type, interval)
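# Illustrative sketch, not part of the original module: how a metric dict (in the
# layout documented in _collect_plugins above) could be fed into the controller's
# queues via _put(). The `metrics_controller` argument is a hypothetical, already
# wired instance of the metrics controller defined above.
def _example_put_metric(metrics_controller):
    import time
    metric = {'source': 'OpenMotics',
              'type': 'energy',
              'timestamp': time.time(),
              'tags': {'device': 'OpenMotics energy ID1', 'id': 0},
              'values': {'power': 1234}}
    # _put() updates the inbound rate counters, transforms persisted counters into
    # "ever increasing counters" and pushes the metric on both distribution queues.
    metrics_controller._put(metric)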
class VentilationController(object): @Inject def __init__(self, pubsub=INJECTED): # type: (PubSub) -> None self._pubsub = pubsub self._status = {} # type: Dict[int, VentilationStatusDTO] self.check_connected_runner = DaemonThread( 'check_connected_thread', self._check_connected_timeout, interval=30, delay=15) self.periodic_event_update_runner = DaemonThread( 'periodic_update', self._periodic_event_update, interval=900, delay=90) def start(self): # type: () -> None self._publish_config() self.check_connected_runner.start() self.periodic_event_update_runner.start() def stop(self): # type: () -> None self.check_connected_runner.stop() self.periodic_event_update_runner.stop() def _publish_config(self): # type: () -> None gateway_event = GatewayEvent(GatewayEvent.Types.CONFIG_CHANGE, {'type': 'ventilation'}) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.CONFIG, gateway_event) def _save_status_cache(self, state_dto): if self._status.get(state_dto.id) is not None and \ not (state_dto.timer is None and state_dto.remaining_time is None): if state_dto.timer is None: state_dto.timer = self._status[state_dto.id].timer if state_dto.remaining_time is None: state_dto.remaining_time = self._status[ state_dto.id].remaining_time self._status[state_dto.id] = state_dto return state_dto def _publish_state(self, state_dto): # type: (VentilationStatusDTO) -> None # if the timer or remaining time is set, the other value will not be set, # so cache the previous value so it does not get lost state_dto = self._save_status_cache(state_dto) event_data = { 'id': state_dto.id, 'mode': state_dto.mode, 'level': state_dto.level, 'timer': state_dto.timer, 'remaining_time': state_dto.remaining_time, 'is_connected': state_dto.is_connected } gateway_event = GatewayEvent(GatewayEvent.Types.VENTILATION_CHANGE, event_data) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE, gateway_event) def _periodic_event_update(self): for ventilation_id, ventilation_status_dto in self._status.items(): # Send the notification on a regular basis # The cloud will handle these events correctly based on the connected flag. self._publish_state(ventilation_status_dto) def _check_connected_timeout(self): for ventilation_id, ventilation_status_dto in self._status.items(): # Send the notification on a regular basis # The cloud will handle these events correctly based on the connected flag. if not ventilation_status_dto.is_connected and ventilation_status_dto.mode is not None: ventilation_status_dto.mode = None ventilation_status_dto.level = None ventilation_status_dto.remaining_time = None ventilation_status_dto.timer = None # also update the instance in the dict self._status[ventilation_id] = ventilation_status_dto # timeout has passed, send a disconnect event with all relevant fields as None. # This will also update the is_connected flag to the cloud. 
self._publish_state(ventilation_status_dto) def load_ventilations(self): # type: () -> List[VentilationDTO] return [ VentilationMapper.orm_to_dto(ventilation) for ventilation in Ventilation.select() ] def load_ventilation(self, ventilation_id): # type: (int) -> VentilationDTO ventilation = Ventilation.get(id=ventilation_id) return VentilationMapper.orm_to_dto(ventilation) def save_ventilation(self, ventilation_dto): # type: (VentilationDTO) -> None ventilation = VentilationMapper.dto_to_orm(ventilation_dto) if ventilation.id is None: logger.info('Registered new ventilation unit %s', ventilation) changed = ventilation.save() > 0 ventilation_dto.id = ventilation.id if changed: self._publish_config() def get_status(self): # type: () -> List[VentilationStatusDTO] status = [] for ventilation in Ventilation.select(): state_dto = self._status.get(ventilation.id) if state_dto: status.append(state_dto) return status def set_status(self, status_dto): # type: (VentilationStatusDTO) -> VentilationStatusDTO ventilation_dto = self.load_ventilation(status_dto.id) self._validate_state(ventilation_dto, status_dto) if not (status_dto == self._status.get(status_dto.id)): self._publish_state(status_dto) self._save_status_cache(status_dto) return status_dto def set_mode_auto(self, ventilation_id): # type: (int) -> None _ = self.load_ventilation(ventilation_id) status_dto = VentilationStatusDTO(ventilation_id, mode=VentilationStatusDTO.Mode.AUTO) if not (status_dto == self._status.get(ventilation_id)): self._save_status_cache(status_dto) self._publish_state(status_dto) def set_level(self, ventilation_id, level, timer=None): # type: (int, int, Optional[float]) -> None ventilation_dto = self.load_ventilation(ventilation_id) status_dto = VentilationStatusDTO( ventilation_id, mode=VentilationStatusDTO.Mode.MANUAL, level=level, timer=timer) self._validate_state(ventilation_dto, status_dto) if not (status_dto == self._status.get(ventilation_id)): self._save_status_cache(status_dto) self._publish_state(status_dto) def _validate_state(self, ventilation_dto, status_dto): # type: (VentilationDTO, VentilationStatusDTO) -> None if status_dto.level: if status_dto.mode == VentilationStatusDTO.Mode.AUTO: raise ValueError( 'ventilation mode {} does not support level'.format( status_dto.mode)) if status_dto.level < 0 or status_dto.level > ventilation_dto.amount_of_levels: values = list(range(ventilation_dto.amount_of_levels + 1)) raise ValueError('ventilation level {0} not in {1}'.format( status_dto.level, values))
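# Illustrative sketch, not part of the original module: driving a ventilation unit to
# a manual level with a timer and switching it back to AUTO afterwards. Assumes an
# injected VentilationController instance and an existing Ventilation record with id 1;
# both names are placeholders.
def _example_ventilation_usage(ventilation_controller):
    # Manual mode at level 2 for 5 minutes; _validate_state() checks the level range.
    ventilation_controller.set_level(1, level=2, timer=300.0)
    # Back to automatic mode; a VENTILATION_CHANGE gateway event is published if the
    # status actually changed.
    ventilation_controller.set_mode_auto(1)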
class OutputController(BaseController): SYNC_STRUCTURES = [SyncStructure(Output, 'output')] @Inject def __init__(self, master_controller=INJECTED): # type: (MasterController) -> None super(OutputController, self).__init__(master_controller) self._cache = OutputStateCache() self._sync_state_thread = None # type: Optional[DaemonThread] self._pubsub.subscribe_master_events(PubSub.MasterTopics.OUTPUT, self._handle_master_event) def start(self): # type: () -> None super(OutputController, self).start() self._sync_state_thread = DaemonThread(name='outputsyncstate', target=self._sync_state, interval=600, delay=10) self._sync_state_thread.start() def stop(self): # type: () -> None super(OutputController, self).stop() if self._sync_state_thread: self._sync_state_thread.stop() self._sync_state_thread = None def _handle_master_event(self, master_event): # type: (MasterEvent) -> None super(OutputController, self)._handle_master_event(master_event) if master_event.type == MasterEvent.Types.MODULE_DISCOVERY: if self._sync_state_thread: self._sync_state_thread.request_single_run() if master_event.type == MasterEvent.Types.OUTPUT_STATUS: self._handle_output_status(master_event.data) def _handle_output_status(self, change_data): # type: (Dict[str,Any]) -> None changed, output_dto = self._cache.handle_change( change_data['id'], change_data) if changed and output_dto is not None: self._publish_output_change(output_dto) def _sync_state(self): try: self.load_outputs() for state_data in self._master_controller.load_output_status(): if 'id' in state_data: _, output_dto = self._cache.handle_change( state_data['id'], state_data) if output_dto is not None: # Always send events on the background sync self._publish_output_change(output_dto) except CommunicationTimedOutException: logger.error( 'Got communication timeout during synchronization, waiting 10 seconds.' 
) raise DaemonThreadWait except CommunicationFailure: # This is an expected situation raise DaemonThreadWait def _publish_output_change(self, output_dto): # type: (OutputDTO) -> None event_status = { 'on': output_dto.state.status, 'locked': output_dto.state.locked } if output_dto.module_type in ['d', 'D']: event_status['value'] = output_dto.state.dimmer event_data = { 'id': output_dto.id, 'status': event_status, 'location': { 'room_id': Toolbox.denonify(output_dto.room, 255) } } gateway_event = GatewayEvent(GatewayEvent.Types.OUTPUT_CHANGE, event_data) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE, gateway_event) def get_output_status(self, output_id): # type: (int) -> OutputStateDTO # TODO also support plugins output_state_dto = self._cache.get_state().get(output_id) if output_state_dto is None: raise ValueError( 'Output with id {} does not exist'.format(output_id)) return output_state_dto def get_output_statuses(self): # type: () -> List[OutputStateDTO] # TODO also support plugins return list(self._cache.get_state().values()) def load_output(self, output_id): # type: (int) -> OutputDTO output = Output.select(Room) \ .join_from(Output, Room, join_type=JOIN.LEFT_OUTER) \ .where(Output.number == output_id) \ .get() # type: Output # TODO: Load dict output_dto = self._master_controller.load_output(output_id=output_id) output_dto.room = output.room.number if output.room is not None else None return output_dto def load_outputs(self): # type: () -> List[OutputDTO] output_dtos = [] for output in list( Output.select(Output, Room).join_from( Output, Room, join_type=JOIN.LEFT_OUTER)): # TODO: Load dicts output_dto = self._master_controller.load_output( output_id=output.number) output_dto.room = output.room.number if output.room is not None else None output_dtos.append(output_dto) self._cache.update_outputs(output_dtos) return output_dtos def save_outputs( self, outputs): # type: (List[Tuple[OutputDTO, List[str]]]) -> None outputs_to_save = [] for output_dto, fields in outputs: output = Output.get_or_none(number=output_dto.id) # type: Output if output is None: logger.info('Ignored saving non-existing Output {0}'.format( output_dto.id)) continue if 'room' in fields: if output_dto.room is None: output.room = None elif 0 <= output_dto.room <= 100: output.room, _ = Room.get_or_create(number=output_dto.room) output.save() outputs_to_save.append((output_dto, fields)) self._master_controller.save_outputs(outputs_to_save) def set_all_lights_off(self): # type: () -> None return self._master_controller.set_all_lights_off() def set_all_lights_floor_off(self, floor): # type: (int) -> None return self._master_controller.set_all_lights_floor_off(floor=floor) def set_all_lights_floor_on(self, floor): # type: (int) -> None return self._master_controller.set_all_lights_floor_on(floor=floor) def set_output_status(self, output_id, is_on, dimmer=None, timer=None): # type: (int, bool, Optional[int], Optional[int]) -> None self._master_controller.set_output(output_id=output_id, state=is_on, dimmer=dimmer, timer=timer)
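# Illustrative sketch, not part of the original module: switching an output on at 50%
# and reading the cached state back. The controller instance and output id 5 are
# placeholders; get_output_status() raises ValueError for unknown ids.
def _example_output_usage(output_controller):
    output_controller.set_output_status(5, is_on=True, dimmer=50)
    state_dto = output_controller.get_output_status(5)  # OutputStateDTO from the cache
    return state_dto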
class PubSub(object): class MasterTopics(object): EEPROM = 'eeprom' # type: MASTER_TOPIC MAINTENANCE = 'maintenance' # type: MASTER_TOPIC POWER = 'power' # type: MASTER_TOPIC MODULE = 'module' # type: MASTER_TOPIC OUTPUT = 'output' # type: MASTER_TOPIC INPUT = 'input' # type: MASTER_TOPIC SHUTTER = 'shutter' # type: MASTER_TOPIC class GatewayTopics(object): CONFIG = 'config' # type: GATEWAY_TOPIC STATE = 'state' # type: GATEWAY_TOPIC def __init__(self): # type: () -> None self._gateway_topics = defaultdict( list ) # type: Dict[GATEWAY_TOPIC,List[Callable[[GatewayEvent],None]]] self._master_topics = defaultdict( list ) # type: Dict[MASTER_TOPIC,List[Callable[[MasterEvent],None]]] self._master_events = Queue( ) # type: Queue # Queue[Tuple[str, MasterEvent]] self._gateway_events = Queue( ) # type: Queue # Queue[Tuple[str, GatewayEvent]] self._pub_thread = DaemonThread(name='pubsub', target=self._publisher_loop, interval=0.1, delay=0.2) self._is_running = False def start(self): # type: () -> None self._is_running = True self._pub_thread.start() def stop(self): # type: () -> None self._is_running = False self._master_events.put(None) self._gateway_events.put(None) self._pub_thread.stop() def _publisher_loop(self): while self._is_running: self._publish_all_events() def _publish_all_events(self): while True: try: event = self._master_events.get(block=True, timeout=0.25) if event is None: return self._publish_master_event(*event) except Empty: break while True: try: event = self._gateway_events.get(block=True, timeout=0.25) if event is None: return self._publish_gateway_event(*event) except Empty: break def subscribe_master_events(self, topic, callback): # type: (MASTER_TOPIC, Callable[[MasterEvent],None]) -> None self._master_topics[topic].append(callback) def publish_master_event(self, topic, master_event): # type: (MASTER_TOPIC, MasterEvent) -> None self._master_events.put((topic, master_event)) def _publish_master_event(self, topic, master_event): # type: (MASTER_TOPIC, MasterEvent) -> None callbacks = self._master_topics[topic] if callbacks: logger.debug('Received master event %s on topic "%s"', master_event.type, topic) else: logger.warning( 'Received master event %s on topic "%s" without subscribers', master_event.type, topic) for callback in callbacks: try: callback(master_event) except Exception: logger.exception('Failed to call handle %s for topic %s', callback, topic) def subscribe_gateway_events(self, topic, callback): # type: (GATEWAY_TOPIC, Callable[[GatewayEvent],None]) -> None self._gateway_topics[topic].append(callback) def publish_gateway_event(self, topic, gateway_event): # type: (GATEWAY_TOPIC, GatewayEvent) -> None self._gateway_events.put((topic, gateway_event)) def _publish_gateway_event(self, topic, gateway_event): # type: (GATEWAY_TOPIC, GatewayEvent) -> None callbacks = self._gateway_topics[topic] if callbacks: logger.debug('Received gateway event %s on topic "%s"', gateway_event.type, topic) else: logger.warning( 'Received gateway event %s on topic "%s" without subscribers', gateway_event.type, topic) for callback in callbacks: try: callback(gateway_event) except Exception: logger.exception('Failed to call handle %s for topic %s', callback, topic)
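# Illustrative sketch, not part of the original module: subscribing to gateway state
# events and publishing one through the PubSub defined above. The callback name and
# the event payload are placeholders.
def _example_pubsub_usage():
    import time

    def _on_state(gateway_event):  # type: (GatewayEvent) -> None
        logger.info('Received %s: %s', gateway_event.type, gateway_event.data)

    pubsub = PubSub()
    pubsub.subscribe_gateway_events(PubSub.GatewayTopics.STATE, _on_state)
    pubsub.start()
    # Events are queued here and delivered asynchronously by the 'pubsub' DaemonThread.
    pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE,
                                 GatewayEvent(GatewayEvent.Types.OUTPUT_CHANGE, {'id': 1}))
    time.sleep(0.5)  # give the publisher loop a chance to deliver before stopping
    pubsub.stop()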
class SchedulingController(object): """ The SchedulingController controls schedules and executes them. Based on their type, they can trigger different behavior. Supported actions: * GROUP_ACTION: Executes a Group Action * Required arguments: json encoded Group Action id * BASIC_ACTION: Executes a Basic Action * Required arguments: {'action_type': <action type>, 'action_number': <action number>} * LOCAL_API: Executes a local API call * Required arguments: {'name': '<name of the call>', 'parameters': {<kwargs for the call>}} Supported repeats: * None: Single execution at start time * String: Cron format, docs at https://github.com/kiorky/croniter """ NO_NTP_LOWER_LIMIT = 1546300800.0 # 2019-01-01 TIMEZONE = None @Inject def __init__(self, gateway_api=INJECTED, group_action_controller=INJECTED): # type: (GatewayApi, GroupActionController) -> None self._gateway_api = gateway_api self._group_action_controller = group_action_controller self._web_interface = None self._stop = False self._processor = None # type: Optional[DaemonThread] self._schedules = {} # type: Dict[int, Tuple[ScheduleDTO, Schedule]] SchedulingController.TIMEZONE = gateway_api.get_timezone() self.reload_schedules() def set_webinterface(self, web_interface): self._web_interface = web_interface def reload_schedules(self): found_ids = [] for schedule in Schedule.select(): schedule_dto = ScheduleMapper.orm_to_dto(schedule) schedule_dto.next_execution = SchedulingController._get_next_execution(schedule_dto) self._schedules[schedule_dto.id] = (schedule_dto, schedule) found_ids.append(schedule_dto.id) for schedule_id in list(self._schedules.keys()): if schedule_id not in found_ids: self._schedules.pop(schedule_id, None) def load_schedule(self, schedule_id): # type: (int) -> ScheduleDTO schedule = self._schedules.get(schedule_id) if schedule is None: raise Schedule.DoesNotExist('Schedule {0} does not exist'.format(schedule_id)) return schedule[0] def load_schedules(self): # type: () -> List[ScheduleDTO] return [dto for dto, _ in self._schedules.values()] def save_schedules(self, schedules): # type: (List[Tuple[ScheduleDTO, List[str]]]) -> None for schedule_dto, fields in schedules: schedule = ScheduleMapper.dto_to_orm(schedule_dto, fields) self._validate(schedule) schedule.save() self.reload_schedules() def remove_schedules(self, schedules): # type: (List[ScheduleDTO]) -> None _ = self Schedule.delete().where(Schedule.id.in_([s.id for s in schedules])).execute() self.reload_schedules() def start(self): self._stop = False self._processor = DaemonThread(target=self._process, name='schedulingctl', interval=60) self._processor.start() def stop(self): if self._processor is not None: self._processor.stop() def _process(self): for schedule_id in list(self._schedules.keys()): schedule_tuple = self._schedules.get(schedule_id) if schedule_tuple is None: continue schedule_dto, schedule = schedule_tuple if schedule_dto.status != 'ACTIVE': continue if schedule_dto.end is not None and schedule_dto.end < time.time(): schedule_dto.status = 'COMPLETED' schedule.status = 'COMPLETED' schedule.save() continue if schedule_dto.is_due: thread = BaseThread(name='schedulingexc', target=self._execute_schedule, args=(schedule_dto, schedule)) thread.daemon = True thread.start() @staticmethod def _get_next_execution(schedule_dto): # type: (ScheduleDTO) -> Optional[float] if schedule_dto.repeat is None: return None base_time = max(SchedulingController.NO_NTP_LOWER_LIMIT, schedule_dto.start, time.time()) cron = croniter(schedule_dto.repeat, 
datetime.fromtimestamp(base_time, pytz.timezone(SchedulingController.TIMEZONE))) return cron.get_next(ret_type=float) def _execute_schedule(self, schedule_dto, schedule): # type: (ScheduleDTO, Schedule) -> None if schedule_dto.running: return try: schedule_dto.running = True if schedule_dto.arguments is None: raise ValueError('Invalid schedule arguments') # Execute if schedule_dto.action == 'GROUP_ACTION': self._group_action_controller.do_group_action(schedule_dto.arguments) elif schedule_dto.action == 'BASIC_ACTION': self._gateway_api.do_basic_action(**schedule_dto.arguments) elif schedule_dto.action == 'LOCAL_API': func = getattr(self._web_interface, schedule_dto.arguments['name']) func(**schedule_dto.arguments['parameters']) else: logger.warning('Did not process schedule_dto {0}'.format(schedule_dto.name)) # Cleanup or prepare for next run schedule_dto.last_executed = time.time() if schedule_dto.has_ended: schedule_dto.status = 'COMPLETED' schedule.status = 'COMPLETED' schedule.save() except CommunicationTimedOutException as ex: logger.error('Got error while executing schedule: {0}'.format(ex)) except Exception as ex: logger.error('Got error while executing schedule: {0}'.format(ex)) schedule_dto.last_executed = time.time() finally: schedule_dto.running = False schedule_dto.next_execution = SchedulingController._get_next_execution(schedule_dto) def _validate(self, schedule): # type: (Schedule) -> None if schedule.name is None or not isinstance(schedule.name, six.string_types) or schedule.name.strip() == '': raise RuntimeError('A schedule must have a name') # Check whether the requested type is valid accepted_types = ['GROUP_ACTION', 'BASIC_ACTION', 'LOCAL_API'] if schedule.action not in accepted_types: raise RuntimeError('Unknown schedule type. Allowed: {0}'.format(', '.join(accepted_types))) # Check duration/repeat/end combinations if schedule.repeat is None: if schedule.end is not None: raise RuntimeError('No `end` is allowed when it is a non-repeated schedule') else: if not croniter.is_valid(schedule.repeat): raise RuntimeError('Invalid `repeat`. Should be a cron-style string. See croniter documentation') if schedule.duration is not None and schedule.duration <= 60: raise RuntimeError('If a duration is specified, it should be at least more than 60s') # Type specifc checks if schedule.action == 'BASIC_ACTION': if schedule.duration is not None: raise RuntimeError('A schedule of type BASIC_ACTION does not have a duration. It is a one-time trigger') arguments = None if schedule.arguments is None else json.loads(schedule.arguments) if (not isinstance(arguments, dict) or 'action_type' not in arguments or not isinstance(arguments['action_type'], int) or 'action_number' not in arguments or not isinstance(arguments['action_number'], int) or len(arguments) != 2): raise RuntimeError('The arguments of a BASIC_ACTION schedule must be of type dict with arguments `action_type` and `action_number`') elif schedule.action == 'GROUP_ACTION': if schedule.duration is not None: raise RuntimeError('A schedule of type GROUP_ACTION does not have a duration. It is a one-time trigger') arguments = None if schedule.arguments is None else json.loads(schedule.arguments) if not isinstance(arguments, int) or arguments < 0 or arguments > 254: raise RuntimeError('The arguments of a GROUP_ACTION schedule must be an integer, representing the Group Action to be executed') elif schedule.action == 'LOCAL_API': if schedule.duration is not None: raise RuntimeError('A schedule of type LOCAL_API does not have a duration. 
It is a one-time trigger') arguments = None if schedule.arguments is None else json.loads(schedule.arguments) if (not isinstance(arguments, dict) or 'name' not in arguments or 'parameters' not in arguments or not isinstance(arguments['parameters'], dict)): raise RuntimeError('The arguments of a LOCAL_API schedule must be of type dict with arguments `name` and `parameters`') func = getattr(self._web_interface, arguments['name']) if hasattr(self._web_interface, arguments['name']) else None if func is None or not callable(func) or not hasattr(func, 'plugin_exposed') or getattr(func, 'plugin_exposed') is False: raise RuntimeError('The arguments of a LOCAL_API schedule must specify a valid and (plugin_)exposed call') check = getattr(func, 'check') if check is not None: params_parser(arguments['parameters'], check)
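# Illustrative sketch, not part of the original module: the argument formats accepted
# by _validate() above for each schedule action type, plus a cron-style repeat. The
# concrete numbers and the LOCAL_API call name are made-up placeholders.
def _example_schedule_arguments():
    import json
    basic_action = json.dumps({'action_type': 165, 'action_number': 5})  # BASIC_ACTION
    group_action = json.dumps(10)  # GROUP_ACTION: id of the Group Action (0-254)
    local_api = json.dumps({'name': 'get_status', 'parameters': {}})  # LOCAL_API
    repeat = '0 8 * * 1-5'  # croniter format: every weekday at 08:00
    return basic_action, group_action, local_api, repeat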
class FrontpanelController(object): INDICATE_TIMEOUT = 30 AUTH_MODE_PRESS_DURATION = 5 AUTH_MODE_TIMEOUT = 60 BOARD_TYPE = Hardware.get_board_type() MAIN_INTERFACE = Hardware.get_main_interface() class Leds(object): EXPANSION = 'EXPANSION' STATUS_GREEN = 'STATUS_GREEN' STATUS_RED = 'STATUS_RED' CAN_STATUS_GREEN = 'CAN_STATUS_GREEN' CAN_STATUS_RED = 'CAN_STATUS_RED' CAN_COMMUNICATION = 'CAN_COMMUNICATION' P1 = 'P1' LAN_GREEN = 'LAN_GREEN' LAN_RED = 'LAN_RED' CLOUD = 'CLOUD' SETUP = 'SETUP' RELAYS_1_8 = 'RELAYS_1_8' RELAYS_9_16 = 'RELAYS_9_16' OUTPUTS_DIG_1_4 = 'OUTPUTS_DIG_1_4' OUTPUTS_DIG_5_7 = 'OUTPUTS_DIG_5_7' OUTPUTS_ANA_1_4 = 'OUTPUTS_ANA_1_4' INPUTS = 'INPUTS' POWER = 'POWER' ALIVE = 'ALIVE' VPN = 'VPN' COMMUNICATION_1 = 'COMMUNICATION_1' COMMUNICATION_2 = 'COMMUNICATION_2' class LedStates(object): OFF = 'OFF' BLINKING_25 = 'BLINKING_25' BLINKING_50 = 'BLINKING_50' BLINKING_75 = 'BLINKING_75' SOLID = 'SOLID' class Buttons(object): SELECT = 'SELECT' SETUP = 'SETUP' ACTION = 'ACTION' CAN_POWER = 'CAN_POWER' class ButtonStates(object): PRESSED = 'PRESSED' RELEASED = 'RELEASED' class SerialPorts(object): MASTER_API = 'MASTER_API' ENERGY = 'ENERGY' P1 = 'P1' @Inject def __init__(self, master_controller=INJECTED, power_communicator=INJECTED): # type: (MasterController, PowerCommunicator) -> None self._master_controller = master_controller self._power_communicator = power_communicator self._network_carrier = None self._network_activity = None self._network_activity_scan_counter = 0 self._network_bytes = 0 self._check_network_activity_thread = None self._authorized_mode = False self._authorized_mode_timeout = 0 self._indicate = False self._indicate_timeout = 0 self._master_stats = 0, 0 self._power_stats = 0, 0 @property def authorized_mode(self): # return Platform.get_platform() == Platform.Type.CORE_PLUS or self._authorized_mode # Needed to validate Brain+ with no front panel attached return self._authorized_mode def event_receiver(self, event, payload): if event == OMBusEvents.CLOUD_REACHABLE: self._report_cloud_reachable(payload) elif event == OMBusEvents.VPN_OPEN: self._report_vpn_open(payload) elif event == OMBusEvents.CONNECTIVITY: self._report_connectivity(payload) def start(self): self._check_network_activity_thread = DaemonThread( name='frontpanel', target=self._do_frontpanel_tasks, interval=0.5) self._check_network_activity_thread.start() def stop(self): if self._check_network_activity_thread is not None: self._check_network_activity_thread.stop() def _report_carrier(self, carrier): # type: (bool) -> None raise NotImplementedError() def _report_connectivity(self, connectivity): # type: (bool) -> None raise NotImplementedError() def _report_network_activity(self, activity): # type: (bool) -> None raise NotImplementedError() def _report_serial_activity(self, serial_port, activity): # type: (str, Optional[bool]) -> None raise NotImplementedError() def _report_cloud_reachable(self, reachable): # type: (bool) -> None raise NotImplementedError() def _report_vpn_open(self, vpn_open): # type: (bool) -> None raise NotImplementedError() def indicate(self): self._indicate = True self._indicate_timeout = time.time( ) + FrontpanelController.INDICATE_TIMEOUT def _do_frontpanel_tasks(self): # Check network activity try: with open( '/sys/class/net/{0}/carrier'.format( FrontpanelController.MAIN_INTERFACE), 'r') as fh_up: line = fh_up.read() carrier = int(line) == 1 carrier_changed = self._network_carrier != carrier if carrier_changed: self._network_carrier = carrier self._report_carrier(carrier) # 
Check network activity every second, or if the carrier changed if self._network_activity_scan_counter >= 9 or carrier_changed: self._network_activity_scan_counter = 0 network_activity = False if self._network_carrier: # There's no activity when there's no carrier with open('/proc/net/dev', 'r') as fh_stat: for line in fh_stat.readlines(): if FrontpanelController.MAIN_INTERFACE not in line: continue received, transmitted = 0, 0 parts = line.split() if len(parts) == 17: received = parts[1] transmitted = parts[9] elif len(parts) == 16: (_, received) = tuple(parts[0].split(':')) transmitted = parts[8] new_bytes = received + transmitted if self._network_bytes != new_bytes: self._network_bytes = new_bytes network_activity = True else: network_activity = False if self._network_activity != network_activity: self._report_network_activity(network_activity) self._network_activity = network_activity self._network_activity_scan_counter += 1 except Exception as exception: logger.error( 'Error while checking network activity: {0}'.format(exception)) # Monitor serial activity try: stats = self._master_controller.get_communication_statistics() new_master_stats = (stats['bytes_read'], stats['bytes_written']) activity = self._master_stats[0] != new_master_stats[ 0] or self._master_stats[1] != new_master_stats[1] self._report_serial_activity( FrontpanelController.SerialPorts.MASTER_API, activity) self._master_stats = new_master_stats if self._power_communicator is None: new_power_stats = 0, 0 else: stats = self._power_communicator.get_communication_statistics() new_power_stats = (stats['bytes_read'], stats['bytes_written']) activity = self._power_stats[0] != new_power_stats[ 0] or self._power_stats[1] != new_power_stats[1] self._report_serial_activity( FrontpanelController.SerialPorts.ENERGY, activity) self._power_stats = new_power_stats activity = None # type: Optional[bool] # TODO: Load P1/RS232 activity self._report_serial_activity(FrontpanelController.SerialPorts.P1, activity) except Exception as exception: logger.error( 'Error while checking serial activity: {0}'.format(exception)) # Clear indicate timeout if time.time() > self._indicate_timeout: self._indicate = False
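# Illustrative sketch, not part of the original module: the /proc/net/dev parsing used
# in _do_frontpanel_tasks() above. Depending on formatting the interface name is either
# its own column (17 parts) or glued to the received-bytes counter (16 parts). The
# sample lines are made up.
def _example_parse_net_dev():
    for line in ('  eth0: 123456 789 0 0 0 0 0 0 654321 456 0 0 0 0 0 0',
                 '  eth0:123456 789 0 0 0 0 0 0 654321 456 0 0 0 0 0 0'):
        parts = line.split()
        if len(parts) == 17:
            received, transmitted = parts[1], parts[9]
        else:  # 16 parts
            _, received = tuple(parts[0].split(':'))
            transmitted = parts[8]
        yield received, transmitted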
class FrontpanelCoreController(FrontpanelController): LED_MAPPING_ID_TO_ENUM = { Platform.Type.CORE: { 0: { 4: FrontpanelController.Leds.STATUS_RED, 5: FrontpanelController.Leds.STATUS_GREEN, 13: FrontpanelController.Leds.SETUP, 14: FrontpanelController.Leds.CLOUD }, 1: { 4: FrontpanelController.Leds.CAN_STATUS_GREEN, 5: FrontpanelController.Leds.CAN_STATUS_RED, 11: FrontpanelController.Leds.LAN_RED, 12: FrontpanelController.Leds.LAN_GREEN, 13: FrontpanelController.Leds.P1, 15: FrontpanelController.Leds.CAN_COMMUNICATION } }, Platform.Type.CORE_PLUS: { 0: { 0: FrontpanelController.Leds.INPUTS, 1: FrontpanelController.Leds.EXPANSION, 2: FrontpanelController.Leds.STATUS_RED, 3: FrontpanelController.Leds.STATUS_GREEN, 5: FrontpanelController.Leds.LAN_RED, 6: FrontpanelController.Leds.CLOUD, 7: FrontpanelController.Leds.SETUP, 8: FrontpanelController.Leds.LAN_GREEN, 9: FrontpanelController.Leds.P1, 10: FrontpanelController.Leds.CAN_COMMUNICATION, 11: FrontpanelController.Leds.CAN_STATUS_RED, 12: FrontpanelController.Leds.CAN_STATUS_GREEN, 13: FrontpanelController.Leds.OUTPUTS_DIG_5_7, 14: FrontpanelController.Leds.OUTPUTS_ANA_1_4, 15: FrontpanelController.Leds.RELAYS_9_16 }, 1: { 6: FrontpanelController.Leds.RELAYS_1_8, 7: FrontpanelController.Leds.OUTPUTS_DIG_1_4 } } } LED_TO_BA = { FrontpanelController.Leds.P1: 6, FrontpanelController.Leds.LAN_GREEN: 7, FrontpanelController.Leds.LAN_RED: 8, FrontpanelController.Leds.CLOUD: 9 } BLINKING_MAP = { FrontpanelController.LedStates.BLINKING_25: 25, FrontpanelController.LedStates.BLINKING_50: 50, FrontpanelController.LedStates.BLINKING_75: 75, FrontpanelController.LedStates.SOLID: 100 } BUTTON_STATE_MAPPING_ID_TO_ENUM = { 0: FrontpanelController.ButtonStates.RELEASED, 1: FrontpanelController.ButtonStates.PRESSED } BUTTON_MAPPING_ID_TO_ENUM = { 0: FrontpanelController.Buttons.SETUP, 1: FrontpanelController.Buttons.ACTION, 2: FrontpanelController.Buttons.CAN_POWER, 3: FrontpanelController.Buttons.SELECT } @Inject def __init__( self, master_communicator=INJECTED): # type: (CoreCommunicator) -> None super(FrontpanelCoreController, self).__init__() self._master_communicator = master_communicator self._master_communicator.register_consumer( BackgroundConsumer(CoreAPI.event_information(), 0, self._handle_event)) self._led_states = {} # type: Dict[str, LedStateTracker] self._led_event_lock = Lock() self._carrier = True self._connectivity = True self._activity = False self._cloud = False self._vpn = False self._led_drive_states = {} # type: Dict[str, Tuple[bool, str]] self._check_buttons_thread = None self._authorized_mode_buttons = [False, False] self._authorized_mode_buttons_pressed_since = None # type: Optional[float] self._authorized_mode_buttons_released = False self._platform = Platform.get_platform() def _handle_event(self, data): # type: (Dict[str, Any]) -> None # From both the LED_BLINK and LED_ON event, the LED_ON event will always be send first core_event = MasterCoreEvent(data) if core_event.type == MasterCoreEvent.Types.LED_BLINK: with self._led_event_lock: chip = core_event.data['chip'] if chip in FrontpanelCoreController.LED_MAPPING_ID_TO_ENUM[ self._platform]: for led_id in range(16): led_name = FrontpanelCoreController.LED_MAPPING_ID_TO_ENUM[ self._platform][chip].get(led_id) if led_name is None: continue state_tracker = self._led_states.setdefault( led_name, LedStateTracker(led_name)) state_tracker.set_mode(core_event.data['leds'][led_id]) changed, state = state_tracker.get_state() if changed: logger.info('Led {0} state: {1}'.format( 
led_name, state)) elif core_event.type == MasterCoreEvent.Types.LED_ON: with self._led_event_lock: chip = core_event.data['chip'] if chip in FrontpanelCoreController.LED_MAPPING_ID_TO_ENUM[ self._platform]: for led_id in range(16): led_name = FrontpanelCoreController.LED_MAPPING_ID_TO_ENUM[ self._platform][chip].get(led_id) if led_name is None: continue state_tracker = self._led_states.setdefault( led_name, LedStateTracker(led_name)) event_state = core_event.data['leds'].get( led_id, MasterCoreEvent.LedStates.OFF) state_tracker.set_on( event_state != MasterCoreEvent.LedStates.OFF) elif core_event.type == MasterCoreEvent.Types.BUTTON_PRESS: state = FrontpanelCoreController.BUTTON_STATE_MAPPING_ID_TO_ENUM.get( core_event.data['state']) if state is not None: button = FrontpanelCoreController.BUTTON_MAPPING_ID_TO_ENUM[ core_event.data['button']] logger.info('Button {0} was {1}'.format(button, state)) # Detect authorized mode if button == FrontpanelController.Buttons.ACTION: self._authorized_mode_buttons[ 0] = state == FrontpanelController.ButtonStates.PRESSED elif button == FrontpanelController.Buttons.SETUP: self._authorized_mode_buttons[ 1] = state == FrontpanelController.ButtonStates.PRESSED def start(self): super(FrontpanelCoreController, self).start() # Start polling/writing threads self._check_buttons_thread = DaemonThread(name='buttonchecker', target=self._check_buttons, interval=0.25) self._check_buttons_thread.start() def stop(self): super(FrontpanelCoreController, self).stop() if self._check_buttons_thread is not None: self._check_buttons_thread.stop() def _check_buttons(self): buttons_pressed = self._authorized_mode_buttons == [True, True] if not buttons_pressed: self._authorized_mode_buttons_released = True if self._authorized_mode: if time.time() > self._authorized_mode_timeout or ( buttons_pressed and self._authorized_mode_buttons_released): logger.info('Authorized mode: inactive') self._authorized_mode = False else: if buttons_pressed: self._authorized_mode_buttons_released = False if self._authorized_mode_buttons_pressed_since is None: self._authorized_mode_buttons_pressed_since = time.time() if time.time( ) - self._authorized_mode_buttons_pressed_since > FrontpanelController.AUTH_MODE_PRESS_DURATION: logger.info('Authorized mode: active') self._authorized_mode = True self._authorized_mode_timeout = time.time( ) + FrontpanelController.AUTH_MODE_TIMEOUT self._authorized_mode_buttons_pressed_since = None else: self._authorized_mode_buttons_pressed_since = None def _report_carrier(self, carrier): # type: (bool) -> None self._carrier = carrier self._update_lan_leds() def _report_connectivity(self, connectivity): # type: (bool) -> None self._connectivity = connectivity self._update_lan_leds() def _report_network_activity(self, activity): # type: (bool) -> None self._activity = activity self._update_lan_leds() def _update_lan_leds(self): if not self._carrier or not self._connectivity: self._set_led(led=FrontpanelController.Leds.LAN_GREEN, on=False, mode=FrontpanelController.LedStates.SOLID) mode = FrontpanelController.LedStates.SOLID if self._carrier: mode = FrontpanelController.LedStates.BLINKING_50 self._set_led(led=FrontpanelController.Leds.LAN_RED, on=True, mode=mode) else: self._set_led(led=FrontpanelController.Leds.LAN_RED, on=False, mode=FrontpanelController.LedStates.SOLID) mode = FrontpanelController.LedStates.SOLID if self._activity: mode = FrontpanelController.LedStates.BLINKING_50 self._set_led(led=FrontpanelController.Leds.LAN_GREEN, on=True, mode=mode) def 
_report_serial_activity(self, serial_port, activity): # type: (str, Optional[bool]) -> None if serial_port != FrontpanelController.SerialPorts.P1: return mode = FrontpanelController.LedStates.SOLID on = True if activity is None: on = False elif activity: mode = FrontpanelController.LedStates.BLINKING_50 self._set_led(led=FrontpanelController.Leds.P1, on=on, mode=mode) def _report_cloud_reachable(self, reachable): # type: (bool) -> None self._cloud = reachable self._update_cloud_led() def _report_vpn_open(self, vpn_open): # type: (bool) -> None self._vpn = vpn_open self._update_cloud_led() def _update_cloud_led(self): # Cloud led state: # * Off: No heartbeat # * Blinking: Heartbeat but VPN not (yet) open # * Solid: Heartbeat and VPN is open on = True if not self._cloud and not self._vpn: mode = FrontpanelController.LedStates.SOLID on = False elif self._cloud != self._vpn: mode = FrontpanelController.LedStates.BLINKING_50 else: mode = FrontpanelController.LedStates.SOLID self._set_led(led=FrontpanelController.Leds.CLOUD, on=on, mode=mode) def _set_led(self, led, on, mode): # type: (str, bool, str) -> None if led not in FrontpanelCoreController.LED_TO_BA: return action = FrontpanelCoreController.LED_TO_BA[led] if mode not in FrontpanelCoreController.BLINKING_MAP: return state = self._led_drive_states.get(led) if state != (on, mode): extra_parameter = FrontpanelCoreController.BLINKING_MAP[mode] self._master_communicator.do_basic_action( BasicAction(action_type=210, action=action, device_nr=1 if on else 0, extra_parameter=extra_parameter)) self._led_drive_states[led] = on, mode
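# Illustrative sketch, not part of the original module: the BasicAction that _set_led()
# above would issue to blink the CLOUD led at 50%. The `master_communicator` argument
# stands for the injected CoreCommunicator.
def _example_blink_cloud_led(master_communicator):
    action = FrontpanelCoreController.LED_TO_BA[FrontpanelController.Leds.CLOUD]  # 9
    brightness = FrontpanelCoreController.BLINKING_MAP[FrontpanelController.LedStates.BLINKING_50]  # 50
    master_communicator.do_basic_action(
        BasicAction(action_type=210, action=action,
                    device_nr=1,  # 1 = on, 0 = off
                    extra_parameter=brightness))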
class ThermostatControllerGateway(ThermostatController): # TODO: At this moment, a pump group strictly speaking is not related to any thermostat, # nor to cooling/heating. Yet in the `classic` implementation there is. This means that # changing a pump group could influence another pump group, since their `number` is shared. THERMOSTAT_PID_UPDATE_INTERVAL = 60 PUMP_UPDATE_INTERVAL = 30 SYNC_CONFIG_INTERVAL = 900 @Inject def __init__(self, gateway_api=INJECTED, output_controller=INJECTED, pubsub=INJECTED): # type: (GatewayApi, OutputController, PubSub) -> None super(ThermostatControllerGateway, self).__init__(output_controller) self._gateway_api = gateway_api self._pubsub = pubsub self._running = False self._pid_loop_thread = None # type: Optional[DaemonThread] self._update_pumps_thread = None # type: Optional[DaemonThread] self._periodic_sync_thread = None # type: Optional[DaemonThread] self.thermostat_pids = {} # type: Dict[int, ThermostatPid] self._pump_valve_controller = PumpValveController() timezone = gateway_api.get_timezone() # we could also use an in-memory store, but this allows us to detect 'missed' transitions # e.g. in case when gateway was rebooting during a scheduled transition db_filename = constants.get_thermostats_scheduler_database_file() jobstores = { 'default': SQLAlchemyJobStore(url='sqlite:///{}'.format(db_filename)) } self._scheduler = BackgroundScheduler(jobstores=jobstores, timezone=timezone) def start(self): # type: () -> None logger.info('Starting gateway thermostatcontroller...') if not self._running: self._running = True self.refresh_config_from_db() self._pid_loop_thread = DaemonThread( name='thermostatpid', target=self._pid_tick, interval=self.THERMOSTAT_PID_UPDATE_INTERVAL) self._pid_loop_thread.start() self._update_pumps_thread = DaemonThread( name='thermostatpumps', target=self._update_pumps, interval=self.PUMP_UPDATE_INTERVAL) self._update_pumps_thread.start() self._periodic_sync_thread = DaemonThread( name='thermostatsync', target=self._periodic_sync, interval=self.SYNC_CONFIG_INTERVAL) self._periodic_sync_thread.start() self._scheduler.start() logger.info('Starting gateway thermostatcontroller... Done') else: raise RuntimeError( 'GatewayThermostatController already running. Please stop it first.' ) def stop(self): # type: () -> None if not self._running: logger.warning( 'Stopping an already stopped GatewayThermostatController.') self._running = False self._scheduler.shutdown(wait=False) if self._pid_loop_thread is not None: self._pid_loop_thread.stop() if self._update_pumps_thread is not None: self._update_pumps_thread.stop() if self._periodic_sync_thread is not None: self._periodic_sync_thread.stop() def _pid_tick(self): # type: () -> None for thermostat_number, thermostat_pid in self.thermostat_pids.items(): try: thermostat_pid.tick() except Exception: logger.exception( 'There was a problem with calculating thermostat PID {}'.
format(thermostat_pid)) def refresh_config_from_db(self): # type: () -> None self.refresh_thermostats_from_db() self._pump_valve_controller.refresh_from_db() def refresh_thermostats_from_db(self): # type: () -> None for thermostat in Thermostat.select(): thermostat_pid = self.thermostat_pids.get(thermostat.number) if thermostat_pid is None: thermostat_pid = ThermostatPid(thermostat, self._pump_valve_controller) thermostat_pid.subscribe_state_changes( self._thermostat_changed) self.thermostat_pids[thermostat.number] = thermostat_pid thermostat_pid.update_thermostat(thermostat) thermostat_pid.tick() # TODO: Delete stale/removed thermostats def _update_pumps(self): # type: () -> None try: self._pump_valve_controller.steer() except Exception: logger.exception('Could not update pumps.') def _periodic_sync(self): # type: () -> None try: self.refresh_config_from_db() except Exception: logger.exception('Could not get thermostat config.') def _sync_scheduler(self): # type: () -> None self._scheduler.remove_all_jobs( ) # TODO: This might have to be more efficient, as this generates I/O for thermostat_number, thermostat_pid in self.thermostat_pids.items(): start_date = datetime.datetime.utcfromtimestamp( float(thermostat_pid.thermostat.start)) day_schedules = thermostat_pid.thermostat.day_schedules schedule_length = len(day_schedules) for schedule in day_schedules: for seconds_of_day, new_setpoint in schedule.schedule_data.items( ): m, s = divmod(int(seconds_of_day), 60) h, m = divmod(m, 60) if schedule.mode == 'heating': args = [thermostat_number, new_setpoint, None] else: args = [thermostat_number, None, new_setpoint] if schedule_length % 7 == 0: self._scheduler.add_job(ThermostatControllerGateway. set_setpoint_from_scheduler, 'cron', start_date=start_date, day_of_week=schedule.index, hour=h, minute=m, second=s, args=args, name='T{}: {} ({}) {}'.format( thermostat_number, new_setpoint, schedule.mode, seconds_of_day)) else: # calendarinterval trigger is only supported in a future release of apscheduler # https://apscheduler.readthedocs.io/en/latest/modules/triggers/calendarinterval.html#module-apscheduler.triggers.calendarinterval day_start_date = start_date + datetime.timedelta( days=schedule.index) self._scheduler.add_job(ThermostatControllerGateway. 
set_setpoint_from_scheduler, 'calendarinterval', start_date=day_start_date, days=schedule_length, hour=h, minute=m, second=s, args=args, name='T{}: {} ({}) {}'.format( thermostat_number, new_setpoint, schedule.mode, seconds_of_day)) def set_current_setpoint(self, thermostat_number, temperature=None, heating_temperature=None, cooling_temperature=None): # type: (int, Optional[float], Optional[float], Optional[float]) -> None if temperature is None and heating_temperature is None and cooling_temperature is None: return thermostat = Thermostat.get(number=thermostat_number) # When setting a setpoint manually, switch to manual preset except for when we are in scheduled mode # scheduled mode will override the setpoint when the next edge in the schedule is triggered active_preset = thermostat.active_preset if active_preset.type not in [ Preset.Types.SCHEDULE, Preset.Types.MANUAL ]: active_preset = thermostat.get_preset(Preset.Types.MANUAL) thermostat.active_preset = active_preset if heating_temperature is None: heating_temperature = temperature if heating_temperature is not None: active_preset.heating_setpoint = float(heating_temperature) if cooling_temperature is None: cooling_temperature = temperature if cooling_temperature is not None: active_preset.cooling_setpoint = float(cooling_temperature) active_preset.save() thermostat_pid = self.thermostat_pids[thermostat_number] thermostat_pid.update_thermostat(thermostat) thermostat_pid.tick() def get_current_preset(self, thermostat_number): # type: (int) -> Preset thermostat = Thermostat.get(number=thermostat_number) return thermostat.active_preset def set_current_preset(self, thermostat_number, preset_type): # type: (int, str) -> None thermostat = Thermostat.get( number=thermostat_number) # type: Thermostat preset = thermostat.get_preset(preset_type) thermostat.active_preset = preset thermostat.save() thermostat_pid = self.thermostat_pids[thermostat_number] thermostat_pid.update_thermostat(thermostat) thermostat_pid.tick() @classmethod @Inject def set_setpoint_from_scheduler(cls, thermostat_number, heating_temperature=None, cooling_temperature=None, thermostat_controller=INJECTED): # type: (int, Optional[float], Optional[float], ThermostatControllerGateway) -> None logger.info( 'Setting setpoint from scheduler for thermostat {}: H{} C{}'. format(thermostat_number, heating_temperature, cooling_temperature)) thermostat = Thermostat.get(number=thermostat_number) active_preset = thermostat.active_preset # Only update when not in preset mode like away, party, ... if active_preset.type == Preset.Types.SCHEDULE: thermostat_controller.set_current_setpoint( thermostat_number=thermostat_number, heating_temperature=heating_temperature, cooling_temperature=cooling_temperature) else: logger.info( 'Thermostat is currently in preset mode, skipping update setpoint from scheduler.' 
) def get_thermostat_status(self): # type: () -> ThermostatGroupStatusDTO def get_output_level(output_number): if output_number is None: return 0 # we are returning 0 if outputs are not configured try: output = self._output_controller.get_output_status( output_number) except ValueError: logger.info( 'Output {0} state not yet available'.format(output_number)) return 0 # Output state is not yet cached (during startup) if output.dimmer is None: status_ = output.status output_level = 0 if status_ is None else int(status_) * 100 else: output_level = output.dimmer return output_level global_thermostat = ThermostatGroup.get(number=0) if global_thermostat is None: raise RuntimeError('Global thermostat not found!') group_status = ThermostatGroupStatusDTO( id=0, on=global_thermostat.on, automatic=True, # Default, will be updated below setpoint=0, # Default, will be updated below cooling=global_thermostat.mode == ThermostatMode.COOLING) for thermostat in global_thermostat.thermostats: valves = thermostat.cooling_valves if global_thermostat.mode == 'cooling' else thermostat.heating_valves db_outputs = [valve.output.number for valve in valves] number_of_outputs = len(db_outputs) if number_of_outputs > 2: logger.warning( 'Only 2 outputs are supported in the old format. Total: {0} outputs.' .format(number_of_outputs)) output0 = db_outputs[0] if number_of_outputs > 0 else None output1 = db_outputs[1] if number_of_outputs > 1 else None active_preset = thermostat.active_preset if global_thermostat.mode == ThermostatMode.COOLING: setpoint_temperature = active_preset.cooling_setpoint else: setpoint_temperature = active_preset.heating_setpoint group_status.statusses.append( ThermostatStatusDTO( id=thermostat.number, actual_temperature=self._gateway_api. get_sensor_temperature_status(thermostat.sensor), setpoint_temperature=setpoint_temperature, outside_temperature=self._gateway_api. 
get_sensor_temperature_status(global_thermostat.sensor), mode=0, # TODO: Need to be fixed automatic=active_preset.type == Preset.Types.SCHEDULE, setpoint=Preset.TYPE_TO_SETPOINT.get( active_preset.type, 0), name=thermostat.name, sensor_id=thermostat.sensor.number, airco=0, # TODO: Check if still used output_0_level=get_output_level(output0), output_1_level=get_output_level(output1))) # Update global references group_status.automatic = all(status.automatic for status in group_status.statusses) used_setpoints = set(status.setpoint for status in group_status.statusses) group_status.setpoint = next(iter(used_setpoints)) if len( used_setpoints) == 1 else 0 # 0 is a fallback return group_status def set_thermostat_mode(self, thermostat_on, cooling_mode=False, cooling_on=False, automatic=None, setpoint=None): # type: (bool, bool, bool, Optional[bool], Optional[int]) -> None mode = ThermostatMode.COOLING if cooling_mode else ThermostatMode.HEATING # type: Literal['cooling', 'heating'] global_thermosat = ThermostatGroup.get(number=0) global_thermosat.on = thermostat_on global_thermosat.mode = mode global_thermosat.save() for thermostat_number, thermostat_pid in self.thermostat_pids.items(): thermostat = Thermostat.get(number=thermostat_number) if thermostat is not None: if automatic is False and setpoint is not None and 3 <= setpoint <= 5: preset = thermostat.get_preset( preset_type=Preset.SETPOINT_TO_TYPE.get( setpoint, Preset.Types.SCHEDULE)) thermostat.active_preset = preset else: thermostat.active_preset = thermostat.get_preset( preset_type=Preset.Types.SCHEDULE) thermostat_pid.update_thermostat(thermostat) thermostat_pid.tick() def load_heating_thermostat(self, thermostat_id): # type: (int) -> ThermostatDTO thermostat = Thermostat.get(number=thermostat_id) return ThermostatMapper.orm_to_dto(thermostat, 'heating') def load_heating_thermostats(self): # type: () -> List[ThermostatDTO] return [ ThermostatMapper.orm_to_dto(thermostat, 'heating') for thermostat in Thermostat.select() ] def save_heating_thermostats( self, thermostats ): # type: (List[Tuple[ThermostatDTO, List[str]]]) -> None for thermostat_dto, fields in thermostats: thermostat = ThermostatMapper.dto_to_orm(thermostat_dto, fields, 'heating') self.refresh_set_configuration(thermostat) def load_cooling_thermostat(self, thermostat_id): # type: (int) -> ThermostatDTO thermostat = Thermostat.get(number=thermostat_id) return ThermostatMapper.orm_to_dto(thermostat, 'cooling') def load_cooling_thermostats(self): # type: () -> List[ThermostatDTO] return [ ThermostatMapper.orm_to_dto(thermostat, 'cooling') for thermostat in Thermostat.select() ] def save_cooling_thermostats( self, thermostats ): # type: (List[Tuple[ThermostatDTO, List[str]]]) -> None for thermostat_dto, fields in thermostats: thermostat = ThermostatMapper.dto_to_orm(thermostat_dto, fields, 'cooling') self.refresh_set_configuration(thermostat) def set_per_thermostat_mode(self, thermostat_number, automatic, setpoint): # type: (int, bool, float) -> None thermostat_pid = self.thermostat_pids.get(thermostat_number) if thermostat_pid is not None: thermostat = thermostat_pid.thermostat thermostat.automatic = automatic thermostat.save() preset = thermostat.active_preset if thermostat.thermostat_group.mode == ThermostatGroup.Modes.HEATING: preset.heating_setpoint = setpoint else: preset.cooling_setpoint = setpoint preset.save() thermostat_pid.update_thermostat(thermostat) thermostat_pid.tick() def load_thermostat_group(self): # type: () -> ThermostatGroupDTO thermostat_group = 
ThermostatGroup.get(number=0) pump_delay = None for thermostat in thermostat_group.thermostats: for valve in thermostat.valves: pump_delay = valve.delay break sensor_number = None if thermostat_group.sensor is None else thermostat_group.sensor.number thermostat_group_dto = ThermostatGroupDTO( id=0, outside_sensor_id=sensor_number, threshold_temperature=thermostat_group.threshold_temperature, pump_delay=pump_delay) for link in OutputToThermostatGroup.select(OutputToThermostatGroup, Output) \ .join_from(OutputToThermostatGroup, Output) \ .where(OutputToThermostatGroup.thermostat_group == thermostat_group): if link.index > 3 or link.output is None: continue field = 'switch_to_{0}_{1}'.format(link.mode, link.index) setattr(thermostat_group_dto, field, (link.output.number, link.value)) return thermostat_group_dto def save_thermostat_group(self, thermostat_group): # type: (Tuple[ThermostatGroupDTO, List[str]]) -> None thermostat_group_dto, fields = thermostat_group # Update thermostat group configuration orm_object = ThermostatGroup.get(number=0) # type: ThermostatGroup if 'outside_sensor_id' in fields: orm_object.sensor = Sensor.get( number=thermostat_group_dto.outside_sensor_id) if 'threshold_temperature' in fields: orm_object.threshold_temperature = thermostat_group_dto.threshold_temperature # type: ignore orm_object.save() # Link configuration outputs to global thermostat config for mode in ['cooling', 'heating']: links = { link.index: link for link in OutputToThermostatGroup.select().where( (OutputToThermostatGroup.thermostat_group == orm_object) & (OutputToThermostatGroup.mode == mode)) } for i in range(4): field = 'switch_to_{0}_{1}'.format(mode, i) if field not in fields: continue link = links.get(i) data = getattr(thermostat_group_dto, field) if data is None: if link is not None: link.delete_instance() else: output_number, value = data output = Output.get(number=output_number) if link is None: OutputToThermostatGroup.create( output=output, thermostat_group=orm_object, mode=mode, index=i, value=value) else: link.output = output link.value = value link.save() if 'pump_delay' in fields: # Set valve delay for all valves in this group for thermostat in orm_object.thermostats: for valve in thermostat.valves: valve.delay = thermostat_group_dto.pump_delay # type: ignore valve.save() def load_heating_pump_group(self, pump_group_id): # type: (int) -> PumpGroupDTO pump = Pump.get(number=pump_group_id) return PumpGroupDTO(id=pump_group_id, pump_output_id=pump.output.number, valve_output_ids=[ valve.output.number for valve in pump.heating_valves ], room_id=None) def load_heating_pump_groups(self): # type: () -> List[PumpGroupDTO] pump_groups = [] for pump in Pump.select(): pump_groups.append( PumpGroupDTO(id=pump.id, pump_output_id=pump.output.number, valve_output_ids=[ valve.output.number for valve in pump.heating_valves ], room_id=None)) return pump_groups def save_heating_pump_groups( self, pump_groups ): # type: (List[Tuple[PumpGroupDTO, List[str]]]) -> None return ThermostatControllerGateway._save_pump_groups( ThermostatGroup.Modes.HEATING, pump_groups) def load_cooling_pump_group(self, pump_group_id): # type: (int) -> PumpGroupDTO pump = Pump.get(number=pump_group_id) return PumpGroupDTO(id=pump_group_id, pump_output_id=pump.output.number, valve_output_ids=[ valve.output.number for valve in pump.cooling_valves ], room_id=None) def load_cooling_pump_groups(self): # type: () -> List[PumpGroupDTO] pump_groups = [] for pump in Pump.select(): pump_groups.append( PumpGroupDTO(id=pump.id, 
pump_output_id=pump.output.number, valve_output_ids=[ valve.output.number for valve in pump.cooling_valves ], room_id=None)) return pump_groups def save_cooling_pump_groups( self, pump_groups ): # type: (List[Tuple[PumpGroupDTO, List[str]]]) -> None return ThermostatControllerGateway._save_pump_groups( ThermostatGroup.Modes.COOLING, pump_groups) @staticmethod def _save_pump_groups( mode, pump_groups ): # type: (str, List[Tuple[PumpGroupDTO, List[str]]]) -> None for pump_group_dto, fields in pump_groups: if 'pump_output_id' in fields and 'valve_output_ids' in fields: valve_output_ids = pump_group_dto.valve_output_ids pump = Pump.get(id=pump_group_dto.id) # type: Pump pump.output = Output.get(number=pump_group_dto.pump_output_id) links = { pump_to_valve.valve.output.number: pump_to_valve for pump_to_valve in PumpToValve.select( PumpToValve, Pump, Valve, Output).join_from( PumpToValve, Valve).join_from( PumpToValve, Pump).join_from(Valve, Output). join_from(Valve, ValveToThermostat).where(( ValveToThermostat.mode == mode) & (Pump.id == pump.id)) } for output_id in list(links.keys()): if output_id not in valve_output_ids: pump_to_valve = links.pop( output_id) # type: PumpToValve pump_to_valve.delete_instance() else: valve_output_ids.remove(output_id) for output_id in valve_output_ids: output = Output.get(number=output_id) valve = Valve.get_or_none(output=output) if valve is None: valve = Valve(name=output.name, output=output) valve.save() PumpToValve.create(pump=pump, valve=valve) def load_global_rtd10(self): # type: () -> GlobalRTD10DTO raise UnsupportedException() def refresh_set_configuration(self, thermostat): # type: (Thermostat) -> None thermostat_pid = self.thermostat_pids.get(thermostat.number) if thermostat_pid is not None: thermostat_pid.update_thermostat(thermostat) else: thermostat_pid = ThermostatPid(thermostat, self._pump_valve_controller) self.thermostat_pids[thermostat.number] = thermostat_pid self._sync_scheduler() thermostat_pid.tick() def _thermostat_changed(self, thermostat_number, active_preset, current_setpoint, actual_temperature, percentages, room): # type: (int, str, float, Optional[float], List[float], int) -> None location = {'room_id': room} gateway_event = GatewayEvent( GatewayEvent.Types.THERMOSTAT_CHANGE, { 'id': thermostat_number, 'status': { 'preset': active_preset, 'current_setpoint': current_setpoint, 'actual_temperature': actual_temperature, 'output_0': percentages[0] if len(percentages) >= 1 else None, 'output_1': percentages[1] if len(percentages) >= 2 else None }, 'location': location }) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE, gateway_event) def _thermostat_group_changed(self, thermostat_group): # type: (ThermostatGroup) -> None gateway_event = GatewayEvent( GatewayEvent.Types.THERMOSTAT_GROUP_CHANGE, { 'id': 0, 'status': { 'state': 'ON' if thermostat_group.on else 'OFF', 'mode': 'COOLING' if thermostat_group.mode == 'cooling' else 'HEATING' }, 'location': {} }) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE, gateway_event) # Obsolete unsupported calls def save_global_rtd10( self, rtd10): # type: (Tuple[GlobalRTD10DTO, List[str]]) -> None raise UnsupportedException() def load_heating_rtd10(self, rtd10_id): # type: (int) -> RTD10DTO raise UnsupportedException() def load_heating_rtd10s(self): # type: () -> List[RTD10DTO] raise UnsupportedException() def save_heating_rtd10s( self, rtd10s): # type: (List[Tuple[RTD10DTO, List[str]]]) -> None raise UnsupportedException() def load_cooling_rtd10(self, rtd10_id): # type: (int) 
-> RTD10DTO raise UnsupportedException() def load_cooling_rtd10s(self): # type: () -> List[RTD10DTO] raise UnsupportedException() def save_cooling_rtd10s( self, rtd10s): # type: (List[Tuple[RTD10DTO, List[str]]]) -> None raise UnsupportedException() def set_airco_status(self, thermostat_id, airco_on): raise UnsupportedException() def load_airco_status(self): raise UnsupportedException()
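# Illustrative sketch (editorial addition, not part of the controller): the
# THERMOSTAT_CHANGE event built in _thermostat_changed above carries a payload of
# the shape below. The values are made up for illustration; only the keys and
# nesting mirror the code above (output_0/output_1 fall back to None when fewer
# than one/two valve percentages are available).
EXAMPLE_THERMOSTAT_CHANGE_PAYLOAD = {
    'id': 3,  # thermostat number
    'status': {
        'preset': 'AUTO',
        'current_setpoint': 21.5,
        'actual_temperature': 20.9,
        'output_0': 100.0,  # first valve percentage, or None
        'output_1': None    # second valve percentage, or None
    },
    'location': {'room_id': 5}
}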
class ShutterController(BaseController): """ Controls everything related to shutters. Important assumptions: * A shutter can go UP and go DOWN * A shutter that is UP is considered open and has a position of 0 * A shutter that is DOWN is considered closed and has a position of `steps` # TODO: The states OPEN and CLOSED make more sense but is a reasonable heavy change at this moment. To be updated if/when a new Gateway API is introduced """ SYNC_STRUCTURES = [ SyncStructure(Shutter, 'shutter'), SyncStructure(ShutterGroup, 'shutter_group') ] DIRECTION_STATE_MAP = { ShutterEnums.Direction.UP: ShutterEnums.State.GOING_UP, ShutterEnums.Direction.DOWN: ShutterEnums.State.GOING_DOWN, ShutterEnums.Direction.STOP: ShutterEnums.State.STOPPED } DIRECTION_END_STATE_MAP = { ShutterEnums.Direction.UP: ShutterEnums.State.UP, ShutterEnums.Direction.DOWN: ShutterEnums.State.DOWN, ShutterEnums.Direction.STOP: ShutterEnums.State.STOPPED } STATE_DIRECTION_MAP = { ShutterEnums.State.GOING_UP: ShutterEnums.Direction.UP, ShutterEnums.State.GOING_DOWN: ShutterEnums.Direction.DOWN, ShutterEnums.State.STOPPED: ShutterEnums.Direction.STOP } TIME_BASED_SHUTTER_STEPS = 100 SINGLE_ACTION_ACCURACY_LOSS_PERCENTAGE = 20 MIN_POSITION_TIMER_SHUTTER = 2 @Inject def __init__(self, master_controller=INJECTED, verbose=False): # type: (MasterController, bool) -> None super(ShutterController, self).__init__(master_controller) self._shutters = {} # type: Dict[int, ShutterDTO] self._actual_positions = {} # type: Dict[int, Optional[int]] self._desired_positions = {} # type: Dict[int, Optional[int]] self._directions = {} # type: Dict[int, str] self._states = {} # type: Dict[int, Tuple[float, str]] self._position_accuracy = {} # type: Dict[int, float] self._verbose = verbose self._config_lock = Lock() self._sync_state_thread = None # type: Optional[DaemonThread] self._pubsub.subscribe_master_events(PubSub.MasterTopics.SHUTTER, self._handle_master_event) # Update internal shutter configuration cache def start(self): # type: () -> None super(ShutterController, self).start() self._sync_state_thread = DaemonThread(name='shuttersyncstate', target=self._sync_state, interval=600, delay=10) self._sync_state_thread.start() def stop(self): # type: () -> None super(ShutterController, self).stop() if self._sync_state_thread: self._sync_state_thread.stop() self._sync_state_thread = None def _sync_state(self): # this is not syncing the shutter state with the master, but is used to publish the state periodically for shutter_id, shutter_dto in self._shutters.items(): try: self._publish_shutter_state(shutter_id, shutter_dto, self._states[shutter_id]) except KeyError: logger.error( 'No state found for shutter {}'.format(shutter_id)) def _handle_master_event(self, event): # type: (MasterEvent) -> None super(ShutterController, self)._handle_master_event(event) if event.type == MasterEvent.Types.SHUTTER_CHANGE: self._report_shutter_state(event.data['id'], event.data['status']) def _sync_orm(self): super(ShutterController, self)._sync_orm() try: self.update_config(self.load_shutters()) except CommunicationTimedOutException as ex: logger.error('ORM sync (Shutter config): Failed: {0}'.format(ex)) except Exception: logger.exception('ORM sync (Shutter config): Failed') def update_config(self, config): # type: (List[ShutterDTO]) -> None with self._config_lock: shutter_ids = [] for shutter_dto in config: shutter_id = shutter_dto.id shutter_ids.append(shutter_id) if shutter_dto != self._shutters.get(shutter_id): self._shutters[shutter_id] = shutter_dto 
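# Note (editorial): a changed ShutterDTO resets all cached runtime state for that shutter below: last known state, actual/desired position, direction and position accuracy start from a clean slate.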
self._states[shutter_id] = (0.0, ShutterEnums.State.STOPPED) self._actual_positions[shutter_id] = None self._desired_positions[shutter_id] = None self._directions[shutter_id] = ShutterEnums.Direction.STOP self._position_accuracy[ shutter_id] = 100 if shutter_dto.steps else 0 for shutter_id in list(self._shutters.keys()): if shutter_id not in shutter_ids: del self._shutters[shutter_id] del self._states[shutter_id] del self._actual_positions[shutter_id] del self._desired_positions[shutter_id] del self._directions[shutter_id] del self._position_accuracy[shutter_id] # Allow shutter positions to be reported def report_shutter_position(self, shutter_id, position, direction=None): # type: (int, int, Optional[str]) -> None logger.debug('Shutter {0} reports position {1}'.format( shutter_id, position)) # Fetch and validate information shutter = self._get_shutter(shutter_id) steps = ShutterController._get_steps(shutter) ShutterController._validate_position(shutter_id, position, steps) # Store new position self._actual_positions[shutter_id] = position # Update the direction and report if changed expected_direction = self._directions[shutter_id] if direction is not None and expected_direction != direction: # We received a more accurate direction logger.debug('Shutter {0} report direction change to {1}'.format( shutter_id, direction)) self._report_shutter_state( shutter_id, ShutterController.DIRECTION_STATE_MAP[direction]) direction = self._directions[shutter_id] desired_position = self._desired_positions[shutter_id] if desired_position is None: return if ShutterController._is_position_reached(direction, desired_position, position, stopped=True): logger.debug( 'Shutter {0} reported position is desired position: Stopping'. format(shutter_id)) self.shutter_stop(shutter_id) def report_shutter_lost_position(self, shutter_id): # type: (int) -> None logger.debug('Shutter {0} reports lost position'.format(shutter_id)) # Clear position & force report self._actual_positions[shutter_id] = None self._report_shutter_state(shutter_id, ShutterEnums.State.STOPPED, force_report=True) # Configure shutters def load_shutter(self, shutter_id): # type: (int) -> ShutterDTO shutter = Shutter.select(Room) \ .join_from(Shutter, Room, join_type=JOIN.LEFT_OUTER) \ .where(Shutter.number == shutter_id) \ .get() # type: Shutter shutter_dto = self._master_controller.load_shutter( shutter_id=shutter_id) # TODO: Load dict shutter_dto.room = shutter.room.number if shutter.room is not None else None return shutter_dto def load_shutters(self): # type: () -> List[ShutterDTO] shutter_dtos = [] for shutter in list( Shutter.select(Shutter, Room).join_from( Shutter, Room, join_type=JOIN.LEFT_OUTER)): # TODO: Load dicts shutter_dto = self._master_controller.load_shutter( shutter_id=shutter.number) shutter_dto.room = shutter.room.number if shutter.room is not None else None shutter_dtos.append(shutter_dto) return shutter_dtos def save_shutters(self, shutters): # type: (List[ShutterDTO]) -> None shutters_to_save = [] for shutter_dto in shutters: shutter = Shutter.get_or_none( number=shutter_dto.id) # type: Shutter if shutter is None: logger.info('Ignored saving non-existing Shutter {0}'.format( shutter_dto.id)) continue if 'room' in shutter_dto.loaded_fields: if shutter_dto.room is None: shutter.room = None elif 0 <= shutter_dto.room <= 100: shutter.room, _ = Room.get_or_create( number=shutter_dto.room) shutter.save() shutters_to_save.append(shutter_dto) self._master_controller.save_shutters(shutters_to_save) self.update_config(self.load_shutters()) def
load_shutter_group(self, group_id): # type: (int) -> ShutterGroupDTO shutter_group = ShutterGroup.select(Room) \ .join_from(ShutterGroup, Room, join_type=JOIN.LEFT_OUTER) \ .where(ShutterGroup.number == group_id) \ .get() # type: ShutterGroup shutter_group_dto = self._master_controller.load_shutter_group( shutter_group_id=group_id) # TODO: Load dict shutter_group_dto.room = shutter_group.room.number if shutter_group.room is not None else None return shutter_group_dto def load_shutter_groups(self): # type: () -> List[ShutterGroupDTO] shutter_group_dtos = [] for shutter_group in list( ShutterGroup.select(ShutterGroup, Room).join_from( ShutterGroup, Room, join_type=JOIN.LEFT_OUTER)): # TODO: Load dicts shutter_group_dto = self._master_controller.load_shutter_group( shutter_group_id=shutter_group.number) shutter_group_dto.room = shutter_group.room.number if shutter_group.room is not None else None shutter_group_dtos.append(shutter_group_dto) return shutter_group_dtos def save_shutter_groups( self, shutter_groups): # type: (List[ShutterGroupDTO]) -> None shutter_groups_to_save = [] for shutter_group_dto in shutter_groups: shutter_group = ShutterGroup.get_or_none( number=shutter_group_dto.id) # type: ShutterGroup if shutter_group is None: continue if 'room' in shutter_group_dto.loaded_fields: if shutter_group_dto.room is None: shutter_group.room = None elif 0 <= shutter_group_dto.room <= 100: shutter_group.room, _ = Room.get_or_create( number=shutter_group_dto.room) shutter_group.save() shutter_groups_to_save.append(shutter_group_dto) self._master_controller.save_shutter_groups(shutter_groups_to_save) # Control shutters def shutter_group_down(self, group_id): # type: (int) -> None self._master_controller.shutter_group_down(group_id) def shutter_group_up(self, group_id): # type: (int) -> None self._master_controller.shutter_group_up(group_id) def shutter_group_stop(self, group_id): # type: (int) -> None self._master_controller.shutter_group_stop(group_id) def shutter_up( self, shutter_id, desired_position=None): # type: (int, Optional[int]) -> None return self._shutter_goto_direction(shutter_id, ShutterEnums.Direction.UP, desired_position) def shutter_down( self, shutter_id, desired_position=None): # type: (int, Optional[int]) -> None return self._shutter_goto_direction(shutter_id, ShutterEnums.Direction.DOWN, desired_position) def shutter_goto(self, shutter_id, desired_position): # type: (int, int) -> None # Fetch and validate data shutter = self._get_shutter(shutter_id) steps = ShutterController._get_steps(shutter) timer = None if steps is None: ShutterController._validate_position(shutter_id, desired_position, self.TIME_BASED_SHUTTER_STEPS) timer = self._calculate_shutter_timer(shutter_id, desired_position) else: ShutterController._validate_position(shutter_id, desired_position, steps) actual_position = self._actual_positions.get(shutter_id) if actual_position is None: raise RuntimeError( 'Shutter {0} has unknown actual position'.format(shutter_id)) old_desired_position = self._desired_positions[shutter_id] direction = self._get_direction(actual_position, desired_position) self._directions[shutter_id] = direction logger.debug('Shutter {0} setting desired position to {1}'.format( shutter_id, desired_position)) self._desired_positions[shutter_id] = desired_position if timer is not None and desired_position == old_desired_position and timer < self.MIN_POSITION_TIMER_SHUTTER: # this is path where timers are used, and we avoid too much cumulative error by not repeating the actions logger.warning( 
'Shutter {0} skipping shutter action as timer < {1} seconds ({2:.2f}s)' .format(shutter_id, self.MIN_POSITION_TIMER_SHUTTER, timer)) else: self._execute_shutter(shutter_id, direction, timer=timer) def shutter_stop(self, shutter_id): # type: (int) -> None # Validate data self._get_shutter(shutter_id) logger.debug('Shutter {0} stopped. Removing desired position'.format( shutter_id)) self._desired_positions[shutter_id] = None self._directions[shutter_id] = ShutterEnums.Direction.STOP self._execute_shutter(shutter_id, ShutterEnums.Direction.STOP) # Control operations def _shutter_goto_direction(self, shutter_id, direction, desired_position=None): # type: (int, str, Optional[int]) -> None # Fetch and validate data timer = None shutter = self._get_shutter(shutter_id) steps = ShutterController._get_steps(shutter) if desired_position is None: if steps is None: desired_position = ShutterController._get_limit( direction, self.TIME_BASED_SHUTTER_STEPS) else: desired_position = ShutterController._get_limit( direction, steps) else: if steps is None: # we use a percentage (steps=100) to mimic the steps timer = self._calculate_shutter_timer(shutter_id, desired_position) else: ShutterController._validate_position(shutter_id, desired_position, steps) logger.debug('Shutter {0} setting direction to {1} {2}'.format( shutter_id, direction, 'without position' if desired_position is None else 'with position {0}'.format(desired_position))) old_desired_position = self._desired_positions[shutter_id] self._directions[shutter_id] = direction logger.debug('Shutter {0} setting desired position to {1}'.format( shutter_id, desired_position)) self._desired_positions[shutter_id] = desired_position if timer is not None and desired_position == old_desired_position and timer < self.MIN_POSITION_TIMER_SHUTTER: # this is path where timers are used, and we avoid too much cumulative error by not repeating the actions logger.warning( 'Shutter {0} skipping shutter action as timer < {1} seconds ({2:.2f}s)' .format(shutter_id, self.MIN_POSITION_TIMER_SHUTTER, timer)) else: self._execute_shutter(shutter_id, direction, timer=timer) def _calculate_shutter_timer(self, shutter_id, desired_position): ShutterController._validate_position(shutter_id, desired_position) actual_position = self._actual_positions.get(shutter_id) if actual_position is None or self._position_accuracy[shutter_id] <= 0: self.reset_shutter(shutter_id) actual_position = self._actual_positions.get(shutter_id) if actual_position is None: raise RuntimeError( 'Shutter {0} has unknown actual position'.format( shutter_id)) if self._position_accuracy[shutter_id] <= 0: raise RuntimeError( 'Could not get accurate position for shutter {}'.format( shutter_id)) ShutterController._validate_position(shutter_id, desired_position) shutter = self._get_shutter(shutter_id) delta_position = desired_position - actual_position direction = self._get_direction(actual_position, desired_position) if direction == ShutterEnums.Direction.STOP: return 0 else: configured_timer = getattr(shutter, 'timer_{0}'.format(direction.lower())) return int( abs(delta_position) / float(self.TIME_BASED_SHUTTER_STEPS - 1) * configured_timer) def _execute_shutter( self, shutter_id, direction, timer=None): # type: (int, str, Optional[int]) -> None logger.debug('_execute_shutter({}, {}, timer={})'.format( shutter_id, direction, timer)) if direction == ShutterEnums.Direction.STOP or timer == 0: self._master_controller.shutter_stop(shutter_id) else: if direction == ShutterEnums.Direction.UP: 
self._master_controller.shutter_up(shutter_id, timer=timer) elif direction == ShutterEnums.Direction.DOWN: self._master_controller.shutter_down(shutter_id, timer=timer) def reset_shutter(self, shutter_id): # type: (int) -> None # reset shutter to known state logger.debug('reset_shutter({})'.format(shutter_id)) shutter = self._get_shutter(shutter_id) configured_timer = getattr(shutter, 'timer_up') start = time.time() self._execute_shutter(shutter_id, ShutterEnums.Direction.UP) # TODO: https://openmotics.atlassian.net/browse/OM-2026 while self._actual_positions[shutter_id] != 0: if time.time() - start > configured_timer * 1.1: raise RuntimeError( 'Timer expired when resetting shutter {}, could not get actual position' .format(shutter_id)) time.sleep(1) self._position_accuracy[shutter_id] = 100 logger.info('shutter {} reset complete'.format(shutter_id)) # Internal checks and validators def _get_shutter( self, shutter_id, return_none=False): # type: (int, bool) -> Optional[ShutterDTO] shutter = self._shutters.get(shutter_id) if shutter is None: self.update_config(self.load_shutters()) shutter = self._shutters.get(shutter_id) if shutter is None and return_none is False: raise RuntimeError( 'Shutter {0} is not available'.format(shutter_id)) return shutter @staticmethod def _is_position_reached(direction, desired_position, actual_position, stopped=True): # type: (str, int, int, bool) -> bool if desired_position == actual_position: return True # Obviously reached if direction == ShutterEnums.Direction.STOP: return stopped # Can't be decided, so return user value # An overshoot is considered as "position reached" if direction == ShutterEnums.Direction.UP: return actual_position < desired_position return actual_position > desired_position @staticmethod def _get_limit(direction, steps): # type: (str, Optional[int]) -> Optional[int] if steps is None: return None if direction == ShutterEnums.Direction.UP: return 0 return steps - 1 @staticmethod def _get_direction(actual_position, desired_position): # type: (int, int) -> str if actual_position == desired_position: return ShutterEnums.Direction.STOP if actual_position > desired_position: return ShutterEnums.Direction.UP return ShutterEnums.Direction.DOWN @staticmethod def _get_steps(shutter): # type: (ShutterDTO) -> Optional[int] steps = shutter.steps if steps in [0, 1, None]: # These step values are considered "not configured" and thus "no position support" return None return steps @staticmethod def clamp_position(shutter, position): # type: (ShutterDTO, int) -> int steps = ShutterController._get_steps(shutter) max_position = steps - 1 if steps is not None else ShutterController.TIME_BASED_SHUTTER_STEPS - 1 return max(0, min(position, max_position)) @staticmethod def _validate_position(shutter_id, position, steps=TIME_BASED_SHUTTER_STEPS ): # type: (int, int, Optional[int]) -> None if steps is None: steps = ShutterController.TIME_BASED_SHUTTER_STEPS if not (0 <= position < steps): raise RuntimeError( 'Shutter {0} has a position limit of 0 <= position <= {1}'. 
format(shutter_id, steps - 1)) # Reporting def _report_shutter_state(self, shutter_id, new_state, force_report=False): # type: (int, str, bool) -> None now = time.time() shutter = self._get_shutter(shutter_id, return_none=True) if shutter is None: logger.warning('Shutter {0} unknown'.format(shutter_id)) return self._directions[shutter_id] = ShutterController.STATE_DIRECTION_MAP[ new_state] logger.debug( 'Shutter {0} reports state {1}, which is direction {2}'.format( shutter_id, new_state, self._directions[shutter_id])) current_state_timestamp, current_state = self._states[shutter_id] if new_state == current_state or ( new_state == ShutterEnums.State.STOPPED and current_state in [ShutterEnums.State.DOWN, ShutterEnums.State.UP]): if force_report: logger.debug('Shutter {0} force reported new state {1}'.format( shutter_id, new_state)) self._states[shutter_id] = (time.time(), new_state) self._publish_shutter_state(shutter_id, shutter, self._states[shutter_id]) else: logger.debug( 'Shutter {0} new state {1} ignored since it equals {2}'. format(shutter_id, new_state, current_state)) return # State didn't change, nothing to do if new_state != ShutterEnums.State.STOPPED: # Shutter started moving self._states[shutter_id] = (now, new_state) logger.debug('Shutter {0} started moving'.format(shutter_id)) else: direction = ShutterController.STATE_DIRECTION_MAP[current_state] steps = ShutterController._get_steps(shutter) if steps is None: # Time based state calculation timer = getattr(shutter, 'timer_{0}'.format(direction.lower())) if timer is None: logger.debug( 'Shutter {0} is time-based but has no valid timer. New state {1}' .format(shutter_id, ShutterEnums.State.STOPPED)) new_state = ShutterEnums.State.STOPPED else: elapsed_time = now - current_state_timestamp threshold_timer = 0.90 * timer # Allow 10% difference if elapsed_time >= threshold_timer: # The shutter was going up/down for the whole `timer`. So it's now up/down logger.info( 'Shutter {0} going {1} for {2:.2f}s passed time threshold. New state {3}' .format( shutter_id, direction, elapsed_time, ShutterController. DIRECTION_END_STATE_MAP[direction])) new_state = ShutterController.DIRECTION_END_STATE_MAP[ direction] new_actual_position = 0 if direction == ShutterEnums.Direction.UP else self.TIME_BASED_SHUTTER_STEPS - 1 self._actual_positions[ shutter_id] = ShutterController.clamp_position( shutter, new_actual_position) self._position_accuracy[shutter_id] = 100 else: new_state = ShutterEnums.State.STOPPED abs_position_delta = int( round(elapsed_time / float(timer) * self.TIME_BASED_SHUTTER_STEPS)) position_delta = -abs_position_delta if direction == ShutterEnums.Direction.UP else abs_position_delta actual_position = self._actual_positions[shutter_id] if actual_position is not None: new_actual_position = actual_position + position_delta self._actual_positions[ shutter_id] = ShutterController.clamp_position( shutter, new_actual_position) self._position_accuracy[ shutter_id] = self._position_accuracy.get( shutter_id, 0 ) - self.SINGLE_ACTION_ACCURACY_LOSS_PERCENTAGE else: self._position_accuracy[shutter_id] = 0 logger.info( 'Shutter {0} going {1} for {2} steps ({3:.2f}s). New state {4}. ' 'Actual position: {5}. Position accuracy: {6}'.
format(shutter_id, direction, position_delta, elapsed_time, new_state, self._actual_positions[shutter_id], self._position_accuracy[shutter_id])) else: # Supports position, so state will be calculated on position limit_position = ShutterController._get_limit(direction, steps) if ShutterController._is_position_reached( direction, limit_position, self._actual_positions[shutter_id]): logger.debug( 'Shutter {0} going {1} reached limit. New state {2}'. format( shutter_id, direction, ShutterController. DIRECTION_END_STATE_MAP[direction])) new_state = ShutterController.DIRECTION_END_STATE_MAP[ direction] else: logger.debug( 'Shutter {0} going {1} did not reach limit. New state {2}' .format(shutter_id, direction, ShutterEnums.State.STOPPED)) new_state = ShutterEnums.State.STOPPED self._states[shutter_id] = (now, new_state) self._publish_shutter_state(shutter_id, shutter, self._states[shutter_id]) def get_states(self): # type: () -> Dict[str, Any] all_states = [] for i in sorted(self._states.keys()): all_states.append(self._states[i][1]) return { 'status': all_states, 'detail': { shutter_id: { 'state': self._states[shutter_id][1], 'actual_position': self._actual_positions[shutter_id], 'desired_position': self._desired_positions[shutter_id], 'last_change': self._states[shutter_id][0] } for shutter_id in self._shutters } } def _publish_shutter_state( self, shutter_id, shutter_data, shutter_state ): # type: (int, ShutterDTO, Tuple[float, str]) -> None gateway_event = GatewayEvent( event_type=GatewayEvent.Types.SHUTTER_CHANGE, data={ 'id': shutter_id, 'status': { 'state': shutter_state[1].upper(), 'position': self._actual_positions.get(shutter_id), 'last_change': shutter_state[0] }, 'location': { 'room_id': Toolbox.nonify(shutter_data.room, 255) } }) logger.debug('_publish_shutter_change: {}'.format(gateway_event)) self._pubsub.publish_gateway_event(PubSub.GatewayTopics.STATE, gateway_event)
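# Illustrative sketch (editorial addition, assuming standalone helpers rather than
# ShutterController methods): position 0 means fully UP/open, `steps - 1` means fully
# DOWN/closed, and time-based shutters emulate TIME_BASED_SHUTTER_STEPS (100) virtual
# positions. The travel time for a positional move is proportional to the distance
# over the full range, mirroring the formula used in _calculate_shutter_timer above.
def example_timer_for_move(actual_position, desired_position, configured_timer, steps=100):
    # type: (int, int, int, int) -> int
    delta = abs(desired_position - actual_position)
    return int(delta / float(steps - 1) * configured_timer)

# Example: moving from position 25 to 75 on a shutter with a 30s travel timer yields
# int(50 / 99.0 * 30) == 15 seconds before the shutter is stopped again.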