def exec_auto_pwron(self, pwr_key):
    """Launch auto_pwron in thread

    params:
        pwr_key:str, The tty dev the user is connecting to
    """
    pwr_key_pretty = pwr_key.replace("/dev/", "").replace("/host/", "")
    if pwr_key in config.outlets["linked"]:  # verify against config here as pwr may not be completely init
        _msg = (
            f"Ensuring {pwr_key_pretty} Linked Outlets ({' '.join(config.outlets['linked'][pwr_key])}) "
            "are Powered \033[1;32mON\033[0m"
        )
        _dots = "-" * (len(_msg) + 4)
        _msg = f"\n{_dots}\n  {_msg}  \n{_dots}\n"  # TODO send to formatter in menu ... __init__
        print(_msg)

        threading.Thread(
            target=self.auto_pwron_thread,
            args=(pwr_key,),
            name="auto_pwr_on_" + pwr_key_pretty,
        ).start()
        log.debug(
            "[AUTO PWRON] Active Threads: {}".format(
                [t.name for t in threading.enumerate() if t.name != "MainThread"]
            )
        )
def tasmota_req(**kwargs):
    # url and headers come from the enclosing scope (this is a nested helper)
    querystring = kwargs['querystring']
    try:
        response = requests.request("GET", url, headers=headers,
                                    params=querystring, timeout=config.so_timeout)
        if response.status_code == 200:
            if json.loads(response.text)['POWER'] == 'ON':
                _response = True
            elif json.loads(response.text)['POWER'] == 'OFF':
                _response = False
            else:
                _response = 'invalid state returned {}'.format(response.text)
        else:
            _response = '[{}] error returned {}'.format(response.status_code, response.text)
    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
        _response = 'Unreachable'
    except requests.exceptions.RequestException as e:
        log.debug(
            f"[tasmota_req] {url.replace('http://', '').replace('https://', '').split('/')[0]} Exception: {e}"
        )
        _response = 'Unreachable ~ hit catchall exception handler'  # determine if other exception types are possible
    return _response
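# A minimal standalone sketch of the Tasmota HTTP API this helper wraps.
# /cm?cmnd=Power is Tasmota's documented command endpoint; the device
# address and timeout below are illustrative, not from this repo:
#
#   import requests
#
#   def tasmota_power_state(address: str, timeout: int = 3) -> bool:
#       '''Return True if the Tasmota relay reports ON, False if OFF.'''
#       resp = requests.get(f"http://{address}/cm", params={"cmnd": "Power"}, timeout=timeout)
#       resp.raise_for_status()
#       return resp.json()["POWER"] == "ON"   # device replies {"POWER": "ON"} / {"POWER": "OFF"}
#
#   # tasmota_power_state("192.168.1.50")  ->  True / False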
def esphome_req(*args, command=command):
    '''sub function to perform operation on outlet
    '''
    try:
        method = "GET" if command is None else "POST"
        response = requests.request(method, url=url, headers=headers, timeout=config.so_timeout)
        if response.status_code == 200:
            if command is None:
                _response = response.json().get('value')
            else:  # espHome returns status Code 200 with no content for /toggle
                if command in ['toggle', 'cycle']:
                    _response = not cur_state
                else:
                    _response = command  # success return bool command
        else:
            _response = '[{}] error returned {}'.format(response.status_code, response.text)
    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
        _response = 'Unreachable'
    except requests.exceptions.RequestException as e:
        log.debug(
            f"[esphome_req] {url.replace('http://', '').replace('https://', '').split('/')[0]} Exception: {e}"
        )
        _response = 'Unreachable'  # So I can determine if other exception types are possible when unreachable
    return _response
def trigger_cloud_update(self):
    local = self.cpi.local
    remotes = self.cpi.remotes
    log.info('[MDNS REG] Cloud Update triggered delaying {} seconds'.format(UPDATE_DELAY))
    time.sleep(UPDATE_DELAY)  # Wait UPDATE_DELAY seconds before updating, to accommodate multiple add/removes

    data = local.build_local_dict(refresh=True)
    for a in local.data[local.hostname].get('adapters', {}):
        if 'udev' in local.data[local.hostname]['adapters'][a]:
            del local.data[local.hostname]['adapters'][a]['udev']
    log.debug(f'[MDNS REG] Final Data set collected for {local.hostname}: \n{json.dumps(data)}')

    remote_consoles = {}
    if config.cloud_svc == 'gdrive':  # pylint: disable=maybe-no-member
        cloud = GoogleDrive(local.hostname)
        remote_consoles = cloud.update_files(data)  # only call update under the gdrive branch; cloud is undefined otherwise

    # Send remotes learned from cloud file to local cache
    if len(remote_consoles) > 0 and 'Gdrive-Error' not in remote_consoles:
        remotes.update_local_cloud_file(remote_consoles)
        log.info('[MDNS REG] Cloud Update Completed, Found {} Remote ConsolePis'.format(len(remote_consoles)))
    else:
        log.warning('[MDNS REG] Cloud Update Completed, No remotes found, or Error Occurred')
def get_credentials(self):
    '''Get credentials for google drive / sheets

    Returns:
        Object -- Credentials Object
    '''
    log.debug('[GDRIVE]: -->get_credentials() {}'.format(log.name))
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    os.chdir('/etc/ConsolePi/cloud/gdrive/')
    if os.path.exists('.credentials/token.pickle'):
        with open('.credentials/token.pickle', 'rb') as token:
            creds = pickle.load(token)

    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                '.credentials/credentials.json', SCOPES)
            creds = flow.run_local_server()
        # Save the credentials for the next run
        with open('.credentials/token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    return creds
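# A minimal sketch of how the returned credentials are typically consumed.
# googleapiclient's build() and files().list() are the real Drive v3 API;
# the 'gdrive' instance name and the listing call are illustrative only:
#
#   from googleapiclient.discovery import build
#
#   creds = gdrive.get_credentials()   # gdrive: hypothetical instance of this class
#   service = build('drive', 'v3', credentials=creds)
#   results = service.files().list(pageSize=10, fields="files(id, name)").execute()
#   for f in results.get('files', []):
#       print(f['name'], f['id'])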
def get_adapters_via_api(self, ip: str, port: int = 5000, rename: bool = False, log_host: str = None):
    """Send RESTful GET request to Remote ConsolePi to collect adapter info

    params:
        ip(str): ip address or FQDN of remote ConsolePi
        port(int): port the remote API is listening on (default: 5000)
        rename(bool): TODO
        log_host(str): friendly string for logging purposes "hostname(ip)"

    returns:
        adapter dict for remote if successful and adapters exist.
        status_code 200 if successful but no adapters.
        False, or the response status_code, if an error occurred.
    """
    if not log_host:
        log_host = ip
    url = f"http://{ip}:{port}/api/v1.0/adapters"
    if rename:
        url = f"{url}?refresh=true"
    log.debug(url)

    headers = {
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": f"{ip}:{port}",
        "accept-encoding": "gzip, deflate",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    try:
        response = requests.request("GET", url, headers=headers, timeout=config.remote_timeout)
    except (OSError, TimeoutError):
        log.warning(
            f"[API RQST OUT] Remote ConsolePi: {log_host} TimeOut when querying via API - Unreachable."
        )
        return False

    if response.ok:
        ret = response.json()
        ret = ret["adapters"] if ret["adapters"] else response.status_code
        _msg = f"Adapters Successfully retrieved via API for Remote ConsolePi: {log_host}"
        log.info("[API RQST OUT] {}".format(_msg))
        log.debugv("[API RQST OUT] Response: \n{}".format(
            json.dumps(ret, indent=4, sort_keys=True)))
    else:
        ret = response.status_code
        log.error(
            f"[API RQST OUT] Failed to retrieve adapters via API for Remote ConsolePi: {log_host}\n{ret}:{response.text}"
        )
    return ret
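# Usage sketch showing the tri-state return contract documented above
# (remotes: hypothetical instance of this class; the IP is illustrative):
#
#   adapters = remotes.get_adapters_via_api("10.0.30.60", log_host="consolepi4(10.0.30.60)")
#   if adapters is False:
#       print("remote unreachable")
#   elif adapters == 200:
#       print("reachable, but no adapters attached")
#   elif isinstance(adapters, int):
#       print(f"API error: HTTP {adapters}")
#   else:
#       print(f"adapters: {list(adapters)}")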
def resize_cols(self):
    service = self.sheets_svc
    body = {
        "requests": [
            {
                "autoResizeDimensions": {
                    "dimensions": {
                        "sheetId": 0,
                        "dimension": "COLUMNS",
                        "startIndex": 0,
                        "endIndex": 2,
                    }
                }
            }
        ]
    }
    request = service.spreadsheets().batchUpdate(spreadsheetId=self.file_id, body=body)
    response = self.exec_request(request)
    log.debug('[GDRIVE]: resize_cols response: {}'.format(response))
def try_build_info(self):
    # Try sending with all data
    local = self.cpi.local
    try:
        info = self.build_info()
    except struct.error as e:
        log.debug('[MDNS REG] data is too big for mdns, removing adapter data \n    {} {}'.format(
            e.__class__.__name__, e))
        log.debug('[MDNS REG] offending payload \n    {}'.format(
            json.dumps(local.data, indent=4, sort_keys=True)))
        # Too Big - Try sending without adapter data
        try:
            info = self.build_info(squash='adapters')
        except struct.error as e:
            log.warning('[MDNS REG] data is still too big for mdns, reducing interface payload \n'
                        '    {} {}'.format(e.__class__.__name__, e))
            log.debug('[MDNS REG] offending interface data \n    {}'.format(
                json.dumps(local.interfaces, indent=4, sort_keys=True)))
            try:
                info = self.build_info(squash='interfaces')
            except struct.error:
                log.critical('[MDNS REG] data is still too big for mdns')
                log.debug('[MDNS REG] offending interface data \n    {}'.format(
                    json.dumps(local.interfaces, indent=4, sort_keys=True)))
                info = None  # fix: original left info unbound here, raising UnboundLocalError on return

    return info
def update_mdns(self, device=None, action=None, *args, **kwargs):
    zeroconf = self.zeroconf
    info = self.try_build_info()

    def sub_restart_zc():
        log.info('[MDNS REG] mdns_refresh thread Start... Delaying {} Seconds'.format(UPDATE_DELAY))
        time.sleep(UPDATE_DELAY)  # Wait x seconds before updating, to accommodate multiple add/removes
        zeroconf.update_service(info)
        zeroconf.unregister_service(info)
        time.sleep(5)
        zeroconf.register_service(info)
        log.info('[MDNS REG] mdns_refresh thread Completed')

    if device is not None:
        abort_mdns = False
        for thread in threading.enumerate():
            if 'mdns_refresh' in thread.name:
                log.debug('[MDNS REG] Another mdns_refresh thread already queued, this thread will abort')
                abort_mdns = True
                break

        if not abort_mdns:
            threading.Thread(target=sub_restart_zc, name='mdns_refresh', args=()).start()
            log.debug('[MDNS REG] mdns_refresh Thread Started. Current Threads:\n    {}'.format(
                threading.enumerate()))

        log.info('[MDNS REG] detected change: {} {}'.format(device.action, device.sys_name))

        if config.cloud:  # pylint: disable=maybe-no-member
            abort = False
            for thread in threading.enumerate():
                if 'cloud_update' in thread.name:
                    log.debug('[MDNS REG] Another cloud Update thread already queued, this thread will abort')
                    abort = True
                    break

            if not abort:
                threading.Thread(target=self.trigger_cloud_update, name='cloud_update', args=()).start()
                log.debug('[MDNS REG] Cloud Update Thread Started. Current Threads:\n    {}'.format(
                    threading.enumerate()))
def do_api_request(self, ip: str, path: str, *args, **kwargs):
    """Send RESTful GET request to Remote ConsolePi to collect data

    params:
        ip(str): ip address or FQDN of remote ConsolePi
        path(str): path beyond /api/v1.0/

    returns:
        response object (or False if the remote is unreachable)
    """
    url = f"http://{ip}:5000/api/v1.0/{path}"
    log.debug(f'[do_api_request] URL: {url}')

    headers = {
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": f"{ip}:5000",
        "accept-encoding": "gzip, deflate",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    try:
        response = requests.request("GET", url, headers=headers, timeout=config.remote_timeout)
    except (OSError, TimeoutError):
        log.warning(
            f"[API RQST OUT] Remote ConsolePi @ {ip} TimeOut when querying via API - Unreachable."
        )
        return False

    if response.ok:
        log.info(f"[API RQST OUT] {url} Response: OK")
        log.debugv(
            f"[API RQST OUT] Response: \n{json.dumps(response.json(), indent=4, sort_keys=True)}"
        )
    else:
        log.error(f"[API RQST OUT] API Request Failed {url}")

    return response
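# Usage sketch showing the False-on-timeout contract (remotes: hypothetical
# instance of this class; the 'adapters' path is one of the API paths seen above):
#
#   response = remotes.do_api_request("10.0.30.60", "adapters")
#   if response is False:
#       print("remote unreachable")
#   elif response.ok:
#       adapters = response.json().get("adapters")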
def build_info(self, squash=None, local_adapters=None):
    local = self.cpi.local
    local.data = local.build_local_dict(refresh=True)
    loc = local.data[local.hostname]
    loc['hostname'] = local.hostname
    for a in loc['adapters']:
        if 'udev' in loc['adapters'][a]:
            del loc['adapters'][a]['udev']
    # ip_w_gw = loc['interfaces'].get('_ip_w_gw', '127.0.0.1')

    # if the data set is too large for mdns, the browser on the other side will retrieve it via API
    if squash is not None:
        if squash == 'interfaces':
            del loc['adapters']
            x = loc['interfaces']
            loc['interfaces'] = {
                k: {i: v[i] for i in v if i not in ['mac', 'isgw']}
                for k, v in x.items()
                if '.' not in k and not k.startswith('_')
            }
        else:
            del loc['adapters']

    log.debug('[MDNS REG] Current content of local_data \n{}'.format(
        json.dumps(loc, indent=4, sort_keys=True)))

    # mdns properties must be strings; serialize any non-str values
    loc = {
        k: '{}'.format(loc[k] if isinstance(loc[k], str) else json.dumps(loc[k]))
        for k in loc.keys()
    }

    info = ServiceInfo(
        "_consolepi._tcp.local.",
        local.hostname + "._consolepi._tcp.local.",
        addresses=[socket.inet_aton(ip) for ip in local.get_ip_list()],
        port=5000,
        properties=loc,
        server=f'{local.hostname}.local.',
    )

    return info
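# Sketch of how the ServiceInfo built above is typically registered.
# register_service/unregister_service are the real python-zeroconf API;
# the 'registrar' instance name is illustrative:
#
#   from zeroconf import Zeroconf
#
#   zc = Zeroconf()
#   info = registrar.build_info()   # registrar: hypothetical instance of this class
#   zc.register_service(info)       # announce _consolepi._tcp.local. with the properties payload
#   # ... later ...
#   zc.unregister_service(info)
#   zc.close()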
def esphome_req(*args, command: str = command):
    try:
        method = "GET" if command is None else "POST"
        response = requests.request(method, url=url if command is not None else status_url,
                                    headers=headers, timeout=3)
        if response.status_code == 200:
            if command is None:
                _response = response.json().get('value')
            else:
                if command in ['toggle', 'cycle']:
                    _response = not cur_state
                else:
                    _response = command
                # _response = requests.request("GET", status_url,
                #                              headers=headers, timeout=3).json().get('value')
        else:
            _response = '[{}] error returned {}'.format(response.status_code, response.text)
    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
        _response = 'Unreachable'
    except requests.exceptions.RequestException as e:
        log.debug(f"[esphome_req] {url.replace('http://', '').replace('https://', '').split('/')[0]} Exception: {e}")
        _response = 'Unreachable'  # So I can determine if other exception types are possible when unreachable
    return _response
def try_build_info(self):
    # Try sending with all data
    local = self.cpi.local
    info = ServiceInfo(
        "_consolepi._tcp.local.",
        local.hostname + "._consolepi._tcp.local.",
        server=f'{local.hostname}.local.',
    )
    try:
        info = self.build_info()
    except (struct.error, ValueError) as e:
        log.debug('[MDNS REG] data is too big for mdns, removing adapter data \n    {} {}'.format(
            e.__class__.__name__, e))
        log.debug('[MDNS REG] offending payload \n    {}'.format(
            json.dumps(local.data, indent=4, sort_keys=True)))
        # Too Big - Try sending without adapter data
        try:
            info = self.build_info(squash='adapters')
        except (struct.error, ValueError) as e:
            log.warning('[MDNS REG] data is still too big for mdns, reducing interface payload \n'
                        '    {} {}'.format(e.__class__.__name__, e))
            log.debug('[MDNS REG] offending interface data \n    {}'.format(
                json.dumps(local.interfaces, indent=4, sort_keys=True)))
            try:
                info = self.build_info(squash='interfaces')
            except (struct.error, ValueError):
                log.critical('[MDNS REG] data is still too big for mdns')
                log.debug('[MDNS REG] offending interface data \n    {}'.format(
                    json.dumps(local.interfaces, indent=4, sort_keys=True)))

    return info
def auto_pwron_thread(self, pwr_key):
    """Ensure any outlets linked to device are powered on

    Called by consolepi_menu exec_menu function and remote_launcher (for sessions to remotes)
    when a connection is initiated with an adapter.  Powers on any linked outlets associated
    with the adapter.

    params:
        pwr_key:str, The tty device the user is connecting to.

    Returns:
        No Return - Updates class attributes
    """
    if self.wait_for_threads("init"):
        return

    outlets = self.pwr.data
    if "linked" not in outlets:
        _msg = "Error linked key not found in outlet dict\nUnable to perform auto power on"
        log.show(_msg, show=True)
        return

    if not outlets["linked"].get(pwr_key):
        return

    # -- // Perform Auto Power On (if not already on) \\ --
    for o in outlets["linked"][pwr_key]:
        outlet = outlets["defined"].get(o.split(":")[0])
        ports = [] if ":" not in o else json.loads(o.replace("'", '"').split(":")[1])
        _addr = outlet["address"]

        # -- // DLI web power switch Auto Power On \\ --
        #
        # TODO combine all ports from same pwr_key and send to pwr_toggle once
        # TODO Update outlet if return is OK, then run refresh in the background to validate
        # TODO Add class attribute to cpi_menu ~ cpi_menu.new_data = "power", "main", etc
        #      Then in wait_for_input run loop to check for updates and re-display menu
        # TODO power_menu and dli_menu wait_for_threads auto power ... check cpiexec.autopwr_wait first
        #
        if outlet["type"].lower() == "dli":
            for p in ports:
                log.debug(f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr} p{p}")
                if not outlet["is_on"][p]["state"]:  # This is just checking what's in the dict not querying the DLI
                    r = self.pwr.pwr_toggle(outlet["type"], _addr, desired_state=True, port=p)
                    if isinstance(r, bool):
                        if r:
                            threading.Thread(
                                target=self.outlet_update,
                                kwargs={"refresh": True, "upd_linked": True},
                                name="auto_pwr_refresh_dli",
                            ).start()
                            self.autopwr_wait = True
                    else:
                        log.warning(f"{pwr_key} Error operating linked outlet @ {o}", show=True)

        # -- // esphome Auto Power On \\ --
        elif outlet["type"].lower() == "esphome":
            for p in ports:
                log.debug(f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr} p{p}")
                if not outlet["is_on"][p]["state"]:  # This is just checking what's in the dict
                    r = self.pwr.pwr_toggle(outlet["type"], _addr, desired_state=True, port=p)
                    if isinstance(r, bool):
                        self.pwr.data['defined'][o.split(':')[0]]['is_on'][p]['state'] = r
                    else:
                        log.show(r)
                        log.warning(f"{pwr_key} Error operating linked outlet @ {o}", show=True)

        # -- // GPIO & TASMOTA Auto Power On \\ --
        else:
            log.debug(f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr}")
            r = self.pwr.pwr_toggle(
                outlet["type"],
                _addr,
                desired_state=True,
                noff=outlet.get("noff", True) if outlet["type"].upper() == "GPIO" else True,
            )
            if isinstance(r, int) and r > 1:  # return is an error
                r = False
            else:  # return is bool which is what we expect
                if r:
                    self.pwr.data["defined"][o]["state"] = r
                    self.autopwr_wait = True
                    # self.pwr.pwr_get_outlets(upd_linked=True)
                else:
                    # self.config.log_and_show(f"Error operating linked outlet {o}:{outlet['address']}", log=log.warning)
                    log.show(f"Error operating linked outlet {o}:{outlet['address']}", show=True)
def api_reachable(self, remote_host: str, cache_data: dict, rename: bool = False):
    """Check Reachability & Fetch adapter data via API for remote ConsolePi

    params:
        remote_host:str, The hostname of the Remote ConsolePi
        cache_data:dict, The ConsolePi dictionary for the remote (from cache file)
        rename:bool, rename = True will do the api call with the refresh=true query parameter,
            which tells the api to first update connection data from ser2net, as it likely
            changed as a result of a remote rename operation.

    returns:
        ApiReachableResponse object with attributes:
            update: Bool, indicating if data is different than cache
            data: dict, Updated ConsolePi dictionary for the remote
            reachable: Bool, indicating if the remote was reachable
    """
    class ApiReachableResponse:
        def __init__(self, update, data, reachable):
            self.update = update
            self.data = data
            self.reachable = reachable

    update = False
    local = self.local

    _iface_dict = cache_data["interfaces"]
    rem_ip_list = [
        _iface_dict[_iface].get("ip")
        for _iface in _iface_dict
        if not _iface.startswith("_") and _iface_dict[_iface].get("ip") not in local.ip_list
    ]

    # if inbound data includes rem_ip make sure to try that first
    for _ip in [cache_data.get("rem_ip"), cache_data.get("last_ip")]:
        if _ip:
            if _ip in rem_ip_list:  # guard membership first (original could raise ValueError on remove)
                rem_ip_list.remove(_ip)
            rem_ip_list.insert(0, _ip)

    rem_ip = None
    _adapters = None  # initialize so the check below is safe when rem_ip_list is empty
    for _ip in rem_ip_list:
        log.debug(f"[API_REACHABLE] verifying {remote_host}")
        _adapters = self.get_adapters_via_api(_ip, rename=rename)
        if _adapters:
            rem_ip = _ip  # Remote is reachable
            if not isinstance(_adapters, int):  # int indicates an http error code was returned
                if isinstance(_adapters, list):  # list indicates need for conversion from old api format
                    _adapters = self.convert_adapters(_adapters)
                    if not self.old_api_log_sent:
                        log.warning(f"{remote_host} provided old api schema. Recommend Upgrading to current.")
                        self.old_api_log_sent = True

                # Only compare config dict for each adapter as udev dict will generally be different due to time_since_init
                if not cache_data.get("adapters") or {
                    a: {"config": _adapters[a].get("config", {})} for a in _adapters
                } != {
                    a: {"config": cache_data["adapters"][a].get("config", {})} for a in cache_data["adapters"]
                }:
                    cache_data["adapters"] = _adapters
                    update = True  # --> Update if adapter dict is different
                else:
                    cached_udev = [
                        False for a in cache_data["adapters"] if 'udev' not in cache_data["adapters"][a]
                    ]
                    if False in cached_udev:
                        cache_data["adapters"] = _adapters
                        update = True  # --> Update if udev key not in existing data (udev not sent to cloud)
            elif _adapters == 200:
                log.show(
                    f"Remote {remote_host} is reachable via {_ip},"
                    " but has no adapters attached\nit's still available in remote shell menu"
                )

            # remote was reachable, update last_ip, even if it returned a bad status_code (still reachable)
            if not cache_data.get("last_ip", "") == _ip:
                cache_data["last_ip"] = _ip
                update = True  # --> Update if last_ip is different than currently reachable IP
            break

    if cache_data.get("rem_ip") != rem_ip:
        cache_data["rem_ip"] = rem_ip
        update = True  # --> Update if rem_ip didn't match (was previously unreachable)

    if not _adapters:
        reachable = False
        if isinstance(cache_data.get("adapters"), list):
            _adapters = cache_data.get("adapters")
            _adapters = {
                _adapters[_adapters.index(d)]["dev"]: {
                    "config": {k: _adapters[_adapters.index(d)][k] for k in _adapters[_adapters.index(d)]}
                }
                for d in _adapters
            }
            cache_data["adapters"] = _adapters
            _msg = (
                f"{remote_host} Cached adapter data was in old format... Converted to new.\n"
                f"\t\t{remote_host} Should be upgraded to the current version of ConsolePi."
            )
            log.warning(_msg, show=True)
            update = True  # --> Convert to new and Update if cache data was in old format
    else:
        reachable = True

    return ApiReachableResponse(update, cache_data, reachable)
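# Usage sketch showing the response object contract (remotes / cached are
# hypothetical names; the hostname is illustrative):
#
#   res = remotes.api_reachable("consolepi4", cached["consolepi4"])
#   if res.reachable:
#       if res.update:
#           remotes.update_local_cloud_file(remote_consoles={"consolepi4": res.data})
#   else:
#       print("consolepi4 unreachable on all known IPs")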
def update_local_cloud_file(self, remote_consoles=None, current_remotes=None, local_cloud_file=None):
    """Update local cloud cache (cloud.json).

    Verifies the newly discovered data is more current than what we already know, and
    updates the local cloud.json file if so.  The Menu uses cloud.json to populate
    remote menu items.

    params:
        remote_consoles: The newly discovered data (from Gdrive or mdns)
        current_remotes: The current remote data fetched from the local cloud cache (cloud.json)
            - func will retrieve this if not provided
        local_cloud_file: The path to the local cloud file (global var cloud.json)

    returns:
        dict: The resulting remote console dict representing the most recent data for each remote.
    """
    local_cloud_file = (config.static.get("LOCAL_CLOUD_FILE")
                        if local_cloud_file is None else local_cloud_file)

    if len(remote_consoles) > 0:
        if current_remotes is None:
            current_remotes = self.data = config.remote_update()  # grabs the remote data from local cloud cache

    # update current_remotes dict with data passed to function
    if len(remote_consoles) > 0:
        if current_remotes is not None:
            for _ in current_remotes:
                if _ not in remote_consoles:
                    if ("fail_cnt" not in current_remotes[_] or current_remotes[_]["fail_cnt"] < 2):
                        remote_consoles[_] = current_remotes[_]
                    elif (remote_consoles.get(_) and "fail_cnt" not in remote_consoles[_]
                            and "fail_cnt" in current_remotes[_]):
                        remote_consoles[_]["fail_cnt"] = current_remotes[_]["fail_cnt"]
                else:
                    # -- VERBOSE DEBUG --
                    log.debugv(
                        "[CACHE UPD] \n--{}-- \n    remote upd_time: {}\n    remote rem_ip: {}\n"
                        "    remote source: {}\n    cache rem upd_time: {}\n    cache rem_ip: {}\n"
                        "    cache source: {}\n".format(  # NoQA
                            _,
                            time.strftime("%a %x %I:%M:%S %p %Z",
                                          time.localtime(remote_consoles[_]["upd_time"]))
                            if "upd_time" in remote_consoles[_] else None,  # NoQA
                            remote_consoles[_]["rem_ip"] if "rem_ip" in remote_consoles[_] else None,
                            remote_consoles[_]["source"] if "source" in remote_consoles[_] else None,
                            time.strftime("%a %x %I:%M:%S %p %Z",
                                          time.localtime(current_remotes[_]["upd_time"]))
                            if "upd_time" in current_remotes[_] else None,  # NoQA
                            current_remotes[_]["rem_ip"] if "rem_ip" in current_remotes[_] else None,
                            current_remotes[_]["source"] if "source" in current_remotes[_] else None,
                        )
                    )
                    # -- END VERBOSE DEBUG --

                    # No Change Detected (data passed to function matches cache)
                    if "last_ip" in current_remotes[_]:
                        del current_remotes[_]["last_ip"]
                    if remote_consoles[_] == current_remotes[_]:
                        log.debug("[CACHE UPD] {} No Change in info detected".format(_))

                    # only factor in existing data if source is not mdns
                    elif ("upd_time" in remote_consoles[_] or "upd_time" in current_remotes[_]):
                        if ("upd_time" in remote_consoles[_] and "upd_time" in current_remotes[_]):
                            if (current_remotes[_]["upd_time"] > remote_consoles[_]["upd_time"]):
                                remote_consoles[_] = current_remotes[_]
                                log.info(
                                    f"[CACHE UPD] {_} Keeping existing data from {current_remotes[_].get('source', '')} "
                                    "based on more current update time"
                                )
                            elif (remote_consoles[_]["upd_time"] > current_remotes[_]["upd_time"]):
                                log.info(
                                    "[CACHE UPD] {} Updating data from {} "
                                    "based on more current update time".format(_, remote_consoles[_]["source"])
                                )
                            else:  # -- Update Times are equal --
                                if (current_remotes[_].get("adapters") and remote_consoles[_].get("adapters")
                                        and current_remotes[_]["adapters"].keys() != remote_consoles[_]["adapters"].keys()) \
                                        or remote_consoles[_].get("interfaces", {}) != current_remotes[_].get("interfaces", {}):
                                    log.warning(
                                        "[CACHE UPD] {} current cache update time and {} update time are equal"
                                        " but data appears to have changed. Updating".format(_, remote_consoles[_]["source"])
                                    )
                        elif "upd_time" in current_remotes[_]:
                            remote_consoles[_] = current_remotes[_]
                            log.info(
                                "[CACHE UPD] {} Keeping existing data based on *existence* of update time "
                                "which is lacking in this update from {}".format(_, remote_consoles[_]["source"])
                            )

        for _try in range(0, 2):
            try:
                with open(local_cloud_file, "w") as cloud_file:
                    cloud_file.write(json.dumps(remote_consoles, indent=4, sort_keys=True))
                utils.set_perm(local_cloud_file)  # a hack to deal with perms ~ consolepi-details del func
                break
            except PermissionError:
                utils.set_perm(local_cloud_file)
    else:
        log.warning("[CACHE UPD] cache update called with no data passed, doing nothing")

    return remote_consoles
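# Illustrative cloud.json entry for reference (a hedged example; the keys are
# the ones compared above, the hostname and values are made up):
#
#   {
#       "consolepi4": {
#           "rem_ip": "10.0.30.60",
#           "last_ip": "10.0.30.60",
#           "source": "mdns",
#           "upd_time": 1617294635,
#           "adapters": {"/dev/r1-8320T-sw": {"config": {"baud": 9600}}},
#           "interfaces": {"eth0": {"ip": "10.0.30.60"}}
#       }
#   }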
ztp_opts_conf = '/etc/ConsolePi/dnsmasq.d/wired-dhcp/ztp-opts/ztp-opts.conf'
ztp_hosts_conf = '/etc/ConsolePi/dnsmasq.d/wired-dhcp/ztp-hosts/ztp-hosts.conf'
match = [m for m in config.cfg_yml.get('ZTP', {}).get('ordered_ztp', {})]
ztp_lease_time = '2m'

# -- DEBUG STUFF
DEBUG = False
if DEBUG:
    log.setLevel(10)  # 10 = logging.DEBUG
    env = ''
    for k, v in os.environ.items():
        if 'DNS' in k:
            env += f"{k}: {v}\n"
    if env:
        log.debug(f"Environment:\n{env}")
    log.debug(f"Arguments:\n{', '.join(sys.argv[1:])}")
# --

# dhcp args: add aa:bb:cc:dd:ee:ff 10.33.0.151
# tftp args: tftp 12261 10.33.0.113 /srv/tftp/6200_1.cfg
add_del = sys.argv[1]    # the word tftp when tftp
mac_bytes = sys.argv[2]  # bytes sent when tftp
ip = sys.argv[3]
cfg_file = None
if len(sys.argv) > 4:
    cfg_file = sys.argv[4]  # file sent via tftp

# Available from environ when called by dhcp
iface = os.environ.get('DNSMASQ_INTERFACE')
vendor = os.environ.get('DNSMASQ_VENDOR_CLASS')
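# For context, a sketch of how dnsmasq invokes a lease-change script like this one
# (dhcp-script is the real dnsmasq option; the config path below is illustrative):
#
#   # /etc/dnsmasq.d/wired-dhcp.conf
#   dhcp-script=/etc/ConsolePi/src/dhcp-trigger.py   # hypothetical script path
#
# dnsmasq then execs the script as:  <script> add|old|del <mac> <ip> [hostname]
# and, for tftp transfers, as:       <script> tftp <file-size> <ip> <file-path>
# with DNSMASQ_* details (interface, vendor class, ...) exported in the environment,
# which is what the sys.argv / os.environ handling above relies on.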
def pwr_get_outlets(self, outlet_data=None, upd_linked=False, failures=None):
    '''Get Details for Outlets defined in ConsolePi.yaml power section

    On Menu Launch this method is called in parallel (threaded) for each outlet
    On Refresh all outlets are passed to the method

    params: - All Optional
        outlet_data:dict, The outlets that need to be updated, if not provided will
            get all outlets defined in ConsolePi.yaml
        upd_linked:Bool, If True will update just the linked ports, False is for dli
            and will update all ports for the dli.
        failures:dict, when refreshing outlets pass in previous failures so they can be re-tried
    '''
    outlet_data = outlet_data or {}  # fix: original used mutable {} defaults
    failures = failures or {}

    # re-attempt connection to failed power controllers on refresh
    if not failures:
        failures = outlet_data.get('failures') if outlet_data.get('failures') else self.data.get('failures')

    outlet_data = self.data.get('defined') if not outlet_data else outlet_data
    if failures:
        outlet_data = {**outlet_data, **failures}
        failures = {}

    dli_power = self.data.get('dli_power', {})

    for k in outlet_data:
        outlet = outlet_data[k]
        _start = time.time()

        # -- // GPIO \\ --
        if outlet['type'].upper() == 'GPIO':
            if not is_rpi:
                log.warning('GPIO Outlet Defined, GPIO Only Supported on RPi - ignored', show=True)
                continue
            noff = True if 'noff' not in outlet else outlet['noff']
            GPIO.setup(outlet['address'], GPIO.OUT)
            outlet_data[k]['is_on'] = bool(GPIO.input(outlet['address'])) if noff \
                else not bool(GPIO.input(outlet['address']))

        # -- // tasmota \\ --
        elif outlet['type'] == 'tasmota':
            response = self.do_tasmota_cmd(outlet['address'])
            outlet['is_on'] = response
            if response not in [0, 1, True, False]:
                failures[k] = outlet_data[k]
                failures[k]['error'] = f'[PWR-TASMOTA] {k}:{failures[k]["address"]} {response} - Removed'
                log.warning(failures[k]['error'], show=True)

        # -- // esphome \\ --
        elif outlet['type'] == 'esphome':
            # TODO have do_esphome accept list, slice, or str for one or multiple relays
            relays = utils.listify(outlet.get('relays', k))  # if they have not specified the relay try name of outlet
            outlet['is_on'] = {}
            for r in relays:
                response = self.do_esphome_cmd(outlet['address'], r)
                outlet['is_on'][r] = {'state': response, 'name': r}
                if response not in [True, False]:
                    failures[k] = outlet_data[k]
                    failures[k]['error'] = f'[PWR-ESP] {k}:{failures[k]["address"]} {response} - Removed'
                    log.warning(failures[k]['error'], show=True)

        # -- // dli \\ --
        elif outlet['type'].lower() == 'dli':
            if TIMING:
                dbg_line = '------------------------ // NOW PROCESSING {} \\\\ ------------------------'.format(k)
                print('\n{}'.format('=' * len(dbg_line)))
                print('{}\n{}\n{}'.format(dbg_line, outlet_data[k], '-' * len(dbg_line)))
                print('{}'.format('=' * len(dbg_line)))

            # -- // VALIDATE CONFIG FILE DATA FOR DLI \\ --
            all_good = True  # initial value
            for _ in ['address', 'username', 'password']:
                if not outlet.get(_):
                    all_good = False
                    failures[k] = outlet_data[k]
                    failures[k]['error'] = f'[PWR-DLI {k}] {_} missing from {failures[k]["address"]} ' \
                                           'configuration - skipping'
                    log.error(f'[PWR-DLI {k}] {_} missing from {failures[k]["address"]} '
                              'configuration - skipping', show=True)
                    break
            if not all_good:
                continue

            (this_dli, _update) = self.load_dli(outlet['address'], outlet['username'], outlet['password'])
            if this_dli is None or this_dli.dli is None:
                failures[k] = outlet_data[k]
                failures[k]['error'] = '[PWR-DLI {}] {} Unreachable - Removed'.format(k, failures[k]['address'])
                log.warning(f"[PWR-DLI {k}] {failures[k]['address']} Unreachable - Removed", show=True)
            else:
                if TIMING:
                    xstart = time.time()
                    print('this_dli.outlets: {} {}'.format(this_dli.outlets, 'update' if _update else 'init'))
                    print(json.dumps(dli_power, indent=4, sort_keys=True))

                # upd_linked is for faster update in power menu only refreshes data for linked ports vs entire dli
                if upd_linked and self.data['dli_power'].get(outlet['address']):
                    if outlet.get('linked_devs'):
                        (outlet, _p) = self.update_linked_devs(outlet)
                        if k in outlet_data:
                            outlet_data[k]['is_on'] = this_dli[_p]
                        else:
                            log.error(f'[PWR GET_OUTLETS] {k} appears to be unreachable')

                        # TODO not actually using the error returned this turned into a hot mess
                        if isinstance(outlet['is_on'], dict) and not outlet['is_on']:
                            all_good = False
                        # update dli_power for the refreshed / linked ports
                        else:
                            for _ in outlet['is_on']:
                                dli_power[outlet['address']][_] = outlet['is_on'][_]
                else:
                    if _update:
                        dli_power[outlet['address']] = this_dli.get_dli_outlets()  # data may not be fresh trigger dli update

                        # handle error connecting to dli during refresh - when connect worked on menu launch
                        if not dli_power[outlet['address']]:
                            failures[k] = outlet_data[k]
                            failures[k]['error'] = f"[PWR-DLI] {k} {failures[k]['address']} Unreachable - Removed"
                            log.warning(f'[PWR-DLI {k}] {failures[k]["address"]} Unreachable - Removed', show=True)
                            continue
                    else:  # dli was just instantiated data is fresh no need to update
                        dli_power[outlet['address']] = this_dli.outlets
                        if outlet.get('linked_devs'):
                            (outlet, _p) = self.update_linked_devs(outlet)

                if TIMING:
                    print('[TIMING] this_dli.outlets: {}'.format(time.time() - xstart))  # TIMING

            log.debug(f'dli {k} Updated. Elapsed Time(secs): {time.time() - _start}')
        # -- END for LOOP for k in outlet_data --

    # Move failed outlets from the keys that populate the menu to the 'failures' key
    # failures are displayed in the footer section of the menu, then re-tried on refresh
    # TODO this may be causing - RuntimeError: dictionary changed size during iteration
    #      in pwr_start_update_threads. witnessed on mdnsreg daemon on occasion (Move del logic after wait_for_threads?)
    for _dev in failures:
        if outlet_data.get(_dev):
            del outlet_data[_dev]
        if self.data['defined'].get(_dev):
            del self.data['defined'][_dev]
        if failures[_dev]['address'] in dli_power:
            del dli_power[failures[_dev]['address']]
        self.data['failures'][_dev] = failures[_dev]

    # restore outlets that failed on menu launch but found reachable during refresh
    for _dev in outlet_data:
        if _dev not in self.data['defined']:
            self.data['defined'][_dev] = outlet_data[_dev]
        if _dev in self.data['failures']:
            del self.data['failures'][_dev]

    self.data['dli_power'] = dli_power

    return self.data
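# For reference, a hedged illustration of the self.data structure this method
# maintains (keys are the ones referenced above; names/addresses are made up):
#
#   {
#       "defined": {
#           "lab_relay": {"type": "tasmota", "address": "10.0.30.20", "is_on": True},
#       },
#       "linked": {"/dev/r1-8320T-sw": ["labpower1:[1, 2]"]},   # from ConsolePi.yaml
#       "dli_power": {"labpower1.example.com": {1: {"state": True, "name": "port1"}}},
#       "failures": {"bad_outlet": {"error": "[PWR-TASMOTA] ... - Removed"}},
#   }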
ztp_hosts_conf = '/etc/ConsolePi/dnsmasq.d/wired-dhcp/ztp-hosts/ztp-hosts.conf'
ZTP_CLI_FILE = config.static.get('ZTP_CLI_FILE', '/etc/ConsolePi/ztp/.ztpcli')
match = [m for m in config.cfg_yml.get('ZTP', {}).get('ordered_ztp', {})]
ztp_lease_time = '2m'

# -- DEBUG STUFF
DEBUG = False
if DEBUG:
    log.setLevel(10)  # 10 = logging.DEBUG
    env = ''
    for k, v in os.environ.items():
        if 'DNS' in k:
            env += f"{k}: {v}\n"
    if env:
        log.debug(f"Environment:\n{env}")
    log.debug(f"Arguments:\n{', '.join(sys.argv[1:])}")
# --

# dhcp args: add aa:bb:cc:dd:ee:ff 10.33.0.151
# tftp args: tftp 12261 10.33.0.113 /srv/tftp/6200_1.cfg
add_del = sys.argv[1]    # the word tftp when tftp
mac_bytes = sys.argv[2]  # bytes sent when tftp
ip = sys.argv[3]
cfg_file = None
if len(sys.argv) > 4:
    cfg_file = sys.argv[4]  # file sent via tftp

# Available from environ when called by dhcp
iface = os.environ.get('DNSMASQ_INTERFACE')
vendor = os.environ.get('DNSMASQ_VENDOR_CLASS')
def on_service_state_change(self, zeroconf: Zeroconf, service_type: str, name: str,
                            state_change: ServiceStateChange) -> None:
    cpi = self.cpi
    mdns_data = None
    update_cache = False
    if state_change is ServiceStateChange.Added:
        info = zeroconf.get_service_info(service_type, name)
        if info:
            if info.server.split('.')[0] != cpi.local.hostname:
                if info.properties:
                    properties = info.properties
                    mdns_data = {
                        k.decode('UTF-8'):
                            v.decode('UTF-8') if not v.decode('UTF-8')[0] in ['[', '{']
                            else json.loads(v.decode('UTF-8'))  # NoQA
                        for k, v in properties.items()
                    }
                    hostname = mdns_data.get('hostname')
                    interfaces = mdns_data.get('interfaces', [])
                    # interfaces = json.loads(properties[b'interfaces'].decode("utf-8"))

                    log_out = json.dumps(mdns_data, indent=4, sort_keys=True)
                    log.debug(f'[MDNS DSCVRY] {hostname} Properties Discovered via mdns:\n{log_out}')

                    rem_ip = mdns_data.get('rem_ip')
                    if not rem_ip:
                        if len(mdns_data.get('interfaces', [])) == 1:
                            rem_ip = [interfaces[i]['ip'] for i in interfaces]
                            rem_ip = rem_ip[0]
                        else:
                            rem_ip = None if hostname not in cpi.remotes.data or 'rem_ip' not in cpi.remotes.data[hostname] \
                                else cpi.remotes.data[hostname]['rem_ip']

                    cur_known_adapters = cpi.remotes.data.get(hostname, {'adapters': None}).get('adapters')

                    # -- Log new entry only if this is the first time it's been discovered --
                    if hostname not in self.d_discovered:
                        self.d_discovered.append(hostname)
                        log.info('[MDNS DSCVRY] {}({}) Discovered via mdns'.format(
                            hostname, rem_ip if rem_ip is not None else '?'))

                    from_mdns_adapters = mdns_data.get('adapters')
                    mdns_data['rem_ip'] = rem_ip
                    mdns_data['adapters'] = from_mdns_adapters if from_mdns_adapters is not None else cur_known_adapters
                    mdns_data['source'] = 'mdns'
                    mdns_data['upd_time'] = int(time.time())
                    mdns_data = {hostname: mdns_data}

                    # update from API only if no adapter data exists either in cache or from mdns that triggered this
                    # adapter data is updated on menu_launch
                    if not mdns_data[hostname]['adapters'] or hostname not in cpi.remotes.data:
                        log.info('[MDNS DSCVRY] {} provided no adapter data Collecting via API'.format(
                            info.server.split('.')[0]))
                        # TODO check this don't think needed had a hung process on one of my Pis added it to be safe
                        try:
                            res = cpi.remotes.api_reachable(hostname, mdns_data[hostname])
                            update_cache = res.update
                            mdns_data[hostname] = res.data
                            # reachable = res.reachable
                        except Exception as e:
                            log.error(f'Exception occurred verifying reachability via API for {hostname}:\n{e}')

                    if self.show:
                        if hostname in self.discovered:
                            self.discovered.remove(hostname)
                        self.discovered.append('{}{}'.format(hostname, '*' if update_cache else ''))
                        print(hostname + '({}) Discovered via mdns.'.format(rem_ip if rem_ip is not None else '?'))
                        try:
                            print('{}\n{}'.format(
                                'mdns: None' if from_mdns_adapters is None else 'mdns: {}'.format(
                                    [d.replace('/dev/', '') for d in from_mdns_adapters]
                                    if not isinstance(from_mdns_adapters, list)
                                    else [d['dev'].replace('/dev/', '') for d in from_mdns_adapters]
                                ),
                                'cache: None' if cur_known_adapters is None else 'cache: {}'.format(
                                    [d.replace('/dev/', '') for d in cur_known_adapters]
                                    if not isinstance(cur_known_adapters, list)
                                    else [d['dev'].replace('/dev/', '') for d in cur_known_adapters]
                                )
                            ))
                        except TypeError as e:
                            print(f'EXCEPTION: {e}')
                        print(f'\nDiscovered ConsolePis: {self.discovered}')
                        print("press Ctrl-C to exit...\n")

                    log.debug('[MDNS DSCVRY] {} Final data set:\n{}'.format(
                        hostname, json.dumps(mdns_data, indent=4, sort_keys=True)))

                    if update_cache:
                        if 'hostname' in mdns_data[hostname]:
                            del mdns_data[hostname]['hostname']
                        cpi.remotes.data = cpi.remotes.update_local_cloud_file(remote_consoles=mdns_data)
                        log.info(f'[MDNS DSCVRY] {hostname} Local Cache Updated after mdns discovery')
                else:
                    log.warning(f'[MDNS DSCVRY] {name}: No properties found')  # fix: hostname is undefined in this branch
        else:
            log.warning(f'[MDNS DSCVRY] {info}: No info found')
def auto_pwron_thread(self, pwr_key):
    """Ensure any outlets linked to device are powered on

    Called by consolepi_menu exec_menu function and remote_launcher (for sessions to remotes)
    when a connection is initiated with an adapter.  Powers on any linked outlets associated
    with the adapter.

    params:
        pwr_key:str, The tty device the user is connecting to.

    Returns:
        No Return - Updates class attributes
    """
    if self.wait_for_threads("init"):
        return

    outlets = self.pwr.data
    if "linked" not in outlets:
        _msg = "Error linked key not found in outlet dict\nUnable to perform auto power on"
        log.show(_msg, show=True)
        return

    if not outlets["linked"].get(pwr_key):
        return

    # -- // Perform Auto Power On (if not already on) \\ --
    for o in outlets["linked"][pwr_key]:
        outlet = outlets["defined"].get(o.split(":")[0])
        ports = [] if ":" not in o else json.loads(o.split(":")[1])
        # NoQA .replace('\'', '"')) No longer necessary single port defs are listified in config.py
        _addr = outlet["address"]

        # -- // DLI web power switch Auto Power On \\ --
        if outlet["type"].lower() == "dli":
            for p in ports:
                log.debug(f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr} p{p}")

                # TODO have seen this, but unable to recreate. may be transient failure???
                # NoQA This log occurs: [ERROR]: [DLI GET OUTLETS] dli @ labpower2.kabrew.com reachable, but failed to fetch statuslist (outlet_list)
                # is_on in pwr.data['labpower2']['defined'] is being flushed based on error above so empty dict resulting in key error
                # Exception in thread auto_pwr_on_r1-8320T-sw:
                # Traceback (most recent call last):
                #   File "/usr/lib/python3.7/threading.py", line 917, in _bootstrap_inner
                #     self.run()
                #   File "/usr/lib/python3.7/threading.py", line 865, in run
                #     self._target(*self._args, **self._kwargs)
                #   File "/etc/ConsolePi/src/pypkg/consolepi/exec.py", line 88, in auto_pwron_thread
                #     if not outlet["is_on"][p][
                # KeyError: 2
                if not outlet["is_on"][p]["state"]:  # This is just checking what's in the dict not querying the DLI
                    r = self.pwr.pwr_toggle(outlet["type"], _addr, desired_state=True, port=p)
                    if isinstance(r, bool):
                        if r:
                            threading.Thread(
                                target=self.outlet_update,
                                kwargs={"refresh": True, "upd_linked": True},
                                name="auto_pwr_refresh_dli",
                            ).start()
                            self.autopwr_wait = True
                    else:
                        log.warning(f"{pwr_key} Error operating linked outlet @ {o}", show=True)

        # -- // esphome Auto Power On \\ --
        elif outlet["type"].lower() == "esphome":
            for p in ports:
                log.debug(f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr} p{p}")
                if not outlet["is_on"][p]["state"]:  # This is just checking what's in the dict
                    r = self.pwr.pwr_toggle(outlet["type"], _addr, desired_state=True, port=p)
                    if isinstance(r, bool):
                        self.pwr.data['defined'][o.split(':')[0]]['is_on'][p]['state'] = r
                    else:
                        log.show(r)
                        log.warning(f"{pwr_key} Error operating linked outlet @ {o}", show=True)

        # -- // GPIO & TASMOTA Auto Power On \\ --
        else:
            log.debug(f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr}")
            r = self.pwr.pwr_toggle(
                outlet["type"],
                _addr,
                desired_state=True,
                noff=outlet.get("noff", True) if outlet["type"].upper() == "GPIO" else True,
            )
            if isinstance(r, int) and r > 1:  # return is an error
                r = False
            else:  # return is bool which is what we expect
                if r:
                    self.pwr.data["defined"][o]["state"] = r
                    self.autopwr_wait = True
                    # self.pwr.pwr_get_outlets(upd_linked=True)
                else:
                    # self.config.log_and_show(f"Error operating linked outlet {o}:{outlet['address']}", log=log.warning)
                    log.show(f"Error operating linked outlet {o}:{outlet['address']}", show=True)
def on_service_state_change(self, zeroconf: Zeroconf, service_type: str, name: str,
                            state_change: ServiceStateChange) -> None:
    if self.cpi.local.hostname == name.split(".")[0]:
        return

    if state_change is not ServiceStateChange.Added:
        return

    info = zeroconf.get_service_info(service_type, name)
    if not info:
        log.warning(f'[MDNS DSCVRY] {name}: No info found')
        return
    if not hasattr(info, "properties") or not info.properties:
        log.warning(f'[MDNS DSCVRY] {name}: No properties found')
        return

    properties = info.properties
    cpi = self.cpi
    mdns_data = None
    update_cache = False
    try:
        mdns_data = {
            k.decode('UTF-8'):
                v.decode('UTF-8') if len(v) == 0 or not v.decode('UTF-8')[0] in ['[', '{']
                else json.loads(v.decode('UTF-8'))  # NoQA
            for k, v in properties.items()
        }
    except Exception as e:
        log.exception(
            f"[MDNS DSCVRY] {e.__class__.__name__} occurred while parsing mdns_data:\n    {mdns_data}\n"
            f"Exception: \n{e}"
        )
        log.error(f"[MDNS DSCVRY] entry from {name} ignored due to parsing exception.")
        return

    hostname = mdns_data.get('hostname')
    interfaces = mdns_data.get('interfaces', [])

    log_out = json.dumps(mdns_data, indent=4, sort_keys=True)
    log.debug(f'[MDNS DSCVRY] {hostname} Properties Discovered via mdns:\n{log_out}')

    rem_ip = mdns_data.get('rem_ip')
    if not rem_ip:
        if len(mdns_data.get('interfaces', [])) == 1:
            rem_ip = [interfaces[i]['ip'] for i in interfaces]
            rem_ip = rem_ip[0]
        else:
            rem_ip = None if hostname not in cpi.remotes.data or 'rem_ip' not in cpi.remotes.data[hostname] \
                else cpi.remotes.data[hostname]['rem_ip']

    cur_known_adapters = cpi.remotes.data.get(hostname, {'adapters': None}).get('adapters')

    # -- Log new entry only if this is the first time it's been discovered --
    if hostname not in self.d_discovered:
        self.d_discovered += [hostname]
        log.info('[MDNS DSCVRY] {}({}) Discovered via mdns'.format(
            hostname, rem_ip if rem_ip is not None else '?'))

    from_mdns_adapters = mdns_data.get('adapters')
    mdns_data['rem_ip'] = rem_ip
    mdns_data['adapters'] = from_mdns_adapters if from_mdns_adapters else cur_known_adapters
    mdns_data['source'] = 'mdns'
    mdns_data['upd_time'] = int(time.time())
    mdns_data = {hostname: mdns_data}

    # update from API only if no adapter data exists either in cache or from mdns that triggered this
    # adapter data is updated on menu_launch either way
    if (not mdns_data[hostname]['adapters'] and hostname not in self.no_adapters) or \
            hostname not in cpi.remotes.data:
        log.info(f"[MDNS DSCVRY] {info.server.split('.')[0]} provided no adapter data Collecting via API")
        # TODO check this don't think needed had a hung process on one of my Pis added it to be safe
        try:
            # TODO we are setting update time here so always result in a cache update with the restart timer
            res = cpi.remotes.api_reachable(hostname, mdns_data[hostname])
            update_cache = res.update
            if not res.data.get('adapters'):
                self.no_adapters.append(hostname)
            elif hostname in self.no_adapters:
                self.no_adapters.remove(hostname)
            mdns_data[hostname] = res.data
        except Exception as e:
            log.exception(f'Exception occurred verifying reachability via API for {hostname}:\n{e}')

    if self.show:
        if hostname in self.discovered:
            self.discovered.remove(hostname)
        self.discovered.append('{}{}'.format(hostname, '*' if update_cache else ''))
        print(hostname + '({}) Discovered via mdns.'.format(rem_ip if rem_ip is not None else '?'))
        try:
            print(
                '{}\n{}\n{}'.format(
                    'mdns: None' if from_mdns_adapters is None else 'mdns: {}'.format(
                        [d.replace('/dev/', '') for d in from_mdns_adapters]
                        if not isinstance(from_mdns_adapters, list)
                        else [d['dev'].replace('/dev/', '') for d in from_mdns_adapters]
                    ),
                    'api (mdns trigger): None' if not mdns_data[hostname]['adapters']
                    else 'api (mdns trigger): {}'.format(
                        [d.replace('/dev/', '') for d in mdns_data[hostname]['adapters']]
                        if not isinstance(mdns_data[hostname]['adapters'], list)
                        else [d['dev'].replace('/dev/', '') for d in mdns_data[hostname]['adapters']]
                    ),
                    'cache: None' if cur_known_adapters is None else 'cache: {}'.format(
                        [d.replace('/dev/', '') for d in cur_known_adapters]
                        if not isinstance(cur_known_adapters, list)
                        else [d['dev'].replace('/dev/', '') for d in cur_known_adapters]
                    )
                )
            )
        except TypeError as e:
            print(f'EXCEPTION: {e}')
        print(f'\nDiscovered ConsolePis: {self.discovered}')
        print("press Ctrl-C to exit...\n")

    log.debugv(
        f"[MDNS DSCVRY] {hostname} Final data set:\n{json.dumps(mdns_data, indent=4, sort_keys=True)}"
    )

    # TODO could probably just put the call to cache update in the api_reachable method
    if update_cache:
        if 'hostname' in mdns_data[hostname]:
            del mdns_data[hostname]['hostname']
        cpi.remotes.data = cpi.remotes.update_local_cloud_file(remote_consoles=mdns_data)
        log.info(f'[MDNS DSCVRY] {hostname} Local Cache Updated after mdns discovery')
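# A minimal sketch of how a handler like this is wired up with python-zeroconf.
# Zeroconf/ServiceBrowser and the handlers kwarg are the real zeroconf API;
# MDNS_Browser is a hypothetical stand-in for the class these methods belong to:
#
#   from zeroconf import Zeroconf, ServiceBrowser
#
#   zc = Zeroconf()
#   browser = MDNS_Browser()
#   ServiceBrowser(zc, "_consolepi._tcp.local.", handlers=[browser.on_service_state_change])
#   # zeroconf invokes the handler with (zeroconf, service_type, name, state_change)
#   # for Added/Removed/Updated events until zc.close() is called.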