def trigger_cloud_update(self):
    """Sync local adapter/interface data to the cloud after a settle delay.

    Sleeps UPDATE_DELAY seconds first so several rapid adapter add/removes
    collapse into a single cloud sync, then pushes refreshed local data to
    Gdrive and merges any remotes learned from the cloud into the local cache.
    """
    local = self.cpi.local
    remotes = self.cpi.remotes
    log.info(
        '[MDNS REG] Cloud Update triggered delaying {} seconds'.format(
            UPDATE_DELAY))
    # settle delay so a burst of add/removes results in one update
    time.sleep(UPDATE_DELAY)

    data = local.build_local_dict(refresh=True)
    # udev details are only meaningful locally - strip before sending
    adapters = local.data[local.hostname].get('adapters', {})
    for dev in adapters:
        adapters[dev].pop('udev', None)
    log.debug(
        f'[MDNS REG] Final Data set collected for {local.hostname}: \n{json.dumps(data)}'
    )

    remote_consoles = {}
    if config.cloud_svc == 'gdrive':  # pylint: disable=maybe-no-member
        remote_consoles = GoogleDrive(local.hostname).update_files(data)

    # Send remotes learned from cloud file to local cache
    if remote_consoles and 'Gdrive-Error' not in remote_consoles:
        remotes.update_local_cloud_file(remote_consoles)
        log.info(
            '[MDNS REG] Cloud Update Completed, Found {} Remote ConsolePis'
            .format(len(remote_consoles)))
    else:
        log.warning(
            '[MDNS REG] Cloud Update Completed, No remotes found, or Error Occured'
        )
def verify_remote_thread(remotepi, data, rename):
    """Verify reachability and API data for a single remote (thread worker).

    NOTE: closure — defined inside an enclosing method; `self` and `log`
    come from the enclosing scope, not from the parameters.

    params:
        remotepi: The hostname currently being processed
        data: dict remote ConsolePi dict with hostname as key
        rename: passed through to api_reachable (refresh adapter data)
    """
    this = data[remotepi]
    # api_reachable returns a result object with .data/.update/.reachable
    res = self.api_reachable(remotepi, this, rename=rename)
    this = res.data
    if res.update:
        self.cache_update_pending = True
    if not res.reachable:
        log.warning(
            f"[GET REM] Found {remotepi} in Local Cloud Cache: UNREACHABLE"
        )
        # increment failure count (starts at 1 when absent/zero)
        this["fail_cnt"] = (1 if not this.get("fail_cnt") else this["fail_cnt"] + 1)
        # flag this remote for removal from the menu
        self.pop_list.append(remotepi)
        self.cache_update_pending = True
    else:
        self.connected = True
        # remote is back - reset failure count if it had one
        if this.get("fail_cnt"):
            this["fail_cnt"] = 0
            self.cache_update_pending = True
        if res.update:
            log.info(
                f"[GET REM] Updating Cache - Found {remotepi} in Local Cloud Cache, "
                f"reachable via {this['rem_ip']}")
    # write (possibly updated) entry back into the shared dict
    data[remotepi] = this
def __init__(self, local, cpiexec):
    """Init Remotes state and determine cloud availability.

    params:
        local: local ConsolePi data object (this ConsolePi)
        cpiexec: exec/thread orchestration object
    """
    self.cpiexec = cpiexec
    self.pop_list = []  # remotes found unreachable, flagged for removal
    self.old_api_log_sent = False
    self.log_sym_warn = log_sym.WARNING.value
    self.log_sym_error = log_sym.ERROR.value
    self.local = local
    self.connected = False
    self.cache_update_pending = False
    self.spin = Halo(spinner="dots")
    self.cloud = None  # Set in refresh method if reachable
    self.do_cloud = config.cfg.get("cloud", False)
    # FIX: default local_only so the attribute always exists.  Previously it
    # was only assigned inside the gdrive branch below, so refresh() raised
    # AttributeError when cloud was enabled with any other cloud_svc value.
    self.local_only = False
    CLOUD_CREDS_FILE = config.static.get("CLOUD_CREDS_FILE")
    if not CLOUD_CREDS_FILE:
        self.no_creds_error()
    if self.do_cloud and config.cloud_svc == "gdrive":
        if utils.is_reachable("www.googleapis.com", 443):
            self.local_only = False
            if not utils.valid_file(CLOUD_CREDS_FILE):
                self.no_creds_error()
        else:
            log.warning(
                f"failed to connect to {config.cloud_svc} - operating in local only mode",
                show=True,
            )
            self.local_only = True
    # re-get cloud.json to capture any updates via mdns
    self.data = self.get_remote(data=config.remote_update())
def load_dli(self, address, username, password):
    '''Return a (cached or newly created) DLI instance for address.

    Response: tuple
        (<dli object>, True)  DLI was previously instantiated ~ needs update
        (<dli object>, False) DLI was just instantiated ~ data is fresh
        (None, None)          unreachable or instantiation failed
    '''
    cached = self._dli.get(address)
    if cached:
        # --// DLI Already Loaded \\--
        return cached, True

    try:
        dli = DLI(address, username, password,
                  timeout=config.dli_timeout, log=log)
    except Exception as e:
        log.warning(
            f"[PWR-DLI] DLI @ {address} is now unreachable {e.__class__.__name__}",
            show=True)
        return None, None

    # cache even if unreachable so a later call returns (obj, True)
    self._dli[address] = dli
    # --// Return Pass or fail based on reachability \\--
    if dli.reachable:
        return dli, False
    return None, None
def try_build_info(self):
    """Build the mdns payload, progressively dropping data that is too large.

    mdns TXT payloads have a size limit; build_info raises struct.error on
    overflow, so retry first without adapter data, then without interface
    data.

    returns:
        The info object from self.build_info, or None if every attempt failed.
    """
    local = self.cpi.local
    # FIX: info was previously unbound when all three build attempts raised,
    # causing UnboundLocalError at the return below (newer variant of this
    # method pre-initializes info for the same reason).
    info = None
    # Try sending with all data
    try:
        info = self.build_info()
    except struct.error as e:
        log.debug(
            '[MDNS REG] data is too big for mdns, removing adapter data \n {} {}'
            .format(e.__class__.__name__, e))
        log.debug('[MDNS REG] offending payload \n {}'.format(
            json.dumps(local.data, indent=4, sort_keys=True)))
        # Too Big - Try sending without adapter data
        try:
            info = self.build_info(squash='adapters')
        except struct.error as e:
            log.warning(
                '[MDNS REG] data is still too big for mdns, reducing interface payload \n'
                ' {} {}'.format(e.__class__.__name__, e))
            log.debug(
                '[MDNS REG] offending interface data \n {}'.format(
                    json.dumps(local.interfaces, indent=4, sort_keys=True)))
            # final attempt: squash interface detail as well
            try:
                info = self.build_info(squash='interfaces')
            except struct.error:
                log.critical('[MDNS REG] data is still too big for mdns')
                log.debug(
                    '[MDNS REG] offending interface data \n {}'.format(
                        json.dumps(local.interfaces, indent=4, sort_keys=True)))
    return info
def get_json_file(self, json_file):
    '''Return dict parsed from json file.

    Returns None if the file is missing, empty, or fails to parse
    (parse failure is logged).
    '''
    if not os.path.isfile(json_file) or os.stat(json_file).st_size == 0:
        return
    with open(json_file) as handle:
        try:
            return json.load(handle)
        except ValueError as err:  # json.JSONDecodeError subclasses ValueError
            log.warning(f'Unable to load configuration from {json_file}\n\t{err}', show=True)
def get_yaml_file(self, yaml_file):
    '''Return dict from yaml file.

    Returns None if the file is missing, empty, or fails to parse
    (parse failure is logged).
    '''
    if os.path.isfile(yaml_file) and os.stat(yaml_file).st_size > 0:
        with open(yaml_file) as f:
            try:
                return yaml.load(f, Loader=yaml.BaseLoader)
            # FIX: PyYAML parse errors (ScannerError/ParserError) derive from
            # yaml.YAMLError, NOT ValueError - previously a malformed yaml
            # file crashed instead of being logged like get_json_file does.
            except (ValueError, yaml.YAMLError) as e:
                log.warning(f'Unable to load configuration from {yaml_file}\n\t{e}', show=True)
def no_creds_error(self):
    """Warn that cloud credential files are missing and disable cloud sync."""
    svc = config.cfg.get("cloud_svc", "UNDEFINED!")
    log.warning(
        f"Required {svc} credentials files are missing refer to GitHub for details"
    )
    log.warning(f"Disabling {svc} updates")
    log.show("Cloud Function Disabled by script - No Credentials Found")
    # flip both the runtime flag and the shared config entry
    config.cfg["do_cloud"] = False
    self.do_cloud = False
def next_ztp(filename, mac):
    """Advance dnsmasq ZTP config after a file was served: disable the sent
    entry, enable the next numbered .cfg, and write retry lines for the mac.

    NOTE(review): relies on module globals `ip`, `ztp_ok`, `ztp_opts_conf`,
    `ztp_hosts_conf`, `ztp_lease_time`, and the `in_place` module - confirm
    they are set by the caller/module before this runs.

    params:
        filename: path of the cfg file that was just sent (e.g. host_1.cfg)
        mac: parsed mac object (utils.Mac) of the requesting device
    """
    _from = os.path.basename(filename)
    # next file in the sequence: bump the trailing _<n> before .cfg
    _to = f"{_from.split('_')[0]}_{int(_from.rstrip('.cfg').split('_')[-1]) + 1}.cfg"
    host_lines = []
    if not os.path.isfile(ztp_opts_conf):
        log.warning(f"{ztp_opts_conf} not found. Noting to do.")
    else:
        if not os.path.isfile(f"{os.path.dirname(filename)}/{_to}"):
            log.info(f"No More Files for {_from.split('_')[0]}")
        # rewrite ztp_opts_conf in place, line by line
        with in_place.InPlace(ztp_opts_conf) as fp:
            line_num = 1
            for line in fp:
                if _from in line:
                    # TODO if not mac.ok don't write retry lines... print/log/warning
                    if mac.ok:
                        # record outcome + retry lines keyed to the device mac
                        fp.write(
                            f"# {mac.cols}|{ip} Sent {_from}"
                            f"{' Success' if ztp_ok else 'WARN file size != xfer total check switch and logs'}\n"
                        )
                        fp.write(
                            f"# -- Retry Lines for {_from.rstrip('.cfg')} Based On mac {mac.cols} --\n"
                        )
                        fp.write(
                            f'tag:{mac.tag},option:bootfile-name,"{_from}"\n')
                        host_lines.append(
                            f"{mac.cols},{mac.tag},,{ztp_lease_time},set:{mac.tag},set:sent\n"
                        )
                    else:
                        print(
                            f'Unable to write Retry Lines for previously updated device. Mac {mac.orig} appears invalid'
                        )
                    # comment out the line for the file we just sent
                    fp.write(f"# SENT # {line}")
                    log.info(
                        f"Disabled {_from} on line {line_num} of {os.path.basename(ztp_opts_conf)}"
                    )
                    log.info(
                        f"Retry Entries Created for {_from.rstrip('.cfg')} | {mac.cols} | {ip}"
                    )
                elif _to in line:
                    # enable (uncomment) the next file's option line
                    if not line.startswith('#'):
                        log.warning(
                            f'Expected {_to} option line to be commented out @ this point. It was not.'
                        )
                    fp.write(line.lstrip('#').lstrip())
                    log.info(
                        f"Enabled {_to} on line {line_num} of {os.path.basename(ztp_opts_conf)}"
                    )
                else:
                    fp.write(line)  # unrelated line - pass through unchanged
                line_num += 1
    if host_lines:
        # append the retry host entries to the dnsmasq hosts file
        with open(ztp_hosts_conf, 'a') as fp:
            fp.writelines(host_lines)
        log.info(
            f"Retry Entries Written to file for {_from.rstrip('.cfg')} | {mac.cols} | {ip}"
        )
def get_adapters_via_api(self, ip: str, port: int = 5000, rename: bool = False, log_host: str = None):
    """Send RestFul GET request to Remote ConsolePi to collect adapter info

    params:
        ip(str): ip address or FQDN of remote ConsolePi
        port(int): API port on the remote (default 5000)
        rename(bool): TODO
        log_host(str): friendly string for logging purposes "hostname(ip)"

    returns:
        adapter dict for remote if successful and adapters exist
        status_code 200 if successful but no adapters
        Falsey or response status_code if an error occurred.
    """
    log_host = log_host or ip
    url = f"http://{ip}:{port}/api/v1.0/adapters"
    if rename:
        url = f"{url}?refresh=true"
    log.debug(url)
    headers = {
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": f"{ip}:{port}",
        "accept-encoding": "gzip, deflate",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    try:
        response = requests.request(
            "GET", url, headers=headers, timeout=config.remote_timeout)
    except (OSError, TimeoutError):
        log.warning(
            f"[API RQST OUT] Remote ConsolePi: {log_host} TimeOut when querying via API - Unreachable."
        )
        return False

    if not response.ok:
        ret = response.status_code
        log.error(
            f"[API RQST OUT] Failed to retrieve adapters via API for Remote ConsolePi: {log_host}\n{ret}:{response.text}"
        )
        return ret

    payload = response.json()
    # fall back to the status code when the remote reports no adapters
    ret = payload["adapters"] if payload["adapters"] else response.status_code
    _msg = f"Adapters Successfully retrieved via API for Remote ConsolePi: {log_host}"
    log.info("[API RQST OUT] {}".format(_msg))
    log.debugv("[API RQST OUT] Response: \n{}".format(
        json.dumps(ret, indent=4, sort_keys=True)))
    return ret
def get_cpu_serial(self):
    '''Return the pi cpu serial (unique identifier) parsed from /proc/cpuinfo.

    Returns '0' when the command succeeds but produces no output, and None
    (after logging a warning) when the shell command fails.
    '''
    result = utils.do_shell_cmd(
        "/bin/cat /proc/cpuinfo | grep Serial | awk '{print $3}'",
        return_stdout=True)
    # result is (return_code, stdout)
    if result[0] > 0:
        log.warning(
            'Unable to get unique identifier for this pi (cpuserial)',
            show=True)
        return None
    return result[1] or '0'
def get_adapters_via_api(self, ip: str, rename: bool = False):
    """Send RestFul GET request to Remote ConsolePi to collect adapter info

    params:
        ip(str): ip address or FQDN of remote ConsolePi
        rename(bool): ask the remote to refresh its adapter data first

    returns:
        adapter dict for remote if successful
        Falsey or response status_code if an error occured.
    """
    # log = self.config.log
    url = f"http://{ip}:5000/api/v1.0/adapters" + ("?refresh=true" if rename else "")
    log.info(url)  # DEBUG
    headers = {
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": f"{ip}:5000",
        "accept-encoding": "gzip, deflate",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    try:
        response = requests.request(
            "GET", url, headers=headers, timeout=config.remote_timeout)
    except (OSError, TimeoutError):
        log.warning(
            "[API RQST OUT] Remote ConsolePi @ {} TimeOut when querying via API - Unreachable."
            .format(ip))
        return False

    if not response.ok:
        ret = response.status_code
        log.error(
            "[API RQST OUT] Failed to retrieve adapters via API for Remote ConsolePi @ {}\n{}:{}"
            .format(ip, ret, response.text))
        return ret

    body = response.json()
    # fall back to the status code when the remote reports no adapters
    ret = body["adapters"] if body["adapters"] else response.status_code
    _msg = "Adapters Successfully retrieved via API for Remote ConsolePi @ {}".format(
        ip)
    log.info("[API RQST OUT] {}".format(_msg))
    log.debugv("[API RQST OUT] Response: \n{}".format(
        json.dumps(ret, indent=4, sort_keys=True)))
    return ret
def do_api_request(self, ip: str, path: str, *args, **kwargs):
    """Send RestFul GET request to Remote ConsolePi to collect data

    params:
        ip(str): ip address or FQDN of remote ConsolePi
        path(str): path beyond /api/v1.0/

    returns:
        response object on completion, False if the remote was unreachable
    """
    url = f"http://{ip}:5000/api/v1.0/{path}"
    log.debug(f'[do_api_request] URL: {url}')
    headers = {
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": f"{ip}:5000",
        "accept-encoding": "gzip, deflate",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    try:
        resp = requests.request(
            "GET", url, headers=headers, timeout=config.remote_timeout)
    except (OSError, TimeoutError):
        log.warning(
            f"[API RQST OUT] Remote ConsolePi @ {ip} TimeOut when querying via API - Unreachable."
        )
        return False

    if not resp.ok:
        log.error(f"[API RQST OUT] API Request Failed {url}")
        return resp

    log.info(f"[API RQST OUT] {url} Response: OK")
    log.debugv(
        f"[API RQST OUT] Response: \n{json.dumps(resp.json(), indent=4, sort_keys=True)}"
    )
    return resp
def main():
    """Push local data to Gdrive after an IP change and merge returned remotes.

    Exits non-zero when Google is unreachable or credentials are invalid.
    """
    cpi = ConsolePi()
    cloud_svc = config.cfg.get("cloud_svc", "error")
    local = cpi.local
    remotes = cpi.remotes
    cpiexec = cpi.cpiexec
    log.info('[CLOUD TRIGGER (IP)]: Cloud Update triggered by IP Update')
    CLOUD_CREDS_FILE = config.static.get(
        "CLOUD_CREDS_FILE",
        '/etc/ConsolePi/cloud/gdrive/.credentials/credentials.json')
    # bail early if the cloud endpoint or credentials are unusable
    if not utils.is_reachable("www.googleapis.com", 443):
        log.error(f"Not Updating {cloud_svc} due to connection failure")
        sys.exit(1)
    if not utils.valid_file(CLOUD_CREDS_FILE):
        log.error('Credentials file not found or invalid')
        sys.exit(1)

    # -- // Get details from Google Drive - once populated will skip \\ --
    if cloud_svc == "gdrive" and remotes.cloud is None:
        remotes.cloud = GoogleDrive(hostname=local.hostname)

    # NOTE(review): condition mirrors Remotes.refresh(); `and (config.power
    # and ...)` means the timeout only logs when BOTH waits time out while
    # power is enabled - confirm this is the intended short-circuit.
    if cpiexec.wait_for_threads(thread_type="remotes") and (
            config.power and cpiexec.wait_for_threads(name="_toggle_refresh")):
        log.error(
            'IP Change Cloud Update Trigger: TimeOut Waiting for Threads to Complete'
        )

    remote_consoles = remotes.cloud.update_files(local.data)
    if remote_consoles and "Gdrive-Error:" in remote_consoles:
        # update_files returns an error string rather than a dict on failure
        log.error(remote_consoles)
    else:
        for r in remote_consoles:
            # -- Convert Any Remotes with old API schema to new API schema --
            if isinstance(remote_consoles[r].get("adapters", {}), list):
                remote_consoles[r]["adapters"] = remotes.convert_adapters(
                    remote_consoles[r]["adapters"])
                log.warning(
                    f"Adapter data for {r} retrieved from cloud in old API format... Converted"
                )
    if len(remote_consoles) > 0:
        remotes.update_local_cloud_file(remote_consoles)
def try_build_info(self):
    """Build mdns ServiceInfo, dropping payload sections until it fits.

    Tries the full payload first; on overflow (struct.error/ValueError)
    retries without adapter data, then without interface data.  A minimal
    ServiceInfo is pre-built as the fallback return value.
    """
    local = self.cpi.local
    # minimal registration used if every build attempt below overflows
    info = ServiceInfo(
        "_consolepi._tcp.local.",
        local.hostname + "._consolepi._tcp.local.",
        server=f'{local.hostname}.local.')
    try:
        # attempt 1: full payload
        info = self.build_info()
    except (struct.error, ValueError) as err:
        log.debug(
            '[MDNS REG] data is too big for mdns, removing adapter data \n {} {}'
            .format(err.__class__.__name__, err))
        log.debug('[MDNS REG] offending payload \n {}'.format(
            json.dumps(local.data, indent=4, sort_keys=True)))
        try:
            # attempt 2: drop adapter data
            info = self.build_info(squash='adapters')
        except (struct.error, ValueError) as err:
            log.warning(
                '[MDNS REG] data is still too big for mdns, reducing interface payload \n'
                ' {} {}'.format(err.__class__.__name__, err))
            log.debug(
                '[MDNS REG] offending interface data \n {}'.format(
                    json.dumps(local.interfaces, indent=4, sort_keys=True)))
            try:
                # attempt 3: drop interface detail as well
                info = self.build_info(squash='interfaces')
            except (struct.error, ValueError):
                log.critical('[MDNS REG] data is still too big for mdns')
                log.debug(
                    '[MDNS REG] offending interface data \n {}'.format(
                        json.dumps(local.interfaces, indent=4, sort_keys=True)))
    return info
def get_ser2net(self):
    '''Parse ser2net.conf to extract connection info for serial adapters

    retruns 2 level dict (empty dict if ser2net.conf not found or empty):
        {
            <adapter name or alias>: {
                "baud": <baud>,
                "dbits": <data bits>,
                "flow": "<flow control>",
                "parity": "<parity>",
                "sbits": <stop bits>,
                "port": <telnet port (ser2net)>,
                "logfile": None or logfile if defined in ser2net.conf,
                "log_ptr": raw tb=/tr=/tw= option if present,
                "cmd": picocom command string used in menu,
                "line": The line from ser2net.conf
            }
        }
    '''
    ########################################################
    # --- ser2net (3.x) config lines look like this ---
    # ... 9600 NONE 1STOPBIT 8DATABITS XONXOFF LOCAL -RTSCTS
    # ... 9600 8DATABITS NONE 1STOPBIT banner
    ########################################################
    # utils = self.utils
    if not utils.valid_file(self.static.get('SER2NET_FILE')):
        log.warning(
            'No ser2net.conf file found unable to extract port definition',
            show=True)
        return {}

    ser2net_conf = {}
    trace_files = {}
    with open(self.static['SER2NET_FILE']) as cfg:
        for line in cfg:
            # remember TRACEFILE aliases so device logfiles can be resolved
            if 'TRACEFILE:' in line:
                line = line.split(':')
                trace_files[line[1]] = line[2]
                continue
            elif not line[0].isdigit():
                continue  # comments / non-port lines
            _line = line.strip('\n')
            line = line.split(':')
            tty_port = int(line[0])
            tty_dev = line[3]

            # Reset defaults - baud is used to determine parsing failure
            dbits = 8
            parity = 'n'
            flow = 'n'
            sbits = 1
            logfile = None
            log_ptr = None

            connect_params = line[4].replace(',', ' ').split()
            baud = None
            for option in connect_params:
                if option in self.static.get('VALID_BAUD', [
                        '300', '1200', '2400', '4800', '9600', '19200',
                        '38400', '57600', '115200'
                ]):
                    baud = int(option)
                elif 'DATABITS' in option:
                    dbits = int(option.replace('DATABITS', ''))  # int 5 - 8
                    if dbits < 5 or dbits > 8:
                        log.warning(
                            f'{tty_dev}: Invalid value for "data bits" found in ser2net.conf falling back to 8',
                            show=True)
                        dbits = 8
                elif option in ['EVEN', 'ODD', 'NONE']:
                    parity = option[0].lower(
                    )  # converts to e o n used by picocom
                elif option == 'XONXOFF':
                    flow = 'x'
                elif option == 'RTSCTS':
                    flow = 'h'
                elif 'STOPBIT' in option:  # Not used by picocom
                    # FIX: was `option[0].isdigit` (bound method, always
                    # truthy) so the fallback to 1 was dead code and a
                    # non-digit first char raised ValueError in int().
                    sbits = int(option[0]) if option[0].isdigit() else 1
                elif 'tb=' in option or 'tr=' in option or 'tw=' in option:
                    log_ptr = option
                    logfile = option.split('=')[1]

            # Use baud to determine if options were parsed correctly
            if baud is None:
                log.warning(
                    f'{tty_dev} found in ser2net but unable to parse baud falling back to {self.default_baud}',
                    show=True)
                baud = self.default_baud

            # parse TRACEFILE defined in ser2net.conf
            cmd_base = f'picocom {tty_dev} --baud {baud} --flow {flow} --databits {dbits} --parity {parity}'
            if self.picocom_ver > 1:  # picocom ver 1.x in Stretch doesn't support "--stopbits"
                cmd_base = cmd_base + f' --stopbits {sbits}'
            if logfile:
                logfile = trace_files[logfile]
                logfile = logfile.replace('\\p', str(tty_port)).replace(
                    '\\d', tty_dev.split('/')[-1])
                logfile = logfile.replace(
                    '\\s', f'{baud}_{dbits}{parity.upper()}{sbits}')
                logfile = logfile.split(
                    '\\'
                )[0] + '-{{timestamp}}.log'  # + time.strftime('%H.%M.log')
                cmd = cmd_base + f' --logfile {logfile}'
                utils.do_shell_cmd(
                    f"mkdir -p {'/'.join(logfile.split('/')[0:-1])}")
                utils.set_perm('/'.join(logfile.split('/')[0:-1]))
            else:
                cmd = cmd_base

            # update dict with values for this device
            ser2net_conf[tty_dev] = {
                'port': tty_port,
                'baud': baud,
                'dbits': dbits,
                'parity': parity,
                'flow': flow,
                'sbits': sbits,
                'logfile': logfile,
                'log_ptr': log_ptr,
                'cmd': cmd,
                'line': _line
            }
    return ser2net_conf
def update_local_cloud_file(self,
                            remote_consoles=None,
                            current_remotes=None,
                            local_cloud_file=None):
    """Update local cloud cache (cloud.json).

    Verifies the newly discovered data is more current than what we already
    know and updates the local cloud.json file if so.
    The Menu uses cloud.json to populate remote menu items.

    NOTE(review): remote_consoles defaults to None but len() is taken below -
    calling with no arguments would raise TypeError; confirm all callers pass
    a dict (or error string).

    params:
        remote_consoles: The newly discovered data (from Gdrive or mdns)
        current_remotes: The current remote data fetched from the local cloud
            cache (cloud.json) - func will retrieve this if not provided
        local_cloud_file: The path to the local cloud file (global var cloud.json)

    returns:
        dict: The resulting remote console dict representing the most recent
        data for each remote.
    """
    local_cloud_file = (config.static.get("LOCAL_CLOUD_FILE")
                        if local_cloud_file is None else local_cloud_file)

    if len(remote_consoles) > 0:
        if current_remotes is None:
            # grabs the remote data from local cloud cache
            current_remotes = self.data = config.remote_update(
            )

    # update current_remotes dict with data passed to function
    if len(remote_consoles) > 0:
        if current_remotes is not None:
            for _ in current_remotes:
                if _ not in remote_consoles:
                    # cached remote not in this update - keep it unless it has
                    # failed repeatedly (fail_cnt >= 2)
                    if ("fail_cnt" not in current_remotes[_]
                            or current_remotes[_]["fail_cnt"] < 2):
                        remote_consoles[_] = current_remotes[_]
                    elif (remote_consoles.get(_)
                          and "fail_cnt" not in remote_consoles[_]
                          and "fail_cnt" in current_remotes[_]):
                        # carry the existing failure count into the update
                        remote_consoles[_]["fail_cnt"] = current_remotes[
                            _]["fail_cnt"]
                else:
                    # -- VERBOSE DEBUG --
                    log.debugv(
                        "[CACHE UPD] \n--{}-- \n remote upd_time: {}\n remote rem_ip: {}\n remote source: {}\n cache rem upd_time: {}\n cache rem_ip: {}\n cache source: {}\n"
                        .format(  # NoQA
                            _,
                            time.strftime(
                                "%a %x %I:%M:%S %p %Z",
                                time.localtime(
                                    remote_consoles[_]["upd_time"]),
                            ) if "upd_time" in remote_consoles[_] else None,  # NoQA
                            remote_consoles[_]["rem_ip"]
                            if "rem_ip" in remote_consoles[_] else None,
                            remote_consoles[_]["source"]
                            if "source" in remote_consoles[_] else None,
                            time.strftime(
                                "%a %x %I:%M:%S %p %Z",
                                time.localtime(
                                    current_remotes[_]["upd_time"]),
                            ) if "upd_time" in current_remotes[_] else None,  # NoQA
                            current_remotes[_]["rem_ip"]
                            if "rem_ip" in current_remotes[_] else None,
                            current_remotes[_]["source"]
                            if "source" in current_remotes[_] else None,
                        ))
                    # -- END VERBOSE DEBUG --

                    # last_ip is transient - drop before equality comparison
                    if "last_ip" in current_remotes[_]:
                        del current_remotes[_]["last_ip"]

                    # No Change Detected (data passed to function matches cache)
                    if remote_consoles[_] == current_remotes[_]:
                        log.debug(
                            "[CACHE UPD] {} No Change in info detected".
                            format(_))

                    # only factor in existing data if source is not mdns
                    elif ("upd_time" in remote_consoles[_]
                          or "upd_time" in current_remotes[_]):
                        if ("upd_time" in remote_consoles[_]
                                and "upd_time" in current_remotes[_]):
                            # both sides timestamped - newest wins
                            if (current_remotes[_]["upd_time"] >
                                    remote_consoles[_]["upd_time"]):
                                remote_consoles[_] = current_remotes[_]
                                log.info(
                                    f"[CACHE UPD] {_} Keeping existing data from {current_remotes[_].get('source', '')} "
                                    "based on more current update time")
                            elif (remote_consoles[_]["upd_time"] >
                                  current_remotes[_]["upd_time"]):
                                log.info(
                                    "[CACHE UPD] {} Updating data from {} "
                                    "based on more current update time".
                                    format(_, remote_consoles[_]["source"]))
                            else:  # -- Update Times are equal --
                                # equal timestamps but differing adapter keys
                                # or interfaces -> accept the update (warn)
                                if (current_remotes[_].get("adapters")
                                        and remote_consoles[_].get("adapters")
                                        and current_remotes[_]["adapters"].
                                        keys() != remote_consoles[_]
                                        ["adapters"].keys()
                                    ) or remote_consoles[_].get(
                                        "interfaces",
                                        {}) != current_remotes[_].get(
                                            "interfaces", {}):
                                    log.warning(
                                        "[CACHE UPD] {} current cache update time and {} update time are equal"
                                        " but data appears to have changed. Updating"
                                        .format(
                                            _,
                                            remote_consoles[_]["source"]))
                        elif "upd_time" in current_remotes[_]:
                            # only the cache is timestamped - keep the cache
                            remote_consoles[_] = current_remotes[_]
                            log.info(
                                "[CACHE UPD] {} Keeping existing data based *existence* of update time "
                                "which is lacking in this update from {}".
                                format(_, remote_consoles[_]["source"]))

        # write merged result to cloud.json; retry once on PermissionError
        for _try in range(0, 2):
            try:
                with open(local_cloud_file, "w") as cloud_file:
                    cloud_file.write(
                        json.dumps(remote_consoles, indent=4, sort_keys=True))
                utils.set_perm(
                    local_cloud_file
                )  # a hack to deal with perms ~ consolepi-details del func
                break
            except PermissionError:
                utils.set_perm(local_cloud_file)
    else:
        log.warning(
            "[CACHE UPD] cache update called with no data passed, doing nothing"
        )

    return remote_consoles
def refresh(self, bypass_cloud=False):
    """Refresh local adapter data, sync with the cloud, and rebuild remote data.

    params:
        bypass_cloud(bool): skip the Gdrive sync and only refresh local data /
            local cloud cache.
    """
    remote_consoles = None
    cpiexec = self.cpiexec
    local = self.local
    cloud_svc = config.cfg.get("cloud_svc", "error")

    # TODO refactor wait_for_threads to have an all key or accept a list
    with Halo(text="Waiting For threads to complete", spinner="dots1"):
        if cpiexec.wait_for_threads(thread_type="remotes") and (
                config.power
                and cpiexec.wait_for_threads(name="_toggle_refresh")):
            log.show(
                "Timeout Waiting for init or toggle threads to complete try again later or"
                " investigate logs")
            return

    # -- // Update/Refresh Local Data (Adapters/Interfaces) \\ --
    local.data = local.build_local_dict(refresh=True)
    log.debugv(
        f"Final Data set collected for {local.hostname}: {local.data}")

    # -- // Get details from Google Drive - once populated will skip \\ --
    if not bypass_cloud and self.do_cloud and not self.local_only:
        if cloud_svc == "gdrive" and self.cloud is None:
            # buried import because this import is very slow - deferred until
            # the 1st refresh is called
            with Halo(text="Loading Google Drive Library", spinner="dots1"):
                from consolepi.gdrive import GoogleDrive
            self.cloud = GoogleDrive(hostname=local.hostname)
            log.info("[MENU REFRESH] Gdrive init")

        # Pass Local Data to update_sheet method get remotes found on sheet as return
        # update sheets function updates local_cloud_file
        _msg = "[MENU REFRESH] Updating to/from {}".format(cloud_svc)
        log.info(_msg)
        if stdin.isatty():
            self.spin.start(_msg)
        # -- // SYNC DATA WITH GDRIVE \\ --
        remote_consoles = self.cloud.update_files(
            local.data)  # local data refreshed above
        if remote_consoles and "Gdrive-Error:" not in remote_consoles:
            if stdin.isatty():
                self.spin.succeed(_msg +
                                  "\n\tFound {} Remotes via Gdrive Sync".
                                  format(len(remote_consoles)))
            for r in remote_consoles:
                # -- Convert Any Remotes with old API schema to new API schema --
                if isinstance(remote_consoles[r].get("adapters", {}), list):
                    remote_consoles[r][
                        "adapters"] = self.convert_adapters(
                            remote_consoles[r]["adapters"])
                    log.warning(
                        f"Adapter data for {r} retrieved from cloud in old API format... Converted"
                    )
        elif "Gdrive-Error:" in remote_consoles:
            # update_files returned an error string rather than a dict
            if stdin.isatty():
                self.spin.fail("{}\n\t{} {}".format(
                    _msg, self.log_sym_error, remote_consoles))
            log.show(remote_consoles
                     )  # display error returned from gdrive module
            remote_consoles = []
        else:
            if stdin.isatty():
                self.spin.warn(_msg +
                               "\n\tNo Remotes Found via Gdrive Sync")

        if len(remote_consoles) > 0:
            _msg = f"[MENU REFRESH] Updating Local Cache with data from {cloud_svc}"
            log.info(_msg)
            if stdin.isatty():
                self.spin.start(_msg)
            self.update_local_cloud_file(remote_consoles)
            if stdin.isatty():
                self.spin.succeed(_msg)  # no real error correction here
        else:
            log.warning(
                f"[MENU REFRESH] No Remote ConsolePis found on {cloud_svc}",
                show=True,
            )
    else:
        if self.do_cloud and not bypass_cloud:
            log.show(
                f"Not Updating from {cloud_svc} due to connection failure\n"
                "Close and re-launch menu if network access has been restored"
            )

    # Update Remote data with data from local_cloud cache / cloud
    self.data = self.get_remote(data=remote_consoles)
if __name__ == '__main__': if len(sys.argv) > 1: mdns = MDNS_Browser(show=True) RESTART_INTERVAL = 30 # when running in interactive mode reduce restart interval print("\nBrowsing services, press Ctrl-C to exit...\n") else: mdns = MDNS_Browser() try: while True: try: browser = mdns.run() except AttributeError: # hopefully this handles "Zeroconf object has no attribute '_handlers_lock'" log.warning( '[MDNS BROWSE] caught _handlers_lock exception retrying in 5 sec' ) time.sleep(5) continue except Exception as e: # Catch any other errors, usually related to transient connectivity issues." log.warning( f'[MDNS BROWSE] caught {e.__class__.__name__} retrying in 5 sec.\nException:\n{e}' ) time.sleep(5) continue start = time.time() # re-init zeroconf browser every RESTART_INTERVAL seconds while time.time() < start + RESTART_INTERVAL: time.sleep(0.1) if mdns.zc is not None:
f'[DHCP LEASE] DHCP Client Connected ({add_del}): iface: {iface}, mac: {mac_bytes}, ip: {ip}, vendor: {vendor}' ) ztp = False else: ztp = True file_size = os.stat(cfg_file).st_size ztp_ok = True if int(mac_bytes) == file_size else False mac = utils.Mac(get_mac(ip)) log.info( f"[ZTP - TFTP XFR] {os.path.basename(cfg_file)} sent to {ip}|{mac.cols}{' Success' if ztp_ok else ''}" ) _res = utils.do_shell_cmd( f"wall 'consolepi-ztp: {os.path.basename(cfg_file)} sent to " f"{ip}|{mac.cols}{' Success' if ztp_ok else ' WARNING xfr != file size'}'" ) if not ztp_ok: log.warning( f"File Size {file_size} and Xfr Total ({mac_bytes}) don't match") next_ztp(cfg_file, mac) # -- Some old log only stuff, may use for post deployment actions -- if vendor and 'ConsolePi' in vendor: log.info(f'A ConsolePi has connected to {iface}') elif vendor and iface and 'eth' in iface: for _ in match: if _ in vendor: if utils.is_reachable(ip, 22): log.info('{} is reachable via ssh @ {}'.format(_, ip)) elif utils.is_reachable(ip, 23): log.info('{} is reachable via telnet @ {}'.format(_, ip))
def do_rename_adapter(self, from_name):
    '''Rename USB to Serial Adapter

    Creates new or edits existing udev rules and ser2net conf
    for USB to serial adapters detected by the system.

    params:
        from_name (str): Devices current name passed in from rename_menu()

    returns:
        None type if no error, or Error (str) if Error occurred
    '''
    # normalize: callers may pass either 'ttyUSB0' or '/dev/ttyUSB0'
    from_name = from_name.replace('/dev/', '')
    local = self.cpi.local
    # ANSI color escapes used to highlight the old/new names in prompts
    c = {
        'green': '\033[1;32m',  # Bold with normal ForeGround
        'red': '\033[1;31m',
        'norm': '\033[0m',  # Reset to Normal
    }
    c_from_name = '{}{}{}'.format(c['red'], from_name, c['norm'])
    error = False
    use_def = True

    # -- // Gather / validate the desired new name from the user \\ --
    try:
        to_name = None
        while not to_name:
            print(" Press 'enter' to keep the same name and change baud/parity/...")
            to_name = input(f' [rename {c_from_name}]: Provide desired name: ')
            print("")
            # empty input means "keep existing name" (connection-settings-only change)
            to_name = to_name or from_name
        to_name = to_name.replace('/dev/', '')  # strip /dev/ if they thought they needed to include it
        # it's ok to essentialy rename with same name (to chg baud etc.), but not OK to rename to a name that is already
        # in use by another adapter
        # TODO collect not connected adapters as well to avoid dups
        if from_name != to_name and f"/dev/{to_name}" in local.adapters:
            return f"There is already an adapter using alias {to_name}"
        for _name in self.reserved_names:
            if to_name.startswith(_name):
                return f"You can't start the alias with {_name}. Matches system root device prefix"
        # these characters break the udev rules / ser2net config file formats, swap them out
        if ' ' in to_name or ':' in to_name or '(' in to_name or ')' in to_name:
            print(
                '\033[1;33m!!\033[0m Spaces, Colons and parentheses are not allowed by the associated config files.\n'
                '\033[1;33m!!\033[0m Swapping with valid characters\n')
            to_name = to_name.replace(' ', '_').replace('(', '_').replace(')', '_')  # not allowed in udev
            to_name = to_name.replace(':', '-')  # replace any colons with - as it's the field delim in ser2net
    except (KeyboardInterrupt, EOFError):
        return 'Rename Aborted based on User Input'

    c_to_name = f'{c["green"]}{to_name}{c["norm"]}'
    # log.show later substitutes {{green}}/{{norm}} markers with colors
    log_c_to_name = "".join(["{{green}}", to_name, "{{norm}}"])

    go, con_only = True, False
    if from_name == to_name:
        # same name provided: only the connection settings (baud/parity/...) change
        log.show(f"Keeping {log_c_to_name}. Changing connection settings Only.")
        con_only = True
        use_def = False
    elif utils.user_input_bool(' Please Confirm Rename {} --> {}'.format(c_from_name, c_to_name)) is False:
        go = False

    if go:
        # locate the adapter entry for from_name; loop leaves `i` bound to the match
        for i in local.adapters:
            if i == f'/dev/{from_name}':
                break
        _dev = local.adapters[i].get('config')  # type: ignore # dict
        # -- these values are always safe, values set by config.py if not extracted from ser2net.conf
        baud = _dev['baud']
        dbits = _dev['dbits']
        flow = _dev['flow']
        sbits = _dev['sbits']
        parity = _dev['parity']
        word = 'keep existing'
        for _name in self.reserved_names:
            if from_name.startswith(_name):
                # adapter has never been aliased (still has a root device name)
                word = 'Use default'

        # -- // Ask user if they want to update connection settings \\ --
        if not con_only:
            use_def = utils.user_input_bool(' {} connection values [{} {}{}1 Flow: {}]'.format(
                word, baud, dbits, parity.upper(), self.flow_pretty[flow]))
            if not use_def:
                self.con_menu(rename=True, con_dict={
                    'baud': baud,
                    'data_bits': dbits,
                    'parity': parity,
                    'flow': flow,
                    'sbits': sbits
                })
                baud = self.baud
                parity = self.parity
                dbits = self.data_bits
                # NOTE(review): duplicate assignment — parity is set twice above/below;
                # harmless but likely unintended.
                parity = self.parity
                flow = self.flow
                sbits = self.sbits

        # restore defaults back to class attribute if we flipped them when we called con_menu
        # TODO believe this was an old hack, and can be removed
        if hasattr(self, 'con_dict') and self.con_dict:
            self.baud = self.con_dict['baud']
            self.data_bits = self.con_dict['data_bits']
            self.parity = self.con_dict['parity']
            self.flow = self.con_dict['flow']
            self.sbits = self.con_dict['sbits']
            self.con_dict = None

        if word == 'Use default':  # see above word is set if from_name matches a root_dev pfx
            # first-time rename: need full udev attributes to build persistent rules
            devs = local.detect_adapters()
            if f'/dev/{from_name}' in devs:
                _tty = devs[f'/dev/{from_name}']
                id_prod = _tty.get('id_model_id')
                id_model = _tty.get('id_model')  # NoQA pylint: disable=unused-variable
                id_vendorid = _tty.get('id_vendor_id')
                id_vendor = _tty.get('id_vendor')  # NoQA pylint: disable=unused-variable
                id_serial = _tty.get('id_serial_short')
                id_ifnum = _tty.get('id_ifnum')
                id_path = _tty.get('id_path')  # NoQA
                lame_devpath = _tty.get('lame_devpath')
                root_dev = _tty.get('root_dev')
            else:
                return 'ERROR: Adapter no longer found'

            # -- // ADAPTERS WITH ALL ATTRIBUTES AND GPIO UART (TTYAMA) \\ --
            if id_prod and id_serial and id_vendorid:
                if id_serial not in devs['_dup_ser']:
                    # unique serial#: match on vendor/product/serial directly
                    udev_line = (
                        'ATTRS{{idVendor}}=="{}", ATTRS{{idProduct}}=="{}", '
                        'ATTRS{{serial}}=="{}", SYMLINK+="{}"'.format(
                            id_vendorid, id_prod, id_serial, to_name))
                    # while/break construct: runs once, bails early if add_to_udev errors
                    error = None
                    while not error:
                        error = self.add_to_udev(udev_line, '# END BYSERIAL-DEVS')
                        error = self.do_ser2net_line(from_name=from_name, to_name=to_name,
                                                     baud=baud, dbits=dbits, parity=parity, flow=flow)
                        break
                # -- // MULTI-PORT ADAPTERS WITH COMMON SERIAL (different ifnums) \\ --
                else:
                    # SUBSYSTEM=="tty", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6011", ATTRS{serial}=="FT4XXXXP", GOTO="FTXXXXP" # NoQA
                    udev_line = (
                        'ATTRS{{idVendor}}=="{0}", ATTRS{{idProduct}}=="{1}", '
                        'ATTRS{{serial}}=="{2}", GOTO="{2}"'.format(
                            id_vendorid, id_prod, id_serial))
                    error = None
                    while not error:
                        error = self.add_to_udev(udev_line, '# END BYPORT-POINTERS')
                        # ENV{ID_USB_INTERFACE_NUM}=="00", SYMLINK+="FT4232H_port1", GOTO="END"
                        udev_line = (
                            'ENV{{ID_USB_INTERFACE_NUM}}=="{}", SYMLINK+="{}"'.format(id_ifnum, to_name))
                        error = self.add_to_udev(udev_line, '# END BYPORT-DEVS', label=id_serial)
                        error = self.do_ser2net_line(from_name=from_name, to_name=to_name,
                                                     baud=baud, dbits=dbits, parity=parity, flow=flow)
                        break
            else:
                if f'/dev/{from_name}' in devs:
                    devname = devs[f'/dev/{from_name}'].get('devname', '')
                    # -- // local ttyAMA adapters \\ --
                    if 'ttyAMA' in devname:
                        udev_line = ('KERNEL=="{}", SYMLINK+="{}"'.format(
                            devname.replace('/dev/', ''), to_name))
                        # Testing simplification not using separate file for ttyAMA
                        error = None
                        while not error:
                            error = self.add_to_udev(udev_line, '# END TTYAMA-DEVS')
                            error = self.do_ser2net_line(
                                from_name=from_name, to_name=to_name,
                                baud=baud, dbits=dbits, parity=parity, flow=flow)
                            break
                    else:
                        # -- // LAME ADAPTERS NO SERIAL NUM (map usb port) \\ --
                        log.warning(
                            '[ADD ADAPTER] Lame adapter missing key detail: idVendor={}, idProduct={}, serial#={}'.format(  # NoQA
                                id_vendorid, id_prod, id_serial))
                        print(
                            '\n\n This Device Does not present a serial # (LAME!). So the adapter itself can\'t be '
                            'uniquely identified.\n There are 2 options for naming this device:')
                        mlines = [
                            '1. Map it to the USB port it\'s plugged in to'
                            '\n\tAnytime a {} {} tty device is plugged into the port it\n\tis currently plugged into it will '
                            'adopt the {} alias'.format(
                                _tty['id_vendor_from_database'], _tty['id_model_from_database'], to_name),
                            '2. Map it by vedor ({0}) and model ({1}) alone.'
                            '\n\tThis will only work if this is the only {0} {1} adapter you plan to plug in'
                            .format(_tty['id_vendor_from_database'], _tty['id_model_from_database'])
                            # 'Temporary mapping' \
                            # '\n\tnaming will only persist during this menu session\n'
                        ]
                        print(self.menu.format_subhead(mlines))
                        print('\n b. back (abort rename)\n')
                        valid_ch = {'1': 'by_path', '2': 'by_id'}
                        valid = False
                        ch = ''
                        # ch is the result object from wait_for_input (has .lower / .orig attrs)
                        while not valid:
                            print(' Please Select an option')
                            ch = self.wait_for_input()
                            if ch.lower == 'b':
                                log.show(f'Rename {from_name} --> {to_name} Aborted')
                                return
                            elif ch.lower in valid_ch:
                                valid = True
                            else:
                                print('invalid choice {} Try Again.'.format(ch.orig))
                        udev_line = None
                        # NOTE(review): 'temp' is not in valid_ch ({'1','2'}) so this branch
                        # (and the trailing else) appear unreachable — confirm intent.
                        if valid_ch[ch.lower] == 'temp':
                            error = True
                            print('The Temporary rename feature is not yet implemented')
                        elif valid_ch[ch.lower] == 'by_path':
                            udev_line = (
                                'ATTRS{{idVendor}}=="{0}", ATTRS{{idProduct}}=="{1}", GOTO="{0}_{1}"'.format(  # NoQA
                                    id_vendorid, id_prod),
                                'ATTRS{{devpath}}=="{}", ENV{{ID_USB_INTERFACE_NUM}}=="{}", '
                                'SYMLINK+="{}"'.format(lame_devpath, id_ifnum, to_name),
                            )
                        elif valid_ch[ch.lower] == 'by_id':
                            udev_line = (
                                'SUBSYSTEM=="tty", ATTRS{{idVendor}}=="{0}", ATTRS{{idProduct}}=="{1}", GOTO="{0}_{1}"'
                                .format(  # NoQA
                                    id_vendorid, id_prod),
                                'ENV{{ID_USB_INTERFACE_NUM}}=="{}", SYMLINK+="{}", GOTO="END"'
                                .format(id_ifnum, to_name)  # NoQA
                            )
                        else:
                            error = [
                                'Unable to add udev rule adapter missing details',
                                'idVendor={}, idProduct={}, serial#={}'.format(  # NoQA
                                    id_vendorid, id_prod, id_serial)
                            ]
                        # runs once when a (pointer, dev) rule pair was built above
                        while udev_line:
                            error = self.add_to_udev(udev_line[0], '# END BYPATH-POINTERS')
                            error = self.add_to_udev(udev_line[1], '# END BYPATH-DEVS',
                                                     label='{}_{}'.format(id_vendorid, id_prod))  # NoQA
                            error = self.do_ser2net_line(
                                from_name=from_name, to_name=to_name,
                                baud=baud, dbits=dbits, parity=parity, flow=flow)
                            break
                else:
                    log.error(f'Device {from_name} No Longer Found', show=True)

        # TODO simplify once ser2net existing verified
        else:  # renaming previously named port.
            # -- // local ttyAMA adapters \\ --
            devname = local.adapters[f'/dev/{from_name}']['udev'].get('devname', '')
            rules_file = self.rules_file if 'ttyAMA' not in devname else self.ttyama_rules_file
            # in-place sed of the existing alias in the udev rules file, then verify the
            # new name is present and the old name is gone
            cmd = 'sudo sed -i "s/{0}{3}/{1}{3}/g" {2} && grep -q "{1}{3}" {2} && [ $(grep -c "{0}{3}" {2}) -eq 0 ]'.format(
                from_name, to_name, rules_file, '')
            error = utils.do_shell_cmd(cmd, shell=True)
            if not error:
                error = self.do_ser2net_line(from_name=from_name, to_name=to_name,
                                             baud=baud, dbits=dbits, parity=parity, flow=flow)
            else:
                return [
                    error.split('\n'),
                    'Failed to change {} --> {} in {}'.format(from_name, to_name, self.ser2net_file)
                ]

        if not error:
            # Update adapter variables with new_name
            local.adapters[f'/dev/{to_name}'] = local.adapters[f'/dev/{from_name}']
            local.adapters[f'/dev/{to_name}']['config']['port'] = config.ser2net_conf[f'/dev/{to_name}'].get('port', 0)
            local.adapters[f'/dev/{to_name}']['config']['cmd'] = config.ser2net_conf[f'/dev/{to_name}'].get('cmd')
            local.adapters[f'/dev/{to_name}']['config']['line'] = config.ser2net_conf[f'/dev/{to_name}'].get('line')
            local.adapters[f'/dev/{to_name}']['config']['log'] = config.ser2net_conf[f'/dev/{to_name}'].get('log')
            local.adapters[f'/dev/{to_name}']['config']['log_ptr'] = config.ser2net_conf[f'/dev/{to_name}'].get('log_ptr')
            _config_dict = local.adapters[f'/dev/{to_name}']['config']
            if not use_def:  # overwrite con settings if they were changed
                updates = {
                    'baud': baud,
                    'dbits': dbits,
                    'flow': flow,
                    'parity': parity,
                    'sbits': sbits,
                }
                local.adapters[f'/dev/{to_name}']['config'] = {**_config_dict, **updates}
            if from_name != to_name:  # facilitates changing con settings without actually renaming
                del local.adapters[f'/dev/{from_name}']
            self.udev_pending = True  # toggle for exit function if they exit directly from rename memu

            # update first item in first section of menu_body menu uses it to determine if section is a continuation
            try:
                self.cur_menu.body_in[0][0] = self.cur_menu.body_in[0][0].replace(from_name, to_name)
                if self.menu.body_in is not None:  # Can be none when called via rename directly
                    self.menu.body_in[0][0] = self.menu.body_in[0][0].replace(from_name, to_name)
            except Exception as e:
                log.exception(f"[DEV NOTE menu_body update after rename caused exception.\n{e}", show=False)
    else:
        return 'Aborted based on user input'
def pwr_get_outlets(self, outlet_data=None, upd_linked=False, failures=None):
    '''Get Details for Outlets defined in ConsolePi.yaml power section

    On Menu Launch this method is called in parallel (threaded) for each outlet
    On Refresh all outlets are passed to the method

    params: - All Optional
        outlet_data:dict, The outlets that need to be updated, if not provided will get all
            outlets defined in ConsolePi.yaml
        upd_linked:Bool, If True will update just the linked ports, False is for dli and will
            update all ports for the dli.
        failures:dict: when refreshing outlets pass in previous failures so they can be re-tried

    returns:
        dict: self.data, updated in place (defined / failures / dli_power keys)
    '''
    # BUG FIX: previous signature used mutable defaults (outlet_data={}, failures={}).
    # This method mutates both dicts (e.g. failures[k] = ...), so the literal defaults
    # leaked state across calls.  None sentinels are backward compatible.
    outlet_data = {} if outlet_data is None else outlet_data
    failures = {} if failures is None else failures

    # re-attempt connection to failed power controllers on refresh
    if not failures:
        failures = outlet_data.get('failures') if outlet_data.get('failures') else self.data.get('failures')

    outlet_data = self.data.get('defined') if not outlet_data else outlet_data
    if failures:
        # merge previous failures back in so they are re-tried, then start a fresh failure dict
        outlet_data = {**outlet_data, **failures}
        failures = {}

    dli_power = self.data.get('dli_power', {})

    for k in outlet_data:
        outlet = outlet_data[k]
        _start = time.time()
        # -- // GPIO \\ --
        if outlet['type'].upper() == 'GPIO':
            if not is_rpi:
                log.warning('GPIO Outlet Defined, GPIO Only Supported on RPi - ignored', show=True)
                continue
            # noff: "normally off" relays invert the GPIO reading
            noff = True if 'noff' not in outlet else outlet['noff']
            GPIO.setup(outlet['address'], GPIO.OUT)
            outlet_data[k]['is_on'] = bool(GPIO.input(outlet['address'])) if noff \
                else not bool(GPIO.input(outlet['address']))

        # -- // tasmota \\ --
        elif outlet['type'] == 'tasmota':
            response = self.do_tasmota_cmd(outlet['address'])
            outlet['is_on'] = response
            if response not in [0, 1, True, False]:
                # anything other than a power state is an error string from the helper
                failures[k] = outlet_data[k]
                failures[k]['error'] = f'[PWR-TASMOTA] {k}:{failures[k]["address"]} {response} - Removed'
                log.warning(failures[k]['error'], show=True)

        # -- // esphome \\ --
        elif outlet['type'] == 'esphome':
            # TODO have do_esphome accept list, slice, or str for one or multiple relays
            relays = utils.listify(outlet.get('relays', k))  # if they have not specified the relay try name of outlet
            outlet['is_on'] = {}
            for r in relays:
                response = self.do_esphome_cmd(outlet['address'], r)
                outlet['is_on'][r] = {'state': response, 'name': r}
                if response not in [True, False]:
                    failures[k] = outlet_data[k]
                    failures[k]['error'] = f'[PWR-ESP] {k}:{failures[k]["address"]} {response} - Removed'
                    log.warning(failures[k]['error'], show=True)

        # -- // dli \\ --
        elif outlet['type'].lower() == 'dli':
            if TIMING:
                dbg_line = '------------------------ // NOW PROCESSING {} \\\\ ------------------------'.format(k)
                print('\n{}'.format('=' * len(dbg_line)))
                print('{}\n{}\n{}'.format(dbg_line, outlet_data[k], '-' * len(dbg_line)))
                print('{}'.format('=' * len(dbg_line)))

            # -- // VALIDATE CONFIG FILE DATA FOR DLI \\ --
            all_good = True  # initial value
            for _ in ['address', 'username', 'password']:
                if not outlet.get(_):
                    all_good = False
                    failures[k] = outlet_data[k]
                    failures[k]['error'] = f'[PWR-DLI {k}] {_} missing from {failures[k]["address"]} ' \
                        'configuration - skipping'
                    log.error(f'[PWR-DLI {k}] {_} missing from {failures[k]["address"]} '
                              'configuration - skipping', show=True)
                    break
            if not all_good:
                continue

            (this_dli, _update) = self.load_dli(outlet['address'], outlet['username'], outlet['password'])
            if this_dli is None or this_dli.dli is None:
                failures[k] = outlet_data[k]
                failures[k]['error'] = '[PWR-DLI {}] {} Unreachable - Removed'.format(k, failures[k]['address'])
                log.warning(f"[PWR-DLI {k}] {failures[k]['address']} Unreachable - Removed", show=True)
            else:
                if TIMING:
                    xstart = time.time()
                    print('this_dli.outlets: {} {}'.format(this_dli.outlets, 'update' if _update else 'init'))
                    print(json.dumps(dli_power, indent=4, sort_keys=True))

                # upd_linked is for faster update in power menu only refreshes data for linked ports vs entire dli
                if upd_linked and self.data['dli_power'].get(outlet['address']):
                    if outlet.get('linked_devs'):
                        (outlet, _p) = self.update_linked_devs(outlet)
                        if k in outlet_data:
                            outlet_data[k]['is_on'] = this_dli[_p]
                        else:
                            log.error(f'[PWR GET_OUTLETS] {k} appears to be unreachable')

                    # TODO not actually using the error returned this turned into a hot mess
                    if isinstance(outlet['is_on'], dict) and not outlet['is_on']:
                        all_good = False
                    # update dli_power for the refreshed / linked ports
                    else:
                        for _ in outlet['is_on']:
                            dli_power[outlet['address']][_] = outlet['is_on'][_]
                else:
                    if _update:
                        dli_power[outlet['address']] = this_dli.get_dli_outlets()  # data may not be fresh trigger dli update

                        # handle error connecting to dli during refresh - when connect worked on menu launch
                        if not dli_power[outlet['address']]:
                            failures[k] = outlet_data[k]
                            failures[k]['error'] = f"[PWR-DLI] {k} {failures[k]['address']} Unreachable - Removed"
                            log.warning(f'[PWR-DLI {k}] {failures[k]["address"]} Unreachable - Removed', show=True)
                            continue
                    else:  # dli was just instantiated data is fresh no need to update
                        dli_power[outlet['address']] = this_dli.outlets

                    if outlet.get('linked_devs'):
                        (outlet, _p) = self.update_linked_devs(outlet)

                if TIMING:
                    print('[TIMING] this_dli.outlets: {}'.format(time.time() - xstart))  # TIMING

        log.debug(f'dli {k} Updated. Elapsed Time(secs): {time.time() - _start}')
        # -- END for LOOP for k in outlet_data --

    # Move failed outlets from the keys that populate the menu to the 'failures' key
    # failures are displayed in the footer section of the menu, then re-tried on refresh
    # TODO this may be causing - RuntimeError: dictionary changed size during iteration
    # in pwr_start_update_threads. witnessed on mdnsreg daemon on occasion (Move del logic after wait_for_threads?)
    for _dev in failures:
        if outlet_data.get(_dev):
            del outlet_data[_dev]
        if self.data['defined'].get(_dev):
            del self.data['defined'][_dev]
        if failures[_dev]['address'] in dli_power:
            del dli_power[failures[_dev]['address']]
        self.data['failures'][_dev] = failures[_dev]

    # restore outlets that failed on menu launch but found reachable during refresh
    for _dev in outlet_data:
        if _dev not in self.data['defined']:
            self.data['defined'][_dev] = outlet_data[_dev]
        if _dev in self.data['failures']:
            del self.data['failures'][_dev]

    self.data['dli_power'] = dli_power

    return self.data
def on_service_state_change(self, zeroconf: Zeroconf, service_type: str, name: str,
                            state_change: ServiceStateChange) -> None:
    '''zeroconf ServiceBrowser callback — processes newly Added ConsolePi mdns services.

    Decodes the TXT properties advertised by the remote, derives its reachable IP,
    merges with / updates the local remotes cache, and (in interactive mode) prints
    a discovery summary.  Only ServiceStateChange.Added is handled.
    '''
    cpi = self.cpi
    mdns_data = None
    update_cache = False
    if state_change is ServiceStateChange.Added:
        info = zeroconf.get_service_info(service_type, name)
        if info:
            # ignore our own advertisement
            if info.server.split('.')[0] != cpi.local.hostname:
                if info.properties:
                    properties = info.properties

                    # TXT values are bytes; values starting with '[' or '{' are JSON-encoded
                    mdns_data = {
                        k.decode('UTF-8'):
                        v.decode('UTF-8') if not v.decode('UTF-8')[0] in ['[', '{']
                        else json.loads(v.decode('UTF-8'))  # NoQA
                        for k, v in properties.items()
                    }

                    hostname = mdns_data.get('hostname')
                    interfaces = mdns_data.get('interfaces', [])
                    # interfaces = json.loads(properties[b'interfaces'].decode("utf-8"))

                    log_out = json.dumps(mdns_data, indent=4, sort_keys=True)
                    log.debug(f'[MDNS DSCVRY] {hostname} Properties Discovered via mdns:\n{log_out}')

                    # determine reachable IP: advertised rem_ip, else the single interface's
                    # ip, else whatever the cache already has for this host
                    rem_ip = mdns_data.get('rem_ip')
                    if not rem_ip:
                        if len(mdns_data.get('interfaces', [])) == 1:
                            rem_ip = [interfaces[i]['ip'] for i in interfaces]
                            rem_ip = rem_ip[0]
                        else:
                            rem_ip = None if hostname not in cpi.remotes.data \
                                or 'rem_ip' not in cpi.remotes.data[hostname] \
                                else cpi.remotes.data[hostname]['rem_ip']

                    cur_known_adapters = cpi.remotes.data.get(hostname, {'adapters': None}).get('adapters')

                    # -- Log new entry only if this is the first time it's been discovered --
                    if hostname not in self.d_discovered:
                        self.d_discovered.append(hostname)
                        log.info('[MDNS DSCVRY] {}({}) Discovered via mdns'.format(
                            hostname, rem_ip if rem_ip is not None else '?'))

                    from_mdns_adapters = mdns_data.get('adapters')
                    mdns_data['rem_ip'] = rem_ip
                    # prefer adapters advertised via mdns; fall back to cached adapter data
                    mdns_data['adapters'] = from_mdns_adapters if from_mdns_adapters is not None \
                        else cur_known_adapters
                    mdns_data['source'] = 'mdns'
                    mdns_data['upd_time'] = int(time.time())
                    mdns_data = {hostname: mdns_data}

                    # update from API only if no adapter data exists either in cache or from mdns that triggered this
                    # adapter data is updated on menu_launch
                    if not mdns_data[hostname]['adapters'] or hostname not in cpi.remotes.data:
                        log.info('[MDNS DSCVRY] {} provided no adapter data Collecting via API'.format(
                            info.server.split('.')[0]))
                        # TODO check this don't think needed had a hung process on one of my Pis added it to be safe
                        try:
                            res = cpi.remotes.api_reachable(hostname, mdns_data[hostname])
                            update_cache = res.update
                            mdns_data[hostname] = res.data
                            # reachable = res.reachable
                        except Exception as e:
                            log.error(f'Exception occured verifying reachability via API for {hostname}:\n{e}')

                    # interactive (show) mode: print a summary of what mdns vs cache know
                    if self.show:
                        if hostname in self.discovered:
                            self.discovered.remove(hostname)
                        # '*' suffix flags hosts whose cache entry was updated
                        self.discovered.append('{}{}'.format(hostname, '*' if update_cache else ''))
                        print(hostname + '({}) Discovered via mdns.'.format(
                            rem_ip if rem_ip is not None else '?'))
                        try:
                            print('{}\n{}'.format(
                                'mdns: None' if from_mdns_adapters is None else 'mdns: {}'.format(
                                    [d.replace('/dev/', '') for d in from_mdns_adapters]
                                    if not isinstance(from_mdns_adapters, list) else
                                    [d['dev'].replace('/dev/', '') for d in from_mdns_adapters]),
                                'cache: None' if cur_known_adapters is None else 'cache: {}'.format(
                                    [d.replace('/dev/', '') for d in cur_known_adapters]
                                    if not isinstance(cur_known_adapters, list) else
                                    [d['dev'].replace('/dev/', '') for d in cur_known_adapters])))
                        except TypeError as e:
                            print(f'EXCEPTION: {e}')
                        print(f'\nDiscovered ConsolePis: {self.discovered}')
                        print("press Ctrl-C to exit...\n")

                    log.debug('[MDNS DSCVRY] {} Final data set:\n{}'.format(
                        hostname, json.dumps(mdns_data, indent=4, sort_keys=True)))
                    if update_cache:
                        # hostname key is redundant inside the per-host dict
                        if 'hostname' in mdns_data[hostname]:
                            del mdns_data[hostname]['hostname']
                        cpi.remotes.data = cpi.remotes.update_local_cloud_file(
                            remote_consoles=mdns_data)
                        log.info(f'[MDNS DSCVRY] {hostname} Local Cache Updated after mdns discovery')
                else:
                    # NOTE(review): hostname is only assigned in the properties branch above,
                    # so it is unbound here — this warning would raise NameError. Confirm.
                    log.warning(f'[MDNS DSCVRY] {hostname}: No properties found')
        else:
            log.warning(f'[MDNS DSCVRY] {info}: No info found')
def get_remote(self, data=None, rename=False):
    '''Verify cached remote ConsolePis via their API, in parallel threads.

    params:
        data: dict of remotes keyed by hostname; defaults to the local cloud cache
            (config.remotes) when not provided.
        rename: bool, passed through to api_reachable (forces refreshed adapter data
            after a rename on the remote).

    returns:
        dict: the verified remote data (unreachable hosts removed after 3 failures).
    '''
    spin = self.spin

    def verify_remote_thread(remotepi, data, rename):
        """sub to verify reachability and api data for remotes

        Runs in a worker thread per remote; mutates data[remotepi] in place and
        flips the shared cache_update_pending / pop_list / connected attributes.

        params:
            remotepi: The hostname currently being processed
            data: dict remote ConsolePi dict with hostname as key
        """
        this = data[remotepi]
        res = self.api_reachable(remotepi, this, rename=rename)
        this = res.data
        if res.update:
            self.cache_update_pending = True

        if not res.reachable:
            log.warning(f"[GET REM] Found {remotepi} in Local Cloud Cache: UNREACHABLE")
            # increment consecutive-failure counter; host is popped after 3 (below)
            this["fail_cnt"] = (1 if not this.get("fail_cnt") else this["fail_cnt"] + 1)
            self.pop_list.append(remotepi)
            self.cache_update_pending = True
        else:
            self.connected = True
            # reset failure counter on success
            if this.get("fail_cnt"):
                this["fail_cnt"] = 0
                self.cache_update_pending = True
            if res.update:
                log.info(
                    f"[GET REM] Updating Cache - Found {remotepi} in Local Cloud Cache, "
                    f"reachable via {this['rem_ip']}")

        data[remotepi] = this

    if data is None or len(data) == 0:
        data = config.remotes  # remotes from local cloud cache

    if not data:
        # print(self.log_sym_warn + " No Remotes in Local Cache")
        log.info("No Remotes found in Local Cache")
        data = {}  # convert None type to empy dict
    else:
        # if self is in the remote-data remove and warn user (can occur in rare scenarios i.e. hostname changes)
        if socket.gethostname() in data:
            del data[socket.gethostname()]
            log.show(
                "Local cache included entry for self - do you have other ConsolePis using the same hostname?")

        # Verify Remote ConsolePi details and reachability
        if stdin.isatty():
            spin.start("Querying Remotes via API to verify reachability and adapter data")

        for remotepi in data:
            # -- // Launch Threads to verify all remotes in parallel \\ --
            threading.Thread(
                target=verify_remote_thread,
                args=(remotepi, data, rename),
                name=f"vrfy_{remotepi}",
            ).start()
            # verify_remote_thread(remotepi, data)  # Non-Threading DEBUG

        # -- wait for threads to complete --
        if not self.cpiexec.wait_for_threads(name="vrfy_", thread_type="remote"):
            if config.remotes:
                if stdin.isatty():
                    spin.succeed(
                        "[GET REM] Querying Remotes via API to verify reachability and adapter data\n\t"
                        f"Found {len(config.remotes)} Remote ConsolePis")
            else:
                if stdin.isatty():
                    spin.warn(
                        "[GET REM] Querying Remotes via API to verify reachability and adapter data\n\t"
                        "No Reachable Remote ConsolePis Discovered")
        else:
            log.error("[GET REM] Remote verify threads Still running / exceeded timeout")
            if stdin.isatty():
                spin.stop()

    # update local cache if any ConsolePis found UnReachable
    if self.cache_update_pending:
        if self.pop_list:
            for remotepi in self.pop_list:
                if (
                    data[remotepi]["fail_cnt"] >= 3
                ):  # NoQA remove from local cache after 3 failures (cloud or mdns will repopulate if discovered)
                    removed = data.pop(remotepi)
                    log.warning(
                        "[GET REM] {} has been removed from Local Cache after {} failed attempts"
                        .format(remotepi, removed["fail_cnt"]),
                        show=True,
                    )
                else:
                    log.show("Cached Remote '{}' is unreachable".format(remotepi))

        # update local cache file if rem_ip or adapter data changed
        data = self.update_local_cloud_file(data)
        self.pop_list = []
        self.cache_update_pending = False

    return data
def api_reachable(self, remote_host: str, cache_data: dict, rename: bool = False):
    """Check Rechability & Fetch adapter data via API for remote ConsolePi

    params:
        remote_host:str, The hostname of the Remote ConsolePi
        cache_data:dict, The ConsolePi dictionary for the remote (from cache file)
        rename:bool, rename = True will do api call with refresh=True Query parameter
            which tells the api to first update connection data from ser2net as it
            likely changed as a result of remote rename operation.

    returns:
        ApiReachableResponse with attributes:
            update: Bool, indicating if data is different than cache
            data: dict, Updated ConsolePi dictionary for the remote
            reachable: Bool, True if any candidate IP answered the API call
    """
    # simple response container returned to callers (they read .update/.data/.reachable)
    class ApiReachableResponse:
        def __init__(self, update, data, reachable):
            self.update = update
            self.data = data
            self.reachable = reachable

    update = False
    local = self.local

    # candidate IPs from the remote's cached interface dict; skip meta keys ("_...")
    # and any IP that is actually one of ours
    _iface_dict = cache_data["interfaces"]
    rem_ip_list = [
        _iface_dict[_iface].get("ip")
        for _iface in _iface_dict
        if not _iface.startswith("_")
        and _iface_dict[_iface].get("ip") not in local.ip_list
    ]

    # if inbound data includes rem_ip make sure to try that first
    for _ip in [cache_data.get("rem_ip"), cache_data.get("last_ip")]:
        if _ip:
            if _ip not in rem_ip_list or rem_ip_list.index(_ip) != 0:
                # NOTE(review): remove() raises ValueError when _ip is not already in
                # rem_ip_list (left side of the `or` true) — confirm this can't occur.
                rem_ip_list.remove(_ip)
                rem_ip_list.insert(0, _ip)

    rem_ip = None
    # try each candidate IP until one answers; first success wins (break below)
    for _ip in rem_ip_list:
        log.debug(f"[API_REACHABLE] verifying {remote_host}")
        _adapters = self.get_adapters_via_api(_ip, rename=rename)
        if _adapters:
            rem_ip = _ip  # Remote is reachable
            if not isinstance(_adapters, int):  # indicates an html error code was returned
                if isinstance(_adapters, list):  # indicates need for conversion from old api format
                    _adapters = self.convert_adapters(_adapters)
                    if not self.old_api_log_sent:
                        log.warning(
                            f"{remote_host} provided old api schema. Recommend Upgrading to current.")
                        self.old_api_log_sent = True

                # Only compare config dict for each adapter as udev dict will generally be different due to time_since_init
                if not cache_data.get("adapters") or {
                    a: {
                        "config": _adapters[a].get("config", {})
                    }
                    for a in _adapters
                } != {
                    a: {
                        "config": cache_data["adapters"][a].get("config", {})
                    }
                    for a in cache_data["adapters"]
                }:
                    cache_data["adapters"] = _adapters
                    update = True  # --> Update if adapter dict is different
                else:
                    cached_udev = [
                        False for a in cache_data["adapters"]
                        if 'udev' not in cache_data["adapters"][a]
                    ]
                    if False in cached_udev:
                        cache_data["adapters"] = _adapters
                        update = True  # --> Update if udev key not in existing data (udev not sent to cloud)
            elif _adapters == 200:
                # HTTP 200 with no adapter payload: remote is up but has nothing attached
                log.show(
                    f"Remote {remote_host} is reachable via {_ip},"
                    " but has no adapters attached\nit's still available in remote shell menu"
                )

            # remote was reachable update last_ip, even if returned bad status_code still reachable
            if not cache_data.get("last_ip", "") == _ip:
                cache_data["last_ip"] = _ip
                update = True  # --> Update if last_ip is different than currently reachable IP
            break

    if cache_data.get("rem_ip") != rem_ip:
        cache_data["rem_ip"] = rem_ip
        update = (
            True  # --> Update if rem_ip didn't match (was previously unreachable)
        )

    # NOTE(review): _adapters is unbound here if rem_ip_list was empty (loop never ran) —
    # would raise NameError. Confirm interfaces always yield at least one candidate IP.
    if not _adapters:
        reachable = False
        if isinstance(cache_data.get("adapters"), list):
            # unreachable remote with old-format (list) cached adapters: convert the
            # cache in place to the new dict-keyed-by-dev schema
            _adapters = cache_data.get("adapters")
            _adapters = {
                _adapters[_adapters.index(d)]["dev"]: {
                    "config": {
                        k: _adapters[_adapters.index(d)][k]
                        for k in _adapters[_adapters.index(d)]
                    }
                }
                for d in _adapters
            }
            cache_data["adapters"] = _adapters
            _msg = (
                f"{remote_host} Cached adapter data was in old format... Converted to new.\n"
                f"\t\t{remote_host} Should be upgraded to the current version of ConsolePi."
            )
            log.warning(_msg, show=True)
            update = True  # --> Convert to new and Update if cache data was in old format
    else:
        reachable = True

    return ApiReachableResponse(update, cache_data, reachable)
def auto_pwron_thread(self, pwr_key):
    """Ensure any outlets linked to device are powered on

    Called by consolepi_menu exec_menu function and remote_launcher (for sessions to remotes)
    when a connection initiated with adapter.  Powers any linked outlets associated with the
    adapter on.

    params:
        pwr_key:str, The tty device (or remote) key used to look up linked outlets.

    Returns:
        No Return - Updates class attributes
    """
    # wait for outlet init threads; bail if they never complete
    if self.wait_for_threads("init"):
        return

    outlets = self.pwr.data
    if "linked" not in outlets:
        _msg = "Error linked key not found in outlet dict\nUnable to perform auto power on"
        log.show(_msg, show=True)
        return

    if not outlets["linked"].get(pwr_key):
        return

    # -- // Perform Auto Power On (if not already on) \\ --
    for o in outlets["linked"][pwr_key]:
        # linked entry format: "<outlet_name>" or "<outlet_name>:[port, ...]"
        outlet = outlets["defined"].get(o.split(":")[0])
        ports = [] if ":" not in o else json.loads(o.replace("'", '"').split(":")[1])
        _addr = outlet["address"]

        # -- // DLI web power switch Auto Power On \\ --
        #
        # TODO combine all ports from same pwr_key and sent to pwr_toggle once
        # TODO Update outlet if return is OK, then run refresh in the background to validate
        # TODO Add class attribute to cpi_menu ~ cpi_menu.new_data = "power", "main", etc
        #      Then in wait_for_input run loop to check for updates and re-display menu
        # TODO power_menu and dli_menu wait_for_threads auto power ... check cpiexec.autopwr_wait first
        #
        if outlet["type"].lower() == "dli":
            for p in ports:
                log.debug(
                    f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr} p{p}")
                if not outlet["is_on"][p]["state"]:  # This is just checking what's in the dict not querying the DLI
                    r = self.pwr.pwr_toggle(outlet["type"], _addr, desired_state=True, port=p)
                    if isinstance(r, bool):
                        if r:
                            # refresh linked outlet state in the background after power-on
                            threading.Thread(
                                target=self.outlet_update,
                                kwargs={
                                    "refresh": True,
                                    "upd_linked": True
                                },
                                name="auto_pwr_refresh_dli",
                            ).start()
                            self.autopwr_wait = True
                    else:
                        # non-bool return is an error message/code
                        log.warning(
                            f"{pwr_key} Error operating linked outlet @ {o}",
                            show=True,
                        )

        # -- // esphome Auto Power On \\ --
        elif outlet["type"].lower() == "esphome":
            for p in ports:
                log.debug(
                    f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr} p{p}")
                if not outlet["is_on"][p]["state"]:  # This is just checking what's in the dict
                    r = self.pwr.pwr_toggle(outlet["type"], _addr, desired_state=True, port=p)
                    if isinstance(r, bool):
                        # update cached state for the specific relay that was toggled
                        self.pwr.data['defined'][o.split(':')[0]]['is_on'][p]['state'] = r
                    else:
                        log.show(r)
                        log.warning(
                            f"{pwr_key} Error operating linked outlet @ {o}",
                            show=True,
                        )

        # -- // GPIO & TASMOTA Auto Power On \\ --
        else:
            log.debug(
                f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr}")
            # noff (normally-off) only applies to GPIO outlets
            r = self.pwr.pwr_toggle(
                outlet["type"], _addr, desired_state=True,
                noff=outlet.get("noff", True) if outlet["type"].upper() == "GPIO" else True,
            )
            if isinstance(r, int) and r > 1:  # return is an error
                r = False
            else:  # return is bool which is what we expect
                if r:
                    self.pwr.data["defined"][o]["state"] = r
                    self.autopwr_wait = True
                    # self.pwr.pwr_get_outlets(upd_linked=True)
                else:
                    # self.config.log_and_show(f"Error operating linked outlet {o}:{outlet['address']}", log=log.warning)
                    log.show(
                        f"Error operating linked outlet {o}:{outlet['address']}",
                        show=True,
                    )
def next_ztp(filename, mac):
    '''Transition dnsmasq configuration to next config file when using ordered ZTP mechanism.

    Comments out the option line for the file just sent (adding a mac-based retry
    entry) and un-comments the option line for the next ordered cfg, then appends
    host tag entries so dnsmasq tags this mac on subsequent DHCP exchanges.

    Args:
        filename (str): Full path of filename just sent via tftp.  Provided by dnsmasq as argument.
        mac (Mac object): mac object with various attributes for the MAC address of the device that
            requested/rcvd the config.
    '''
    _from = os.path.basename(filename)
    # BUGFIX: previous code used _from.rstrip('.cfg'), but str.rstrip strips a *set of
    # characters* (., c, f, g) not a suffix -- it mangled mac-based names ending in the
    # hex digits c/f (e.g. "...ff.cfg" lost the trailing "ff").  Strip the literal suffix.
    _base = _from[:-4] if _from.endswith('.cfg') else _from
    _to = None  # init
    if _from.endswith('.cfg'):
        set_tag = "cfg_sent"
        _cfg_mac = utils.Mac(_base)
        # Only advance to the next ordered cfg when the sent file is not a
        # mac-specific cfg for the requesting device.
        if _cfg_mac.ok and mac.clean not in [_cfg_mac.clean, _cfg_mac.oobm.clean]:
            _to = f"{_from.split('_')[0]}_{int(_base.split('_')[-1]) + 1}.cfg"
    else:
        set_tag = "img_sent"

    host_lines = []
    opts_lines = []
    if not os.path.isfile(ztp_opts_conf):
        log.warning(f"{ztp_opts_conf} not found. Noting to do.")
    else:
        if _to and not os.path.isfile(f"{os.path.dirname(filename)}/{_to}"):
            log.info(f"No More Files for {_from.split('_')[0]}")

        # Rewrite dnsmasq opts file in place: disable the line for the file just
        # sent, enable the line for the next ordered file.
        with in_place.InPlace(ztp_opts_conf) as fp:
            line_num = 1
            cur_opts_lines = fp.readlines()
            for line in cur_opts_lines:
                if _from in line:
                    if mac.ok:
                        # comment header recording what was sent and the transfer result
                        opts_lines.append(
                            f"# {mac.cols}|{ip} Sent {_from}"
                            f"{' Success' if ztp_ok else 'WARN file size != xfer total check switch and logs'}\n"
                        )
                        if set_tag == "cfg_sent" and not line.startswith("#"):
                            # disable the active option line and add a mac-based retry entry
                            opts_lines.append(f"# SENT # {line}")
                            opts_lines.append(f"# -- Retry Line for {_base} Based On mac {mac.cols} --\n")
                            opts_lines.append(f'tag:{mac.tag},option:bootfile-name,"{_from}"\n')
                            log.info(f"Disabled {_from} on line {line_num} of {os.path.basename(ztp_opts_conf)}")
                            log.info(f"Retry Entry Created for {_base} | {mac.cols} | {ip}")
                        else:
                            opts_lines.append(line)
                        host_lines.append(f"{mac.cols},{mac.tag},,{ztp_lease_time},set:{mac.tag},set:{set_tag}\n")
                    else:
                        print(f'Unable to write Retry Lines for previously updated device.  Mac {mac.orig} appears invalid')
                        log.warning(
                            f"Unable to Create Retry Entry for {_base} @ {ip}, "
                            f"Invalid MAC address --> {mac.cols}"
                        )
                elif _to and _to in line:
                    # re-enable (uncomment) the next ordered cfg's option line
                    if not line.startswith('#'):
                        log.warning(f'Expected {_to} option line to be commented out @ this point. It was not.')
                    opts_lines.append(line.lstrip('#').lstrip())
                    log.info(f"Enabled {_to} on line {line_num} of {os.path.basename(ztp_opts_conf)}")
                else:
                    opts_lines.append(line)
                line_num += 1
            _write_to_file(fp, cur_opts_lines, opts_lines)

    if host_lines:
        # append-mode open, but seek(0) first so existing entries can be de-duped
        # by _write_to_file before the new host tag lines are written
        with open(ztp_hosts_conf, 'a+') as fp:
            fp.seek(0)
            cur_host_lines = fp.readlines()
            _write_to_file(fp, cur_host_lines, host_lines)
        if set_tag.startswith('cfg'):
            log.info(f"Retry Entries Written to file for {_base} | {mac.cols} | {ip}")
        else:
            log.info(f"{mac.cols} tagged as img_sent to prevent re-send of {_from}")
# -- // Entry point: browse for ConsolePi mDNS services, restarting the \\ --
# -- // zeroconf browser every RESTART_INTERVAL seconds                 \\ --
if len(sys.argv) > 1:
    # any CLI argument == interactive mode: print discoveries to stdout
    mdns = MDNS_Browser(show=True)
    RESTART_INTERVAL = 30  # when running in interactive mode reduce restart interval
    # mdns.zc = mdns.run()
    print("\nBrowsing services, press Ctrl-C to exit...\n")
else:
    mdns = MDNS_Browser()

try:
    while True:
        try:
            mdns.zc = mdns.run()
        except AttributeError:
            # hopefully this handles "Zeroconf object has no attribute '_handlers_lock'"
            log.warning(
                '[MDNS BROWSE] caught _handlers_lock exception retrying in 5 sec'
            )
            time.sleep(5)
            continue

        start = time.time()
        # re-init zeroconf browser every RESTART_INTERVAL seconds
        while time.time() < start + RESTART_INTERVAL:
            time.sleep(0.1)

        if mdns.zc is not None:
            mdns.zc.close()
            mdns.discovered = []
except KeyboardInterrupt:
    pass
finally:
    # BUGFIX: source was truncated leaving an empty `if` body under finally;
    # close the zeroconf instance on exit (mirrors the in-loop cleanup above).
    if mdns.zc is not None:
        mdns.zc.close()
def auto_pwron_thread(self, pwr_key):
    """Ensure any outlets linked to device are powered on.

    Called by consolepi_menu exec_menu function and remote_launcher (for sessions
    to remotes) when a connection is initiated with an adapter.  Powers on any
    linked outlets associated with the adapter.

    params:
        pwr_key:str, key into self.pwr.data["linked"] (the tty device the user
            is connecting to).

    Returns:
        No Return - Updates class attributes (self.autopwr_wait, self.pwr.data).
    """
    # Bail out if power subsystem init threads did not complete cleanly.
    if self.wait_for_threads("init"):
        return

    outlets = self.pwr.data
    if "linked" not in outlets:
        _msg = "Error linked key not found in outlet dict\nUnable to perform auto power on"
        log.show(_msg, show=True)
        return

    # Nothing linked to this adapter -- nothing to do.
    if not outlets["linked"].get(pwr_key):
        return

    # -- // Perform Auto Power On (if not already on) \\ --
    for o in outlets["linked"][pwr_key]:
        # Linked entries look like "outlet_name" or "outlet_name:[ports]".
        outlet = outlets["defined"].get(o.split(":")[0])
        if outlet is None:
            # defined entry can be flushed after a failed status fetch; skip
            # this link rather than crash on outlet["address"] below
            log.warning(f"{pwr_key} Error operating linked outlet @ {o}", show=True)
            continue
        ports = [] if ":" not in o else json.loads(o.split(":")[1])  # NoQA single port defs are listified in config.py
        _addr = outlet["address"]

        # -- // DLI web power switch Auto Power On \\ --
        if outlet["type"].lower() == "dli":
            for p in ports:
                log.debug(
                    f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr} p{p}"
                )
                # Checking cached state only, not querying the DLI.  BUGFIX: use
                # .get() chain -- a failed [DLI GET OUTLETS] fetch flushes is_on,
                # which previously raised KeyError (e.g. "KeyError: 2") here.
                if not outlet.get("is_on", {}).get(p, {}).get("state"):
                    r = self.pwr.pwr_toggle(
                        outlet["type"], _addr, desired_state=True, port=p
                    )
                    if isinstance(r, bool):
                        if r:
                            # kick off a background refresh to validate the new state
                            threading.Thread(
                                target=self.outlet_update,
                                kwargs={"refresh": True, "upd_linked": True},
                                name="auto_pwr_refresh_dli",
                            ).start()
                            self.autopwr_wait = True
                    else:
                        # non-bool return from pwr_toggle indicates an error
                        log.warning(
                            f"{pwr_key} Error operating linked outlet @ {o}",
                            show=True,
                        )

        # -- // esphome Auto Power On \\ --
        elif outlet["type"].lower() == "esphome":
            for p in ports:
                log.debug(
                    f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr} p{p}"
                )
                # cached state check with the same KeyError guard as the dli branch
                if not outlet.get("is_on", {}).get(p, {}).get("state"):
                    r = self.pwr.pwr_toggle(
                        outlet["type"], _addr, desired_state=True, port=p
                    )
                    if isinstance(r, bool):
                        # cache the new state locally (recreate keys if flushed)
                        self.pwr.data['defined'][o.split(':')[0]].setdefault(
                            'is_on', {}).setdefault(p, {})['state'] = r
                    else:
                        log.show(r)
                        log.warning(
                            f"{pwr_key} Error operating linked outlet @ {o}",
                            show=True,
                        )

        # -- // GPIO & TASMOTA Auto Power On \\ --
        else:
            log.debug(
                f"[Auto PwrOn] Power ON {pwr_key} Linked Outlet {outlet['type']}:{_addr}"
            )
            # noff (normally-off wiring) only applies to GPIO outlets
            r = self.pwr.pwr_toggle(
                outlet["type"],
                _addr,
                desired_state=True,
                noff=outlet.get("noff", True) if outlet["type"].upper() == "GPIO" else True,
            )
            if isinstance(r, int) and r > 1:  # return is an error
                r = False
            else:  # return is bool which is what we expect
                if r:
                    self.pwr.data["defined"][o]["state"] = r
                    self.autopwr_wait = True
                    # self.pwr.pwr_get_outlets(upd_linked=True)
                else:
                    # self.config.log_and_show(f"Error operating linked outlet {o}:{outlet['address']}", log=log.warning)
                    log.show(
                        f"Error operating linked outlet {o}:{outlet['address']}",
                        show=True,
                    )
ztp = False # -- Simply logging if another ConsolePi has connected directly to this one -- if vendor and 'ConsolePi' in vendor: log.info(f'A ConsolePi has connected to {iface}') else: # -- // Triggerd By TFTP XFER \\ -- ztp = True file_size = os.stat(cfg_file).st_size ztp_ok = True if int(mac_bytes) == file_size else False mac = utils.Mac(get_mac(ip)) log.info(f"[ZTP - TFTP XFR] {os.path.basename(cfg_file)} sent to {ip}|{mac.cols}{' Success' if ztp_ok else ''}") _res = utils.do_shell_cmd(f"wall 'consolepi-ztp: {os.path.basename(cfg_file)} sent to " f"{ip}|{mac.cols}{' Success' if ztp_ok else ' WARNING xfr != file size'}'") if not ztp_ok: log.warning(f"File Size {file_size} and Xfr Total ({mac_bytes}) don't match") # If cfg file was sent transition to next cfg (ordered). # if img sent adjust adjust dnsmasq to prevent needless resend of img. next_ztp(cfg_file, mac) if config.ztp and cfg_file.endswith('.cfg'): # load stashed dict from file. keys are ztp.py generated cfg files names, mapped to dict of ztp settings from config. if not utils.valid_file(ZTP_CLI_FILE): log.warning(f'Skipping ZTP Post CLI for {ip} {ZTP_CLI_FILE} not found/invalid') else: with open(ZTP_CLI_FILE) as fb: cfg_dict = json.loads(''.join(fb.readlines())) cfg_file_name = os.path.basename(cfg_file) if cfg_file_name in cfg_dict: cli_ok = True cfg_dict[cfg_file_name]['ip'] = ip