def trigger_cloud_update(self):
    local = self.cpi.local
    remotes = self.cpi.remotes
    log.info(
        '[MDNS REG] Cloud Update triggered delaying {} seconds'.format(UPDATE_DELAY))
    time.sleep(UPDATE_DELAY)  # Wait UPDATE_DELAY seconds before updating, to accommodate multiple add/removes
    data = local.build_local_dict(refresh=True)
    for a in local.data[local.hostname].get('adapters', {}):
        if 'udev' in local.data[local.hostname]['adapters'][a]:
            del local.data[local.hostname]['adapters'][a]['udev']

    log.debug(
        f'[MDNS REG] Final Data set collected for {local.hostname}: \n{json.dumps(data)}')

    remote_consoles = {}
    if config.cloud_svc == 'gdrive':  # pylint: disable=maybe-no-member
        cloud = GoogleDrive(local.hostname)
        remote_consoles = cloud.update_files(data)

    # Send remotes learned from cloud file to local cache
    if len(remote_consoles) > 0 and 'Gdrive-Error' not in remote_consoles:
        remotes.update_local_cloud_file(remote_consoles)
        log.info('[MDNS REG] Cloud Update Completed, Found {} Remote ConsolePis'.format(
            len(remote_consoles)))
    else:
        log.warning(
            '[MDNS REG] Cloud Update Completed, No remotes found, or Error Occurred')
def next_ztp(filename, mac):
    _from = os.path.basename(filename)
    _to = f"{_from.split('_')[0]}_{int(_from.rstrip('.cfg').split('_')[-1]) + 1}.cfg"
    host_lines = []
    if not os.path.isfile(ztp_opts_conf):
        log.warning(f"{ztp_opts_conf} not found. Nothing to do.")
    else:
        if not os.path.isfile(f"{os.path.dirname(filename)}/{_to}"):
            log.info(f"No More Files for {_from.split('_')[0]}")
        with in_place.InPlace(ztp_opts_conf) as fp:
            line_num = 1
            for line in fp:
                if _from in line:
                    # TODO if not mac.ok don't write retry lines... print/log/warning
                    if mac.ok:
                        fp.write(
                            f"# {mac.cols}|{ip} Sent {_from}"
                            f"{' Success' if ztp_ok else 'WARN file size != xfer total check switch and logs'}\n")
                        fp.write(
                            f"# -- Retry Lines for {_from.rstrip('.cfg')} Based On mac {mac.cols} --\n")
                        fp.write(f'tag:{mac.tag},option:bootfile-name,"{_from}"\n')
                        host_lines.append(
                            f"{mac.cols},{mac.tag},,{ztp_lease_time},set:{mac.tag},set:sent\n")
                    else:
                        print(f'Unable to write Retry Lines for previously updated device. Mac {mac.orig} appears invalid')
                    fp.write(f"# SENT # {line}")
                    log.info(f"Disabled {_from} on line {line_num} of {os.path.basename(ztp_opts_conf)}")
                    log.info(f"Retry Entries Created for {_from.rstrip('.cfg')} | {mac.cols} | {ip}")
                elif _to in line:
                    if not line.startswith('#'):
                        log.warning(f'Expected {_to} option line to be commented out @ this point. It was not.')
                    fp.write(line.lstrip('#').lstrip())
                    log.info(f"Enabled {_to} on line {line_num} of {os.path.basename(ztp_opts_conf)}")
                else:
                    fp.write(line)
                line_num += 1

    if host_lines:
        with open(ztp_hosts_conf, 'a') as fp:
            fp.writelines(host_lines)
        log.info(f"Retry Entries Written to file for {_from.rstrip('.cfg')} | {mac.cols} | {ip}")
def get_adapters_via_api(self, ip: str, port: int = 5000, rename: bool = False, log_host: str = None):
    """Send RESTful GET request to Remote ConsolePi to collect adapter info

    params:
        ip(str): ip address or FQDN of remote ConsolePi
        port(int): API port of the remote ConsolePi (default: 5000)
        rename(bool): TODO
        log_host(str): friendly string for logging purposes "hostname(ip)"

    returns:
        adapter dict for remote if successful and adapters exist
        status_code 200 if successful but no adapters
        Falsey or response status_code if an error occurred.
    """
    if not log_host:
        log_host = ip
    url = f"http://{ip}:{port}/api/v1.0/adapters"
    if rename:
        url = f"{url}?refresh=true"
    log.debug(url)

    headers = {
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": f"{ip}:{port}",
        "accept-encoding": "gzip, deflate",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    try:
        response = requests.request("GET", url, headers=headers, timeout=config.remote_timeout)
    except (OSError, TimeoutError):
        log.warning(
            f"[API RQST OUT] Remote ConsolePi: {log_host} TimeOut when querying via API - Unreachable.")
        return False

    if response.ok:
        ret = response.json()
        ret = ret["adapters"] if ret["adapters"] else response.status_code
        _msg = f"Adapters Successfully retrieved via API for Remote ConsolePi: {log_host}"
        log.info("[API RQST OUT] {}".format(_msg))
        log.debugv("[API RQST OUT] Response: \n{}".format(
            json.dumps(ret, indent=4, sort_keys=True)))
    else:
        ret = response.status_code
        log.error(
            f"[API RQST OUT] Failed to retrieve adapters via API for Remote ConsolePi: {log_host}\n{ret}:{response.text}")
    return ret
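
# -- Hedged usage sketch (not in the source): illustrates handling the mixed
# return type of get_adapters_via_api -- an adapter dict on success, an int
# status_code when the remote answered but reported no adapters (or an HTTP
# error), and False when unreachable. `remotes` and the address are hypothetical.
# Note the `is False` check comes first because bool is a subclass of int.
ret = remotes.get_adapters_via_api("10.0.30.60", log_host="consolepi4(10.0.30.60)")
if ret is False:
    print("remote unreachable")
elif isinstance(ret, int):
    print(f"no adapter data, status: {ret}")
else:
    print(f"adapters: {ret}")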
def run(self, connection_args):
    '''Establish Connection to device and run CLI commands provided via ztp config

    Args:
        connection_args (dict): Arguments passed to paramiko to establish connection
    '''
    result = dict(
        changed=False,
        cli_output=[],
        message=''
    )
    _start_time = time.time()
    while True:
        try:
            go = False
            # Connect to Switch via SSH
            self.ssh_client.connect(**connection_args)
            self.prompt = ''
            # SSH Command execution not allowed, therefore using the following paramiko functionality
            self.shell_chanel = self.ssh_client.invoke_shell()
            self.shell_chanel.settimeout(8)
            # AOS-CX specific
            self.get_prompt()
            go = True
            break
        except socket.timeout:
            log.error(f'ZTP CLI Operations Failed, TimeOut Connecting to {self.ip}')
        except paramiko.ssh_exception.NoValidConnectionsError as e:
            log.error(f'ZTP CLI Operations Failed, {e}')
        except paramiko.ssh_exception.AuthenticationException:
            log.error('ZTP CLI Operations Failed, CLI Authentication Failed verify creds in config')

        if time.time() - _start_time >= ZTP_CLI_LOGIN_MAX_WAIT:
            break  # Give Up
        else:
            time.sleep(10)

    if go:
        try:
            result['cli_output'] = self.execute_command(self.cmd_list)
            result['changed'] = True
            if self.fail_msg:
                result['message'] += self.fail_msg.get('msg')
        finally:
            self.logout()

        # Format log entries and exit
        _res = " -- // Command Results \\ -- \n"
        _cmds = [c for c in self.cmd_list if 'SLEEP' not in c]
        for cmd, out in zip(_cmds, result['cli_output']):
            if "progress:" in out and f"progress: {out.count('progress:')}/{out.count('progress:')}" in out:
                out = out.split("progress:")[0] + f"progress: {out.count('progress:')}/{out.count('progress:')}"
            _res += "{}:{} {}\n".format(cmd, '\n' if '\n' in out else '', out)
        _res += " --------------------------- \n"
        _res += ''.join([f"{k}: {v}\n" for k, v in result.items() if k != "cli_output" and v])
        log.info(f"Post ZTP CLI Operational Result for {self.ip}:\n{_res}")
def run(self):
    self.zc = Zeroconf()
    if not self.startup_logged:
        log.info(
            f"[MDNS DSCVRY] Discovering ConsolePis via mdns - Debug Enabled: {self.debug}")
        self.startup_logged = True
    return ServiceBrowser(self.zc, "_consolepi._tcp.local.",
                          handlers=[self.on_service_state_change])  # NoQA pylint: disable=unused-variable
def _write_to_file(fp, current: list, new: list):
    new = utils.unique(new)
    if 'ztp-opts' in fp.name:
        # Overwrite the existing file contents
        for line_num, line in enumerate(new):
            fp.write(line)
    else:
        # Append to existing file for ztp-hosts.conf
        for line_num, line in enumerate(new):
            if line not in current:
                fp.write(line)
            else:
                log.info(
                    f"Skipping write for content on line {line_num} ({line[0:20]}...) as the line already exists")
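
# -- Hedged usage sketch (not in the source): demonstrates the two paths in
# _write_to_file -- 'ztp-opts' files are rewritten with the de-duplicated list,
# anything else (e.g. ztp-hosts.conf) only has missing lines appended. The
# temp file name is hypothetical; utils/log come from the surrounding module.
import tempfile

with tempfile.NamedTemporaryFile('r+', suffix='-ztp-hosts.conf') as demo_fp:
    current = ["aa:bb:cc:dd:ee:ff,tag1\n"]
    demo_fp.writelines(current)
    new = ["aa:bb:cc:dd:ee:ff,tag1\n", "11:22:33:44:55:66,tag2\n"]
    _write_to_file(demo_fp, current, new)  # only the second line is written (first already exists)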
def update_mdns(self, device=None, action=None, *args, **kwargs):
    zeroconf = self.zeroconf
    info = self.try_build_info()

    def sub_restart_zc():
        log.info(
            '[MDNS REG] mdns_refresh thread Start... Delaying {} Seconds'.format(UPDATE_DELAY))
        time.sleep(UPDATE_DELAY)  # Wait x seconds before updating, to accommodate multiple add/removes
        zeroconf.update_service(info)
        zeroconf.unregister_service(info)
        time.sleep(5)
        zeroconf.register_service(info)
        log.info('[MDNS REG] mdns_refresh thread Completed')

    if device is not None:
        abort_mdns = False
        for thread in threading.enumerate():
            if 'mdns_refresh' in thread.name:
                log.debug('[MDNS REG] Another mdns_refresh thread already queued, this thread will abort')
                abort_mdns = True
                break

        if not abort_mdns:
            threading.Thread(target=sub_restart_zc, name='mdns_refresh', args=()).start()
            log.debug('[MDNS REG] mdns_refresh Thread Started. Current Threads:\n {}'.format(
                threading.enumerate()))

        log.info('[MDNS REG] detected change: {} {}'.format(device.action, device.sys_name))

        if config.cloud:  # pylint: disable=maybe-no-member
            abort = False
            for thread in threading.enumerate():
                if 'cloud_update' in thread.name:
                    log.debug('[MDNS REG] Another cloud Update thread already queued, this thread will abort')
                    abort = True
                    break

            if not abort:
                threading.Thread(target=self.trigger_cloud_update, name='cloud_update', args=()).start()
                log.debug('[MDNS REG] Cloud Update Thread Started. Current Threads:\n {}'.format(
                    threading.enumerate()))
def create_sheet(self):
    service = self.sheets_svc
    log.info('[GDRIVE]: ConsolePi.csv not found on Gdrive. Creating ConsolePi.csv')
    spreadsheet = {
        'properties': {
            'title': 'ConsolePi.csv'
        }
    }
    request = service.spreadsheets().create(body=spreadsheet, fields='spreadsheetId')
    spreadsheet = self.exec_request(request)
    return '{0}'.format(spreadsheet.get('spreadsheetId'))
def get_adapters_via_api(self, ip: str, rename: bool = False):
    """Send RESTful GET request to Remote ConsolePi to collect adapter info

    params:
        ip(str): ip address or FQDN of remote ConsolePi

    returns:
        adapter dict for remote if successful
        Falsey or response status_code if an error occurred.
    """
    # log = self.config.log
    if rename:
        url = f"http://{ip}:5000/api/v1.0/adapters?refresh=true"
    else:
        url = f"http://{ip}:5000/api/v1.0/adapters"
    log.info(url)  # DEBUG

    headers = {
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": f"{ip}:5000",
        "accept-encoding": "gzip, deflate",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    try:
        response = requests.request("GET", url, headers=headers, timeout=config.remote_timeout)
    except (OSError, TimeoutError):
        log.warning(
            "[API RQST OUT] Remote ConsolePi @ {} TimeOut when querying via API - Unreachable.".format(ip))
        return False

    if response.ok:
        ret = response.json()
        ret = ret["adapters"] if ret["adapters"] else response.status_code
        _msg = "Adapters Successfully retrieved via API for Remote ConsolePi @ {}".format(ip)
        log.info("[API RQST OUT] {}".format(_msg))
        log.debugv("[API RQST OUT] Response: \n{}".format(
            json.dumps(ret, indent=4, sort_keys=True)))
    else:
        ret = response.status_code
        log.error(
            "[API RQST OUT] Failed to retrieve adapters via API for Remote ConsolePi @ {}\n{}:{}"
            .format(ip, ret, response.text))
    return ret
def __init__(self, ip: str = None, cli_method: str = 'ssh', cli_user: str = None, cli_pass: str = None,
             cli_timeout: int = ZTP_CLI_DEFAULT_TIMEOUT, cmd_list: list = None, **kwargs):
    # TELNET not supported for now... cli_method currently ignored
    self.fail_msg = ''
    self.ip = ip
    self.cmd_list = cmd_list
    if not cli_user or cli_pass is None or not cmd_list:
        log.info(f"No CLI Operations Performed on {ip} Missing/incomplete cli configuration")
    else:
        paramiko_ssh_connection_args = {'hostname': ip, 'port': 22, 'look_for_keys': False,
                                        'username': cli_user, 'password': cli_pass, 'timeout': cli_timeout}
        # Login
        self.ssh_client = paramiko.SSHClient()
        # Default AutoAdd as Policy
        self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.run(paramiko_ssh_connection_args)
def wait_for_threads(self, name="init", timeout=10, thread_type="power"):
    """wait for parallel async threads to complete

    returns:
        bool: True if threads are still running indicating a timeout
        None indicates no threads found ~ they have finished
    """
    start = time.time()
    do_log = False
    found = False
    while True:
        found = False
        for t in threading.enumerate():
            if name in t.name:
                found = do_log = True
                t.join(timeout - 1)

        if not found:
            if name == "init" and thread_type == "power":
                if self.pwr and (not self.pwr.data or not self.pwr.data.get("dli_power")):
                    self.pwr.dli_exists = False
                self.pwr_init_complete = True
            if do_log:
                log.info(
                    "[{0} {1} WAIT] {0} Threads have Completed, elapsed time: {2}".format(
                        name.strip("_").upper(),
                        thread_type.upper(),
                        time.time() - start,
                    ))
            break
        elif time.time() - start > timeout:
            log.error(
                "[{0} {1} WAIT] Timeout Waiting for {0} Threads to Complete, elapsed time: {2}".format(
                    name.strip("_").upper(),
                    thread_type.upper(),
                    time.time() - start,
                ),
                show=True,
            )
            return True
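
# -- Standalone sketch (not in the source) of the join-by-name pattern
# wait_for_threads relies on: workers are given a common name prefix so a
# waiter can later find and join them via threading.enumerate().
import threading
import time

def _worker(n):
    time.sleep(0.1 * n)  # stand-in for real work

for i in range(3):
    threading.Thread(target=_worker, args=(i,), name=f"vrfy_host{i}").start()

for t in threading.enumerate():
    if "vrfy_" in t.name:
        t.join(5)  # cap the wait per thread, as wait_for_threads does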
def do_api_request(self, ip: str, path: str, *args, **kwargs):
    """Send RESTful GET request to Remote ConsolePi to collect data

    params:
        ip(str): ip address or FQDN of remote ConsolePi
        path(str): path beyond /api/v1.0/

    returns:
        response object, or False if the request timed out
    """
    url = f"http://{ip}:5000/api/v1.0/{path}"
    log.debug(f'[do_api_request] URL: {url}')

    headers = {
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": f"{ip}:5000",
        "accept-encoding": "gzip, deflate",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    try:
        response = requests.request("GET", url, headers=headers, timeout=config.remote_timeout)
    except (OSError, TimeoutError):
        log.warning(
            f"[API RQST OUT] Remote ConsolePi @ {ip} TimeOut when querying via API - Unreachable.")
        return False

    if response.ok:
        log.info(f"[API RQST OUT] {url} Response: OK")
        log.debugv(
            f"[API RQST OUT] Response: \n{json.dumps(response.json(), indent=4, sort_keys=True)}")
    else:
        log.error(f"[API RQST OUT] API Request Failed {url}")
    return response
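
# -- Hedged usage sketch (not in the source): do_api_request returns False on
# timeout, otherwise the requests Response object. A Response is falsey when
# not response.ok, so a simple truthiness check covers both failure modes.
# `remotes` and the path below are hypothetical.
response = remotes.do_api_request("10.0.30.60", "details")
if response and response.ok:
    details = response.json()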
def main():
    cpi = ConsolePi()
    cloud_svc = config.cfg.get("cloud_svc", "error")
    local = cpi.local
    remotes = cpi.remotes
    cpiexec = cpi.cpiexec
    log.info('[CLOUD TRIGGER (IP)]: Cloud Update triggered by IP Update')
    CLOUD_CREDS_FILE = config.static.get(
        "CLOUD_CREDS_FILE", '/etc/ConsolePi/cloud/gdrive/.credentials/credentials.json')
    if not utils.is_reachable("www.googleapis.com", 443):
        log.error(f"Not Updating {cloud_svc} due to connection failure")
        sys.exit(1)
    if not utils.valid_file(CLOUD_CREDS_FILE):
        log.error('Credentials file not found or invalid')
        sys.exit(1)

    # -- // Get details from Google Drive - once populated will skip \\ --
    if cloud_svc == "gdrive" and remotes.cloud is None:
        remotes.cloud = GoogleDrive(hostname=local.hostname)

    if cpiexec.wait_for_threads(thread_type="remotes") and (
            config.power and cpiexec.wait_for_threads(name="_toggle_refresh")):
        log.error('IP Change Cloud Update Trigger: TimeOut Waiting for Threads to Complete')

    remote_consoles = remotes.cloud.update_files(local.data)
    if remote_consoles and "Gdrive-Error:" in remote_consoles:
        log.error(remote_consoles)
    else:
        for r in remote_consoles:
            # -- Convert Any Remotes with old API schema to new API schema --
            if isinstance(remote_consoles[r].get("adapters", {}), list):
                remote_consoles[r]["adapters"] = remotes.convert_adapters(
                    remote_consoles[r]["adapters"])
                log.warning(f"Adapter data for {r} retrieved from cloud in old API format... Converted")
        if len(remote_consoles) > 0:
            remotes.update_local_cloud_file(remote_consoles)
f"Enabled {_to} on line {line_num} of {os.path.basename(ztp_opts_conf)}" ) else: fp.write(line) line_num += 1 if host_lines: with open(ztp_hosts_conf, 'a') as fp: fp.writelines(host_lines) log.info( f"Retry Entries Written to file for {_from.rstrip('.cfg')} | {mac.cols} | {ip}" ) if add_del != "tftp": log.info( f'[DHCP LEASE] DHCP Client Connected ({add_del}): iface: {iface}, mac: {mac_bytes}, ip: {ip}, vendor: {vendor}' ) ztp = False else: ztp = True file_size = os.stat(cfg_file).st_size ztp_ok = True if int(mac_bytes) == file_size else False mac = utils.Mac(get_mac(ip)) log.info( f"[ZTP - TFTP XFR] {os.path.basename(cfg_file)} sent to {ip}|{mac.cols}{' Success' if ztp_ok else ''}" ) _res = utils.do_shell_cmd( f"wall 'consolepi-ztp: {os.path.basename(cfg_file)} sent to " f"{ip}|{mac.cols}{' Success' if ztp_ok else ' WARNING xfr != file size'}'" ) if not ztp_ok:
def next_ztp(filename, mac):
    '''Transition dnsmasq configuration to next config file when using ordered ZTP mechanism.

    Args:
        filename (str): Full path of filename just sent via tftp.  Provided by dnsmasq as argument.
        mac (Mac object): mac object with various attributes for the MAC address of the device
            that requested/rcvd the config.
    '''
    _from = os.path.basename(filename)
    _to = None  # init
    if _from.endswith('.cfg'):
        set_tag = "cfg_sent"
        _cfg_mac = utils.Mac(_from.rstrip('.cfg'))
        if _cfg_mac.ok and mac.clean not in [_cfg_mac.clean, _cfg_mac.oobm.clean]:
            _to = f"{_from.split('_')[0]}_{int(_from.rstrip('.cfg').split('_')[-1]) + 1}.cfg"
    else:
        set_tag = "img_sent"

    host_lines = []
    opts_lines = []
    if not os.path.isfile(ztp_opts_conf):
        log.warning(f"{ztp_opts_conf} not found. Nothing to do.")
    else:
        if _to and not os.path.isfile(f"{os.path.dirname(filename)}/{_to}"):
            log.info(f"No More Files for {_from.split('_')[0]}")

        with in_place.InPlace(ztp_opts_conf) as fp:
            line_num = 1
            cur_opts_lines = fp.readlines()
            for line in cur_opts_lines:
                if _from in line:
                    if mac.ok:
                        opts_lines.append(
                            f"# {mac.cols}|{ip} Sent {_from}"
                            f"{' Success' if ztp_ok else 'WARN file size != xfer total check switch and logs'}\n")
                        if set_tag == "cfg_sent" and not line.startswith("#"):
                            opts_lines.append(f"# SENT # {line}")
                            opts_lines.append(
                                f"# -- Retry Line for {_from.rstrip('.cfg')} Based On mac {mac.cols} --\n")
                            opts_lines.append(f'tag:{mac.tag},option:bootfile-name,"{_from}"\n')
                            log.info(f"Disabled {_from} on line {line_num} of {os.path.basename(ztp_opts_conf)}")
                            log.info(f"Retry Entry Created for {_from.rstrip('.cfg')} | {mac.cols} | {ip}")
                        else:
                            opts_lines.append(line)
                        host_lines.append(f"{mac.cols},{mac.tag},,{ztp_lease_time},set:{mac.tag},set:{set_tag}\n")
                    else:
                        print(f'Unable to write Retry Lines for previously updated device. Mac {mac.orig} appears invalid')
                        log.warning(
                            f"Unable to Create Retry Entry for {_from.rstrip('.cfg')} @ {ip}, "
                            f"Invalid MAC address --> {mac.cols}")
                elif _to and _to in line:
                    if not line.startswith('#'):
                        log.warning(f'Expected {_to} option line to be commented out @ this point. It was not.')
                    opts_lines.append(line.lstrip('#').lstrip())
                    log.info(f"Enabled {_to} on line {line_num} of {os.path.basename(ztp_opts_conf)}")
                else:
                    opts_lines.append(line)
                line_num += 1

            _write_to_file(fp, cur_opts_lines, opts_lines)

    if host_lines:
        with open(ztp_hosts_conf, 'a+') as fp:
            fp.seek(0)
            cur_host_lines = fp.readlines()
            _write_to_file(fp, cur_host_lines, host_lines)
        if set_tag.startswith('cfg'):
            log.info(f"Retry Entries Written to file for {_from.rstrip('.cfg')} | {mac.cols} | {ip}")
        else:
            log.info(f"{mac.cols} tagged as img_sent to prevent re-send of {_from}")
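
# -- Hedged illustration (not in the source) of the ordered-ZTP filename
# convention next_ztp advances: "<base>_<n>.cfg" -> "<base>_<n + 1>.cfg".
# Note that .rstrip('.cfg') strips a *character set*, which is safe here only
# because the trailing sequence number stops the strip.
_from = "6300M_1.cfg"  # hypothetical config name
_to = f"{_from.split('_')[0]}_{int(_from.rstrip('.cfg').split('_')[-1]) + 1}.cfg"
assert _to == "6300M_2.cfg"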
def get_remote(self, data=None, rename=False):
    spin = self.spin

    def verify_remote_thread(remotepi, data, rename):
        """sub to verify reachability and api data for remotes

        params:
            remotepi: The hostname currently being processed
            data: dict remote ConsolePi dict with hostname as key
        """
        this = data[remotepi]
        res = self.api_reachable(remotepi, this, rename=rename)
        this = res.data
        if res.update:
            self.cache_update_pending = True

        if not res.reachable:
            log.warning(f"[GET REM] Found {remotepi} in Local Cloud Cache: UNREACHABLE")
            this["fail_cnt"] = (1 if not this.get("fail_cnt") else this["fail_cnt"] + 1)
            self.pop_list.append(remotepi)
            self.cache_update_pending = True
        else:
            self.connected = True
            if this.get("fail_cnt"):
                this["fail_cnt"] = 0
                self.cache_update_pending = True
            if res.update:
                log.info(
                    f"[GET REM] Updating Cache - Found {remotepi} in Local Cloud Cache, "
                    f"reachable via {this['rem_ip']}")

        data[remotepi] = this

    if data is None or len(data) == 0:
        data = config.remotes  # remotes from local cloud cache

    if not data:
        # print(self.log_sym_warn + " No Remotes in Local Cache")
        log.info("No Remotes found in Local Cache")
        data = {}  # convert None type to empty dict
    else:
        # if self is in the remote-data remove and warn user (can occur in rare scenarios i.e. hostname changes)
        if socket.gethostname() in data:
            del data[socket.gethostname()]
            log.show("Local cache included entry for self - do you have other ConsolePis using the same hostname?")

        # Verify Remote ConsolePi details and reachability
        if stdin.isatty():
            spin.start("Querying Remotes via API to verify reachability and adapter data")
        for remotepi in data:
            # -- // Launch Threads to verify all remotes in parallel \\ --
            threading.Thread(
                target=verify_remote_thread,
                args=(remotepi, data, rename),
                name=f"vrfy_{remotepi}",
            ).start()
            # verify_remote_thread(remotepi, data)  # Non-Threading DEBUG

        # -- wait for threads to complete --
        if not self.cpiexec.wait_for_threads(name="vrfy_", thread_type="remote"):
            if config.remotes:
                if stdin.isatty():
                    spin.succeed(
                        "[GET REM] Querying Remotes via API to verify reachability and adapter data\n\t"
                        f"Found {len(config.remotes)} Remote ConsolePis")
            else:
                if stdin.isatty():
                    spin.warn(
                        "[GET REM] Querying Remotes via API to verify reachability and adapter data\n\t"
                        "No Reachable Remote ConsolePis Discovered")
        else:
            log.error("[GET REM] Remote verify threads Still running / exceeded timeout")
            if stdin.isatty():
                spin.stop()

    # update local cache if any ConsolePis found UnReachable
    if self.cache_update_pending:
        if self.pop_list:
            for remotepi in self.pop_list:
                if data[remotepi]["fail_cnt"] >= 3:  # NoQA remove from local cache after 3 failures (cloud or mdns will repopulate if discovered)
                    removed = data.pop(remotepi)
                    log.warning(
                        "[GET REM] {} has been removed from Local Cache after {} failed attempts".format(
                            remotepi, removed["fail_cnt"]),
                        show=True,
                    )
                else:
                    log.show("Cached Remote '{}' is unreachable".format(remotepi))

        # update local cache file if rem_ip or adapter data changed
        data = self.update_local_cloud_file(data)
        self.pop_list = []
        self.cache_update_pending = False

    return data
def refresh(self, bypass_cloud=False):
    remote_consoles = None
    cpiexec = self.cpiexec
    local = self.local
    cloud_svc = config.cfg.get("cloud_svc", "error")

    # TODO refactor wait_for_threads to have an all key or accept a list
    with Halo(text="Waiting For threads to complete", spinner="dots1"):
        if cpiexec.wait_for_threads(thread_type="remotes") and (
                config.power and cpiexec.wait_for_threads(name="_toggle_refresh")):
            log.show(
                "Timeout Waiting for init or toggle threads to complete try again later or"
                " investigate logs")
            return

    # -- // Update/Refresh Local Data (Adapters/Interfaces) \\ --
    local.data = local.build_local_dict(refresh=True)
    log.debugv(f"Final Data set collected for {local.hostname}: {local.data}")

    # -- // Get details from Google Drive - once populated will skip \\ --
    if not bypass_cloud and self.do_cloud and not self.local_only:
        if cloud_svc == "gdrive" and self.cloud is None:
            # buried import until I find out why this import takes so @#%$@#% long.  Not imported until 1st refresh is called
            with Halo(text="Loading Google Drive Library", spinner="dots1"):
                from consolepi.gdrive import GoogleDrive
            self.cloud = GoogleDrive(hostname=local.hostname)
            log.info("[MENU REFRESH] Gdrive init")

        # Pass Local Data to update_sheet method get remotes found on sheet as return
        # update sheets function updates local_cloud_file
        _msg = "[MENU REFRESH] Updating to/from {}".format(cloud_svc)
        log.info(_msg)
        if stdin.isatty():
            self.spin.start(_msg)

        # -- // SYNC DATA WITH GDRIVE \\ --
        remote_consoles = self.cloud.update_files(local.data)  # local data refreshed above
        if remote_consoles and "Gdrive-Error:" not in remote_consoles:
            if stdin.isatty():
                self.spin.succeed(
                    _msg + "\n\tFound {} Remotes via Gdrive Sync".format(len(remote_consoles)))
            for r in remote_consoles:
                # -- Convert Any Remotes with old API schema to new API schema --
                if isinstance(remote_consoles[r].get("adapters", {}), list):
                    remote_consoles[r]["adapters"] = self.convert_adapters(
                        remote_consoles[r]["adapters"])
                    log.warning(f"Adapter data for {r} retrieved from cloud in old API format... Converted")
        elif "Gdrive-Error:" in remote_consoles:
            if stdin.isatty():
                self.spin.fail("{}\n\t{} {}".format(_msg, self.log_sym_error, remote_consoles))
            log.show(remote_consoles)  # display error returned from gdrive module
            remote_consoles = []
        else:
            if stdin.isatty():
                self.spin.warn(_msg + "\n\tNo Remotes Found via Gdrive Sync")

        if len(remote_consoles) > 0:
            _msg = f"[MENU REFRESH] Updating Local Cache with data from {cloud_svc}"
            log.info(_msg)
            if stdin.isatty():
                self.spin.start(_msg)
            self.update_local_cloud_file(remote_consoles)
            if stdin.isatty():
                self.spin.succeed(_msg)  # no real error correction here
        else:
            log.warning(
                f"[MENU REFRESH] No Remote ConsolePis found on {cloud_svc}",
                show=True,
            )
    else:
        if self.do_cloud and not bypass_cloud:
            log.show(
                f"Not Updating from {cloud_svc} due to connection failure\n"
                "Close and re-launch menu if network access has been restored")

    # Update Remote data with data from local_cloud cache / cloud
    self.data = self.get_remote(data=remote_consoles)
def update_local_cloud_file(self, remote_consoles=None, current_remotes=None, local_cloud_file=None):
    """Update local cloud cache (cloud.json).

    Verifies the newly discovered data is more current than what we already know
    and updates the local cloud.json file if so.
    The Menu uses cloud.json to populate remote menu items.

    params:
        remote_consoles: The newly discovered data (from Gdrive or mdns)
        current_remotes: The current remote data fetched from the local cloud cache (cloud.json)
            - func will retrieve this if not provided
        local_cloud_file: The path to the local cloud file (global var cloud.json)

    returns:
        dict: The resulting remote console dict representing the most recent data for each remote.
    """
    local_cloud_file = (config.static.get("LOCAL_CLOUD_FILE")
                        if local_cloud_file is None else local_cloud_file)

    if len(remote_consoles) > 0:
        if current_remotes is None:
            current_remotes = self.data = config.remote_update()  # grabs the remote data from local cloud cache

        # update current_remotes dict with data passed to function
        if current_remotes is not None:
            for _ in current_remotes:
                if _ not in remote_consoles:
                    if "fail_cnt" not in current_remotes[_] or current_remotes[_]["fail_cnt"] < 2:
                        remote_consoles[_] = current_remotes[_]
                    elif (remote_consoles.get(_) and "fail_cnt" not in remote_consoles[_]
                            and "fail_cnt" in current_remotes[_]):
                        remote_consoles[_]["fail_cnt"] = current_remotes[_]["fail_cnt"]
                else:
                    # -- VERBOSE DEBUG --
                    log.debugv(
                        "[CACHE UPD] \n--{}-- \n remote upd_time: {}\n remote rem_ip: {}\n"
                        " remote source: {}\n cache rem upd_time: {}\n cache rem_ip: {}\n"
                        " cache source: {}\n".format(  # NoQA
                            _,
                            time.strftime(
                                "%a %x %I:%M:%S %p %Z",
                                time.localtime(remote_consoles[_]["upd_time"]),
                            ) if "upd_time" in remote_consoles[_] else None,  # NoQA
                            remote_consoles[_]["rem_ip"] if "rem_ip" in remote_consoles[_] else None,
                            remote_consoles[_]["source"] if "source" in remote_consoles[_] else None,
                            time.strftime(
                                "%a %x %I:%M:%S %p %Z",
                                time.localtime(current_remotes[_]["upd_time"]),
                            ) if "upd_time" in current_remotes[_] else None,  # NoQA
                            current_remotes[_]["rem_ip"] if "rem_ip" in current_remotes[_] else None,
                            current_remotes[_]["source"] if "source" in current_remotes[_] else None,
                        ))
                    # -- END VERBOSE DEBUG --

                    # No Change Detected (data passed to function matches cache)
                    if "last_ip" in current_remotes[_]:
                        del current_remotes[_]["last_ip"]
                    if remote_consoles[_] == current_remotes[_]:
                        log.debug("[CACHE UPD] {} No Change in info detected".format(_))

                    # only factor in existing data if source is not mdns
                    elif "upd_time" in remote_consoles[_] or "upd_time" in current_remotes[_]:
                        if "upd_time" in remote_consoles[_] and "upd_time" in current_remotes[_]:
                            if current_remotes[_]["upd_time"] > remote_consoles[_]["upd_time"]:
                                remote_consoles[_] = current_remotes[_]
                                log.info(
                                    f"[CACHE UPD] {_} Keeping existing data from {current_remotes[_].get('source', '')} "
                                    "based on more current update time")
                            elif remote_consoles[_]["upd_time"] > current_remotes[_]["upd_time"]:
                                log.info(
                                    "[CACHE UPD] {} Updating data from {} "
                                    "based on more current update time".format(_, remote_consoles[_]["source"]))
                            else:  # -- Update Times are equal --
                                if (current_remotes[_].get("adapters") and remote_consoles[_].get("adapters")
                                        and current_remotes[_]["adapters"].keys() != remote_consoles[_]["adapters"].keys()
                                        ) or remote_consoles[_].get("interfaces", {}) != current_remotes[_].get("interfaces", {}):
                                    log.warning(
                                        "[CACHE UPD] {} current cache update time and {} update time are equal"
                                        " but data appears to have changed. Updating".format(_, remote_consoles[_]["source"]))
                        elif "upd_time" in current_remotes[_]:
                            remote_consoles[_] = current_remotes[_]
                            log.info(
                                "[CACHE UPD] {} Keeping existing data based on *existence* of update time "
                                "which is lacking in this update from {}".format(_, remote_consoles[_]["source"]))

        for _try in range(0, 2):
            try:
                with open(local_cloud_file, "w") as cloud_file:
                    cloud_file.write(json.dumps(remote_consoles, indent=4, sort_keys=True))
                utils.set_perm(local_cloud_file)  # a hack to deal with perms ~ consolepi-details del func
                break
            except PermissionError:
                utils.set_perm(local_cloud_file)
    else:
        log.warning("[CACHE UPD] cache update called with no data passed, doing nothing")

    return remote_consoles
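
# -- Summary (not in the source) of the merge precedence update_local_cloud_file
# applies per remote, as implemented above:
#   1. identical payloads (ignoring last_ip) -> no change, debug log only
#   2. both sides carry upd_time -> the newer timestamp wins
#   3. equal upd_time but differing adapters/interfaces -> take the new data
#   4. only the cached entry has upd_time -> keep the cached entry
# Cached entries absent from the new data are carried over unless they have
# already failed reachability twice (fail_cnt >= 2).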
def on_service_state_change(self, zeroconf: Zeroconf, service_type: str, name: str,
                            state_change: ServiceStateChange) -> None:
    cpi = self.cpi
    mdns_data = None
    update_cache = False
    if state_change is ServiceStateChange.Added:
        info = zeroconf.get_service_info(service_type, name)
        if info:
            if info.server.split('.')[0] != cpi.local.hostname:
                if info.properties:
                    properties = info.properties
                    mdns_data = {
                        k.decode('UTF-8'):
                        v.decode('UTF-8') if not v.decode('UTF-8')[0] in ['[', '{']
                        else json.loads(v.decode('UTF-8'))  # NoQA
                        for k, v in properties.items()
                    }
                    hostname = mdns_data.get('hostname')
                    interfaces = mdns_data.get('interfaces', [])
                    # interfaces = json.loads(properties[b'interfaces'].decode("utf-8"))
                    log_out = json.dumps(mdns_data, indent=4, sort_keys=True)
                    log.debug(f'[MDNS DSCVRY] {hostname} Properties Discovered via mdns:\n{log_out}')

                    rem_ip = mdns_data.get('rem_ip')
                    if not rem_ip:
                        if len(mdns_data.get('interfaces', [])) == 1:
                            rem_ip = [interfaces[i]['ip'] for i in interfaces]
                            rem_ip = rem_ip[0]
                        else:
                            rem_ip = None if hostname not in cpi.remotes.data or 'rem_ip' not in cpi.remotes.data[hostname] \
                                else cpi.remotes.data[hostname]['rem_ip']

                    cur_known_adapters = cpi.remotes.data.get(hostname, {'adapters': None}).get('adapters')

                    # -- Log new entry only if this is the first time it's been discovered --
                    if hostname not in self.d_discovered:
                        self.d_discovered.append(hostname)
                        log.info('[MDNS DSCVRY] {}({}) Discovered via mdns'.format(
                            hostname, rem_ip if rem_ip is not None else '?'))

                    from_mdns_adapters = mdns_data.get('adapters')
                    mdns_data['rem_ip'] = rem_ip
                    mdns_data['adapters'] = from_mdns_adapters if from_mdns_adapters is not None else cur_known_adapters
                    mdns_data['source'] = 'mdns'
                    mdns_data['upd_time'] = int(time.time())
                    mdns_data = {hostname: mdns_data}

                    # update from API only if no adapter data exists either in cache or from mdns that triggered this
                    # adapter data is updated on menu_launch
                    if not mdns_data[hostname]['adapters'] or hostname not in cpi.remotes.data:
                        log.info('[MDNS DSCVRY] {} provided no adapter data Collecting via API'.format(
                            info.server.split('.')[0]))
                        # TODO check this don't think needed had a hung process on one of my Pis added it to be safe
                        try:
                            res = cpi.remotes.api_reachable(hostname, mdns_data[hostname])
                            update_cache = res.update
                            mdns_data[hostname] = res.data
                            # reachable = res.reachable
                        except Exception as e:
                            log.error(f'Exception occurred verifying reachability via API for {hostname}:\n{e}')

                    if self.show:
                        if hostname in self.discovered:
                            self.discovered.remove(hostname)
                        self.discovered.append('{}{}'.format(hostname, '*' if update_cache else ''))
                        print(hostname + '({}) Discovered via mdns.'.format(rem_ip if rem_ip is not None else '?'))
                        try:
                            print('{}\n{}'.format(
                                'mdns: None' if from_mdns_adapters is None else 'mdns: {}'.format(
                                    [d.replace('/dev/', '') for d in from_mdns_adapters]
                                    if not isinstance(from_mdns_adapters, list)
                                    else [d['dev'].replace('/dev/', '') for d in from_mdns_adapters]),
                                'cache: None' if cur_known_adapters is None else 'cache: {}'.format(
                                    [d.replace('/dev/', '') for d in cur_known_adapters]
                                    if not isinstance(cur_known_adapters, list)
                                    else [d['dev'].replace('/dev/', '') for d in cur_known_adapters])))
                        except TypeError as e:
                            print(f'EXCEPTION: {e}')
                        print(f'\nDiscovered ConsolePis: {self.discovered}')
                        print("press Ctrl-C to exit...\n")

                    log.debug('[MDNS DSCVRY] {} Final data set:\n{}'.format(
                        hostname, json.dumps(mdns_data, indent=4, sort_keys=True)))

                    if update_cache:
                        if 'hostname' in mdns_data[hostname]:
                            del mdns_data[hostname]['hostname']
                        cpi.remotes.data = cpi.remotes.update_local_cloud_file(remote_consoles=mdns_data)
                        log.info(f'[MDNS DSCVRY] {hostname} Local Cache Updated after mdns discovery')
                else:
                    log.warning(f'[MDNS DSCVRY] {hostname}: No properties found')
        else:
            log.warning(f'[MDNS DSCVRY] {info}: No info found')
add_del = sys.argv[1]
mac = sys.argv[2]
ip = sys.argv[3]

try:
    iface = os.environ['DNSMASQ_INTERFACE']
except KeyError:
    iface = None
try:
    vendor = os.environ['DNSMASQ_VENDOR_CLASS']
except KeyError:
    vendor = None

log.info(
    '[DHCP LEASE] DHCP Client Connected ({}): iface: {}, mac: {}, ip: {}, vendor: {}'
    .format(add_del, iface, mac, ip, vendor))  # NoQA

if vendor is not None and 'ConsolePi' in vendor:
    log.info('ConsolePi Discovered via DHCP')
    url = 'http://{}:5000/api/v1.0/details'.format(ip)
    headers = {
        'User-Agent': 'ConsolePi/version',
        'Accept': '*/*',
        'Cache-Control': 'no-cache',
        'Host': '{}:5000'.format(ip),
        'accept-encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
        'cache-control': 'no-cache'
    }
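    # -- Hedged continuation (assumption, not part of the source): presumably the
    # script follows with a GET using the url/headers built above; the log text
    # and error handling here are illustrative only.
    try:
        response = requests.get(url, headers=headers, timeout=config.remote_timeout)
        if response.ok:
            log.info(f'[DHCP LEASE] {ip} responded to ConsolePi detail query')
    except (OSError, TimeoutError):
        log.warning(f'[DHCP LEASE] ConsolePi at {ip} did not respond to API query')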
def do_ser2net_line(self, from_name: str = None, to_name: str = None, baud: int = None,
                    dbits: int = None, parity: str = None, flow: str = None, sbits: int = None):
    '''Process Adapter Configuration Changes in ser2net.conf.

    Keyword Arguments:
        from_name {str} -- The Adapter's existing name/alias (default: {None})
        to_name {str} -- The Adapter's new name/alias (default: {None})
        baud {int} -- Adapter baud (default: {self.baud})
        dbits {int} -- Adapter databits (default: {self.data_bits})
        parity {str} -- Adapter Parity (default: {self.parity})
        flow {str} -- Adapter flow (default: {self.flow})
        sbits {int} -- Adapter stop bits (default: {self.sbits})

    Returns:
        {str|None} -- Returns error text if an error occurs or None if no issues.
    '''
    # don't add the new entry to ser2net if one already exists for the alias
    if from_name != to_name and config.ser2net_conf.get(f"/dev/{to_name}"):
        log.info(
            f"ser2net: {to_name} already mapped to port {config.ser2net_conf[f'/dev/{to_name}'].get('port')}",
            show=True)
        return

    ser2net_parity = {'n': 'NONE', 'e': 'EVEN', 'o': 'ODD'}
    ser2net_flow = {'n': '', 'x': ' XONXOFF', 'h': ' RTSCTS'}
    baud = self.baud if not baud else baud
    dbits = self.data_bits if not dbits else dbits
    parity = self.parity if not parity else parity
    flow = self.flow if not flow else flow
    sbits = self.sbits if not sbits else sbits
    log_ptr = ''

    cur_line = config.ser2net_conf.get(f'/dev/{from_name}', {}).get('line')
    if cur_line and '/dev/ttyUSB' not in cur_line and '/dev/ttyACM' not in cur_line:
        new_entry = False
        next_port = cur_line.split(':')[0]  # Renaming existing
        log_ptr = config.ser2net_conf[f'/dev/{from_name}'].get('log_ptr')
        if not log_ptr:
            log_ptr = ''
    else:
        new_entry = True
        if utils.valid_file(self.ser2net_file):
            ports = [
                a['port'] for a in config.ser2net_conf.values()
                if 7000 < a.get('port', 0) <= 7999
            ]
            next_port = 7001 if not ports else int(max(ports)) + 1
        else:
            next_port = 7001
            error = utils.do_shell_cmd(f'sudo cp {self.ser2net_file} /etc/', handle_errors=False)
            if error:
                log.error(f'Rename Menu Error while attempting to cp ser2net.conf from src {error}')
                return error  # error added to display in calling method

    ser2net_line = (
        '{telnet_port}:telnet:0:/dev/{alias}:{baud} {dbits}DATABITS {parity} '
        '{sbits}STOPBIT {flow} banner {log_ptr}'.format(
            telnet_port=next_port,
            alias=to_name,
            baud=baud,
            dbits=dbits,
            sbits=sbits,
            parity=ser2net_parity[parity],
            flow=ser2net_flow[flow],
            log_ptr=log_ptr))

    # -- // Append to ser2net.conf \\ --
    if new_entry:
        error = utils.append_to_file(self.ser2net_file, ser2net_line)
    # -- // Rename Existing Definition in ser2net.conf \\ --
    # -- for devices with existing definitions cur_line is the existing line
    else:
        ser2net_line = ser2net_line.strip().replace('/', r'\/')
        cur_line = cur_line.replace('/', r'\/')
        cmd = "sudo sed -i 's/^{}$/{}/' {}".format(cur_line, ser2net_line, self.ser2net_file)
        error = utils.do_shell_cmd(cmd, shell=True)

    if not error:
        config.ser2net_conf = config.get_ser2net()
    else:
        return error
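
# -- Hedged illustration (not in the source) of the entry the format string
# above generates, assuming 9600 8N1, no flow control (maps to ''), the first
# free port 7001, and a hypothetical alias "my-router":
#   7001:telnet:0:/dev/my-router:9600 8DATABITS NONE 1STOPBIT  banner
# (the doubled space comes from the empty flow token; log_ptr is empty here)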
def print_menu(self, body, subs=None, header=None, subhead=None, footer=None, foot_fmt=None,
               col_pad=4, force_cols=False, do_cols=True, do_format=True, by_tens=False):
    '''Format and print current menu.

    Build the content in the calling method and pass into this function for format & printing.

    params:
        body: a list of lists or list of strings, where each inner list is made up of text for each
            menu-item in that logical section/group.
        subs: a list of sub-head lines that map to each inner body list.  This is the header for the
            specific logical grouping of menu-items.  body and subs lists should be of = len
        header: The main Header text for the menu
        footer: an optional text string or list of strings to be added to the menu footer.
        footer: {dict} - where footer['opts'] is list of 'strs' to match key from footer_options dict
            defined in menu_formatting method.  Determines what menu options are displayed in footer.
            (defaults options: x. Exit)
        col_pad: how many spaces will be placed between horizontal menu sections.
        force_cols: By default the menu will print as a single column, with force_cols=True it will
            bypass the vertical fit test - print section in cols horizontally
        foot_fmt: {dict} - Optional formatting dict.  top-level should be designated keyword that
            specifies supported formatting options (_rjust = right justify).  2nd level should be the
            footer_options key to match on where the value = the text.
            Example: foot_fmt={'_rjust': {'back': 'menu # alone will toggle the port'}} ~ will result in
            b. Back                                            menu # alone will toggle the port
            where 'b. Back' comes from the pre-defined foot_opts dict.
        do_cols: bool, If specified and set to False will bypass horizontal column printing,
            resulting in everything printing vertically on one screen
        do_format: bool, Only applies to sub_head auto formatting.  If specified and set to False
            will not perform formatting on sub-menu text.  Auto formatting results in
            '------- text -------' (width of section)
        by_tens: Will start each section @ 1, 11, 21, 31... unless the section is greater than 10
            menu_action statements should match accordingly
    '''
    line_dict = od({
        'header': {'lines': header},
        'body': {'sections': [], 'rows': [], 'width': []},
        'footer': {'lines': footer}
    })

    # Determine header and footer length used to determine if we can print with a single column
    subs = utils.listify(subs)
    subhead = utils.listify(subhead)
    if subhead:
        subhead = [
            f"{' ' + line if not line.startswith(' ') else line}"
            for line in subhead
        ]
        subhead.insert(0, '')
        if not subs:
            subhead.append('')

    head_len = len(self.menu_formatting('header', text=header, do_print=False)[0])
    if subhead:
        head_len += len(subhead)
    elif not subs:
        head_len += 1  # blank line added during print

    # TODO REMOVE TEMP during re-factor
    if isinstance(footer, dict):
        foot_lines = self.menu_formatting('footer', footer=footer, do_print=False)[0]
        foot_len = len(foot_lines)
        line_dict['footer']['lines'] = foot_lines
    else:
        foot_len = len(self.menu_formatting('footer', text=footer, do_print=False)[0])

    '''
    generate list for each section where each line is padded to width of longest line
    collect width of longest line and # of rows/menu-entries for each section

    All of this is used to format the header/footer width and to ensure consistent
    formatting during print of multiple columns
    '''
    # if str was passed place in list to iterate over
    if isinstance(body, str):
        body = [body]

    # ensure body is a list of lists for mapping with list of subs
    body = [body] if len(body) >= 1 and isinstance(body[0], str) else body
    # if subs is not None:
    #     subs = [subs] if not isinstance(subs, list) else subs

    i = 0
    item = start = 1
    for _section in body:
        if by_tens and i > 0:
            item = start + 10 if item <= start + 10 else item
            start += 10
        _item_list, _max_width = self.menu_formatting(
            'body', text=_section,
            sub=subs if subs is None else subs[i],
            index=item, do_print=False, do_format=do_format)
        line_dict['body']['width'].append(_max_width)
        line_dict['body']['rows'].append(len(_item_list))
        line_dict['body']['sections'].append(_item_list)
        item = item + len(_section)
        i += 1

    '''
    print multiple sections vertically - determine best cut point to start next column
    '''
    _rows = line_dict['body']['rows']
    tot_body_rows = sum(_rows)  # The # of rows to be printed
    # TODO what if rows for 1 section is greater than term rows
    tty_body_avail = (self.rows - head_len - foot_len)
    _begin = 0
    _end = 1
    _iter_start_stop = []
    _pass = 0
    # # -- won't fit in a single column calc sections we can put in the column
    # # if not tot_body_rows < tty_body_avail:  # Force at least 2 cols while testing
    _r = []
    [_r.append(r) for r in _rows if r not in _r]  # determine if all sections are of equal size (common for dli)

    if len(_r) == 1 or force_cols:
        for x in range(0, len(line_dict['body']['sections'])):
            _iter_start_stop.append([x, x + 1])
            # _tot_width.append(sum(body['width'][x:x + 1]) + (col_pad * (cols - 1)))
    else:
        while True:
            r = sum(_rows[_begin:_end])
            if not r >= tty_body_avail and not r >= tot_body_rows / 2:
                _end += 1
            else:
                if r > tty_body_avail and _end > 1:
                    if _begin != _end - 1:
                        # NoQA Indicates the individual section is > than avail rows so give up until paging implemented
                        _end = _end - 1
                if not _end == (len(_rows)):
                    _iter_start_stop.append([_begin, _end])
                    _begin = _end
                    _end = _begin + 1

            if _end == (len(_rows)):
                _iter_start_stop.append([_begin, _end])
                break

            if _pass > len(_rows) + 20:  # should not hit this anymore
                log.info(f'menu formatter exceeded {len(_rows) + 20} passes and gave up!!!', show=True)
                break
            _pass += 1

    sections = []
    _tot_width = []
    for _i in _iter_start_stop:
        this_max_width = 0 if not line_dict['body']['width'][_i[0]:_i[1]] \
            else max(line_dict['body']['width'][_i[0]:_i[1]])
        _tot_width.append(this_max_width)
        _column_list = []
        for _s in line_dict['body']['sections'][_i[0]:_i[1]]:
            for _line in _s:
                _fnl_line = '{:{_len}}'.format(_line, _len=this_max_width)
                _s[_s.index(_line)] = _fnl_line
            _column_list += _s
        sections.append(_column_list)
    line_dict['body']['sections'] = sections

    # -- set the initial # of columns
    body = line_dict['body']
    cols = len(body['sections']) if len(body['sections']) <= MAX_COLS else MAX_COLS
    if not force_cols:
        # TODO OK to remove and refactor tot_1_col_len is _tot_body_rows calculated above
        # TODO tot_1_col_len is inaccurate
        tot_1_col_len = sum(line_dict['body']['rows']) + len(line_dict['body']['rows']) \
            + head_len + foot_len
        cols = 1 if not do_cols or tot_1_col_len < self.rows else cols

    # -- if any footer or subhead lines are longer adjust _tot_width (which is the longest line from any section)
    # TODO This is likely wrong if there are formatters {{}} in the footer, the return should be fully formatted
    foot = self.menu_formatting('footer', text=line_dict['footer']['lines'], do_print=False)[0]
    _foot_width = [len(line) for line in foot]
    if isinstance(_tot_width, int):
        _tot_width = [_tot_width]
    _tot_width = max(_foot_width + _tot_width)
    if subhead:
        _subhead_width = [len(line) for line in subhead]
        _tot_width = max(_subhead_width) if max(_subhead_width) > _tot_width else _tot_width

    if MIN_WIDTH < self.cols:
        _tot_width = MIN_WIDTH if _tot_width < MIN_WIDTH else _tot_width

    # -- // Generate Final Body Rows \\ --
    pad = ' ' * col_pad
    _final_rows = [] if not body['sections'] else body['sections'][0]
    for s in body['sections']:
        if body['sections'].index(s) == 0:
            continue
        else:
            if len(_final_rows) > len(s):
                for _spaces in range(len(_final_rows) - len(s)):
                    s.append(' ' * len(s[0]))
            elif len(s) > len(_final_rows):
                for _spaces in range(len(s) - len(_final_rows)):
                    _final_rows.append(' ' * len(_final_rows[0]))
            _final_rows = [a + pad + b for a, b in zip(_final_rows, s)]

    # --// PRINT MENU \\--
    if _final_rows:
        _tot_width = len(_final_rows[0]) if len(_final_rows[0]) > _tot_width else _tot_width
    else:
        _tot_width = 0
    self.menu_cols = _tot_width  # FOR DEBUGGING
    self.menu_formatting('header', text=header, width=_tot_width, do_print=True)
    if subhead:
        for line in subhead:
            print(line)
            self.menu_rows += 1
    elif not subs:
        # TODO remove auto first blank line from subhead/subs and have formatter always do 1st blank line
        print('')  # Add blank line after header if no subhead and no subs
        self.menu_rows += 1

    for row in _final_rows:
        # TODO print here, also can print in the formatter method
        print(row)
        self.menu_rows += 1

    # TODO REMOVE TEMP during re-factor
    if isinstance(footer, dict):
        self.menu_formatting('footer', footer=footer, width=_tot_width, do_print=True)
    else:
        self.menu_formatting('footer', text=footer, width=_tot_width, do_print=True)
if __name__ == "__main__":
    if add_del != "tftp":
        # -- // Triggered By DHCP \\ --
        log.info(f'[DHCP LEASE] DHCP Client Connected ({add_del}): iface: {iface}, mac: {mac_bytes}, ip: {ip}, vendor: {vendor}')
        ztp = False
        # -- Simply logging if another ConsolePi has connected directly to this one --
        if vendor and 'ConsolePi' in vendor:
            log.info(f'A ConsolePi has connected to {iface}')
    else:
        # -- // Triggered By TFTP XFER \\ --
        ztp = True
        file_size = os.stat(cfg_file).st_size
        ztp_ok = True if int(mac_bytes) == file_size else False
        mac = utils.Mac(get_mac(ip))
        log.info(f"[ZTP - TFTP XFR] {os.path.basename(cfg_file)} sent to {ip}|{mac.cols}{' Success' if ztp_ok else ''}")
        _res = utils.do_shell_cmd(f"wall 'consolepi-ztp: {os.path.basename(cfg_file)} sent to "
                                  f"{ip}|{mac.cols}{' Success' if ztp_ok else ' WARNING xfr != file size'}'")
        if not ztp_ok:
def update_files(self, data):
    for x in data[self.hostname]['adapters']:
        if 'udev' in data[self.hostname]['adapters'][x]:
            del data[self.hostname]['adapters'][x]['udev']
    log.debugv('[GDRIVE]: -->update_files - data passed to function\n{}'.format(
        json.dumps(data, indent=4, sort_keys=True)))
    if not self.auth():
        return 'Gdrive-Error: Unable to Connect to Gdrive refer to cloud log for details'
    spreadsheet_id = self.file_id
    service = self.sheets_svc

    # init remote_consoles dict, any entries in config not matching this ConsolePi's hostname are added as remote ConsolePis
    value_input_option = 'USER_ENTERED'
    remote_consoles = {}
    cnt = 1
    data[self.hostname]['upd_time'] = int(time.time())  # Put timestamp (epoch) on data for this ConsolePi
    for k in data:
        found = False
        value_range_body = {
            "values": [
                [k, json.dumps(data[k])]
            ]
        }

        # find out if this ConsolePi already has a row, use that row in range
        request = service.spreadsheets().values().get(
            spreadsheetId=spreadsheet_id, range='A:B')
        result = self.exec_request(request)
        log.info('[GDRIVE]: Reading from Cloud Config')
        if result.get('values') is not None:
            x = 1
            for row in result.get('values'):
                if k == row[0]:  # k is hostname row[0] is column A of current row
                    cnt = x
                    found = True
                else:
                    log.info('[GDRIVE]: {0} found in Google Drive Config'.format(row[0]))
                    remote_consoles[row[0]] = json.loads(row[1])
                    remote_consoles[row[0]]['source'] = 'cloud'
                x += 1
            log.debugv(f'[GDRIVE]: {len(remote_consoles)} Remote ConsolePis Found on Gdrive: \n{json.dumps(remote_consoles)}')

        range_ = 'a' + str(cnt) + ':b' + str(cnt)

        # -- // Update gdrive with this ConsolePi's data \\ --
        if not config.cloud_pull_only:
            if found:
                log.info('[GDRIVE]: Updating ' + str(k) + ' data found on row ' + str(cnt) + ' of Google Drive config')
                request = service.spreadsheets().values().update(
                    spreadsheetId=spreadsheet_id, range=range_,
                    valueInputOption=value_input_option, body=value_range_body)
            else:
                log.info('[GDRIVE]: Adding ' + str(k) + ' to Google Drive Config')
                request = service.spreadsheets().values().append(
                    spreadsheetId=spreadsheet_id, range=range_,
                    valueInputOption=value_input_option, body=value_range_body)
            self.exec_request(request)
        else:
            log.info('cloud_pull_only override enabled not updating cloud with data from this host')
        cnt += 1

    self.resize_cols()
    return remote_consoles
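
# -- Layout note (inferred from the code above, not stated in the source): the
# worksheet is treated as a simple two-column map, read back with range 'A:B':
#   column A: ConsolePi hostname
#   column B: JSON blob of that host's data (adapters, interfaces, upd_time, ...)
# A hostname match on column A selects the row to update; otherwise the row is
# appended and every non-matching row becomes a remote ConsolePi entry.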
def get_hosts(self):
    '''Parse user defined hosts for inclusion in menu

    returns dict with formatted keys prepending /host/
    '''
    # utils = self.utils
    hosts = self.cfg_yml.get('HOSTS')
    if not hosts:  # fallback to legacy json config
        hosts = self.get_json_file(self.static.get('REM_HOSTS_FILE'))
        if not hosts:
            return {}

    # generate remote command used in menu
    for h in hosts:
        hosts[h]["method"] = hosts[h].get('method', 'ssh').lower()
        if hosts[h]["method"] == 'ssh':  # method defaults to ssh if not provided
            port = 22 if ':' not in hosts[h]['address'] else hosts[h]['address'].split(':')[1]
            _user_str = '' if not hosts[h].get('username') else f'{hosts[h].get("username")}@'
            key_file = None
            if hosts[h].get("key") and self.loc_user is not None:
                if utils.valid_file(f"/home/{self.loc_user}/.ssh/{hosts[h]['key']}"):
                    user_key = Path(f"/home/{self.loc_user}/.ssh/{hosts[h]['key']}")
                    if utils.valid_file(f"/etc/ConsolePi/.ssh/{hosts[h]['key']}"):
                        mstr_key = Path(f"/etc/ConsolePi/.ssh/{hosts[h]['key']}")
                        if mstr_key.stat().st_mtime > user_key.stat().st_mtime:
                            shutil.copy(mstr_key, user_key)
                            shutil.chown(user_key, user=self.loc_user, group=self.loc_user)
                            user_key.chmod(0o600)
                            log.info(
                                f"{hosts[h]['key']} Updated from ConsolePi global .ssh key_dir to "
                                f"{str(user_key.parent)} for use with {h}...",
                                show=True)
                    key_file = str(user_key)
                elif utils.valid_file(hosts[h]['key']):
                    key_file = hosts[h]['key']
                elif utils.valid_file(f"/etc/ConsolePi/.ssh/{hosts[h]['key']}"):
                    user_ssh_dir = Path(f"/home/{self.loc_user}/.ssh/")
                    if user_ssh_dir.is_dir():
                        shutil.copy(f"/etc/ConsolePi/.ssh/{hosts[h]['key']}", user_ssh_dir)
                        user_key = Path(f"{user_ssh_dir}/{hosts[h]['key']}")
                        shutil.chown(user_key, user=self.loc_user, group=self.loc_user)
                        user_key.chmod(0o600)
                        log.info(
                            f"{hosts[h]['key']} imported from ConsolePi global .ssh key_dir to "
                            f"{str(user_ssh_dir)} for use with {h}...",
                            show=True)
                        key_file = str(user_key)
            hosts[h]['cmd'] = f"sudo -u {self.loc_user} ssh{' ' if not key_file else f' -i {key_file} '}" \
                              f"-t {_user_str}{hosts[h]['address'].split(':')[0]} -p {port}"
            # hosts[h]['cmd'] = f"sudo -u {self.loc_user} ssh -t {_user_str}{hosts[h]['address'].split(':')[0]} -p {port}"
        elif hosts[h].get('method').lower() == 'telnet':
            port = 23 if ':' not in hosts[h]['address'] else hosts[h]['address'].split(':')[1]
            _user_str = '' if not hosts[h].get('username') else f'-l {hosts[h].get("username")}'
            hosts[h]['cmd'] = f"sudo -u {self.loc_user} telnet {_user_str} {hosts[h]['address'].split(':')[0]} {port}"

    groups = [hosts[h].get('group', 'user-defined') for h in hosts]
    host_dict = {'main': {}, 'rshell': {}}

    for g in utils.unique(groups):
        host_dict['main'][g] = {
            f'/host/{h.split("/")[-1]}': hosts[h]
            for h in hosts
            if hosts[h].get('show_in_main', False) and hosts[h].get('group', 'user-defined') == g
        }
        if not host_dict['main'][g]:
            del host_dict['main'][g]
        host_dict['rshell'][g] = {
            f'/host/{h.split("/")[-1]}': hosts[h]
            for h in hosts
            if not hosts[h].get('show_in_main', False) and hosts[h].get('group', 'user-defined') == g
        }
        if not host_dict['rshell'][g]:
            del host_dict['rshell'][g]

    host_dict['_methods'] = utils.unique([hosts[h].get('method', 'ssh') for h in hosts])
    host_dict['_host_list'] = [f'/host/{h.split("/")[-1]}' for h in hosts]
    return host_dict
def log_request(request: Request, route: str):
    log.info('[NEW API RQST IN] {} Requesting -- {} -- Data via API'.format(
        request.client.host, route))
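
# -- Hedged usage sketch (not in the source): the Request annotation matches
# the FastAPI/Starlette Request type, so presumably log_request is called from
# route handlers like the hypothetical endpoint below.
from fastapi import FastAPI, Request

app = FastAPI()

@app.get("/api/v1.0/adapters")
def adapters(request: Request):
    log_request(request, "adapters")  # log the caller's IP and the route hit
    return {"adapters": {}}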
def on_service_state_change(self, zeroconf: Zeroconf, service_type: str, name: str,
                            state_change: ServiceStateChange) -> None:
    if self.cpi.local.hostname == name.split(".")[0]:
        return

    if state_change is not ServiceStateChange.Added:
        return

    info = zeroconf.get_service_info(service_type, name)
    if not info:
        log.warning(f'[MDNS DSCVRY] {name}: No info found')
        return
    if not hasattr(info, "properties") or not info.properties:
        log.warning(f'[MDNS DSCVRY] {name}: No properties found')
        return

    properties = info.properties
    cpi = self.cpi
    mdns_data = None
    update_cache = False
    try:
        mdns_data = {
            k.decode('UTF-8'):
            v.decode('UTF-8') if len(v) == 0 or not v.decode('UTF-8')[0] in ['[', '{']
            else json.loads(v.decode('UTF-8'))  # NoQA
            for k, v in properties.items()
        }
    except Exception as e:
        log.exception(
            f"[MDNS DSCVRY] {e.__class__.__name__} occurred while parsing mdns_data:\n {mdns_data}\n"
            f"Exception: \n{e}")
        log.error(f"[MDNS DSCVRY] entry from {name} ignored due to parsing exception.")
        return

    hostname = mdns_data.get('hostname')
    interfaces = mdns_data.get('interfaces', [])

    log_out = json.dumps(mdns_data, indent=4, sort_keys=True)
    log.debug(f'[MDNS DSCVRY] {hostname} Properties Discovered via mdns:\n{log_out}')

    rem_ip = mdns_data.get('rem_ip')
    if not rem_ip:
        if len(mdns_data.get('interfaces', [])) == 1:
            rem_ip = [interfaces[i]['ip'] for i in interfaces]
            rem_ip = rem_ip[0]
        else:
            rem_ip = None if hostname not in cpi.remotes.data or 'rem_ip' not in cpi.remotes.data[hostname] \
                else cpi.remotes.data[hostname]['rem_ip']

    cur_known_adapters = cpi.remotes.data.get(hostname, {'adapters': None}).get('adapters')

    # -- Log new entry only if this is the first time it's been discovered --
    if hostname not in self.d_discovered:
        self.d_discovered += [hostname]
        log.info('[MDNS DSCVRY] {}({}) Discovered via mdns'.format(
            hostname, rem_ip if rem_ip is not None else '?'))

    from_mdns_adapters = mdns_data.get('adapters')
    mdns_data['rem_ip'] = rem_ip
    mdns_data['adapters'] = from_mdns_adapters if from_mdns_adapters else cur_known_adapters
    mdns_data['source'] = 'mdns'
    mdns_data['upd_time'] = int(time.time())
    mdns_data = {hostname: mdns_data}

    # update from API only if no adapter data exists either in cache or from mdns that triggered this
    # adapter data is updated on menu_launch either way
    if (not mdns_data[hostname]['adapters'] and hostname not in self.no_adapters) or \
            hostname not in cpi.remotes.data:
        log.info(f"[MDNS DSCVRY] {info.server.split('.')[0]} provided no adapter data Collecting via API")
        # TODO check this don't think needed had a hung process on one of my Pis added it to be safe
        try:
            # TODO we are setting update time here so always result in a cache update with the restart timer
            res = cpi.remotes.api_reachable(hostname, mdns_data[hostname])
            update_cache = res.update
            if not res.data.get('adapters'):
                self.no_adapters.append(hostname)
            elif hostname in self.no_adapters:
                self.no_adapters.remove(hostname)
            mdns_data[hostname] = res.data
        except Exception as e:
            log.exception(f'Exception occurred verifying reachability via API for {hostname}:\n{e}')

    if self.show:
        if hostname in self.discovered:
            self.discovered.remove(hostname)
        self.discovered.append('{}{}'.format(hostname, '*' if update_cache else ''))
        print(hostname + '({}) Discovered via mdns.'.format(rem_ip if rem_ip is not None else '?'))
        try:
            print(
                '{}\n{}\n{}'.format(
                    'mdns: None' if from_mdns_adapters is None else 'mdns: {}'.format(
                        [d.replace('/dev/', '') for d in from_mdns_adapters]
                        if not isinstance(from_mdns_adapters, list)
                        else [d['dev'].replace('/dev/', '') for d in from_mdns_adapters]),
                    'api (mdns trigger): None' if not mdns_data[hostname]['adapters'] else 'api (mdns trigger): {}'.format(
                        [d.replace('/dev/', '') for d in mdns_data[hostname]['adapters']]
                        if not isinstance(mdns_data[hostname]['adapters'], list)
                        else [d['dev'].replace('/dev/', '') for d in mdns_data[hostname]['adapters']]),
                    'cache: None' if cur_known_adapters is None else 'cache: {}'.format(
                        [d.replace('/dev/', '') for d in cur_known_adapters]
                        if not isinstance(cur_known_adapters, list)
                        else [d['dev'].replace('/dev/', '') for d in cur_known_adapters])))
        except TypeError as e:
            print(f'EXCEPTION: {e}')
        print(f'\nDiscovered ConsolePis: {self.discovered}')
        print("press Ctrl-C to exit...\n")

    log.debugv(
        f"[MDNS DSCVRY] {hostname} Final data set:\n{json.dumps(mdns_data, indent=4, sort_keys=True)}")

    # TODO could probably just put the call to cache update in the api_reachable method
    if update_cache:
        if 'hostname' in mdns_data[hostname]:
            del mdns_data[hostname]['hostname']
        cpi.remotes.data = cpi.remotes.update_local_cloud_file(remote_consoles=mdns_data)
        log.info(f'[MDNS DSCVRY] {hostname} Local Cache Updated after mdns discovery')
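
# -- Standalone sketch (not in the source) of the TXT-record decode convention
# used above: values are plain UTF-8 strings unless they start with '[' or '{',
# in which case the registering side JSON-serialized them. Sample bytes below
# are hypothetical.
import json

properties = {b'hostname': b'consolepi4', b'adapters': b'{"/dev/ttyUSB0": {}}'}
mdns_data = {
    k.decode('UTF-8'):
    v.decode('UTF-8') if len(v) == 0 or not v.decode('UTF-8')[0] in ['[', '{']
    else json.loads(v.decode('UTF-8'))
    for k, v in properties.items()
}
assert mdns_data['hostname'] == 'consolepi4'
assert mdns_data['adapters'] == {"/dev/ttyUSB0": {}}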