def get_config_all(self, yaml_cfg=None, legacy_cfg=None):
    '''Parse configuration, preferring yaml file over legacy bash-style cfg.

    Keyword Arguments:
        yaml_cfg {str} -- Path to yaml config file (default: {None})
        legacy_cfg {str} -- Path to legacy bash-style config file (default: {None})

    Returns:
        {dict} -- When sourced from yaml, the full yaml dict (with a 'CONFIG'
        key ensured); when sourced from the legacy file, the parsed vars
        wrapped as {'CONFIG': {...}}.
    '''
    do_legacy = True
    yml = {}
    if yaml_cfg and utils.valid_file(yaml_cfg):
        yml = self.get_yaml_file(yaml_cfg)
        # flat yaml files (no top-level CONFIG key) are treated as the config itself
        cfg = yml.get('CONFIG', yml)
        if cfg:
            do_legacy = False
            # convert yaml string booleans to real booleans
            for k in cfg:
                if cfg[k] in ['true', 'false']:
                    cfg[k] = cfg[k] == 'true'
            if 'CONSOLEPI_VER' not in yml:
                yml['CONFIG'] = cfg

    if do_legacy:
        # FIX: previously the legacy file was only consulted when no yaml file
        # existed; now it is also the fallback when the yaml parsed empty.
        cfg = {}
        if legacy_cfg and utils.valid_file(legacy_cfg):
            with open(legacy_cfg, 'r') as config:
                for line in config:
                    # skip blank lines and full-line comments
                    if not line.strip() or line.startswith('#'):
                        continue
                    var = line.split("=")[0]
                    # strip inline comment, the 'VAR=' prefix, and whitespace
                    value = line.split('#')[0]
                    value = value.replace('{0}='.format(var), '')
                    value = value.strip()
                    if value in ['true', 'false']:
                        cfg[var] = value == 'true'
                    else:
                        cfg[var] = value.replace('"', '')

    return yml if not do_legacy else {'CONFIG': cfg}
def _get_template(self):
    '''Determine what jinja2 template to use to generate the config.

    Template filename can be provided in config; if not provided look for a
    template named <mac-address>.j2.

    Returns:
        str: Full path to j2 template, or None (with self.ok set False and
        self.error appended) if no template is found.
    '''
    tmplt = self.conf.get('template', '')
    # FIX: str.rstrip('.j2') strips ANY trailing '.', 'j', '2' characters
    # (e.g. 'cfg2.j2' -> 'cfg'), mangling names; strip the suffix explicitly.
    if tmplt.endswith('.j2'):
        tmplt = tmplt[:-len('.j2')]
    mac = self.mac
    # template is defined in config
    if tmplt and utils.valid_file(f"{ztp_dir}/{tmplt}.j2"):
        return f"{ztp_dir}/{tmplt}.j2"
    # template is not defined look for <mac>.j2 file for template
    elif mac and utils.valid_file(f"{ztp_dir}/{mac.clean}.j2"):
        return f"{ztp_dir}/{mac.clean}.j2"
    else:
        self.ok = False
        self.error += f"[ZTP Entry Skipped!!] No Template Found for:\n{self.conf_pretty}\n"
def __init__(self, local, cpiexec):
    # Tracks/refreshes data for remote ConsolePi instances (cloud + local cache).
    self.cpiexec = cpiexec                  # executor/thread helper used by this class
    self.pop_list = []
    self.old_api_log_sent = False           # ensures old-API conversion is logged only once
    self.log_sym_warn = log_sym.WARNING.value
    self.log_sym_error = log_sym.ERROR.value
    self.local = local
    self.connected = False
    self.cache_update_pending = False
    self.spin = Halo(spinner="dots")        # CLI spinner for long-running operations
    self.cloud = None  # Set in refresh method if reachable
    self.do_cloud = config.cfg.get("cloud", False)
    CLOUD_CREDS_FILE = config.static.get("CLOUD_CREDS_FILE")
    if not CLOUD_CREDS_FILE:
        self.no_creds_error()
    if self.do_cloud and config.cloud_svc == "gdrive":
        # cloud sync enabled: verify connectivity before leaving local-only mode
        if utils.is_reachable("www.googleapis.com", 443):
            self.local_only = False
            if not utils.valid_file(CLOUD_CREDS_FILE):
                self.no_creds_error()
        else:
            log.warning(
                f"failed to connect to {config.cloud_svc} - operating in local only mode",
                show=True,
            )
            self.local_only = True
    # NOTE(review): self.local_only is never assigned when cloud is disabled or
    # cloud_svc != gdrive -- presumably set elsewhere; confirm before relying on it.
    self.data = self.get_remote(data=config.remote_update())  # re-get cloud.json to capture any updates via mdns
def _get_var_file(self):
    '''Determine yaml variable file to use to generate the config.

    Looks for (in order): the file named by the 'variables' key in config,
    <mac-address>.yaml, then a common variables.yaml.

    Returns:
        str: Full path to yaml variable file, or None (with self.ok set False
        and self.error updated) if none is found.
    '''
    variables = self.conf.get('variables')
    mac = self.mac
    if variables and utils.valid_file(f"{ztp_dir}/{variables}"):
        return f"{ztp_dir}/{variables}"
    elif mac and utils.valid_file(f"{ztp_dir}/{mac.clean}.yaml"):
        return f'{ztp_dir}/{mac.clean}.yaml'
    elif mac and utils.valid_file(f'{ztp_dir}/variables.yaml'):
        return f'{ztp_dir}/variables.yaml'
    else:
        self.ok = False
        if self.error and 'No Template Found' in self.error:
            # combine with the earlier template error rather than stacking messages
            # (FIX: removed dead `pass` statement that followed this assignment)
            self.error = "[ZTP Entry Skipped!!] No Template or Variables Found for Entry with " \
                f"the following config:\n{self.conf_pretty}\n"
        else:
            self.error += f"[ZTP Entry Skipped!!] No Variables Found for:\n{self.conf_pretty}\n"
def _get_template(self):
    '''Determine what jinja2 template to use to generate the config.

    Template filename can be provided in config
    if not provided look for template named <mac-address>.j2

    Returns:
        str: Full path to j2 template.
    '''
    tmplt = self.conf.get('template', '')
    # FIX: str.rstrip('.j2') strips ANY trailing '.', 'j', '2' characters
    # (e.g. 'cfg2.j2' -> 'cfg'), mangling names; strip the suffix explicitly.
    if tmplt.endswith('.j2'):
        tmplt = tmplt[:-len('.j2')]
    mac = self.mac
    # template is defined in config
    if tmplt and utils.valid_file(f"{ztp_dir}/{tmplt}.j2"):
        return f"{ztp_dir}/{tmplt}.j2"
    # template is not defined look for <mac>.j2 file for template
    elif mac and utils.valid_file(f"{ztp_dir}/{mac.clean}.j2"):
        return f"{ztp_dir}/{mac.clean}.j2"
    else:
        self.ok = False
        self.error += f"[ZTP Entry Skipped!!] No Template Found for:\n{self.conf_pretty}\n"
def _get_var_file(self):
    '''Determine variable file to use to generate the config.

    Variables can be provided (in the order we look for them):
    1. in the config for the device
    2. via a file named <mac address>.yaml
    3. matching the template name with .yaml extension
    4. in a common variables.yaml file
        a. by mac-address
        b. by key matching the template name

    when using ordered ztp variable file based on template name should have
    _# appended where # corresponds to the order for that model.
    i.e. 6200F_1.yaml, 6200F_2.yaml given the template is 6200F.j2

    Returns:
        str: Full path to yaml variable file.
    '''
    variables = self.conf.get('variables')
    mac = self.mac
    # FIX: str.rstrip('.j2') strips ANY trailing '.', 'j', '2' characters and can
    # mangle template paths; strip the suffix explicitly instead.
    tmplt_base = self.tmplt[:-len('.j2')] if self.tmplt and self.tmplt.endswith('.j2') else self.tmplt
    if variables and utils.valid_file(f"{ztp_dir}/{variables}"):
        return f"{ztp_dir}/{variables}"
    elif mac and utils.valid_file(f"{ztp_dir}/{mac.clean}.yaml"):
        return f'{ztp_dir}/{mac.clean}.yaml'
    elif tmplt_base and utils.valid_file(f"{tmplt_base}.yaml"):
        return f"{tmplt_base}.yaml"
    elif mac and utils.valid_file(f'{ztp_dir}/variables.yaml'):
        return f'{ztp_dir}/variables.yaml'
    else:
        self.ok = False
        if self.error and 'No Template Found' in self.error:
            # combine with the earlier template error rather than stacking messages
            self.error = "[ZTP Entry Skipped!!] No Template or Variables Found for Entry with " \
                f"the following config:\n{self.conf_pretty}\n"
        else:
            self.error += f"[ZTP Entry Skipped!!] No Variables Found for:\n{self.conf_pretty}\n"
def main():
    '''Cloud update triggered by a local IP change.

    Pushes local adapter data to the cloud service and merges any remote
    ConsolePi entries found there back into the local cloud cache file.
    Exits non-zero when the cloud service or credentials are unavailable.
    '''
    cpi = ConsolePi()
    cloud_svc = config.cfg.get("cloud_svc", "error")
    local = cpi.local
    remotes = cpi.remotes
    cpiexec = cpi.cpiexec
    log.info('[CLOUD TRIGGER (IP)]: Cloud Update triggered by IP Update')
    CLOUD_CREDS_FILE = config.static.get(
        "CLOUD_CREDS_FILE",
        '/etc/ConsolePi/cloud/gdrive/.credentials/credentials.json')
    # bail early if the cloud endpoint or credentials are unavailable
    if not utils.is_reachable("www.googleapis.com", 443):
        log.error(f"Not Updating {cloud_svc} due to connection failure")
        sys.exit(1)
    if not utils.valid_file(CLOUD_CREDS_FILE):
        log.error('Credentials file not found or invalid')
        sys.exit(1)

    # -- // Get details from Google Drive - once populated will skip \\ --
    if cloud_svc == "gdrive" and remotes.cloud is None:
        remotes.cloud = GoogleDrive(hostname=local.hostname)

    # wait for any in-flight remote/power refresh threads before pushing
    if cpiexec.wait_for_threads(thread_type="remotes") and (
            config.power and cpiexec.wait_for_threads(name="_toggle_refresh")):
        log.error(
            'IP Change Cloud Update Trigger: TimeOut Waiting for Threads to Complete'
        )

    remote_consoles = remotes.cloud.update_files(local.data)
    # NOTE(review): update_files appears to return an error string (containing
    # 'Gdrive-Error:') on failure rather than a dict -- confirm in GoogleDrive.
    if remote_consoles and "Gdrive-Error:" in remote_consoles:
        log.error(remote_consoles)
    else:
        for r in remote_consoles:
            # -- Convert Any Remotes with old API schema to new API schema --
            if isinstance(remote_consoles[r].get("adapters", {}), list):
                remote_consoles[r]["adapters"] = remotes.convert_adapters(
                    remote_consoles[r]["adapters"])
                log.warning(
                    f"Adapter data for {r} retrieved from cloud in old API format... Converted"
                )
        if len(remote_consoles) > 0:
            remotes.update_local_cloud_file(remote_consoles)
def add_to_udev(self, udev_line: str, section_marker: str, label: str = None):
    '''Add or edit udev rules file with new symlink after adapter rename.

    Arguments:
        udev_line {str} -- The properly formatted udev line being added to the file
        section_marker {str} -- Match text used to determine where to place the line

    Keyword Arguments:
        label {str} -- The rules file GOTO label used in some scenarios
                       (i.e. multi-port 1 serial) (default: {None})

    Returns:
        {str|None} -- Returns error string if an error occurs
    '''
    found = ser_label_exists = get_next = update_file = False  # init
    goto = line = cmd = ''  # init
    rules_file = self.rules_file  # if 'ttyAMA' not in udev_line else self.ttyama_rules_file Testing 1 rules file
    if utils.valid_file(rules_file):
        # scan the existing rules file to find section/label placement info
        with open(rules_file) as x:
            for line in x:
                # temporary for those that have the original file
                if 'ID_SERIAL' in line and 'IMPORT' not in line:
                    _old = 'ENV{ID_SERIAL}=="", GOTO="BYPATH-POINTERS"'
                    _new = 'ENV{ID_SERIAL_SHORT}=="", IMPORT{builtin}="path_id", GOTO="BYPATH-POINTERS"'
                    cmd = "sudo sed -i 's/{}/{}/' {}".format(
                        _old, _new, rules_file)
                    update_file = True

                # No longer including SUBSYSTEM in formatted udev line, redundant given logic @ top of rules file
                if line.replace('SUBSYSTEM=="tty", ', '').strip() == udev_line.strip():
                    return  # Line is already in file Nothing to do.

                # capture the line following the section start marker (holds the GOTO)
                if get_next:
                    goto = line
                    get_next = False
                if section_marker.replace(' END', '') in line:
                    get_next = True
                elif section_marker in line:
                    found = True
                elif label and 'LABEL="{}"'.format(label) in line:
                    ser_label_exists = True

                last_line = line
        # NOTE(review): last_line is unbound if the rules file is empty -- confirm
        # valid_file() guarantees non-empty content.
        if update_file:
            error = utils.do_shell_cmd(cmd)
            if error:
                log.show(error)

        # extract GOTO target from captured line, falling back to the file's last LABEL
        goto = goto.split('GOTO=')[1].replace(
            '"', '').strip() if 'GOTO=' in goto else None
        if goto is None:
            goto = last_line.strip().replace('LABEL=', '').replace(
                '"', '') if 'LABEL=' in last_line else None
    else:
        # no rules file yet: seed it from the packaged template
        error = utils.do_shell_cmd(
            f'sudo cp /etc/ConsolePi/src/{os.path.basename(rules_file)} /etc/udev/rules.d/'
        )  # TODO switch to pathlib.Path('path...').copy(src, dst)
        found = True
        goto = 'END'

    if goto and 'GOTO=' not in udev_line:
        udev_line = '{}, GOTO="{}"'.format(udev_line, goto)
    if label and not ser_label_exists:
        udev_line = 'LABEL="{}"\\n{}'.format(label, udev_line)

    # -- // UPDATE RULES FILE WITH FORMATTED LINE \\ --
    if found:
        # insert the new line before the section marker via sed substitution
        udev_line = '{}\\n{}'.format(udev_line, section_marker)
        cmd = "sudo sed -i 's/{}/{}/' {}".format(section_marker, udev_line,
                                                 rules_file)
        error = utils.do_shell_cmd(cmd, handle_errors=False)
        if error:
            return error
    else:  # Not Using new 10-ConsolePi.rules template just append to file
        if section_marker == '# END BYSERIAL-DEVS':
            return utils.append_to_file(rules_file, udev_line)
        else:  # if not by serial device the new template is required
            return 'Unable to Add Line, please use the new 10.ConsolePi.rules found in src dir and\n' \
                'add you\'re current rules to the BYSERIAL-DEVS section.'
def do_ser2net_line(self,
                    from_name: str = None,
                    to_name: str = None,
                    baud: int = None,
                    dbits: int = None,
                    parity: str = None,
                    flow: str = None,
                    sbits: int = None):
    '''Process Adapter Configuration Changes in ser2net.conf.

    Keyword Arguments:
        from_name {str} -- The Adapters existing name/alias (default: {None})
        to_name {str} -- The Adapters new name/alias (default: {None})
        baud {int} -- Adapter baud (default: {self.baud})
        dbits {int} -- Adapter databits (default: {self.data_bits})
        parity {str} -- Adapter Parity (default: {self.parity})
        flow {str} -- Adapter flow (default: {self.flow})
        sbits {int} -- Adapter stop bits (default: {self.sbits})

    Returns:
        {str|None} -- Returns error text if an error occurs or None if no issues.
    '''
    # don't add the new entry to ser2net if one already exists for the alias
    if from_name != to_name and config.ser2net_conf.get(f"/dev/{to_name}"):
        log.info(
            f"ser2net: {to_name} already mapped to port {config.ser2net_conf[f'/dev/{to_name}'].get('port')}",
            show=True)
        return

    # translation maps: picocom single-char notation -> ser2net keywords
    ser2net_parity = {'n': 'NONE', 'e': 'EVEN', 'o': 'ODD'}
    ser2net_flow = {'n': '', 'x': ' XONXOFF', 'h': ' RTSCTS'}
    # fall back to the adapter's current settings for anything not provided
    baud = self.baud if not baud else baud
    dbits = self.data_bits if not dbits else dbits
    parity = self.parity if not parity else parity
    flow = self.flow if not flow else flow
    sbits = self.sbits if not sbits else sbits
    log_ptr = ''

    cur_line = config.ser2net_conf.get(f'/dev/{from_name}', {}).get('line')
    if cur_line and '/dev/ttyUSB' not in cur_line and '/dev/ttyACM' not in cur_line:
        new_entry = False
        # FIX: removed duplicated assignment target (was `next_port = next_port = ...`)
        next_port = cur_line.split(':')[0]  # Renaming existing
        log_ptr = config.ser2net_conf[f'/dev/{from_name}'].get('log_ptr')
        if not log_ptr:
            log_ptr = ''
    else:
        new_entry = True
        if utils.valid_file(self.ser2net_file):
            # allocate the next telnet port in the 7001-7999 range
            ports = [
                a['port'] for a in config.ser2net_conf.values()
                if 7000 < a.get('port', 0) <= 7999
            ]
            next_port = 7001 if not ports else int(max(ports)) + 1
        else:
            next_port = 7001
            # no ser2net.conf yet: seed /etc from the packaged source file
            error = utils.do_shell_cmd(
                f'sudo cp {self.ser2net_file} /etc/',
                handle_errors=False)
            if error:
                log.error(
                    f'Rename Menu Error while attempting to cp ser2net.conf from src {error}'
                )
                return error  # error added to display in calling method

    ser2net_line = (
        '{telnet_port}:telnet:0:/dev/{alias}:{baud} {dbits}DATABITS {parity} '
        '{sbits}STOPBIT {flow} banner {log_ptr}'.format(
            telnet_port=next_port,
            alias=to_name,
            baud=baud,
            dbits=dbits,
            sbits=sbits,
            parity=ser2net_parity[parity],
            flow=ser2net_flow[flow],
            log_ptr=log_ptr))

    # -- // Append to ser2net.conf \\ --
    if new_entry:
        error = utils.append_to_file(self.ser2net_file, ser2net_line)
    # -- // Rename Existing Definition in ser2net.conf \\ --
    # -- for devices with existing definitions cur_line is the existing line
    else:
        # escape '/' for use in the sed substitution expression
        ser2net_line = ser2net_line.strip().replace('/', r'\/')
        cur_line = cur_line.replace('/', r'\/')
        cmd = "sudo sed -i 's/^{}$/{}/' {}".format(cur_line, ser2net_line,
                                                   self.ser2net_file)
        error = utils.do_shell_cmd(cmd, shell=True)

    if not error:
        # re-parse so the in-memory view reflects the file change
        config.ser2net_conf = config.get_ser2net()
    else:
        return error
def get_ser2net(self):
    '''Parse ser2net.conf to extract connection info for serial adapters.

    Returns 2 level dict (empty dict if ser2net.conf not found or empty):
        {
            <adapter name or alias>: {
                "baud": <baud>,
                "dbits": <data bits>,
                "flow": "<flow control>",
                "parity": "<parity>",
                "sbits": <stop bits>,
                "port": <telnet port (ser2net)>,
                "logfile": None or logfile if defined in ser2net.conf
                "cmd": picocom command string used in menu
                "line": The line from ser2net.conf
            }
        }
    '''
    ########################################################
    # --- ser2net (3.x) config lines look like this ---
    # ... 9600 NONE 1STOPBIT 8DATABITS XONXOFF LOCAL -RTSCTS
    # ... 9600 8DATABITS NONE 1STOPBIT banner
    ########################################################
    if not utils.valid_file(self.static.get('SER2NET_FILE')):
        log.warning(
            'No ser2net.conf file found unable to extract port definition',
            show=True)
        return {}

    ser2net_conf = {}
    trace_files = {}
    with open(self.static['SER2NET_FILE']) as cfg:
        for line in cfg:
            # TRACEFILE lines define named log-file templates referenced later
            if 'TRACEFILE:' in line:
                line = line.split(':')
                trace_files[line[1]] = line[2]
                continue
            elif not line[0].isdigit():
                continue  # skip comments / anything that isn't a port definition
            _line = line.strip('\n')
            line = line.split(':')
            tty_port = int(line[0])
            tty_dev = line[3]

            # Reset defaults -- baud is used to detect parsing failure
            dbits = 8
            parity = 'n'
            flow = 'n'
            sbits = 1
            logfile = None
            log_ptr = None

            connect_params = line[4].replace(',', ' ').split()
            baud = None
            for option in connect_params:
                if option in self.static.get('VALID_BAUD', [
                        '300', '1200', '2400', '4800', '9600', '19200',
                        '38400', '57600', '115200'
                ]):
                    baud = int(option)
                elif 'DATABITS' in option:
                    dbits = int(option.replace('DATABITS', ''))  # int 5 - 8
                    if dbits < 5 or dbits > 8:
                        log.warning(
                            f'{tty_dev}: Invalid value for "data bits" found in ser2net.conf falling back to 8',
                            show=True)
                        dbits = 8
                elif option in ['EVEN', 'ODD', 'NONE']:
                    parity = option[0].lower()  # converts to e o n used by picocom
                elif option == 'XONXOFF':
                    flow = 'x'
                elif option == 'RTSCTS':
                    flow = 'h'
                elif 'STOPBIT' in option:  # Not used by picocom
                    # FIX: original tested the method object `option[0].isdigit`
                    # (always truthy) instead of calling it, defeating the fallback.
                    sbits = int(option[0]) if option[0].isdigit() else 1
                elif 'tb=' in option or 'tr=' in option or 'tw=' in option:
                    log_ptr = option
                    logfile = option.split('=')[1]

            # Use baud to determine if options were parsed correctly
            if baud is None:
                log.warning(
                    f'{tty_dev} found in ser2net but unable to parse baud falling back to {self.default_baud}',
                    show=True)
                baud = self.default_baud

            # parse TRACEFILE defined in ser2net.conf
            cmd_base = f'picocom {tty_dev} --baud {baud} --flow {flow} --databits {dbits} --parity {parity}'
            if self.picocom_ver > 1:  # picocom ver 1.x in Stretch doesn't support "--stopbits"
                cmd_base = cmd_base + f' --stopbits {sbits}'
            if logfile:
                # expand ser2net tracefile escapes (\p port, \d device, \s settings)
                logfile = trace_files[logfile]
                logfile = logfile.replace('\\p', str(tty_port)).replace(
                    '\\d', tty_dev.split('/')[-1])
                logfile = logfile.replace(
                    '\\s', f'{baud}_{dbits}{parity.upper()}{sbits}')
                logfile = logfile.split(
                    '\\'
                )[0] + '-{{timestamp}}.log'  # + time.strftime('%H.%M.log')
                cmd = cmd_base + f' --logfile {logfile}'
                utils.do_shell_cmd(
                    f"mkdir -p {'/'.join(logfile.split('/')[0:-1])}")
                utils.set_perm('/'.join(logfile.split('/')[0:-1]))
            else:
                cmd = cmd_base

            # update dict with values for this device
            ser2net_conf[tty_dev] = {
                'port': tty_port,
                'baud': baud,
                'dbits': dbits,
                'parity': parity,
                'flow': flow,
                'sbits': sbits,
                'logfile': logfile,
                'log_ptr': log_ptr,
                'cmd': cmd,
                'line': _line
            }
    return ser2net_conf
def get_hosts(self):
    '''Parse user defined hosts for inclusion in menu.

    Builds the ssh/telnet command string for each host and groups them for
    the main and rshell menus.

    Returns:
        {dict} -- keys 'main' and 'rshell' (grouped hosts with keys prepending
        /host/), plus '_methods' and '_host_list' summary keys.  Empty dict if
        no hosts are defined.
    '''
    hosts = self.cfg_yml.get('HOSTS')
    if not hosts:  # fallback to legacy json config
        hosts = self.get_json_file(self.static.get('REM_HOSTS_FILE'))
        if not hosts:
            return {}

    # generate remote command used in menu
    for h in hosts:
        hosts[h]["method"] = hosts[h].get('method', 'ssh').lower()
        if hosts[h]["method"] == 'ssh':  # method defaults to ssh if not provided
            port = 22 if ':' not in hosts[h]['address'] else hosts[h][
                'address'].split(':')[1]
            _user_str = '' if not hosts[h].get(
                'username') else f'{hosts[h].get("username")}@'
            key_file = None
            if hosts[h].get("key") and self.loc_user is not None:
                if utils.valid_file(
                        f"/home/{self.loc_user}/.ssh/{hosts[h]['key']}"):
                    # user already has the key; refresh it from the global
                    # key_dir if the global copy is newer
                    user_key = Path(
                        f"/home/{self.loc_user}/.ssh/{hosts[h]['key']}")
                    if utils.valid_file(
                            f"/etc/ConsolePi/.ssh/{hosts[h]['key']}"):
                        mstr_key = Path(
                            f"/etc/ConsolePi/.ssh/{hosts[h]['key']}")
                        if mstr_key.stat().st_mtime > user_key.stat().st_mtime:
                            shutil.copy(mstr_key, user_key)
                            shutil.chown(user_key,
                                         user=self.loc_user,
                                         group=self.loc_user)
                            user_key.chmod(0o600)
                            log.info(
                                f"{hosts[h]['key']} Updated from ConsolePi global .ssh key_dir to "
                                f"{str(user_key.parent)} for use with {h}...",
                                show=True)
                    key_file = str(user_key)
                elif utils.valid_file(hosts[h]['key']):
                    # key provided as a full/relative path
                    key_file = hosts[h]['key']
                elif utils.valid_file(
                        f"/etc/ConsolePi/.ssh/{hosts[h]['key']}"):
                    # only the global copy exists; import it into the user's .ssh
                    user_ssh_dir = Path(f"/home/{self.loc_user}/.ssh/")
                    # FIX: was `user_ssh_dir.is_dir` (method object, always truthy);
                    # the copy was attempted even when the directory was absent.
                    if user_ssh_dir.is_dir():
                        shutil.copy(
                            f"/etc/ConsolePi/.ssh/{hosts[h]['key']}",
                            user_ssh_dir)
                        user_key = Path(
                            f"{user_ssh_dir}/{hosts[h]['key']}")
                        shutil.chown(user_key,
                                     user=self.loc_user,
                                     group=self.loc_user)
                        user_key.chmod(0o600)
                        log.info(
                            f"{hosts[h]['key']} imported from ConsolePi global .ssh key_dir to "
                            f"{str(user_ssh_dir)} for use with {h}...",
                            show=True)
                        key_file = str(user_key)
            hosts[h]['cmd'] = f"sudo -u {self.loc_user} ssh{' ' if not key_file else f' -i {key_file} '}" \
                              f"-t {_user_str}{hosts[h]['address'].split(':')[0]} -p {port}"
        elif hosts[h].get('method').lower() == 'telnet':
            port = 23 if ':' not in hosts[h]['address'] else hosts[h][
                'address'].split(':')[1]
            _user_str = '' if not hosts[h].get(
                'username') else f'-l {hosts[h].get("username")}'
            hosts[h][
                'cmd'] = f"sudo -u {self.loc_user} telnet {_user_str} {hosts[h]['address'].split(':')[0]} {port}"

    # group hosts for the menus; empty groups are pruned
    groups = [hosts[h].get('group', 'user-defined') for h in hosts]
    host_dict = {'main': {}, 'rshell': {}}

    for g in utils.unique(groups):
        host_dict['main'][g] = {
            f'/host/{h.split("/")[-1]}': hosts[h]
            for h in hosts if hosts[h].get('show_in_main', False)
            and hosts[h].get('group', 'user-defined') == g
        }
        if not host_dict['main'][g]:
            del host_dict['main'][g]
        host_dict['rshell'][g] = {
            f'/host/{h.split("/")[-1]}': hosts[h]
            for h in hosts if not hosts[h].get('show_in_main', False)
            and hosts[h].get('group', 'user-defined') == g
        }
        if not host_dict['rshell'][g]:
            del host_dict['rshell'][g]

    host_dict['_methods'] = utils.unique(
        [hosts[h].get('method', 'ssh') for h in hosts])
    host_dict['_host_list'] = [f'/host/{h.split("/")[-1]}' for h in hosts]
    return host_dict
#!/etc/ConsolePi/venv/bin/python3 import sys import yaml from jinja2 import Environment, FileSystemLoader sys.path.insert(0, '/etc/ConsolePi/src/pypkg') from consolepi import log, utils # NoQA # Look for config template by same name in same dir if not len(sys.argv) > 1: print('Template Name Must be provided') sys.exit(1) elif not utils.valid_file(sys.argv[1] + '.j2'): print(f'{sys.argv[1]}.j2 Not Found or empty') sys.exit(1) else: cfg_templ = sys.argv[1] + '.j2' # Look for variables file by same name in same dir if utils.valid_file(sys.argv[1] + '.yaml'): var_file = sys.argv[1] + '.yaml' # Look for single common variables file elif utils.valid_file('./variables.yaml'): var_file = 'variables.yaml' else: print('No Valid Variable File Found') sys.exit(1) config_data = yaml.load(open(var_file), Loader=yaml.FullLoader) if var_file == 'variables.yaml':
# -- fragment: ZTP TFTP transfer-complete handler; names such as cfg_file,
# mac_bytes, ip, get_mac, next_ztp, and ZTP_CLI_FILE are defined earlier in
# the file (outside this chunk).
ztp = True
file_size = os.stat(cfg_file).st_size
# transfer considered successful when bytes sent match the file size on disk
ztp_ok = True if int(mac_bytes) == file_size else False
mac = utils.Mac(get_mac(ip))
log.info(f"[ZTP - TFTP XFR] {os.path.basename(cfg_file)} sent to {ip}|{mac.cols}{' Success' if ztp_ok else ''}")
# broadcast transfer result to logged-in terminals
_res = utils.do_shell_cmd(f"wall 'consolepi-ztp: {os.path.basename(cfg_file)} sent to "
                          f"{ip}|{mac.cols}{' Success' if ztp_ok else ' WARNING xfr != file size'}'")
if not ztp_ok:
    log.warning(f"File Size {file_size} and Xfr Total ({mac_bytes}) don't match")

# If cfg file was sent transition to next cfg (ordered).
# if img sent adjust dnsmasq to prevent needless resend of img.
next_ztp(cfg_file, mac)
if config.ztp and cfg_file.endswith('.cfg'):
    # load stashed dict from file. keys are ztp.py generated cfg files names,
    # mapped to dict of ztp settings from config.
    if not utils.valid_file(ZTP_CLI_FILE):
        log.warning(f'Skipping ZTP Post CLI for {ip} {ZTP_CLI_FILE} not found/invalid')
    else:
        with open(ZTP_CLI_FILE) as fb:
            cfg_dict = json.loads(''.join(fb.readlines()))
        cfg_file_name = os.path.basename(cfg_file)
        if cfg_file_name in cfg_dict:
            cli_ok = True
            cfg_dict[cfg_file_name]['ip'] = ip
            # cli_post settings become the cmd_list consumed by the SSH step
            if 'cli_post' in cfg_dict[cfg_file_name]:
                cfg_dict[cfg_file_name]['cmd_list'] = cfg_dict[cfg_file_name]['cli_post']
                del cfg_dict[cfg_file_name]['cli_post']
            log.debug(f"Dict from .ztpcli: {cfg_dict[cfg_file_name]}")
            # -- // PERFORM Post ZTP CLI Based Operations via SSH \\ --