def test04(self, steps):
    """Compare pre-change and post-change OSPF SPF execution counters.

    Passes when the Diff of self.pre_dic vs self.post_dic is empty; otherwise
    reports, per device, how many new SPF calculations ran and fails the step.
    """
    with steps.start(f"Compare pre-state to post SPF states",
                     continue_=True) as step:
        # Verification: diff the two per-device counter snapshots.
        diff = Diff(self.pre_dic, self.post_dic)
        diff.findDiff()
        diff = str(diff)
        if not diff:
            log.info(
                f'No new OSPF SPF executions have occured - Test Passed')
        else:
            log.info(
                f'New OSPF SPF calculations have occured - Test Failed')
            # Walk every device to report which ones ran new SPF calculations.
            for name in self.devices.keys():
                before = self.pre_dic[name]
                after = self.post_dic[name]
                if before > after:
                    # Counter went backwards: most likely cleared mid-test,
                    # so the delta is meaningless - skip this device.
                    log.info(
                        f"Hostname {name}: Invalid. Pre state is greater than Post state. Counters may have been cleared. Skipping "
                    )
                else:
                    delta = after - before
                    if delta >= 1:
                        log.info(
                            f"Hostname {name}: New SPF calculations have occured. There have been {delta} new SPF calculations"
                        )
                        step.failed()
def diff(self, steps, device, pre, post, continue_=True, fail_different=False,
         command=None, exclude=None):
    """Diff two snapshots for a device inside a pyATS step.

    Args:
        steps: pyATS steps object used to open a sub-step.
        device: device whose name labels the step.
        pre/post: snapshots (parsed structures) to compare.
        continue_ (bool): keep running the section if this step fails.
        fail_different (bool): fail the step when the snapshots differ.
        command (str): command whose parser exclude list is applied.
        exclude (list): extra keys/regexes to exclude from the diff.
    """
    with steps.start("Perform Diff for '{device}'".format(device=device.name),
                     continue_=continue_) as step:
        exclude_items = _get_exclude(command, device)
        if exclude and isinstance(exclude, list):
            exclude_items.extend(exclude)
        try:
            diff = Diff(pre, post, exclude=exclude_items)
        except Exception as e:
            step.failed(str(e))
        diff.findDiff()
        # BUG FIX: test the rendered diff content instead of the Diff
        # object's truthiness. The sibling diff processors in this file
        # all check str(diff); an always-truthy Diff object would log and
        # fail even when the snapshots are identical.
        if str(diff):
            log.info(diff)
            if fail_different:
                step.failed('{pre} and {post} are not '
                            'identical'.format(pre=pre, post=post))
def test(self, steps):
    """Check each learnt device for a startup/running configuration diff."""
    # Key prefixes that are cosmetic or ephemeral and must not count as a
    # real difference ("service private-config-encryption" hides certs).
    startup_noise = ('crypto pki certificate', 'crypto pki trust', 'Using ')
    running_noise = ('Building configuration...', 'Current configuration :')
    # Loop over every device with learnt configs
    for device_name, config in self.configs.items():
        with steps.start(
                f"Looking for running/startup diff on {device_name}",
                continue_=True) as device_step:
            # Build the exclusion list from both config trees.
            exclusion = [
                key for key in config['startup']
                if key.startswith(startup_noise)
            ]
            exclusion += [
                key for key in config['running']
                if key.startswith(running_noise)
            ]
            # Compare configs
            diff = Diff(config['startup'], config['running'],
                        exclude=exclusion)
            diff.findDiff()
            if str(diff):
                device_step.failed(
                    f'Device {device_name} has the following config difference:\n{str(diff)}'
                )
def _verify_finds_ops(self, ops, requirements, missing=False, all_keys=False,
                      obj_mod=None, org_req=None):
    '''Verify multiple requirements for ops.

    First verifies each requirement individually, then normalizes the ops
    snapshot back to the original one (per populate requirement) so the
    remaining keys can be diffed against the stored snapshot.
    '''
    for req in requirements:
        self._verify_find(ops, [req], missing, all_keys)
    # Each requirement can produce many populate_reqs (Many vrfs
    # for example) so loop over them
    for populate_req in requirements:
        # Let's modify the ops value to be equal to the original
        # snapshot. This will allow for comparing the other keys
        if obj_mod in self._ops_ret:
            try:
                self._modify_ops_snapshot(original=self._ops_ret[obj_mod],
                                          current=ops, path=populate_req)
            except Exception as e:
                # Best effort: if normalization fails, skip the comparison
                # entirely rather than raising a misleading diff.
                log.warning("No comparison will be done between "
                            "original snapshot and snapshot after "
                            "configuration\n{e}".format(e=str(e)))
                return
    # Alright now compare
    if obj_mod in self._ops_ret:
        # add handle for modify_exclude and exclude
        exclude = self._populate_exclude(org_req['exclude'])
        diff = Diff(self._ops_ret[obj_mod], ops,
                    exclude=exclude + ['callables'])
        diff.findDiff()
        if str(diff):
            raise Exception("The output is not same with diff\n{}"
                            .format(str(diff)))
def verify_recover(self, uut, wait_time=20): '''Figure out if bgp is configured and up''' ### Code replaced by using Verification! log.info('Sleeping for {w}'.format(w=wait_time)) time.sleep(wait_time) ### # Check if there is a bgp_id # And it is running output = uut.parse('show bgp process vrf all') if output['bgp_tag'] != self.bgp_id: self.failed("Bgp id {bgp_id} is not showing anymore in the " "output of the cmd, this is " "unexpected!".format(bgp_id=self.bgp_id)) # Now see if its down if output['bgp_protocol_state'] != 'running': self.failed( "Reconfigure of Bgp {bgp_id} did not work as it is not " "running".format(bgp_id=self.bgp_id)) ### Code replaced by using Verification! # Uncomment me diff = Diff(self.initial_output, output) diff.findDiff() if diff.diffs: self.failed('Unexpected change has happened to our device state ' '\n{d}'.format(d=diff))
def genie_config_diff(self, output1, output2, mode=None, exclude=None):
    """Ansible filter: diff two raw configs and return the diff as lines."""
    if not PY3:
        raise AnsibleFilterError("Genie requires Python 3")
    if not HAS_GENIE:
        raise AnsibleFilterError(
            "Genie not found. Run 'pip install genie'")
    if not HAS_PYATS:
        raise AnsibleFilterError(
            "pyATS not found. Run 'pip install pyats'")

    supported_mode = ['add', 'remove', 'modified', None]
    if mode not in supported_mode:
        raise AnsibleFilterError(
            "Mode '%s' is not supported. Specify %s." % (mode, supported_mode))

    def _to_tree(raw):
        # Parse the raw running-config into Genie's nested dict form.
        parsed = Config(raw)
        parsed.tree()
        return parsed.config

    dd = Diff(_to_tree(output1), _to_tree(output2), mode=mode, exclude=exclude)
    dd.findDiff()
    return str(dd).split('\n')
def compare_profile(self, pts, pts_compare, devices):
    '''Compare system profiles taken as snapshots during the run.

    Args:
        pts: pickle file path or parameters key holding the first snapshot.
        pts_compare: parameters key holding the second snapshot.
        devices: iterable of device names to restrict the comparison to.

    Fails the run (via self.builtin.fail) if any feature differs on any of
    the requested devices, otherwise passes execution.
    '''
    if os.path.isfile(pts):
        compare1 = self.testscript.unpickle(pts)
    else:
        compare1 = self.testscript.parameters[pts]
    compare2 = self.testscript.parameters[pts_compare]
    exclude_list = [
        'device', 'maker', 'diff_ignore', 'callables',
        '(Current configuration.*)'
    ]
    if 'exclude' in self.pts_datafile:
        exclude_list.extend(self.pts_datafile['exclude'])
    for fet in compare1:
        failed = []
        # BUG FIX: copy the base list instead of aliasing it. The original
        # bound feature_exclude_list to exclude_list and then extend()ed it,
        # so one feature's excludes leaked into every later feature's diff.
        feature_exclude_list = list(exclude_list)
        # Get the information too from the pts_data
        try:
            feature_exclude_list.extend(self.pts_datafile[fet]['exclude'])
        except KeyError:
            pass
        for dev in compare1[fet]:
            # Only compare for the specified devices
            if dev not in devices:
                continue
            diff = Diff(compare1[fet][dev], compare2[fet][dev],
                        exclude=feature_exclude_list)
            diff.findDiff()
            if len(diff.diffs):
                failed.append((dev, diff))
        if failed:
            msg = [
                "Comparison between {pts} and "
                "{OPS} is different for feature '{f}' "
                "for device:\n".format(pts=pts, OPS=pts_compare, f=fet)
            ]
            for device, diff in failed:
                msg.append("'{d}'\n{diff}".format(d=device, diff=diff))
            self.builtin.fail('\n'.join(msg))
        else:
            msg = [
                "Comparison between {pts} and "
                "{OPS} is identical\n".format(pts=pts, OPS=pts_compare)
            ]
            self.builtin.pass_execution('\n'.join(msg))
def diff(self, steps, section, name, device, pre, post, alias=None,
         continue_=True, fail_different=False, command=None, exclude=None,
         processor='', health_uids=None, health_groups=None,
         health_sections=None, feature=None, mode=None, **kwargs):
    """Perform a Genie Diff between pre and post snapshots for a device.

    Args:
        steps: pyATS steps object used to open a sub-step.
        section/name/alias/processor/health_*: processor plumbing; unused here
            beyond the signature contract.
        device: device whose name labels the step.
        pre/post: snapshots (parsed structures) to compare.
        continue_ (bool): keep running the section if this step fails.
        fail_different (bool): fail the step when the snapshots differ.
        command (str): command whose parser exclude list is applied.
        exclude (list): extra keys/regexes to exclude from the diff.
        feature (str): Ops feature whose exclude list is also applied.
        mode (str): Diff mode; one of 'add', 'remove', 'modified'.
    """
    msg = kwargs.pop('custom_substep_message',
                     "Perform Diff for '{device}'".format(device=device.name))
    with steps.start(msg, continue_=continue_) as step:
        exclude_items = _get_exclude(command, device)
        # if feature is given, get exclude from the Ops
        if feature:
            try:
                exclude_items.extend(get_ops_exclude(feature, device))
            except LookupError:
                log.warning(
                    "No Ops for {feature} was found. Couldn't retrieve exclude list."
                    .format(feature=feature))
        # check given mode
        if mode and mode not in ['add', 'remove', 'modified']:
            log.warning(
                "Wrong mode '{mode}' was given. Ignored.".format(mode=mode))
            # BUG FIX: actually ignore the invalid mode as the warning
            # promises; previously the bad value was still passed to Diff().
            mode = None
        if exclude and isinstance(exclude, list):
            exclude_items.extend(exclude)
        log.debug('exclude: {exclude}'.format(exclude=exclude_items))
        try:
            diff = Diff(pre, post, exclude=exclude_items, mode=mode)
        except Exception as e:
            step.failed(str(e))
        diff.findDiff()
        # check content of diff
        if str(diff):
            log.info(diff)
            if fail_different:
                step.failed('{pre} and {post} are not '
                            'identical'.format(pre=pre, post=post))
        else:
            step.passed('{pre} and {post} are identical'.format(pre=pre,
                                                                post=post))
def compare_output_before_after(hostname, test_name):
    """Print the 'modified' diff between a test's before and after outputs."""
    snapshots = [
        json.loads(db.get_output_test(hostname, test_name, stage))
        for stage in ("before", "after")
    ]
    dd = Diff(snapshots[0], snapshots[1], mode='modified')
    dd.findDiff()
    print(dd)
def main():
    """Post-upgrade validation: collect configs/getters with Nornir and diff
    them against the pre-upgrade snapshots saved on disk."""
    nr = InitNornir(config_file="config.yaml")
    # Filter devices to run against
    nr = nr.filter(F(groups__contains="iosv"))
    print('Running postvalidaiton.py against the following Nornir inventory hosts:',
          nr.inventory.hosts.keys())
    # Ask for credentials at runtime instead of storing.
    nornir_set_creds(nr)
    print("Collecting running configurations and operational values\n")
    resultconf = nr.run(task=collect_configs)
    resultgetters = nr.run(task=collect_getters)
    #import ipdb; ipdb.set_trace()
    # Loop through napalm getters and output current running version.
    prYellow('Current IOS Running Versions:')
    for host in resultgetters:
        print(host, '>>', resultgetters[host][1].result['facts']['os_version'])
    # Perform a Diff between the pre and post nornir getter files we saved.
    for host in nr.inventory.hosts:
        # dont try to open files or compare if a host failed collection
        if host in resultconf.failed_hosts or host in resultgetters.failed_hosts:
            print('!', host, 'failed collection and Op State will not be compared\n')
            # TODO: log netmiko/nornir error to file. otherwise it should exist in nornir.log.
            continue
        else:
            # load facts in hosts pre and post folder and store to var.
            # since were not using pyats native learn objects we must loop through files
            prGreen("vvv --- " + host + " --- Begin Comparison between Pre Upgrade and Post Upgrade operational values vvv")
            for filename in os.listdir(initial_facts_dir + host):
                with open(initial_facts_dir + host + '/' + filename, 'r') as f:
                    initialstate = json.load(f)
                with open(facts_dir + host + '/' + filename, 'r') as f:
                    poststate = json.load(f)
                compare = Diff(initialstate, poststate)
                compare.findDiff()
                print('#', filename, '#\n', compare)
            prGreen("^^^ --- " + host + " --- End Comparison between Pre Upgrade and Post Upgrade operational values ^^^\n")
            prGreen("vvv --- " + host + " --- Begin Comparison between Pre Upgrade and Post Upgrade configurations vvv")
            with open(initial_config_dir + host + '-running.cfg', 'r') as f:
                cfg = f.read()
            initialconfig = Config(cfg)
            initialconfig.tree()
            with open(config_dir + host + '-running.cfg', 'r') as f:
                cfg = f.read()
            postconfig = Config(cfg)
            postconfig.tree()
            # NOTE(review): the Config objects themselves are passed to Diff
            # here, while other code in this project diffs the .config dict
            # attribute - confirm Diff handles Config instances as intended.
            compare = Diff(initialconfig, postconfig)
            compare.findDiff()
            prCyan("# " + os.path.basename(f.name) + " #")
            print(compare)
            #ipdb.set_trace()
            prGreen("^^^ --- " + host + " --- End Comparison between Pre Upgrade and Post Upgrade configurations ^^^\n")
def main():
    """Ansible module entry point: run 'show run' over the persistent
    connection, build a Genie config tree, and optionally diff it against
    the user-supplied 'compare' dict."""
    argument_spec = dict(
        compare=dict(type='dict', required=False),
        sendonly=dict(type='bool', default=False, required=False),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    if not PY3:
        module.fail_json(msg="pyATS/Genie requires Python 3")
    if not HAS_GENIE:
        module.fail_json(msg="Genie not found Run 'pip install genie'")
    if not HAS_PYATS:
        module.fail_json(msg="pyATS not found. Run 'pip install pyats'")
    # BUG FIX: the original guarded check_mode with
    # module.params['command'].startswith('show'), but 'command' is not in
    # argument_spec, so that lookup raised KeyError. This module always
    # executes 'show run' (a show command), so check_mode is always safe
    # and the guard is removed.
    warnings = list()
    result = {'changed': False, 'warnings': warnings}
    connection = Connection(module._socket_path)
    response = ''
    try:
        response = connection.get(command='show run')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    config = Config(response)
    config.tree()
    if module.params['compare']:
        diff = Diff(config.config, module.params['compare'])
        diff.findDiff()
    else:
        diff = None
    try:
        result['json'] = module.from_json(response)
    except ValueError:
        pass
    result.update({
        'stdout': response,
        'structured': config.config,
        'diff': "{0}".format(diff)
    })
    module.exit_json(**result)
def verify_object(section, name, verf, ret, exclude, device, last):
    """Store or compare a verification snapshot for a device.

    First sighting of (device, name) stores ret as the baseline; later
    sightings diff against it and fail the section on any difference.
    """
    # The name begins with Verify, so remove pre_ and post_ prefixes
    name = name.replace('pre_', '').replace('post_', '')
    # if all are invalid then fail section
    if issubclass(type(ret), Base) and not ret.maker.summary_table['Empty'] and not \
       ret.maker.summary_table['Executed Fine'] and ret.maker.summary_table['Not accepted']:
        section.failed('Invalid command has been executed')
        return
    # Check if snapshot exists
    # Then first time see this device
    if device not in verf:
        # Add everything as it didnt exists anyway
        verf[device] = {}
        verf[device][name] = ret
        log.info('Saving initial snapshot of this command - To be '
                 'used for comparing in future verification')
        return
    # Case where device exists, then check if section.name exists
    if name in verf[device]:
        # Then compare
        new = ret
        old = verf[device][name]
        log.info('Comparing current snapshot to initial snapshot\nExcluding '
                 'these keys {exc}'.format(exc=exclude))
        # Always exclude the bookkeeping attributes from the comparison.
        exclude = ['maker', 'callables', 'device'] + exclude
        diff = Diff(old, new, exclude=exclude)
        diff.findDiff()
        if diff.diffs:
            # then there was some diffs
            # Alright as we don't want all future verification to
            # also fail, retake snapshot. Only take snapshot if last and
            # parent is testscript
            if last and isinstance(section.parent, TestScript):
                verf[device][name] = new
                log.error('Retaking snapshot for the verification as the '
                          'comparison with original one failed')
            section.failed('Found a difference between original snapshot '
                           'and current snapshot of the '
                           'Verification\n{d}'.format(d=str(diff)))
        else:
            log.info('Snapshot is same as initial snapshot')
    else:
        # Store
        log.info('Saving initial snapshot of this command - To be '
                 'used for comparing in future verification')
        verf[device][name] = ret
def compare_configs(self):
    """Diff the golden config against the current config and set the
    monitor status/message accordingly."""
    delta = Diff(self.golden_config, self.current_config)
    delta.findDiff()
    #import ipdb; ipdb.set_trace()
    if str(delta):
        self.status = self.STATUS_CRITICAL
        self.set_message("Running Config has changed: {}".format(delta))
    else:
        self.status = self.STATUS_OK
        self.set_message("No changes in Running Config")
def compare(self, uut, cmd, context1, context2, exclude=None):
    '''Compare between the two contexts.

    Args:
        uut: device under test (unused here; kept for interface parity).
        cmd: command name, used only in the failure message.
        context1/context2: context labels, used only in the failure message.
        exclude (list): extra keys to exclude on top of default_exclude.
    '''
    # Idiom fix: avoid a mutable default argument; treat None as "no extra
    # excludes". Behavior is unchanged for all existing callers.
    exclude = default_exclude + (exclude or [])
    diff = Diff(self.context1_ops, self.context2_ops, exclude=exclude)
    diff.findDiff()
    # Verify that all keys are present
    if diff.diffs:
        self.failed("'{c1}' is not equal to '{c2}' for "
                    "command '{cmd}'\n{e}".format(c1=context1, c2=context2,
                                                  e=str(diff), cmd=cmd))
def test04(self, steps):
    """Compare pre/post IP fragmentation counters and fail on new drops.

    Checks both 'ip_frags_fragmented' (packets fragmented) and
    'ip_frags_no_fragmented' (drops because fragmentation was impossible);
    only the latter fails the step.
    """
    with steps.start(f"Compare pre-state to post fragmentation states",
                     continue_=True) as step:
        # Verification: diff the two per-device counter snapshots.
        diff = Diff(self.pre_dic, self.post_dic)
        diff.findDiff()
        diff = str(diff)
        if not diff:
            log.info(f'No new Fragmentaion cases detected - Test Passed')
        else:
            log.info(f'New Fragmentation cases have occured - Test Failed')
            for name in self.devices.keys():
                # Checking for fragmentaitons
                #print(f'the pre dictionary {self.pre_dic[name]}')
                #print(f'the psot dictionary {self.post_dic[name]}')
                if self.pre_dic[name] and self.post_dic[name]:
                    before_fragmented = self.pre_dic[name][
                        'ip_frags_fragmented']
                    after_fragmented = self.post_dic[name][
                        'ip_frags_fragmented']
                    if before_fragmented > after_fragmented:
                        # Counter went backwards - likely cleared; skip.
                        log.info(
                            f"Hostname {name}: Invalid. Pre state is greater than Post state. Counters may have been cleared. Skipping "
                        )
                    else:
                        delta = after_fragmented - before_fragmented
                        if delta >= 1:
                            log.info(
                                f"Hostname {name}: Fragmentation as occured. {delta} packets fragmented"
                            )
                    # Checking for drops due to not being able to fragment
                    before_fragmented = self.pre_dic[name][
                        'ip_frags_no_fragmented']
                    after_fragmented = self.post_dic[name][
                        'ip_frags_no_fragmented']
                    if before_fragmented > after_fragmented:
                        log.info(
                            f"Hostname {name}: Invalid. Pre state is greater than Post state. Counters may have been cleared. Skipping "
                        )
                    else:
                        delta = after_fragmented - before_fragmented
                        if delta >= 1:
                            log.info(
                                f"Hostname {name}: New packet drops have occured due to failure to fragment one or more packets. {delta} new drops"
                            )
                            step.failed()
def get_diffs_platform(platform_before, platform_after):
    """ Check differences between two parsed outputs from 'show platform'

    Args:
        platform_before ('dict'): Parsed output from 'show platform'
        platform_after ('dict'): Parsed output from 'show platform'
    Return:
        True - states match (or both are some 'ok' variant)
        False - a slot/subslot is missing or its state changed
    Raises:
        None
    """
    # Diff is computed only for logging; the decision below is made by
    # walking the slot/rp-lc/type state tree directly.
    dd = Diff(platform_before, platform_after, exclude=["insert_time"])
    dd.findDiff()
    for slot in platform_after["slot"]:
        for rp_lc in platform_after["slot"][slot]:
            for type_ in platform_after["slot"][slot][rp_lc]:
                state_after = platform_after["slot"][slot][rp_lc][type_][
                    "state"]
                # .get chain returns False when the slot/type did not
                # exist before, which counts as a difference.
                state_before = (platform_before["slot"].get(slot, {}).get(
                    rp_lc, {}).get(type_, {}).get("state", False))
                if not state_before:
                    log.info(
                        "Found differences between outputs:\n{out}".format(
                            out=dd))
                    return False
                # NOTE(review): subslots are only logged when missing; this
                # branch does not return False - confirm that is intended.
                for subslot in platform_before["slot"][slot][rp_lc].get(
                        "subslot", []):
                    subslot_state = (platform_after["slot"][slot][rp_lc].get(
                        subslot, {}).get("state", False))
                    if not subslot_state:
                        log.info(
                            "Found differences between outputs:\n{out}".format(
                                out=dd))
                # Treat any pair of 'ok'-ish states (e.g. 'ok, active') as
                # equivalent; anything else must match exactly.
                if state_after == state_before or ("ok" in state_after
                                                   and "ok" in state_before):
                    continue
                else:
                    log.info(
                        "Found differences between outputs:\n{out}".format(
                            out=dd))
                    return False
    return True
def compared_with_running_config(device, config):
    """ Show difference between given config and current running config

    Args:
        device: device to fetch the running config from
        config ('dict'): Config to compare with
    Raise:
        None
    Returns:
        Diff
    """
    running = device.api.get_running_config_dict()
    delta = Diff(running, config)
    delta.findDiff()
    return delta
def pyats_diff(self, output1, output2):
    """Ansible filter: return the Genie Diff of two parsed structures."""
    # Validate the runtime prerequisites before touching Genie objects.
    checks = (
        (PY3, "Genie requires Python 3"),
        (HAS_GENIE, "Genie not found. Run 'pip install genie'"),
        (HAS_PYATS, "pyATS not found. Run 'pip install pyats'"),
    )
    for ok, message in checks:
        if not ok:
            raise AnsibleFilterError(message)
    result = Diff(output1, output2)
    result.findDiff()
    return result
def compare_config_dicts(a, b, exclude=None):
    """ Compare two configuration dicts and return the differences

    Args:
        a (`dict`): Configuration dict
        b (`dict`): Configuration dict
        exclude (`list`): List of item to ignore. Supports Regex.
                          Regex must begins with ( )
    Returns:
        out (`str`): differences
    """
    # Always drop the ephemeral header/footer lines, plus caller extras.
    excludes = [r"(^Load|Time|Build|Current|Using|exit|end)", *(exclude or [])]
    delta = Diff(a, b, exclude=excludes)
    delta.findDiff()
    return str(delta)
def diff_configuration(device, config1, config2):
    """ Show difference between two configurations

    Args:
        device: device (kept for interface parity; not used here)
        config1 ('str'): Configuration one
        config2 ('str'): Configuration two
    Raise:
        None
    Returns:
        Diff
    """
    # Build a Genie config tree for each raw configuration string.
    trees = []
    for raw in (config1, config2):
        parsed = Config(raw)
        parsed.tree()
        trees.append(parsed.config)
    delta = Diff(trees[0], trees[1])
    delta.findDiff()
    return delta
def diff(self, steps, section, name, device, pre, post, alias=None,
         continue_=True, fail_different=False, command=None, exclude=None,
         processor=''):
    """Diff pre/post snapshots for a device inside a pyATS step; pass the
    step when identical, optionally fail it when different."""
    step_msg = "Perform Diff for '{device}'".format(device=device.name)
    with steps.start(step_msg, continue_=continue_) as step:
        exclude_items = _get_exclude(command, device)
        if exclude and isinstance(exclude, list):
            exclude_items.extend(exclude)
        log.debug('exclude: {exclude}'.format(exclude=exclude_items))
        try:
            result = Diff(pre, post, exclude=exclude_items)
        except Exception as e:
            step.failed(str(e))
        result.findDiff()
        # Decide on the rendered diff text.
        if str(result) == '':
            step.passed('{pre} and {post} are identical'.format(pre=pre,
                                                                post=post))
        else:
            log.info(result)
            if fail_different:
                step.failed('{pre} and {post} are not '
                            'identical'.format(pre=pre, post=post))
def test_golden(self, steps, local_class, operating_system,
                _display_only_failed=None, token=None, number=None):
    """Test step that finds any output named with _output.txt, and compares
    to similar named .py file."""
    if token:
        folder_root = f"{operating_system}/{token}/{local_class.__name__}/cli/equal"
    else:
        folder_root = f"{operating_system}/{local_class.__name__}/cli/equal"
    # Get list of output files to parse and sort (natural/alphanumeric order)
    convert = lambda text: int(text) if text.isdigit() else text
    aph_key = lambda key: [convert(c) for c in re.split("([0-9]+)", key)]
    # NOTE(review): due to Python precedence this reads as
    # (number and not operating_system) or (not local_class) - confirm
    # that is the intended grouping for the "run a single numbered test"
    # selection.
    if number and not operating_system or not local_class:
        output_glob = sorted(
            glob.glob(f"{folder_root}/golden_output{number}_output.txt"),
            key=aph_key,
        )
    else:
        output_glob = sorted(glob.glob(f"{folder_root}/*_output.txt"),
                             key=aph_key)
    if len(output_glob) == 0:
        steps.failed(
            f"No files found in appropriate directory for {local_class}")
    # Look for any files ending with _output.txt, presume the user defined
    # name from that (based on truncating that _output.txt suffix) and
    # obtaining expected results and potentially an arguments file
    for user_defined in output_glob:
        user_test = os.path.basename(user_defined[:-len("_output.txt")])
        if token:
            msg = f"Gold -> {operating_system} -> Token {token} -> {local_class.__name__} -> {user_test}"
        else:
            msg = f"Gold -> {operating_system} -> {local_class.__name__} -> {user_test}"
        with steps.start(msg, continue_=True):
            golden_output_str = read_from_file(
                f"{folder_root}/{user_test}_output.txt")
            golden_output = {"execute.return_value": golden_output_str}
            golden_parsed_output = read_python_file(
                f"{folder_root}/{user_test}_expected.py")
            arguments = {}
            if os.path.exists(f"{folder_root}/{user_test}_arguments.json"):
                arguments = read_json_file(
                    f"{folder_root}/{user_test}_arguments.json")
            # Parse the golden raw output through the class under test.
            device = Mock(**golden_output)
            obj = local_class(device=device)
            parsed_output = obj.parse(**arguments)
            # Use Diff method to get the difference between
            # what is expected and the parsed output
            dd = Diff(parsed_output, golden_parsed_output)
            dd.findDiff()
            if parsed_output != golden_parsed_output:
                # if -f flag provided, then add the screen handler back into
                # the root.handlers to displayed failed tests. Decorator removes
                # screen handler from root.handlers after failed tests are displayed
                # to stdout
                if _display_only_failed:
                    self.add_logger()
                    log.info(banner(msg))
                # Format expected and parsed output in a nice format
                parsed_json_data = json.dumps(parsed_output, indent=4,
                                              sort_keys=True)
                golden_parsed_output_json_data = json.dumps(
                    golden_parsed_output, indent=4, sort_keys=True)
                # Display device output, parsed output, and golden_output
                # of failed tests
                log.info(
                    "\nThe following is the device output before it is parsed:\n{}\n"
                    .format(golden_output['execute.return_value']),
                    extra={'colour': 'yellow'})
                log.info(
                    "The following is your device's parsed output:\n{}\n".
                    format(parsed_json_data),
                    extra={'colour': 'yellow'})
                log.info(
                    "The following is your expected output:\n{}\n".format(
                        golden_parsed_output_json_data),
                    extra={'colour': 'yellow'})
                log.info(
                    "The following is the difference between the two outputs:\n",
                    extra={'colour': 'yellow'})
                # Display the diff between parsed output and golden_output
                log.info(str(dd), extra={'colour': 'yellow'})
                raise AssertionError(
                    "Device output and expected output do not match")
            else:
                # If tests pass, display the device output in debug mode
                # But first check if the screen handler is removed, if it is
                # put it back into the root otherwise just display to stdout
                if self.temporary_screen_handler not in log.root.handlers and self.temporary_screen_handler != None:
                    self.add_logger()
                    logging.debug(banner(msg))
                    logging.debug(
                        "\nThe following is the device output for the passed parser:\n{}\n"
                        .format(golden_output['execute.return_value']),
                        extra={'colour': 'yellow'})
                    self.remove_logger()
                else:
                    logging.debug(banner(msg))
                    logging.debug(
                        "\nThe following is the device output for the passed parser:\n{}\n"
                        .format(golden_output['execute.return_value']),
                        extra={'colour': 'yellow'})
if __name__ == '__main__':
    # CLI entry point: diff two YAML snapshot files and print the result.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-original',
                        metavar='FILE',
                        type=str,
                        default=None,
                        help='File containing original information')
    # BUG FIX: the help text was copy-pasted from -original; this flag
    # takes the NEW snapshot file.
    parser.add_argument('-new',
                        metavar='FILE',
                        type=str,
                        default=None,
                        help='File containing new information')
    custom_args = parser.parse_known_args()[0]
    with open(custom_args.original, 'r') as f:
        original = f.read()
    with open(custom_args.new, 'r') as f:
        new = f.read()
    new = yaml.safe_load(new)
    original = yaml.safe_load(original)
    diff = Diff(original, new)
    diff.findDiff()
    print(diff)
def main():
    """Ansible module entry point: run an arbitrary command over the
    persistent connection, parse it with the matching Genie parser, and
    optionally diff the parsed structure against a supplied dict."""
    argument_spec = dict(command=dict(type='str', required=True),
                         prompt=dict(type='list', required=False),
                         answer=dict(type='list', required=False),
                         compare=dict(type='dict', required=False),
                         sendonly=dict(type='bool', default=False, required=False),
                         # newline=dict(type='bool', default=True, required=False),
                         # check_all=dict(type='bool', default=False, required=False),
                         )
    required_together = [['prompt', 'answer']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_together=required_together,
                           supports_check_mode=True)
    if not PY3:
        module.fail_json(msg="pyATS/Genie requires Python 3")
    if not HAS_GENIE:
        module.fail_json(msg="Genie not found. Run 'pip install genie'")
    if not HAS_PYATS:
        module.fail_json(msg="pyATS not found. Run 'pip install pyats'")
    if module.check_mode and not module.params['command'].startswith('show'):
        module.fail_json(
            msg='Only show commands are supported when using check_mode, not '
            'executing %s' % module.params['command']
        )
    warnings = list()
    result = {'changed': False, 'warnings': warnings}
    connection = Connection(module._socket_path)
    capabilities = json.loads(connection.get_capabilities())
    # Genie has no plain 'ios' abstraction; map it onto iosxe.
    if capabilities['device_info']['network_os'] == 'ios':
        genie_os = 'iosxe'
    else:
        genie_os = capabilities['device_info']['network_os']
    # Pop 'compare' so the remaining params can be passed straight to
    # connection.get().
    compare = module.params.pop('compare')
    response = ''
    try:
        response = connection.get(**module.params)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    # Build a lightweight offline Genie device just to resolve the parser.
    device = Device("uut", os=genie_os)
    device.custom.setdefault("abstraction", {})["order"] = ["os"]
    device.cli = AttrDict({"execute": None})
    try:
        get_parser(module.params['command'], device)
    except Exception as e:
        module.fail_json(msg="Unable to find parser for command '{0}' ({1})".format(module.params['command'], e))
    try:
        parsed_output = device.parse(module.params['command'], output=response)
    except Exception as e:
        module.fail_json(msg="Unable to parse output for command '{0}' ({1})".format(module.params['command'], e))
    # import sys;
    # sys.stdin = open('/dev/tty')
    # import pdb;
    # pdb.set_trace()
    if compare:
        # Exclude the parser's known-volatile keys from the comparison.
        diff = Diff(parsed_output, compare,
                    exclude=get_parser_exclude(module.params['command'], device))
        diff.findDiff()
    else:
        diff = None
    if not module.params['sendonly']:
        try:
            result['json'] = module.from_json(response)
        except ValueError:
            pass
    result.update({
        'stdout': response,
        'structured': parsed_output,
        'diff': "{0}".format(diff),
        'exclude': get_parser_exclude(module.params['command'], device),
    })
    module.exit_json(**result)
def restore_configuration(self, device, method, abstract, iteration=10,
                          interval=60, compare=False, compare_exclude=[],
                          reload_timeout=1200):
    """Restore device configuration (IOS-XE flavor) via one of:
    'checkpoint' rollback, 'local' full running-config reapply,
    'config_replace' from an archive file, or a custom modify_func.

    Retries up to `iteration` times with `interval` seconds between tries.
    When compare=True the restored running-config is diffed against the
    config-replace file and a mismatch raises.
    """
    if method == 'checkpoint':
        # Enable the feature
        for i in range(1, iteration):
            try:
                out = self.rollback_checkpoint(device=device,
                                               name=self.ckname)
            except Exception as e:
                raise Exception('Unable to rollback config')
            if out and 'Rollback Done' in out:
                break
            else:
                log.info('Rollback configuration failed: sleeping {} '
                         'seconds and retrying...'.format(interval))
                time.sleep(interval)
        else:
            # for/else: every retry failed.
            raise Exception('Unable to rollback config')
        # Delete the checkpoint
        self.create_delete_checkpoint(device=device, name=self.ckname,
                                      abstract=abstract, action='delete')
        # Check if checkpoint is successfully deleted
        self.check_checkpoint_status(device=device, name=self.ckname,
                                     expect='delete', abstract=abstract)
    elif method == 'local':
        # recover the device with whole running-config
        device.configure(self.run_config)
    elif method == 'config_replace':
        # Interactive prompts expected from 'configure replace'.
        dialog = Dialog([
            Statement(pattern=r'This will apply all necessary.*',
                      action='sendline(Y)',
                      loop_continue=True,
                      continue_timer=False),
            Statement(pattern=r'less than running config.*',
                      action='sendline(Y)',
                      loop_continue=True,
                      continue_timer=False),
        ])
        # dialog for plan B (copy to startup-config + reload)
        alt_dialog = Dialog([
            Statement(pattern=r'Destination filename.*',
                      action='sendline()',
                      loop_continue=True,
                      continue_timer=False),
        ])
        for i in range(1, iteration):
            # configure replace location:<filename>
            out = device.execute('configure replace {}'.\
                                 format(self.to_url), reply=dialog,
                                 error_pattern=[])
            if out and 'Rollback Done' in out:
                break
            elif 'invalid' in out.lower():
                # device does not support config replace, do it in a
                # different way: overwrite startup-config and reload.
                device.execute('erase startup-config')
                device.execute('copy {} startup-config'.format(self.to_url),
                               reply=alt_dialog)
                device.reload(reload_timeout=reload_timeout)
                break
            else:
                log.info('Config replace failed: sleeping {} seconds before'
                         ' retrying.'.format(interval))
                time.sleep(interval)
        else:
            raise Exception('Unable to execute config replace')
        # Compare restored configuration to details in file
        if compare:
            log.info(
                "Comparing current running-config with config-replace file")
            # Default volatile lines excluded from the comparison.
            exclude = [
                'device', 'maker', 'diff_ignore', 'callables',
                '(Current configuration.*)', '(.*Building configuration.*)',
                '(.*Load for.*)', '(.*Time source.*)'
            ]
            if compare_exclude:
                if isinstance(compare_exclude, str):
                    exclude.extend([compare_exclude])
                else:
                    exclude.extend(compare_exclude)
            # show run
            show_run_output = device.execute('show running-config')
            show_run_config = Config(show_run_output)
            show_run_config.tree()
            # location:<filename> contents
            more_file = device.execute('more {}'.format(self.to_url))
            more_file_config = Config(more_file)
            more_file_config.tree()
            # Diff 'show run' and config replace file contents
            diff = Diff(show_run_config.config, more_file_config.config,
                        exclude=exclude)
            diff.findDiff()
            # Check for differences
            if len(diff.diffs):
                log.error("Differences observed betweenrunning-config and "
                          "config-replce file:'{f}' for device {d}:".\
                          format(f=self.to_url, d=device.name))
                log.error(str(diff.diffs))
                raise Exception(
                    "Comparison between running-config and "
                    "config-replace file '{f}' failed for device"
                    " {d}".format(f=self.to_url, d=device.name))
            else:
                log.info("Comparison between running-config and config-replace"
                         "file '{f}' passed for device {d}".\
                         format(f=self.to_url, d=device.name))
        # Delete location:<filename>
        self.filetransfer = FileUtils.from_device(device)
        self.filename = self.to_url
        self.filetransfer.deletefile(target=self.to_url, device=device)
        # Verify location:<filename> deleted
        dir_output = self.filetransfer.dir(target=self.to_url, device=device)
        for file in dir_output:
            if self.filename in file:
                break
        else:
            # for/else: filename no longer listed, deletion succeeded.
            log.info("Successfully deleted '{}'".format(self.to_url))
            return
        raise Exception("Unable to delete '{}'".format(self.to_url))
    else:
        # modify the device via callable function
        # using Conf object
        self.modify_func(device=device, conf=self.conf,
                         values_dict=self.conf_argument, recover=True,
                         **self.specific_args)
def restore_configuration(self, device, method, abstract, iteration=10,
                          interval=60, compare=False, compare_exclude=None,
                          reload_timeout=None):
    '''Restore a previously-saved configuration onto the device.

    Args:
        device (`obj`): Device object to restore.
        method (`str`): Restore method: 'checkpoint', 'local' or
            'config_replace'. Any other value is a no-op.
        abstract (`obj`): Abstraction object (kept for API compatibility;
            not used in this implementation).
        iteration (`int`): Upper bound on rollback/commit retry attempts.
        interval (`int`): Seconds to sleep between retry attempts.
        compare (`bool`): config_replace only — when True, diff the
            restored running-config against the config-replace file.
        compare_exclude (`str` or `list`): Extra Diff exclude pattern(s).
            Defaults to no extra patterns. (Was a mutable default `[]`.)
        reload_timeout (`int`): Kept for API compatibility; not used here.

    Raises:
        Exception: when the rollback/commit keeps failing, when the
            comparison shows differences, or when the config-replace
            file cannot be deleted afterwards.
    '''
    if method == 'checkpoint':
        # Auto-answer 'y' to the rollback confirmation prompt
        dialog = Dialog([
            Statement(pattern=r'\[no\]',
                      action='sendline(y)',
                      loop_continue=True,
                      continue_timer=False)
        ])
        for i in range(1, iteration):
            # replace config with checkpoint
            cfg = 'load disk0:{name}\n'\
                  'commit replace'.format(name=self.ckname)
            output = device.configure(cfg, reply=dialog)
            if 'fail' not in output:
                break
            elif i == iteration - 1:
                raise Exception('Failed to rollback config to checkpoint')
            else:
                log.info('Rollback checkpoint failed: sleeping {} seconds '
                         'and retrying...'.format(interval))
                time.sleep(interval)

        # need to delete the config file on the device
        dialog = Dialog([
            Statement(pattern=r'\[confirm\]',
                      action='sendline(y)',
                      loop_continue=True,
                      continue_timer=False)
        ])
        device.execute('delete disk0:{name}'.format(name=self.ckname),
                       reply=dialog)

    # Keeping them for later enhancement
    elif method == 'local':
        pass

    elif method == 'config_replace':
        for i in range(1, iteration):
            # Execute commit replace
            cmd = "load {}\n"\
                  "commit replace".format(self.to_url)
            output = device.configure(cmd)
            if 'Failed to commit' not in output:
                break
            elif i == iteration - 1:
                raise Exception('Unable to execute commit replace')
            else:
                log.info('Commit replace failed: sleeping {} seconds before'
                         ' retrying.'.format(interval))
                device.execute('show configuration failed')
                time.sleep(interval)

        # Compare restored configuration to details in file
        if compare:
            log.info(
                "Comparing current running-config with config-replace file"
            )

            # Default keys/patterns Diff must always ignore
            exclude = ['device', 'maker', 'diff_ignore', 'callables',
                       '(Current configuration.*)']
            if compare_exclude:
                if isinstance(compare_exclude, str):
                    # Fixed: was exclude.extend([compare_exclude])
                    exclude.append(compare_exclude)
                else:
                    exclude.extend(compare_exclude)

            # show run
            show_run_output = device.execute('show running-config')
            show_run_config = Config(show_run_output)
            show_run_config.tree()

            # location:<filename> contents
            more_file = device.execute('more {}'.format(self.to_url))
            more_file_config = Config(more_file)
            more_file_config.tree()

            # Diff 'show run' and config replace file contents
            diff = Diff(show_run_config.config, more_file_config.config,
                        exclude=exclude)
            diff.findDiff()

            # Check for differences
            if len(diff.diffs):
                # Messages fixed: 'betweenrunning-config'/'config-replce'
                log.error("Differences observed between running-config and "
                          "config-replace file:'{f}' for device {d}:".format(
                              f=self.to_url, d=device.name))
                log.error(str(diff.diffs))
                raise Exception(
                    "Comparison between running-config and "
                    "config-replace file '{f}' failed for device"
                    " {d}".format(f=self.to_url, d=device.name))
            else:
                # Fixed missing space between 'config-replace' and 'file'
                log.info("Comparison between running-config and "
                         "config-replace file '{f}' passed for device "
                         "{d}".format(f=self.to_url, d=device.name))

        # Delete location:<filename>
        self.filetransfer = FileUtils.from_device(device)
        self.filename = self.to_url
        self.filetransfer.deletefile(target=self.to_url, device=device)

        # Verify location:<filename> deleted
        dir_output = self.filetransfer.dir(target=self.to_url, device=device)
        for file in dir_output:
            if self.filename in file:
                break
        else:
            # for/else: loop finished without finding the file -> deleted
            log.info("Successfully deleted '{}'".format(self.to_url))
            return
        raise Exception("Unable to delete '{}'".format(self.to_url))

    else:
        # Unknown restore method: nothing to do
        pass
def test04(self, steps):
    '''Compare pre- and post-trigger DMVPN interface states.

    Diffs self.pre_dic against self.post_dic. When any difference is
    found, logs per device which DMVPN interfaces disappeared or
    appeared, any peer-count change on common interfaces, and which NBMA
    peers were removed or added, then fails the step.

    Args:
        steps (`step obj`): aetest steps object.
    '''

    def _peer_list(peers):
        # 'nbma_peers' may be parsed as a plain string (single peer) or a
        # list of tuples; normalize both shapes to a flat list of strings.
        if isinstance(peers, str):
            return [peers]
        if isinstance(peers, list):
            return [str(x[0]) for x in peers]
        return []

    with steps.start(f"Compare pre-state to post interface states",
                     continue_=True) as step:
        # Verification
        diff = Diff(self.pre_dic, self.post_dic)
        diff.findDiff()
        diff = str(diff)
        # Debug prints to stdout replaced with log.debug so normal runs
        # stay quiet
        log.debug(diff)
        log.debug("pre and post....")
        log.debug(self.pre_dic)
        log.debug(self.post_dic)
        if not diff:
            log.info(f'No DMVPN changes detected - Test Passed')
        else:
            log.info(f'DMVPN changes detected - Test Failed')
            for device in self.devices.keys():
                pre_ints = self.pre_dic[device]
                post_ints = self.post_dic[device]

                # Find removed DMVPN interfaces
                missing = [x for x in pre_ints.keys()
                           if x not in post_ints.keys()]
                for inter in missing:
                    log.info(
                        f'{device} -- DMVPN no longer enabled on interface {inter}'
                    )

                # Find new DMVPN interfaces
                added = [x for x in post_ints.keys()
                         if x not in pre_ints.keys()]
                for inter in added:
                    log.info(
                        f'{device} -- New DMVPN interface detected, {inter}'
                    )

                # Create list of common DMVPN interfaces between pre and
                # post state capture (typo 'beween' fixed)
                common_ints = [x for x in pre_ints.keys()
                               if x in post_ints.keys()]
                for interface in common_ints:
                    pre_int = pre_ints[interface]
                    post_int = post_ints[interface]

                    # Check DMVPN Peer Count
                    if pre_int['peer_count'] != post_int['peer_count']:
                        log.info(
                            f"{device} -- DMVPN peer count changed. Peer count was {pre_int['peer_count']}. Now it is {post_int['peer_count']}"
                        )

                    # Check NBMA Address for peer
                    log.debug(f"pre --- {pre_int['nbma_peers']}")
                    log.debug(f"post --- {post_int['nbma_peers']}")

                    # NBMA peer results could be string or list. Do a
                    # conversion to list to ensure consistent data type
                    pre_peer_list = _peer_list(pre_int['nbma_peers'])
                    post_peer_list = _peer_list(post_int['nbma_peers'])

                    missing_peer = [x for x in pre_peer_list
                                    if x not in post_peer_list]
                    if missing_peer:
                        log.info(
                            f"{device} -- The following peers are missing {missing_peer}"
                        )
                    new_peer = [x for x in post_peer_list
                                if x not in pre_peer_list]
                    if new_peer:
                        log.info(
                            f"{device} -- The following peers have been added {new_peer}"
                        )
            step.failed()
def verify_process(self, repeat_restart, steps, timeout):
    '''Verify the process has been restarted

    Polls 'show system internal sysmgr service name <process>' until the
    process shows a later restart time, a new pid and a restart_count
    increased by exactly `repeat_restart`, then diffs the remaining
    output against the pre-restart snapshot (self.previous_output).

    Args:
        repeat_restart (`int`): number of restarts that were triggered;
            restart_count must have grown by exactly this amount.
        steps (`step obj`): aetest step object
        timeout (`obj`): Timeout-style object driving the retry loop
            via iterate()/sleep().

    Returns:
        None
    '''
    with steps.start('Verify process has restarted correctly') as step:
        # TempResult buffers failures so only the final outcome is
        # reported if the loop never succeeds
        temp = TempResult(container=step)
        while timeout.iterate():
            output = self.abstract.parser.show_system.\
                ShowSystemInternalSysmgrServiceName(device=\
                self.device).parse(process=self.process)
            if 'instance' not in output:
                temp.failed("No output for 'show system internal sysmgr "
                            "service name {p}'".format(p=self.process))
                timeout.sleep()
                continue

            # Check the if the process has changed pid
            try:
                pid = output['instance'][self.instance]['tag'][
                    self.tag]['pid']
                sap = output['instance'][self.instance]['tag'][
                    self.tag]['sap']
                restart_count = output['instance'][self.instance]['tag'][
                    self.tag]['restart_count']
                last_restart = output['instance'][self.instance]['tag'][
                    self.tag]['last_restart_date']
                # NOTE(review): format assumes e.g. 'Sat Jul 1 12:00:00 2023'
                last_restart_time = datetime.datetime.strptime(
                    last_restart, '%a %b %d %H:%M:%S %Y')
            except Exception as e:
                temp.failed("Issue retrieving information about "
                            "'{p}' process".format(p=self.process),
                            from_exception=e)

            # Make sure time has changed
            if not self.last_restart_time < last_restart_time:
                temp.failed("The restart time has not changed for "
                            "process '{p}'".format(p=self.process))
                timeout.sleep()
                continue

            # Make sure the pid has changed (skipped for processes in
            # self.reconnect, presumably those that drop the session)
            if self.process not in self.reconnect and pid == self.previous_pid:
                temp.failed("The pid has not changed for process '{p}'"
                            "\nprevious pid: {pp}"
                            "\ncurrent pid: "
                            "{cp}".format(p=self.process,
                                          pp=self.previous_pid,
                                          cp=pid))
                timeout.sleep()
                continue

            # Verify the restart_count has increased
            if self.process not in self.reconnect and\
                self.previous_restart_count + repeat_restart != restart_count:
                temp.failed('Restart count has not increased by {rr}'
                            '\nprevious count: {pc}'
                            '\ncurrent count: {cc}'.format(
                                rr=repeat_restart,
                                pc=self.previous_restart_count,
                                cc=restart_count))
                timeout.sleep()
                continue

            # exclude sap when the value is not in range [0, 1023]
            if sap > 1023:
                self.verify_exclude.append('sap')

            # Modify the original output so it does not fail the compare
            self.previous_output['instance'][self.instance]['tag'][self.tag]['restart_count'] =\
                restart_count
            self.previous_output['instance'][self.instance]['tag'][self.tag]['pid'] =\
                pid
            diff = Diff(self.previous_output, output,
                        exclude=self.verify_exclude)
            diff.findDiff()
            if diff.diffs:
                temp.failed("The device output has changed in an unexpected "
                            "way\n{d}".format(d=str(diff.diffs)))
                timeout.sleep()
                continue
            # All checks passed
            break
        else:
            # while/else: timeout exhausted without a clean pass ->
            # publish the buffered (failed) result
            temp.result()
def _verify_same(self, ops, initial, exclude, **kwargs): diff = Diff(initial, ops, exclude=exclude + ['callables']) diff.findDiff() if diff.diffs: raise Exception("Current ops is not equal to the initial Snapshot " "taken\n{e}".format(e=str(diff)))
def compare_profile(self, pts, pts_compare, devices): '''Compare system profiles taken as snapshots during the run''' if os.path.isfile(pts): compare1 = unpickle(pts) else: compare1 = self.testscript.parameters[pts] if os.path.isfile(pts_compare): compare2 = unpickle(pts_compare) else: compare2 = self.testscript.parameters[pts_compare] exclude_list = [ 'device', 'maker', 'diff_ignore', 'callables', '(Current configuration.*)', 'ops_schema' ] try: if 'exclude' in self.pts_datafile: exclude_list.extend(self.pts_datafile['exclude']) except AttributeError: pass msg = [] for fet in compare1: failed = [] feature_exclude_list = exclude_list.copy() # Get the information too from the pts_data try: feature_exclude_list.extend(self.pts_datafile[fet]['exclude']) except (KeyError, AttributeError): pass for dev in compare1[fet]: # Only compare for the specified devices if dev not in devices: continue dev_exclude = feature_exclude_list.copy() try: dev_exclude.extend(compare1[fet][dev].exclude) # TODO - better fix, dev_exclude.remove(None) except (AttributeError, ValueError): pass diff = Diff(compare1[fet][dev], compare2[fet][dev], exclude=dev_exclude) diff.findDiff() if len(diff.diffs): failed.append((dev, diff)) if failed: msg.append('\n' + '*' * 10) msg.append("Comparison between {pts} and " "{OPS} is different for feature '{f}' " "for device:\n".format(pts=pts, OPS=pts_compare, f=fet)) for device, diff in failed: msg.append("'{d}'\n{diff}".format(d=device, diff=diff)) else: message = "Comparison between {pts} and "\ "{OPS} is identical\n".format(pts=pts, OPS=pts_compare) # print out message log.info(message) if msg: self.builtin.fail('\n'.join(msg)) message = 'All Feature were identical on all devices' self.builtin.pass_execution(message)