def print_result(
    result: Result,
    host: Optional[str] = None,
    nr_vars: Optional[List[str]] = None,
    failed: bool = False,
    severity_level: int = logging.INFO,
) -> None:
    updated_agg_result = AggregatedResult(result.name)
    for hostname, multi_result in result.items():
        updated_multi_result = MultiResult(result.name)
        for r in multi_result:
            # drop placeholder results for skipped tasks so they are not printed
            if isinstance(r.result, str) and r.result.startswith("Task skipped"):
                continue
            updated_multi_result.append(r)
        if updated_multi_result:
            updated_agg_result[hostname] = updated_multi_result
    # nothing left to print once skipped tasks are filtered out
    if not updated_agg_result:
        return
    with LOCK:
        _print_result(updated_agg_result, host, nr_vars, failed, severity_level)

def test_nornsible_print_task_no_results():
    test_result = AggregatedResult("testresult")
    test_result["localhost"] = MultiResult("testresult")
    test_result["localhost"].append(
        Result(host="localhost", result="Task skipped", failed=False, changed=False)
    )
    output = print_result(test_result)
    assert output is None

def test_nornsible_print_task_results(capfd):
    test_result = AggregatedResult("testresult")
    test_result["localhost"] = MultiResult("testresult")
    test_result["localhost"].append(
        Result(host="localhost", result="stuff happening!", failed=False, changed=False)
    )
    print_result(test_result)
    std_out, std_err = capfd.readouterr()
    assert "stuff happening" in std_out

def wrapper(
    wrapped: Callable[..., Any],
    instance: object,
    args: List[Any],
    kwargs: Dict[str, Any],
) -> MultiResult:
    # capture the loop parameters from the enclosing scope
    loop_record = LoopRecord(
        values=values,
        placeholder=placeholder,
        reset_conns=reset_conns,
    )
    # the task is either the first positional argument or the "task" kwarg
    if len(args) > 0:
        task = args[0]
    else:
        task = kwargs["task"]
    result = MultiResult(name=f"{task.name} - loop")
    # run the wrapped task once per value, injecting the current value under
    # the placeholder keyword; propagate failed/changed from each iteration
    for value in values:
        kwargs.update({placeholder: value})
        result.append(wrapped(*args, **kwargs))
        if result[-1].failed:
            result.failed = True
        if result[-1].changed:
            result.changed = True
    if loop_record.reset_conns:
        task.host.close_connections()
    return result

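# A sketch of how `wrapper` above is presumably wired up: it closes over
# `values`, `placeholder` and `reset_conns`, which points at a decorator
# factory along these lines. The factory name `make_loop` and the example
# task below are assumptions for illustration only; `wrapt.decorator` is the
# real API that supplies the (wrapped, instance, args, kwargs) calling
# convention used by `wrapper`.
import wrapt


def make_loop(
    values: List[Any], placeholder: str, reset_conns: bool = False
) -> Callable[..., Any]:
    @wrapt.decorator
    def wrapper(
        wrapped: Callable[..., Any],
        instance: object,
        args: List[Any],
        kwargs: Dict[str, Any],
    ) -> MultiResult:
        # body exactly as in `wrapper` above, reading the loop parameters
        # (values/placeholder/reset_conns) from this enclosing scope
        ...

    return wrapper


# The decorated task then runs once per value, with the current value
# injected under the `placeholder` keyword argument:
#
# @make_loop(values=["Gi1", "Gi2"], placeholder="interface", reset_conns=True)
# def get_interface(task, interface):
#     ...
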
def create_multi_result(results: List[Result], task_name: str) -> MultiResult:
    multi_result = MultiResult(task_name)
    multi_result += results
    return multi_result

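# A minimal usage sketch for create_multi_result; the host, result strings
# and task names below are hypothetical and only illustrate the call shape.
def test_create_multi_result_combines_results():
    results = [
        Result(host="localhost", result="step 1 done", name="step_1"),
        Result(host="localhost", result="step 2 done", name="step_2"),
    ]
    combined = create_multi_result(results, task_name="combined_steps")
    assert len(combined) == 2
    assert combined.name == "combined_steps"
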
def print_table_result(
    result: ScrapliResult,
    failed: bool = False,
    severity_level: int = logging.INFO,
    parser: str = "textfsm",
    to_dict: bool = True,
    fail_to_string: bool = False,
) -> None:
    """
    Prints the :obj:`nornir.core.task.Result` from a previous task to screen as a rich table

    Arguments:
        result: Result from a previous task
        failed: if `True`, assume the task failed
        severity_level: print only errors with this severity level or higher
        parser: textfsm|genie -- parser to parse output with
        to_dict: output structured data in dict form instead -- basically put
            k:v instead of just lists of lists of values for textfsm output;
            ignored if parser == "genie"
        fail_to_string: fall back to printing unstructured output instead of
            having tasks skipped (print_result won't print the empty lists
            scrapli returns when parsing fails)

    """
    updated_agg_result = AggregatedResult(result.name)
    console = Console()
    table = Table(
        "Hostname",
        "Version",
        "Platform",
        "Image ID",
        "Image Type",
        "Uptime",
        "System Image",
        "Compiled Date",
    )
    for hostname, multi_result in result.items():
        updated_multi_result = MultiResult(result.name)
        for individual_result in multi_result:
            scrapli_responses = getattr(individual_result, "scrapli_response", None)
            if isinstance(scrapli_responses, Response):
                scrapli_responses = [scrapli_responses]
            if not scrapli_responses:
                updated_multi_result.append(individual_result)
                continue
            for scrapli_response in scrapli_responses:
                parser_method = getattr(scrapli_response, f"{parser}_parse_output")
                updated_result = Result(
                    host=individual_result.host,
                    changed=individual_result.changed,
                    diff=individual_result.diff,
                    exception=individual_result.exception,
                    failed=individual_result.failed,
                    name=individual_result.name,
                    severity_level=individual_result.severity_level,
                    stderr=individual_result.stderr,
                    stdout=individual_result.stdout,
                )
                if parser == "textfsm":
                    structured_result = parser_method(to_dict=to_dict)
                else:
                    structured_result = parser_method()
                if not structured_result and fail_to_string:
                    updated_result.result = scrapli_response.result
                else:
                    updated_result.result = structured_result
                updated_multi_result.append(updated_result)
                try:
                    version = structured_result["version"]
                    table.add_row(
                        f"[green]{version['hostname']}[/green]",
                        f"[blue]{version['version']}[/blue]",
                        f"[magenta]{version['platform']}[/magenta]",
                        f"[cyan]{version['image_id']}[/cyan]",
                        f"[orange1]{version['image_type']}[/orange1]",
                        f"[bright_green]{version['uptime']}[/bright_green]",
                        f"[magenta]{version['system_image']}[/magenta]",
                        f"[yellow]{version['compiled_date']}[/yellow]",
                    )
                except (KeyError, TypeError):
                    # output did not parse into the expected "version" structure
                    # (e.g. a command other than "show version"); nothing to tabulate
                    print("This command is not supported in Table format!")
        if updated_multi_result:
            updated_agg_result[hostname] = updated_multi_result  # noqa

    with LOCK:
        console.print(table)

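# A sketch of how print_table_result is typically driven (assuming an
# inventory of Cisco IOS-XE devices and the nornir_scrapli send_command task;
# the config file path and command are illustrative). The table above expects
# the genie-style {"version": {...}} structure produced by "show version":
#
# from nornir import InitNornir
# from nornir_scrapli.tasks import send_command
#
# nr = InitNornir(config_file="config.yaml")
# agg_result = nr.run(task=send_command, command="show version")
# print_table_result(agg_result, parser="genie")
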
def task_instance_completed(
    self, task: Task, host: Host, result: MultiResult
) -> None:
    """Diff files with current task result"""
    # get previous results data
    index = min(self.last - 1, len(self.aliases_data[self.diff][host.name]) - 1)
    prev_res_alias_data = self.aliases_data[self.diff][host.name][index]
    prev_res_filename = prev_res_alias_data["filename"]

    # decide on results formatter to use
    data_format = "raw"
    if prev_res_filename.endswith("json"):
        data_format = "json"
    elif prev_res_filename.endswith("yaml"):
        data_format = "yaml"
    elif prev_res_filename.endswith("py"):
        data_format = "pprint"

    # open previous results file
    with open(prev_res_filename, mode="r", encoding="utf-8") as f:
        prev_result = f.read()

    # run diff on a per-task basis
    if self.diff_per_task:
        for i in result:
            # check if this task's results need to be skipped
            exception = (
                str(i.exception)
                if i.exception is not None
                else i.host.get("exception", None)
            )
            if hasattr(i, "skip_results") and i.skip_results is True and not exception:
                continue
            new_result = formatters[data_format](i.result) + "\n"
            # check if task results exist in the previous results file
            if i.name not in prev_res_alias_data["tasks"]:
                i.diff = "'{}' task results not in '{}'".format(
                    i.name, prev_res_filename
                )
                continue
            # run diff using only the portion of the prev_result file that
            # holds the given task's results
            spans = prev_res_alias_data["tasks"][i.name]
            difference = self._run_diff(
                prev_result=prev_result[spans[0] : spans[1]],
                new_result=new_result,
                fromfile="old {}".format(prev_res_filename),
                tofile="new results",
            )
            i.diff = "".join(difference)
    # make new task results text and run diff for the whole of it
    else:
        new_result = ""
        for i in result:
            # check if this task's results need to be skipped
            exception = (
                str(i.exception)
                if i.exception is not None
                else i.host.get("exception", None)
            )
            if hasattr(i, "skip_results") and i.skip_results is True and not exception:
                continue
            new_result += formatters[data_format](i.result) + "\n"
        # run diff
        difference = self._run_diff(
            prev_result,
            new_result,
            fromfile="old {}".format(prev_res_filename),
            tofile="new results",
        )
        # pop other results and add diff results
        while result:
            _ = result.pop()
        result.append(
            Result(host, result="".join(difference), name="{}_diff".format(self.diff))
        )

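# `formatters[data_format]` above dispatches on the strings
# "raw"/"json"/"yaml"/"pprint" derived from the previous results file's
# extension. A minimal sketch of what such a dispatch table can look like,
# named formatters_sketch here so as not to shadow the real module-level
# table (the serializers actually used in the source may differ):
import json
import pprint

import yaml

formatters_sketch = {
    "raw": str,
    "json": lambda data: json.dumps(data, indent=4),
    "yaml": lambda data: yaml.safe_dump(data, default_flow_style=False),
    "pprint": pprint.pformat,
}
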
def print_structured_result(
    result: ScrapliResult,
    failed: bool = False,
    severity_level: int = logging.INFO,
    parser: str = "textfsm",
    to_dict: bool = True,
    fail_to_string: bool = False,
) -> None:
    """
    Prints the :obj:`nornir.core.task.Result` from a previous task to screen

    Arguments:
        result: Result from a previous task
        failed: if `True`, assume the task failed
        severity_level: print only errors with this severity level or higher
        parser: textfsm|genie -- parser to parse output with
        to_dict: output structured data in dict form instead -- basically put
            k:v instead of just lists of lists of values for textfsm output;
            ignored if parser == "genie"
        fail_to_string: fall back to printing unstructured output instead of
            having tasks skipped (print_result won't print the empty lists
            scrapli returns when parsing fails)

    """
    updated_agg_result = AggregatedResult(result.name)
    for hostname, multi_result in result.items():
        updated_multi_result = MultiResult(result.name)
        for individual_result in multi_result:
            scrapli_responses = getattr(individual_result, "scrapli_response", None)
            if isinstance(scrapli_responses, Response):
                scrapli_responses = [scrapli_responses]
            if not scrapli_responses:
                updated_multi_result.append(individual_result)
                continue
            for scrapli_response in scrapli_responses:
                parser_method = getattr(scrapli_response, f"{parser}_parse_output")
                updated_result = Result(
                    host=individual_result.host,
                    changed=individual_result.changed,
                    diff=individual_result.diff,
                    exception=individual_result.exception,
                    failed=individual_result.failed,
                    name=individual_result.name,
                    severity_level=individual_result.severity_level,
                    stderr=individual_result.stderr,
                    stdout=individual_result.stdout,
                )
                if parser == "textfsm":
                    structured_result = parser_method(to_dict=to_dict)
                else:
                    structured_result = parser_method()
                if not structured_result and fail_to_string:
                    updated_result.result = scrapli_response.result
                else:
                    updated_result.result = structured_result
                updated_multi_result.append(updated_result)
        if updated_multi_result:
            updated_agg_result[hostname] = updated_multi_result  # noqa

    with LOCK:
        _print_result(
            result=updated_agg_result,
            attrs=None,
            failed=failed,
            severity_level=severity_level,
        )

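# Usage sketch for print_structured_result, mirroring the table variant
# above; nornir_scrapli's send_command attaches the scrapli_response this
# function parses (the config file path and command are illustrative):
#
# from nornir import InitNornir
# from nornir_scrapli.tasks import send_command
#
# nr = InitNornir(config_file="config.yaml")
# agg_result = nr.run(task=send_command, command="show ip route")
# print_structured_result(agg_result, parser="textfsm", fail_to_string=True)
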
RAW_RESULT = "\n".join([IOSXE_SHOW_VERSION, IOSXE_SHOW_IP_ROUTE])

TEST_SCRAPLI_RESPONSE_ONE = Response(
    host="sea-ios-1", channel_input="show version", textfsm_platform="cisco_iosxe"
)
TEST_SCRAPLI_RESPONSE_ONE._record_response(result=IOSXE_SHOW_VERSION)
TEST_SCRAPLI_RESPONSE_TWO = Response(
    host="sea-ios-1", channel_input="show ip route", textfsm_platform="cisco_iosxe"
)
TEST_SCRAPLI_RESPONSE_TWO._record_response(result=IOSXE_SHOW_IP_ROUTE)
TEST_SCRAPLI_RESPONSE = [TEST_SCRAPLI_RESPONSE_ONE, TEST_SCRAPLI_RESPONSE_TWO]

TEST_HOST = Host(name="sea-ios-1")
TEST_AGG_RESULT = AggregatedResult("send_commands")
TEST_MULTI_RESULT = MultiResult("send_commands")
TEST_RESULT = Result(host=TEST_HOST, result=RAW_RESULT, name="send_commands")
setattr(TEST_RESULT, "scrapli_response", TEST_SCRAPLI_RESPONSE)
TEST_MULTI_RESULT.append(TEST_RESULT)
TEST_AGG_RESULT[TEST_HOST.name] = TEST_MULTI_RESULT


@pytest.mark.parametrize(
    "to_dict",
    [
        (
            True,
            "\x1b[1m\x1b[36msend_commands*******************************************************************\n\x1b[1m\x1b[34m* sea-ios-1 ** changed : False *************************************************\n\x1b[1m\x1b[32mvvvv send_commands ** changed : False vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO\n[ { 'config_register': '0x2102',\n 'hardware': ['CSR1000V'],\n 'hostname': 'csr1000v',\n 'mac': [],\n 'reload_reason': 'reload',\n 'rommon': 'IOS-XE',\n 'running_image': 'packages.conf',\n 'serial': ['9FKLJWM5EB0'],\n 'uptime': '2 hours, 43 minutes',\n 'version': '16.4.1'}]\n\x1b[1m\x1b[32m---- send_commands ** changed : False ------------------------------------------ INFO\n[ { 'distance': '',\n 'mask': '24',\n 'metric': '',\n 'network': '10.0.0.0',\n 'nexthop_if': 'GigabitEthernet1',\n 'nexthop_ip': '',\n 'protocol': 'C',\n 'type': '',\n 'uptime': ''},\n { 'distance': '',\n 'mask': '32',\n 'metric': '',\n 'network': '10.0.0.15',\n 'nexthop_if': 'GigabitEthernet1',\n 'nexthop_ip': '',\n 'protocol': 'L',\n 'type': '',\n 'uptime': ''}]\n\x1b[1m\x1b[32m^^^^ END send_commands ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
        ),
def task_instance_completed(
    self, task: Task, host: Host, result: MultiResult
) -> None:
    """
    Method to iterate over an individual host's results after task/sub-task completion.
    """
    # record the number of tasks so they can be cleaned up later if required
    if self.remove_tasks:
        self.len_tasks = len(result)

    # do the tests
    for test in self.tests:
        test = test.copy()

        # if test item is a list, transform it into a dictionary
        if isinstance(test, list):
            test = {
                "task": test[0],
                "test": test[1],
                "pattern": test[2],
                "name": test[3] if len(test) == 4 else None,
            }
            if test["test"] in ["eval", "EvalTest"]:
                test["expr"] = test.pop("pattern")

        # make sure we have a test name defined
        if not test.get("name"):
            test["name"] = "{} {} {}..".format(
                test["task"], test["test"], test.get("pattern", "")[:9]
            )

        # get task results to use; use all results
        if test.get("use_all_tasks") is True:
            test["result"] = result
        # use a subset of task results
        elif isinstance(test["task"], list):
            test["result"] = []
            for task_result in result:
                if task_result.name in test["task"]:
                    test["result"].append(task_result)
        # use results for a single task only
        else:
            # try to find the task by matching its name
            for task_result in result:
                if task_result.name == test["task"]:
                    test["result"] = task_result
                    break
            else:
                # use the first task if only one test and one task given
                tasks = [t for t in result if not hasattr(t, "skip_results")]
                if len(self.tests) == 1 and len(tasks) == 1:
                    test["result"] = tasks[0]
                else:
                    log.warning(
                        "nornir-salt:TestsProcessor: no results for task '{}'".format(
                            test["task"]
                        )
                    )
                    continue

        # get test function and function kwargs
        if test["test"] in test_functions_dispatcher:
            test_func = test_functions_dispatcher[test["test"]]["fun"]
            test.update(test_functions_dispatcher[test["test"]]["kwargs"])
        elif test["test"] in globals() and "Test" in test["test"]:
            test_func = globals()[test["test"]]
        else:
            raise NameError(
                "nornir-salt:TestsProcessor unsupported test function '{}'".format(
                    test["test"]
                )
            )

        # run the test
        try:
            # run test for data at the given path
            if test.get("path"):
                report_all = test.pop("report_all", False)
                res = [
                    test_func(host=host, result=item, **test)
                    for item in _get_result_by_path(
                        data=test.pop("result").result,
                        path=test.pop("path").split("."),
                        host=host,
                    )
                ]
                # leave only failed results
                if not report_all:
                    res = [i for i in res if not i.success]
                    # add a single successful test if no tests failed
                    if not res:
                        ret = test_result_template.copy()
                        ret.update(test)
                        _ = ret.pop("expr", None)
                        res = Result(host=host, **ret)
            else:
                res = test_func(host=host, **test)
        except Exception:
            msg = "nornir-salt:TestsProcessor run error:\n{}".format(
                traceback.format_exc()
            )
            log.error(msg)
            ret = test_result_template.copy()
            ret.update(test)
            ret.update({"result": "ERROR", "success": False, "exception": msg})
            res = Result(host=host, **ret)

        if isinstance(res, list):
            result.extend(res)
        else:
            result.append(res)

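# The processor above accepts each test definition either as a dictionary or
# as a 3/4-item list that gets normalised into one. A sketch of both
# equivalent shapes; the command and pattern are illustrative, and "contains"
# is used on the assumption it is registered in test_functions_dispatcher:
tests_example = [
    # list form: [task, test, pattern] plus an optional fourth "name" item
    ["show run | inc ntp", "contains", "ntp server 7.7.7.8"],
    # dictionary form, as produced by the normalisation step above
    {
        "task": "show run | inc ntp",
        "test": "contains",
        "pattern": "ntp server 7.7.7.8",
        "name": "check NTP config",
    },
]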