def amend_test_result(self, matching_flows, stats, params):
    # Output info for each session flow of interest
    for session_id in matching_flows:
        flow = matching_flows[session_id]
        if "test_exception" in flow and \
           flow["test_exception"] is not None:
            if flow["test_exception"] not in self.message_list:
                self.message_list.append(flow["test_exception"])
        elif "test_format" in flow:
            msg = Output.populate_format(flow, flow["test_format"])
            self.message_list.append(msg)

    # TODO
    stats["idle_threshold_seconds"] = params["idle_threshold_seconds"]
    stats["filter_string"] = params["filter_string"]

    if self.status == Output.Status.OK:
        try:
            sformat = params["entry_tests"]["result"]["PASS"]
        except KeyError:
            sformat = "Missing output format"
    else:
        try:
            sformat = params["entry_tests"]["result"]["FAIL"]
        except KeyError:
            sformat = "Missing output format"

    self.message = Output.populate_format(stats, sformat)
    return True

def amend_test_result(self, entry):
    message = "No message format found..."
    if "test_exception" in entry and \
       entry["test_exception"] is not None:
        message = entry["test_exception"]
    elif "test_format" in entry:
        message = Output.populate_format(entry, entry["test_format"])
    self.message = message
    return True

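# A minimal sketch (hypothetical entry) of what the amend_test_result()
# helpers above consume: eval_entry_by_tests()/eval_entry_by_test() below
# annotate each entry with "test_status", "test_matched", "test_index",
# "test_format" and "test_exception"; the amend helpers only read
# "test_exception" and "test_format". Output.populate_format() is assumed to
# substitute entry fields into the format string; the "name"/"state" fields
# and values here are illustrative only.
EXAMPLE_EVALUATED_ENTRY = {
    "name": "lan-intf",
    "state": "DOWN",
    "test_status": Output.Status.FAIL,
    "test_matched": {"test": "state == 'DOWN'", "status": "FAIL"},
    "test_index": 0,
    "test_format": "{name} is {state}",
    "test_exception": None,
}
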
def eval_entry_by_tests(self, entry, entry_tests):
    """
    *entry*        Entry from a json reply list, Linux command output, etc.
    *entry_tests*  Dictionary of tests to run against this entry
    """
    no_match = {"status": None}
    defaults = {}
    tests = entry_tests["tests"]
    if "no_match" in entry_tests:
        no_match = entry_tests["no_match"]
    if "defaults" in entry_tests:
        defaults = entry_tests["defaults"]

    self.parser.json_entry = entry
    if self.debug:
        print("------- eval_entry_by_tests: the entry --------")
        pprint.pprint(self.parser.json_entry)
        print("------- eval_entry_by_tests: the tests --------")
        pprint.pprint(entry_tests)
        print("-------------------------------------------------")

    return_status = None
    test_index = 0
    for test in tests:
        current_status = self.eval_entry_by_test(entry, test_index, test,
                                                 defaults)
        if current_status is not None:
            return_status = current_status
            break
        test_index += 1

    if return_status is None:
        # No test matched; apply the no_match status/format
        return_status = Output.text_to_status(no_match["status"])
        entry["test_status"] = return_status
        entry["test_matched"] = None
        entry["test_exception"] = None
        if "format" in no_match:
            entry["test_format"] = no_match["format"]
        if self.debug:
            print(f"test_entry[#N/A]: return_status None -> "
                  f"{Output.status_to_text(return_status)} ({return_status})")

    return return_status

def eval_entry_by_test(self, entry, test_index, test_entry, defaults):
    """
    *entry*       Entry from a json reply list, Linux command output, etc.
    *test_index*  Index of this test in the entry_tests list
    *test_entry*  Test (dictionary) to run against this entry
    *defaults*    Default values used if not present in test_entry

    Returns None if:
    (1) the entry did not match the test
    (2) the entry matched but no status could be found
    """
    fname = "eval_entry_by_test"
    return_status = None
    if self.debug:
        print(f"{fname}[#{test_index}]: check entry against "
              f"{test_entry['test']}")

    try:
        tokens = self.lexer.tokenize(test_entry["test"])
        result = self.parser.parse(tokens)
        if self.debug:
            print(f"{fname}[#{test_index}]: parse({test_entry['test']}) -> "
                  f"RESULT:{result}")
    except Exception as e:
        return_status = Output.Status.FAIL
        entry["test_status"] = return_status
        entry["test_matched"] = test_entry
        entry["test_index"] = test_index
        entry["test_exception"] = (
            f"Rule #{test_index}: {e.__class__.__name__} exception '{e}'")
        if self.debug:
            print(f"{fname}[#{test_index}]: EXCEPTION "
                  f"{e.__class__.__name__} {e}")
        return return_status

    matched = self.true_value(result)
    if matched:
        if "status" in test_entry:
            status = test_entry["status"]
        elif "status" in defaults:
            status = defaults["status"]
        else:
            return None

        format_string = None
        if "format" in test_entry:
            format_string = test_entry["format"]
        elif "format" in defaults:
            format_string = defaults["format"]

        return_status = Output.text_to_status(status)
        if self.debug:
            print(f"{fname}[#{test_index}]: {status} matched -> "
                  f"{Output.text_to_status(status)}({status})")
        entry["test_status"] = return_status
        entry["test_matched"] = test_entry
        entry["test_index"] = test_index
        entry["test_format"] = format_string
        entry["test_exception"] = None

    return return_status

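# A minimal sketch (hypothetical values) of the entry_tests dictionary that
# eval_entry_by_tests()/eval_entry_by_test() above expect: "tests" is an
# ordered list of rules, each with a "test" expression and optional
# "status"/"format"; "defaults" supplies status/format when a rule omits
# them; "no_match" applies when no rule matches. The expression syntax shown
# here is illustrative only; the real grammar is whatever self.lexer and
# self.parser accept.
EXAMPLE_ENTRY_TESTS = {
    "defaults": {"status": "FAIL"},
    "no_match": {"status": "PASS", "format": "{name}: no test matched"},
    "tests": [
        {"test": "state == 'OPER_DOWN'", "status": "FAIL",
         "format": "{name} is {state}"},
        {"test": "state == 'OPER_UP'", "status": "PASS"},
    ],
}
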
def run(self, local_info, router_context, gql_token, fp):
    """
    This test uses the gql engine to learn gateway IP addresses and ping
    them, processing the latency results.

    Note: the ping timeout is apparently in seconds, not in ms as suggested
    by the graphql documentation.
    """
    test_info = self.test_info(local_info, router_context)
    self.output.test_start(test_info)

    params = self.get_params()

    # First, query allRouters...nodes...networkInterfaces... to determine
    # the router and gateway
    #   API = allRouters
    #   Fields = name
    intf_list = params["network-interfaces"]
    dev_list = params["device-interfaces"]
    network_exclude_tests = params["network_exclude_tests"]
    address_exclude_tests = params["address_exclude_tests"]

    # backwards compatibility
    try:
        if params["static-address"]:
            address_mode = "static"
        else:
            address_mode = "dynamic"
    except KeyError:
        address_mode = params["address_mode"]

    if self.debug:
        print('------ Network Interfaces to Process ------')
        pprint.pprint(intf_list)

    devint_fields = "state { operationalStatus adminStatus redundancyStatus }"
    qr = gql_helper.NodeGQL("allRouters", ['name'],
                            [router_context.get_router()], debug=self.debug)
    qn = gql_helper.NodeGQL("nodes", ['name', 'assetId'])
    qd = gql_helper.NodeGQL(
        "deviceInterfaces",
        ['name', 'type', 'sharedPhysAddress', devint_fields], dev_list)
    qi = gql_helper.NodeGQL("networkInterfaces", [
        'name', 'type',
        'state { addresses { ipAddress gateway prefixLength } }'
    ], intf_list)
    qa = gql_helper.NodeGQL("addresses",
                            ['ipAddress', 'gateway', 'prefixLength'])
    qr.add_node(qn)
    qn.add_node(qd)
    qd.add_node(qi)
    qi.add_node(qa)

    json_reply = {}
    if not self.send_query(qr, gql_token, json_reply):
        return self.output.test_end(fp)

    flatter_json = qr.flatten_json(
        json_reply, 'router/nodes/deviceInterfaces/networkInterfaces', '/')

    ni_name_key = 'name'
    router_context.set_allRouters_node_type(flatter_json)
    self._workaround_graphql_api(flatter_json)

    if self.debug:
        print('........ flattened list ..........')
        pprint.pprint(flatter_json)

    engine = EntryTest.Parser(debug=self.debug)

    self.output.progress_start(fp)

    gateway_success_count = 0
    gateway_fail_count = 0
    gateway_count = 0

    stats = {}
    Output.init_result_stats(stats)
    stats["total_count"] = len(flatter_json)
    stats["address_total_count"] = len(flatter_json)
    stats["address_exclude_count"] = 0

    for netintf in flatter_json:
        if self.debug:
            print('%%% process NI for Ping %%%')
            pprint.pprint(netintf)
        if engine.exclude_entry(netintf, network_exclude_tests):
            stats["exclude_count"] += 1
            continue
        address = ''
        gateway = ''
        try:
            addresses = self.get_address_list(netintf)
            if self.debug:
                print('%%%% process address for Ping %%%%')
                pprint.pprint(addresses)
            egress_interface = netintf["name"]
            stats["address_total_count"] += len(addresses)
            for address_entry in addresses:
                if engine.exclude_entry(address_entry, address_exclude_tests):
                    stats["address_exclude_count"] += 1
                    continue
                address = address_entry['ipAddress']
                gateway = address_entry['gateway']
                prefix_length = int(address_entry['prefixLength'])
                target = gateway
                dest_ip = None
                if gateway is None:  # hack for ipv4 only!
                    # Derive the peer address on a /31 or /30 point-to-point
                    # link by flipping the host bit(s).
                    hack = int(ipaddress.IPv4Address(address))
                    if prefix_length == 31:
                        gateway_hack = hack & 0xfffffffe | -(hack & 0x01) + 1
                        gateway_ip = ipaddress.IPv4Address(gateway_hack)
                        dest_ip = str(gateway_ip)
                        gateway = ''
                        target = dest_ip
                    elif prefix_length == 30:
                        gateway_hack = hack & 0xfffffffc | -(hack & 0x03) + 3
                        gateway_ip = ipaddress.IPv4Address(gateway_hack)
                        dest_ip = str(gateway_ip)
                        gateway = ''
                        target = dest_ip
                    else:
                        self.output.proc_cannot_ping_no_gateway(
                            netintf, ni_name_key)
                        gateway_count += 1
                        continue

                try:
                    oper_status = netintf[
                        'router/nodes/deviceInterfaces/state/operationalStatus']
                    if oper_status != 'OPER_UP':
                        self.output.proc_cannot_ping_dev_status(
                            netintf, ni_name_key, oper_status)
                        gateway_count += 1
                        # continue
                        break
                except KeyError:
                    # Continue as there is no operationalStatus to evaluate
                    pass

                # Invoke the graphql PING API
                ping_count = 0
                if dest_ip is None:
                    if "destination-ip" in params and \
                       params["destination-ip"] != '':
                        dest_ip = params["destination-ip"]
                    else:
                        dest_ip = gateway
                size = params["size"]
                timeout = params["timeout"]
                seqno = params["sequence"]
                router = netintf["router/name"]
                node = netintf["node_name"]
                identifier = params["identifier"]
                total_response_time = float(0)
                average_response_time = float(0)
                ping_success_count = 0
                ping_fail_count = 0
                while ping_count < params["iterations"]:
                    argstr = f'routerName: "{router}"'
                    argstr += f', nodeName: "{node}"'
                    argstr += f', identifier: {identifier}'
                    argstr += f', egressInterface: "{egress_interface}"'
                    if dest_ip != '':
                        argstr += f', destinationIp: "{dest_ip}"'
                    if gateway != '':
                        argstr += f', gatewayIp: "{gateway}"'
                    argstr += f', sequence: {seqno}'
                    if size != '':
                        argstr += f', size: {size}'
                    argstr += f', timeout: {timeout}'
                    if self.debug:
                        print(f'argstr={argstr}')

                    # display progress in-place as does 128status.sh...
                    now_message = (
                        f"NI {netintf[ni_name_key]}: ping {gateway} "
                        f"{ping_count}/{params['iterations']} "
                        f"tmo={params['timeout']}s")
                    self.output.progress_display(now_message, fp)

                    qp = gql_helper.RawGQL(
                        f'ping({argstr}) ' +
                        '{ status statusReason reachable sequence ttl responseTime }',
                        debug=self.debug)
                    json_ping_reply = {}
                    qp.send_query(gql_token, json_ping_reply)
                    # standard graphql error processing may not be appropriate
                    # here, as a ping failure can be part of the test process
                    # without ruining the test.
                    ping_count += 1
                    seqno += 1
                    try:
                        # "0" used < 4.2.0; "SUCCESS" used in 4.2.0+
                        json_ping_reply = json_ping_reply['ping']
                        if json_ping_reply['reachable'] == True and \
                           json_ping_reply['status'] in ("0", "SUCCESS"):
                            ping_success_count += 1
                            total_response_time += float(
                                json_ping_reply['responseTime'])
                            average_response_time = total_response_time / float(
                                ping_success_count)
                        else:
                            ping_fail_count += 1
                    except (KeyError, TypeError):
                        self.output.proc_no_data_in_reply(
                            netintf, ni_name_key, gateway)
                        ping_fail_count += 1
                        gateway_count += 1
                        continue

                if ping_count == ping_success_count:
                    # fix this for multiple matching entries
                    gateway_success_count += 1
                    self.output.proc_ping_result_pass(
                        netintf, ni_name_key, ping_count, ping_success_count,
                        target, average_response_time)
                else:
                    gateway_fail_count += 1
                    self.output.proc_ping_result_fail(
                        netintf, ni_name_key, ping_count, ping_fail_count,
                        target, average_response_time)
                gateway_count += 1

        except TypeError:
            self.output.proc_no_address_in_reply(netintf, ni_name_key)
            continue

    status = self.output.status
    if gateway_count == 0:
        status = Output.Status.WARN
    if gateway_count != gateway_success_count:
        status = Output.Status.FAIL

    self.output.proc_test_result(status, gateway_count, gateway_success_count,
                                 gateway_fail_count, params)
    return self.output.test_end(fp)

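# A standalone sketch of the /31 and /30 gateway derivation used in run()
# above when an address entry has no gateway: flipping the host bit(s) yields
# the other usable address on the point-to-point link, which is assumed to be
# the far end. The helper name is hypothetical; it relies on this module's
# existing "import ipaddress".
def _example_derive_ptp_peer(address, prefix_length):
    hack = int(ipaddress.IPv4Address(address))
    if prefix_length == 31:
        # e.g. 10.0.0.0/31 -> 10.0.0.1 and 10.0.0.1/31 -> 10.0.0.0
        peer = hack & 0xfffffffe | -(hack & 0x01) + 1
    elif prefix_length == 30:
        # e.g. 10.0.0.1/30 -> 10.0.0.2 and 10.0.0.2/30 -> 10.0.0.1
        peer = hack & 0xfffffffc | -(hack & 0x03) + 3
    else:
        return None
    return str(ipaddress.IPv4Address(peer))
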
def run(self, local_info, router_context, gql_token, fp):
    flowEntryFields = [
        'sourceIp', 'destIp', 'sourcePort', 'destPort', 'vlan', 'devicePort',
        'protocol', 'sessionUuid', 'natIp', 'natPort', 'serviceName',
        'tenant', 'encrypted', 'inactivityTimeout', 'deviceInterfaceName',
        'networkInterfaceName', 'startTime', 'forward'
    ]

    test_info = self.test_info(local_info, router_context)
    self.output.test_start(test_info)

    params = self.get_params()
    try:
        idle_threshold_seconds = params["idle_threshold_seconds"]
        idle_maximum_seconds = params["idle_maximum_seconds"]
        max_sessions = params["max_sessions_to_query"]
        filter_string = params["filter_string"]
        match_port = params["match_port"]
    except Exception as e:
        # TODO: Improve error handling
        print("CONFIG ERROR\n")
        return Output.Status.FAIL

    exclude_tests = []
    if "exclude_tests" in params:
        exclude_tests = params["exclude_tests"]

    flow_entry_suffix = f'(first: {max_sessions}, filter: "\\"\\"~\\"{filter_string}\\"")'

    if local_info.get_router_name() == router_context.get_router() and \
       local_info.get_node_type() == 'conductor':
        # Check Error output
        self.output.unsupported_node_type(local_info)
        return Output.Status.WARN

    qr = gql_helper.NodeGQL("allRouters", ['name'],
                            [router_context.get_router()], debug=self.debug)
    qn = gql_helper.NodeGQL("nodes", ['name'])
    qf = gql_helper.NodeGQL(f"flowEntries{flow_entry_suffix}",
                            flowEntryFields)
    qr.add_node(qn)
    qn.add_node(qf)

    json_reply = {}
    if not self.send_query(qr, gql_token, json_reply):
        return self.output.test_end(fp)

    # Unfortunately jmespath is buggy and does not work well for integers :-(
    # This is unfortunate, as the hope was to use a jmespath expression
    # to eliminate all valid sessions (however that might be defined)
    flatter_json = qr.flatten_json(json_reply, 'router/nodes/flowEntries', '/')

    if self.debug:
        print('........ flattened list ..........')
        pprint.pprint(flatter_json)

    matching_flows = {}
    session_flow_counts = {}

    stats = {}
    Output.init_result_stats(stats)
    stats["total_count"] = len(flatter_json)
    stats["session_flow_count"] = 0

    engine = EntryTest.Parser(debug=self.debug)
    for flow in flatter_json:
        try:
            uuid = flow['sessionUuid']
            if engine.exclude_entry(flow, exclude_tests):
                stats["exclude_count"] += 1
                continue
            if uuid not in session_flow_counts:
                session_flow_counts[uuid] = 1
            else:
                session_flow_counts[uuid] += 1
            test_status = engine.eval_entry_by_tests(flow,
                                                     params["entry_tests"])
            Output.update_stats(stats, test_status)
            if test_status == Output.Status.FAIL:
                # Note that this must be configured in the parameters just so
                # the value can be used in this calculation
                delta = idle_maximum_seconds - flow['inactivityTimeout']
                flow["test_idle_duration"] = delta
                if uuid not in matching_flows or \
                   matching_flows[uuid]["test_idle_duration"] < delta:
                    matching_flows[uuid] = flow
        except (KeyError, TypeError) as e:
            flow["test_exception"] = f"Flow Exception: {e}"
            continue

    stats["session_flow_count"] = len(session_flow_counts)

    status = Output.Status.FAIL
    if len(matching_flows) == 0:
        status = Output.Status.OK
    self.output.proc_test_result(status, matching_flows, stats, params)
    return self.output.test_end(fp)

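# A minimal sketch (hypothetical values) of the parameter block this idle-flow
# test reads; the keys mirror the params[...] lookups above, and the
# "entry_tests"/"result" formats are the ones consumed by amend_test_result().
# The test expression and format strings are illustrative only and assume
# populate_format() substitutes stats fields such as FAIL, total_count,
# idle_threshold_seconds and filter_string.
EXAMPLE_IDLE_FLOW_PARAMS = {
    "idle_threshold_seconds": 300,
    "idle_maximum_seconds": 1800,
    "max_sessions_to_query": 1000,
    "filter_string": "",
    "match_port": 0,
    "exclude_tests": [],
    "entry_tests": {
        "defaults": {"status": "FAIL"},
        "no_match": {"status": "PASS"},
        "tests": [
            {"test": "inactivityTimeout < 100",
             "format": "session {sessionUuid} idle "
                       "(timeout {inactivityTimeout}s)"},
        ],
        "result": {
            "PASS": "No idle flows matched filter '{filter_string}'",
            "FAIL": "{FAIL} of {total_count} flows exceeded the "
                    "{idle_threshold_seconds}s idle threshold",
        },
    },
}
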
def run(self, local_info, router_context, gql_token, fp):
    """
    This test uses the gql engine to get peer reachability status.
    """
    test_info = self.test_info(local_info, router_context)
    self.output.test_start(test_info)

    params = self.get_params()
    # TODO figure out what the include_list is, a list of peers?
    include_list = params["include_list"]
    exclusions = params["exclude_tests"]
    entry_tests = params["entry_tests"]

    #   API = allNodes
    #   Fields = name
    qr = gql_helper.NodeGQL("allRouters", ['name'],
                            [router_context.get_router()], debug=self.debug)
    qp = gql_helper.NodeGQL("peers", [
        'name',
        'paths { node adjacentNode deviceInterface networkInterface adjacentAddress status }'
    ], include_list)
    qr.add_node(qp)

    json_reply = {}
    if not self.send_query(qr, gql_token, json_reply):
        return self.output.test_end(fp)

    # this query is not working correctly, unfortunately... even UP paths
    # are returned
    flatter_json = qr.flatten_json(json_reply, 'router/peers/paths')
    router_context.set_allRouters_node_type(flatter_json, 'node')

    if self.debug:
        print('........ flattened list ..........')
        pprint.pprint(flatter_json)

    paths_per_peer = {}
    failed_peer_paths = {}

    stats = {}
    Output.init_result_stats(stats)
    stats["total_count"] = len(flatter_json)
    stats["failed_peer_count"] = 0
    stats["tested_peer_count"] = 0

    engine = EntryTest.Parser(self.debug)
    for path in flatter_json:
        try:
            if engine.exclude_entry(path, exclusions):
                stats["exclude_count"] += 1
                continue
            peer_name = path['router/peers/name']
            test_result = engine.eval_entry_by_tests(path, entry_tests)
            Output.update_stats(stats, test_result)
            if peer_name in paths_per_peer:
                paths_per_peer[peer_name] += 1
            else:
                paths_per_peer[peer_name] = 1
                stats["tested_peer_count"] += 1
            if test_result == Output.Status.FAIL:
                if peer_name in failed_peer_paths:
                    failed_peer_paths[peer_name] += 1
                else:
                    failed_peer_paths[peer_name] = 1
                    stats["failed_peer_count"] += 1
                    self.output.proc_failed_peer(path, peer_name)
                self.output.proc_failed_path(path)
        except KeyError:
            pass

    status = Output.Status.OK
    if stats["FAIL"] > 0:
        status = Output.Status.FAIL

    self.output.proc_test_result(entry_tests, stats, status=status)
    return self.output.test_end(fp)

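# A minimal sketch (hypothetical rule) of an entry_tests block for the peer
# path test above. Each flattened path entry carries the fields requested in
# the query (node, adjacentNode, deviceInterface, networkInterface,
# adjacentAddress, status) plus 'router/peers/name'; the expression and
# format strings are illustrative only.
EXAMPLE_PEER_PATH_TESTS = {
    "no_match": {"status": "PASS"},
    "tests": [
        {"test": "status != 'UP'", "status": "FAIL",
         "format": "peer path via {networkInterface} is {status}"},
    ],
}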