def Trigger(tc):
    """Run the mirror verification suite, then re-run it while one uplink is flapped.

    Returns an api.types.status code. When tc.skip_flap is set (by Setup),
    only the initial run is performed.
    """
    collector_info = utils.GetMirrorCollectorsInfo(tc.collector_wl, tc.collector_ip, tc.collector_type)

    # Baseline run with all uplinks up; RunAll returns a dict with 'res'.
    first_run = utils.RunAll(tc, tc.verif_json, 'mirror', collector_info, tc.IsBareMetal)
    if first_run['res'] != api.types.status.SUCCESS:
        return first_run['res']

    if tc.skip_flap:
        api.Logger.info("Skipping switch port flap")
        return api.types.status.SUCCESS

    # Kick off a background task that takes one switch port down and
    # brings it back after tc.port_down_time.
    flap_task = utils.GetSwitchPortFlapTask(api.GetNaplesHostnames(), 1, tc.port_down_time)
    flap_task.start()

    # Confirm the flap actually took effect (any uplink reports DOWN).
    link_status = utils.DetectUpLinkState(api.GetNaplesHostnames(), utils.PORT_OPER_STATUS_DOWN, any)
    if link_status != api.types.status.SUCCESS:
        api.Logger.error("Failed to detect any uplink in DOWN state.")
        flap_task.join(tc.port_down_time)
        return link_status

    # Re-run the verification while the uplink is down, then wait for the
    # flap task to restore the port before reporting the second run's result.
    second_run = utils.RunAll(tc, tc.verif_json, 'mirror', collector_info, tc.IsBareMetal)
    api.Logger.info("Waiting for switch flap thread to join..")
    flap_task.join(tc.port_down_time)
    return second_run['res']
def Setup(tc):
    """Prepare the mirror test: push mirror config and check uplink preconditions.

    Initializes per-testcase state on `tc`, pushes the first matching mirror
    policy JSON, resolves the collector workloads, and decides whether the
    uplink-flap portion of the test should be skipped (tc.skip_flap).
    Returns an api.types.status code.
    """
    # Per-testcase state, consumed by Trigger/Teardown.
    tc.skip_flap = False
    tc.newObjects = None
    tc.collector_ip = []
    tc.collector_wl = []
    tc.collector_type = []
    tc.wl_sec_ip_info = defaultdict(dict)
    tc.IsBareMetal = utils.IsBareMetal()
    tc.port_down_time = getattr(tc.args, "port_down_time", 60)

    # Pick the first mirror policy for the iterated protocol and its
    # matching verification spec.
    policy_list = utils.GetTargetJsons('mirror', tc.iterators.proto)
    first_policy = policy_list[0]
    tc.verif_json = utils.GetVerifJsonFromPolicyJson(first_policy)

    # Push the mirror objects to the agent.
    tc.newObjects = agent_api.AddOneConfig(first_policy)
    status = agent_api.PushConfigObjects(tc.newObjects)
    if status != api.types.status.SUCCESS:
        api.Logger.error("Unable to push mirror objects")
        # Clear the handle so later stages don't act on un-pushed objects.
        tc.newObjects = None
        return api.types.status.FAILURE

    # Populate secondary IP info for the workloads.
    utils.PopulateSecondaryAddress(tc)

    # Resolve collector workloads from the pushed objects.
    status = GetCollectorWorkloadFromObjects(tc)
    if status != api.types.status.SUCCESS:
        api.Logger.error("Failed to get collector workload")
        return status

    # If any uplink is already down, skip the flap portion rather than fail.
    status = utils.DetectUpLinkState(api.GetNaplesHostnames(), utils.PORT_OPER_STATUS_UP, all)
    if status != api.types.status.SUCCESS:
        api.Logger.error("All uplink on Nodes are not in UP state.")
        tc.skip_flap = True
        return api.types.status.SUCCESS

    # Flapping uplinks is not applicable in classic/unified NIC modes.
    if api.GetConfigNicMode() in ["classic", "unified"]:
        api.Logger.info(
            f"NIC mode: {api.GetConfigNicMode()}, Skipping uplink flap")
        tc.skip_flap = True

    api.Logger.info("All uplink on Nodes are UP!")

    # Bring up inband and reset the active link on bond.
    status = utils.SetupInbandInterface()
    if status != api.types.status.SUCCESS:
        return status
    return api.types.status.SUCCESS
def Setup(tc):
    """Prepare the bond uplink-failover test.

    Selects target nodes (only one node for bare-metal topologies), verifies
    all uplinks are UP, sets up the inband interface, and runs a baseline
    ping to the bond. Sets tc.skip when preconditions are not met.
    Returns an api.types.status code.
    """
    # Per-testcase state, consumed by Trigger/Verify.
    tc.cmd_cookies = []
    tc.interval = 0.1
    tc.resp = None
    tc.skip = False
    tc.uplink_fail_stage = False
    tc.nodes = api.GetNaplesHostnames()
    tc.is_bm_type = False

    # for baremetal topology, untag WL is configured on the uplink itself.
    # shutting down uplink on both nodes will bring down the WL.
    # so bring down uplink only on one naples
    bm_node = next((n for n in tc.nodes if api.IsBareMetalWorkloadType(n)), None)
    if bm_node is not None:
        tc.nodes = [bm_node]
        tc.is_bm_type = True

    tc.port_down_time = getattr(tc.args, "port_shut_time", 60)
    tc.failover_delay = getattr(tc.args, "failover_delay", 0)

    # All uplinks must be UP before the failover test; otherwise skip
    # (reported as SUCCESS so the run is not counted as a failure).
    status = bond_utils.DetectUpLinkState(tc.nodes, bond_utils.PORT_OPER_STATUS_UP, all)
    if status != api.types.status.SUCCESS:
        api.Logger.error("All uplink on %s are not in UP state." % tc.nodes)
        tc.skip = True
        return api.types.status.SUCCESS
    api.Logger.info("All uplink on %s are UP!" % tc.nodes)

    # Bring up inband and reset the active link on bond.
    status = bond_utils.SetupInbandInterface()
    if status != api.types.status.SUCCESS:
        return status

    # Baseline ping to the bond interface must pass before any flapping.
    trigger_ping_to_bond(tc)
    status = verify_ping_to_bond_result(tc)
    if status != api.types.status.SUCCESS:
        api.Logger.error("Ping failed during setup stage")
    return status
def Trigger(tc):
    """Exercise bond failover: flap one uplink, then both, pinging each time.

    Phase 1 drops a single uplink and verifies ping survives the failover.
    Phase 2 drops both uplinks, waits for the flap task to restore them,
    and issues a final ping (its result is presumably checked by a later
    Verify stage via tc.resp — confirm against the framework).
    Returns an api.types.status code.
    """
    tc.cmd_cookies = []
    if tc.skip:
        return api.types.status.SUCCESS

    # --- Phase 1: single-uplink flap ---------------------------------
    api.Logger.info("Bring down one uplink!")
    flap_task = bond_utils.GetSwitchPortFlapTask(tc.nodes, 1, tc.port_down_time)
    flap_task.start()
    # Give the bond time to fail over to the surviving link.
    time.sleep(tc.failover_delay)
    api.Logger.info("one uplink is brought down!")

    # Confirm at least one uplink actually reports DOWN.
    status = bond_utils.DetectUpLinkState(tc.nodes, bond_utils.PORT_OPER_STATUS_DOWN, any)
    if status != api.types.status.SUCCESS:
        api.Logger.error("Failed to detect any uplink in DOWN state.")
        flap_task.join(tc.port_down_time)
        return status

    tc.uplink_fail_stage = True

    # Ping must still work on the surviving uplink.
    trigger_ping_to_bond(tc)
    status = verify_ping_to_bond_result(tc)
    if status != api.types.status.SUCCESS:
        api.Logger.error("Ping failed after Uplink failover")
        flap_task.join(tc.port_down_time)
        return status

    # wait until the background task brings up the uplink
    flap_task.join()

    # --- Phase 2: flap both uplinks ----------------------------------
    flap_task = bond_utils.GetSwitchPortFlapTask(tc.nodes, 2, tc.port_down_time)
    flap_task.start()

    # Confirm every uplink reports DOWN.
    status = bond_utils.DetectUpLinkState(tc.nodes, bond_utils.PORT_OPER_STATUS_DOWN, all)
    if status != api.types.status.SUCCESS:
        api.Logger.error("Failed to detect all uplink in DOWN state.")
        flap_task.join(tc.port_down_time)
        return status

    api.Logger.info(
        f"Waiting until {tc.port_down_time}secs to bring up both uplinks after flap!")
    # wait until the background task brings up the uplink
    flap_task.join()
    time.sleep(tc.failover_delay)

    # Make sure atleast one up link came UP after both uplink flap.
    status = bond_utils.DetectUpLinkState(tc.nodes, bond_utils.PORT_OPER_STATUS_UP, any)
    if status != api.types.status.SUCCESS:
        api.Logger.error(f"Atleast one uplink on {tc.nodes} is not in UP state.")
        return api.types.status.FAILURE

    tc.uplink_fail_stage = False

    # Final ping after recovery; tc.resp is left for the Verify stage.
    trigger_ping_to_bond(tc)
    return status