Example #1
def PopulateSecondaryAddress(tc):
    for wl in api.GetWorkloads():
        # configure 2 secondary IPs per workload and record them for cleanup
        sec_ip_list = sec_ip_api.ConfigWorkloadSecondaryIp(wl, True, 2)
        tc.wl_sec_ip_info[wl.workload_name] = sec_ip_list
Example #2
def RemoveSecondaryAddress(tc):
    for wl in api.GetWorkloads():
        sec_ip_api.ConfigWorkloadSecondaryIp(wl, False, 2)
        tc.wl_sec_ip_info[wl.workload_name].clear()
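
These two helpers pair naturally as setup and teardown around a testcase. A minimal usage sketch, assuming the usual IOTA testcase shape and that tc.wl_sec_ip_info must exist before population:

def Setup(tc):
    # seed the per-workload map that PopulateSecondaryAddress fills in
    tc.wl_sec_ip_info = {}
    PopulateSecondaryAddress(tc)
    return api.types.status.SUCCESS

def Teardown(tc):
    # unconfigure the same secondary IPs recorded during Setup
    RemoveSecondaryAddress(tc)
    return api.types.status.SUCCESS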
Example #3
def Setup(tc):

    tc.mfg_mode = api.GetTestsuiteAttr("mfg_mode")
    if tc.mfg_mode is None:
        tc.mfg_mode = 'no'

    tc.test_intf = api.GetTestsuiteAttr("mfg_test_intf")
    if tc.test_intf is None:
        tc.test_intf = 'up1'  # default up1 for kni tests

    # get node info
    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.wl_node_name = api.GetTestsuiteAttr("wl_node_name")

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)

    # Assuming a single NIC per host, which exposes exactly two uplink interfaces
    if len(host_intfs) != 2:
        api.Logger.error('Expected 2 host interfaces, found %d' % len(host_intfs))
        return api.types.status.FAILURE

    up0_intf = host_intfs[0]
    up1_intf = host_intfs[1]

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    tc.sub_wl = []
    for wl in workloads:
        if (wl.parent_interface == up0_intf
                and tc.test_intf == 'up0') or (wl.parent_interface == up1_intf
                                               and tc.test_intf == 'up1'):
            if wl.uplink_vlan == 0:  # Native workload
                tc.wl0 = wl

            else:  # Tagged workload
                tc.sub_wl.append(wl)

    # 1 subintf is used by default for kni tests
    # 3 subintf are used by default for mfg mode tests (2 for positive test and
    # 1 for negative test)
    if tc.mfg_mode == 'yes':
        tc.sub_wl = tc.sub_wl[:3]
    else:
        tc.sub_wl = tc.sub_wl[:1]

    api.SetTestsuiteAttr("kni_wl", tc.wl0)
    api.SetTestsuiteAttr("kni_sub_wl", tc.sub_wl)

    api.Logger.info("wl0: vlan: {}, mac: {}, ip: {}".format(
        tc.wl0.uplink_vlan, tc.wl0.mac_address, tc.wl0.ip_address))
    for idx, sub_wl in enumerate(tc.sub_wl):
        api.Logger.info("sub_wl[{}]: vlan: {}, mac: {}, ip: {}".format(
            idx, sub_wl.uplink_vlan, sub_wl.mac_address, sub_wl.ip_address))

    # check if mnic_p2p interface is present
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "ifconfig mnic_p2p"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)

    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)

        if cmd.exit_code != 0:
            api.Logger.error("mnic_p2p intf not found on naples %s" % \
                              tc.bitw_node_name)
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
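
The request/trigger/inspect sequence at the end of this Setup recurs throughout these examples; a small reusable sketch under the same api surface (run_naples_cmd is a name introduced here, not part of the original):

def run_naples_cmd(node_name, cmd):
    # build a request, run one command on Naples, and check its exit code
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddNaplesCommand(req, node_name, cmd)
    resp = api.Trigger(req)
    for c in resp.commands:
        api.PrintCommandResults(c)
        if c.exit_code != 0:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS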
Example #4
def Setup(tc):

    # get node info
    tc.bitw_node_name = None
    tc.wl_node_name = None

    # Assuming only one bitw node and one workload node
    nics = store.GetTopology().GetNicsByPipeline("athena")
    for nic in nics:
        tc.bitw_node_name = nic.GetNodeName()
        break
    api.SetTestsuiteAttr("bitw_node_name", tc.bitw_node_name)

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    tc.wl_node_name = workloads[0].node_name
    api.SetTestsuiteAttr("wl_node_name", tc.wl_node_name)

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)

    # Assuming a single NIC per host, which exposes exactly two uplink interfaces
    if len(host_intfs) != 2:
        api.Logger.error('Expected 2 host interfaces, found %d' % len(host_intfs))
        return api.types.status.FAILURE

    tc.wl = []
    for wl in workloads:
        tc.wl.append(wl)
        api.Logger.info("wl: vlan: {}, mac: {}, ip: {}".format(
            wl.uplink_vlan, wl.mac_address, wl.ip_address))

    tc.intfs = []
    tc.intfs.append({
        'name': 'inb_mnic0',
        'ip': str(tc.wl[0].ip_address),
        'sub_ip': str(tc.wl[2].ip_address),
        'vlan': str(tc.wl[2].uplink_vlan)
    })
    tc.intfs.append({
        'name': 'inb_mnic1',
        'ip': str(tc.wl[1].ip_address),
        'sub_ip': str(tc.wl[3].ip_address),
        'vlan': str(tc.wl[3].uplink_vlan)
    })
    api.SetTestsuiteAttr("inb_mnic_intfs", tc.intfs)

    # copy device_bootstrap.json to naples
    bootstrap_json_fname = api.GetTopDir(
    ) + '/nic/conf/athena/device_bootstrap.json'
    api.CopyToNaples(tc.bitw_node_name, [bootstrap_json_fname], "")

    # write and copy pensando_pre_init.sh to naples
    f = open('pensando_pre_init.sh', "w")
    f.write('echo "copying device.json"\n')
    f.write('cp /data/device_bootstrap.json /nic/conf/device.json\n')
    f.close()

    api.CopyToNaples(tc.bitw_node_name, ['pensando_pre_init.sh'], "")
    os.remove('pensando_pre_init.sh')

    # move pensando_pre_init.sh to /sysconfig/config0/ and restart Athena Node
    req = api.Trigger_CreateExecuteCommandsRequest()

    cmd = "mv /pensando_pre_init.sh /sysconfig/config0/"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)

    cmd = "mv /device_bootstrap.json /data/"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)

    resp = api.Trigger(req)
    # two commands were queued above, so check every exit code
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Bootstrap setup failed on node %s" % \
                              tc.bitw_node_name)
            return api.types.status.FAILURE

    # reboot the node
    api.Logger.info("Rebooting {}".format(tc.bitw_node_name))
    return api.RestartNodes([tc.bitw_node_name], 'reboot')
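
A plausible follow-up check after the reboot (an assumption, not shown in the original): confirm pensando_pre_init.sh actually installed the bootstrap file as /nic/conf/device.json.

def verify_bootstrap(tc):
    # runs post-reboot; the pre-init script should have copied device.json
    req = api.Trigger_CreateExecuteCommandsRequest()
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name,
                                 "ls /nic/conf/device.json")
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("device.json not installed after bootstrap")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS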
Example #5
def gen_plcy_cfg_local_wl_topo(tc):
    api.SetTestsuiteAttr("dp_policy_json_path", api.GetTopDir() + \
                        DP_PLCY_JSON_PATH)

    tc.dp_policy_json_path = api.GetTestsuiteAttr("dp_policy_json_path")

    tc.skip_flow_log_vnics = getattr(tc.args, "skip_flow_log_vnics", [])

    # Read template policy.json file
    with open(tc.template_policy_json_path) as fd:
        plcy_obj = json.load(fd)

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)

    # Assuming a single NIC per host, which exposes exactly two uplink interfaces
    if len(host_intfs) != 2:
        api.Logger.error('Expected 2 host interfaces, found %d' % len(host_intfs))
        return api.types.status.FAILURE

    tc.host_ifs[(tc.wl_node_name, tc.classic_nic)] = host_intfs
    up0_intf = host_intfs[0]
    up1_intf = host_intfs[1]

    vnics = plcy_obj['vnic']

    for idx, vnic in enumerate(vnics):
        vnic_id = vnic['vnic_id']

        # vnic_type has 2 options: L2 or L3
        tc.vnic_type = 'L2' if "vnic_type" in vnic and vnic['vnic_type'] == 'L2' else 'L3'
        tc.nat = 'yes' if "nat" in vnic else 'no'

        api.Logger.info('Setting up policy.json for vnic %s' % (vnic_id))

        up0_vlan, up1_vlan = None, None           
        up0_mac, up1_mac = None, None

        mac_lo = 'ff:ff:ff:ff:ff:ff'
        mac_hi = '00:00:00:00:00:00'

        wl_up0_idx = utils.get_wl_idx(0, idx+1)
        wl_up1_idx = utils.get_wl_idx(1, idx+1)

        wl_up0 = workloads[wl_up0_idx]
        wl_up1 = workloads[wl_up1_idx]

        if wl_up0.parent_interface == up0_intf:
            up0_vlan = wl_up0.uplink_vlan
            up0_mac = wl_up0.mac_address
        else:
            api.Logger.error('Unexpected workload order: parent intf %s does '
                             'not match %s' % (wl_up0.parent_interface, up0_intf))

        if wl_up1.parent_interface == up1_intf:
            up1_vlan = wl_up1.uplink_vlan
            up1_mac = wl_up1.mac_address
        else:
            api.Logger.error('Unexpected workload order: parent intf %s does '
                             'not match %s' % (wl_up1.parent_interface, up1_intf))

        if not up0_mac or not up1_mac:
            api.Logger.error('Failed to get workload sub-intf mac addresses')
            return api.types.status.FAILURE

        if not up0_vlan or not up1_vlan:
            api.Logger.error('Failed to get workload sub-intf vlan value')
            return api.types.status.FAILURE

        mac_lo = min(mac_lo, up0_mac, up1_mac)
        mac_hi = max(mac_hi, up0_mac, up1_mac)

        api.Logger.info('Workload0: up0_intf %s up0_vlan %s up0_mac %s' % (
                        up0_intf, up0_vlan, up0_mac))
        api.Logger.info('Workload1: up1_intf %s up1_vlan %s up1_mac %s' % (
                        up1_intf, up1_vlan, up1_mac))
        api.Logger.info('mac_lo %s mac_hi %s' % (mac_lo, mac_hi))

        # these keys need to be changed for both L2 and L3 with or without NAT.
        vnic['vlan_id'] = str(up1_vlan)
        vnic['rewrite_underlay']['vlan_id'] = str(up0_vlan)
        vnic['session']['to_switch']['host_mac'] = str(up1_mac)
        vnic['rewrite_underlay']['dmac'] = str(up0_mac)

        # these fields need to be changed only for L3
        if tc.vnic_type == 'L3':
            vnic['rewrite_host']['dmac'] = str(up1_mac)

        # only applicable to L2 vnics
        if tc.vnic_type == 'L2':
            vnic['l2_flows_range']['h2s_mac_lo'] = str(mac_lo)
            vnic['l2_flows_range']['h2s_mac_hi'] = str(mac_hi)
            vnic['l2_flows_range']['s2h_mac_lo'] = str(mac_lo)
            vnic['l2_flows_range']['s2h_mac_hi'] = str(mac_hi)

        # Set skip_flow_log if vnic is part of the skip_flow_log_vnics
        if int(vnic_id) in tc.skip_flow_log_vnics:
            api.Logger.info('Setting skip_flow_log for vnic %d' % (
                            int(vnic_id)))
            vnic['session']['skip_flow_log'] = "true"

    # write vlan/mac addr and flow info to actual file 
    with open(tc.dp_policy_json_path, 'w+') as fd:
        json.dump(plcy_obj, fd, indent=4)

    # copy policy.json file to node
    api.CopyToNaples(tc.bitw_node_name, [tc.dp_policy_json_path], "")
Example #6
def Trigger(tc):
    if tc.skip or api.IsDryrun():
        return api.types.status.SUCCESS

    result = api.types.status.SUCCESS
    tc.orig_hwtag_flags = {}
    tc.pds_verify = {}
    tc.cmd_status = {}

    tc.tx_random = []
    tc.rx_random = []
    # an int arg selects a random subset of workloads to toggle; a bool applies to all
    if type(tc.args.tx) == int:
        tc.tx_random = SetRandom_Offload()
    if type(tc.args.rx) == int:
        tc.rx_random = SetRandom_Offload()

    for wl in api.GetWorkloads():
        if wl.parent_interface != wl.interface:
            continue
        if wl.IsNaples():
            # Save original flag values for rollback
            tx_status = naples_workload.Get_TxVlanOffload_Status(wl)
            rx_status = naples_workload.Get_RxVlanOffload_Status(wl)
            if api.IsApiResponseOk(rx_status):
                if api.GetNodeOs(wl.node_name) == 'linux':
                    # stdout looks like "name: on/off"; strip the trailing newline
                    rx_enable = (rx_status.commands[0].stdout).split(':')[1].strip()
                elif api.GetNodeOs(wl.node_name) == 'freebsd':
                    options = (rx_status.commands[0].stdout).split(',')
                    if 'VLAN_HWTAGGING' in options:
                        rx_enable = 'on'
                    else:
                        rx_enable = 'off'
                else:
                    api.Logger.error("Unmatched node os %s" %
                                     (api.GetNodeOs(wl.node_name), ))
                    result = api.types.status.FAILURE
                    break
            else:
                result = api.types.status.FAILURE
                break

            if api.IsApiResponseOk(tx_status):
                if api.GetNodeOs(wl.node_name) == 'linux':
                    # stdout looks like "name: on/off"; strip the trailing newline
                    tx_enable = (tx_status.commands[0].stdout).split(':')[1].strip()
                elif api.GetNodeOs(wl.node_name) == 'freebsd':
                    options = (tx_status.commands[0].stdout).split(',')
                    if 'VLAN_HWTAGGING' in options:
                        tx_enable = 'on'
                    else:
                        tx_enable = 'off'
                else:
                    api.Logger.error("Unmatched node os %s" %
                                     (api.GetNodeOs(wl.node_name), ))
                    result = api.types.status.FAILURE
                    break
            else:
                result = api.types.status.FAILURE
                break

            tc.orig_hwtag_flags[wl.workload_name] = (tx_enable, rx_enable)

            # Change tx_vlan and rx_vlan as per args
            if type(tc.args.tx) == int:
                if wl.workload_name in tc.tx_random:
                    tx_enable = 'off' if tx_enable == 'on' else 'on'
            else:
                tx_enable = 'on' if tc.args.tx else 'off'
            toggle_tx_resp = naples_workload.Toggle_TxVlanOffload(
                wl, tx_enable)

            if type(tc.args.rx) == int:
                if wl.workload_name in tc.rx_random:
                    rx_enable = 'off' if rx_enable == 'on' else 'on'
            else:
                rx_enable = 'on' if tc.args.rx else 'off'
            toggle_rx_resp = naples_workload.Toggle_RxVlanOffload(
                wl, rx_enable)

            if not api.IsApiResponseOk(toggle_tx_resp):
                result = api.types.status.FAILURE
                break
            if not api.IsApiResponseOk(toggle_rx_resp):
                result = api.types.status.FAILURE
                break

            # Validate change using pdsctl command
            api.Logger.warn(
                "XXX 'pdsctl show lif' does not support --yaml output, and does not show the mode flag for validation"
            )
            #tc.toggle_resp, is_ok = pdsctl.ExecutePdsctlShowCommand(wl.node_name, 'lif')
            #if api.IsApiResponseOk(tc.toggle_resp):
            #    cmd =  tc.toggle_resp.commands[0]
            #    if cmd.stdout is not None:
            #        yml_loaded = yaml.load_all(cmd.stdout, Loader=yaml.FullLoader)
            #        for spec in yml_loaded:
            #            if spec is not None:
            #                name = spec["spec"]["name"]
            #                if name == wl.interface:
            #                    api.Logger.info("Interface: %s, Vlan Insert Enable: %s, Vlan Strip Enable: %s" % (wl.interface, spec["spec"]["vlaninserten"], spec["spec"]["vlanstripen"]))
            #                    tx = str(True) if tx_enable == 'on' else str(False)
            #                    rx = str(True) if rx_enable == 'on' else str(False)
            #                    tc.pds_verify[wl.workload_name] = [(tx, spec["spec"]["vlaninserten"]), (rx, spec["spec"]["vlanstripen"])]
            #else:
            #    result = api.types.status.FAILURE
            #    break

            # Store status for verification
            rx_status = naples_workload.Get_RxVlanOffload_Status(wl)
            tx_status = naples_workload.Get_TxVlanOffload_Status(wl)

            tc.cmd_status[wl.workload_name] = (tx_status, rx_status)

    # Run traffic test
    tc.cmd_cookies, tc.resp = traffic_utils.pingWorkloads(tc.workload_pairs)

    api.Logger.info("TC.Trigger result: %s" % result)
    return result
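
SetRandom_Offload is not shown above. A hypothetical sketch consistent with how tc.tx_random and tc.rx_random are consumed (a collection of workload names whose offload flag gets flipped):

import random

def SetRandom_Offload():
    # hypothetical: pick a random subset of Naples parent-interface workloads
    names = [wl.workload_name for wl in api.GetWorkloads()
             if wl.IsNaples() and wl.parent_interface == wl.interface]
    return random.sample(names, random.randint(0, len(names)))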
Example #7
def Setup(tc):

    # parse iterator args
    # parse_args(tc)

    # skip some iterator cases
    if skip_curr_test(tc):
        return api.types.status.SUCCESS

    # node init
    tc.tester_node = None
    tc.tester_node_name = None
    tc.dut_node = None

    # init response list
    tc.resp = []

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # initialize tester-node and dut-node.
    tc.nodes = api.GetNodes()
    for node in tc.nodes:
        if api.GetNicType(node.Name()) == 'intel':
            tc.tester_node = node
            tc.tester_node_name = node.Name()
            tc.tester_node_mgmt_ip = api.GetMgmtIPAddress(node.Name())
            api.Logger.info('tester node: %s mgmt IP: %s' %
                            (node.Name(), tc.tester_node_mgmt_ip))
        else:
            tc.dut_node = node
            tc.dut_node_mgmt_ip = api.GetMgmtIPAddress(node.Name())
            api.Logger.info('dut node: %s mgmt IP: %s' %
                            (node.Name(), tc.dut_node_mgmt_ip))

    # create tar.gz file of dpdk and dpdk-test
    sdk_fullpath = api.GetTopDir() + SDK_SRC_PATH
    dpdk_tar_path = api.GetTopDir() + DPDK_TAR_FILE

    tar = tarfile.open(dpdk_tar_path, "w:gz")
    os.chdir(sdk_fullpath)
    tar.add("dpdk")
    os.chdir("dpdk-test")
    for name in os.listdir("."):
        tar.add(name)
    tar.close()

    api.Logger.info("dpdk-test tarfile location is: " + dpdk_tar_path)

    api.Logger.info("Configuring DTS on " + tc.tester_node_mgmt_ip)

    # copy dpdk-test.tar.gz to tester node.
    api.CopyToHost(tc.tester_node.Name(), [dpdk_tar_path], "")

    # untar dpdk-test.tar.gz and configure tester to run DTS
    req = api.Trigger_CreateExecuteCommandsRequest()
    trig_cmd1 = "tar -xzvf dpdk-test.tar.gz"
    trig_cmd2 = "scripts/config_tester.sh %s %s" % (tc.dut_node_mgmt_ip,
                                                    tc.tester_node_mgmt_ip)
    api.Trigger_AddHostCommand(req,
                               tc.tester_node.Name(),
                               trig_cmd1,
                               timeout=60)
    api.Trigger_AddHostCommand(req,
                               tc.tester_node.Name(),
                               trig_cmd2,
                               timeout=60)
    trig_resp = api.Trigger(req)
    tc.resp.append(trig_resp)

    # disable internal mnic
    cmd = "ifconfig inb_mnic0 down && ifconfig inb_mnic1 down"
    resp = api.RunNaplesConsoleCmd(tc.dut_node.Name(), cmd)

    return api.types.status.SUCCESS
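
The tarball packaging above mutates the process CWD with os.chdir, which is global state. An equivalent sketch without the chdir dance, using arcname to control in-archive paths:

import os
import tarfile

def build_dpdk_tar(sdk_fullpath, dpdk_tar_path):
    # arcname sets the path inside the archive, so no os.chdir is needed
    with tarfile.open(dpdk_tar_path, "w:gz") as tar:
        tar.add(os.path.join(sdk_fullpath, "dpdk"), arcname="dpdk")
        test_dir = os.path.join(sdk_fullpath, "dpdk-test")
        for name in os.listdir(test_dir):
            tar.add(os.path.join(test_dir, name), arcname=name)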
Example #8
def ValidateMacRegistration():
    nodes = api.GetNaplesHostnames()
    naples_node = nodes[0]

    # workload endpoints
    wload_intf_mac_dict = {}
    wload_intf_vlan_map = {}
    for wd in subif_utils.getNativeWorkloads():
        if wd.node_name == naples_node and wd.interface == wd.parent_interface:
            wload_intf_mac_dict[wd.interface] = util_host.GetMACAddress(
                naples_node, wd.interface)
            wload_intf_vlan_map[wd.interface] = [8192]
            for sub in subif_utils.GetSubifs(wd.interface, wd.node_name):
                sub_wd = subif_utils.getWorkloadForInf(sub, wd.node_name)
                wload_intf_mac_dict[
                    sub_wd.interface] = util_host.GetMACAddress(
                        naples_node, sub_wd.interface)
                # the parent entry was seeded above, so append unconditionally
                wload_intf_vlan_map[wd.interface].append(sub_wd.encap_vlan)

    api.Logger.info("wload_intf_vlan_map: %s \n" % wload_intf_vlan_map)
    api.Logger.info("wload_intf_mac_dict: %s \n" % wload_intf_mac_dict)

    wload_ep_set = filters_utils.getWorkloadEndPoints(naples_node,
                                                      wload_intf_mac_dict,
                                                      wload_intf_vlan_map)

    host_intf_mac_dict = {}
    for wl in api.GetWorkloads():
        if wl.node_name == naples_node and wl.interface == wl.parent_interface:
            if wl.interface not in wload_intf_mac_dict:
                host_intf_mac_dict[wl.interface] = util_host.GetMACAddress(
                    naples_node, wl.interface)

    for inf in naples_host_utils.GetHostInternalMgmtInterfaces(naples_node):
        if inf not in wload_intf_mac_dict:
            mac = util_host.GetMACAddress(naples_node, inf)
            host_intf_mac_dict[inf] = mac

    # Other host interface endpoints (which aren't part of workloads)
    host_ep_set = filters_utils.getHostIntfEndPoints(naples_node,
                                                     host_intf_mac_dict)

    # Naples intf endpoints
    naples_intf_mac_dict = filters_utils.getNaplesIntfMacAddrDict(naples_node)
    naples_ep_set = filters_utils.getNaplesIntfEndPoints(
        naples_node, naples_intf_mac_dict)

    # HAL view of endpoints
    hal_ep_set = filters_utils.getNaplesHALEndPoints(naples_node)

    # Keeping them separate as it is useful for debugging at scale
    api.Logger.info("getAllEndPointsView: wload_ep_set %d %s" %
                    (len(wload_ep_set), wload_ep_set))
    api.Logger.info("getAllEndPointsView: host_ep_set %d %s" %
                    (len(host_ep_set), host_ep_set))
    api.Logger.info("getAllEndPointsView: naples_ep_set %d %s" %
                    (len(naples_ep_set), naples_ep_set))
    api.Logger.info("getAllEndPointsView: hal_ep_set %d %s" %
                    (len(hal_ep_set), hal_ep_set))

    return wload_ep_set, host_ep_set, naples_ep_set, hal_ep_set
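
A plausible caller sketch (an assumption: the *_ep_set values behave as Python sets): the union of the workload, host, and Naples views should match what HAL reports.

def VerifyEndpoints():
    wload_eps, host_eps, naples_eps, hal_eps = ValidateMacRegistration()
    expected = wload_eps | host_eps | naples_eps
    if expected != hal_eps:
        api.Logger.error("endpoint mismatch: missing %s, extra %s" %
                         (expected - hal_eps, hal_eps - expected))
        return api.types.status.FAILURE
    return api.types.status.SUCCESS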
Example #9
def Setup(tc):
    tc.seed = random.randrange(sys.maxsize)
    api.Logger.info("Using seed : %s" % (tc.seed))
    tc.serverHandle = None
    tc.clientHandle = None
    tc.selected_sec_profile_objs = None

    tc.skip_stats_validation = getattr(tc.args, 'triggers', False)

    tc.cancel = False
    tc.workloads = api.GetWorkloads()

    utils.UpdateSecurityProfileTimeouts(tc)
    chooseWorkload(tc)
    server, client = tc.workload_pair[0], tc.workload_pair[1]
    if not updateSessionLimits(tc, server):
        api.logger.error("Cannot configure session limit on non-Naples NIC")
        return api.types.status.FAILURE

    if tc.skip_stats_validation:
        naples_utils.CopyMemStatsCheckTool()

    api.Logger.info("Server: %s(%s)(%s) <--> Client: %s(%s)(%s)" %\
                    (server.workload_name, server.ip_address,
                     server.mgmt_ip, client.workload_name,
                     client.ip_address, client.mgmt_ip))

    try:
        StoreCurrentPdsLogLevel(tc)
        pds_utils.SetPdsLogsLevel("error")
    except Exception as e:
        #traceback.print_exc()
        api.Logger.error("Failed to setup cps test workloads : %s" % (e))
        return api.types.status.FAILURE

    try:
        tc.serverHandle = TRexIotaWrapper(
            server,
            role="server",
            gw=client.ip_address,
            kill=0,
            sync_port=server.exposed_tcp_ports[0],
            async_port=server.exposed_tcp_ports[1])
        tc.clientHandle = TRexIotaWrapper(
            client,
            role="client",
            gw=server.ip_address,
            kill=0,
            sync_port=client.exposed_tcp_ports[0],
            async_port=client.exposed_tcp_ports[1])

        api.Logger.info("connect trex...")
        tc.serverHandle.connect()
        tc.clientHandle.connect()

        api.Logger.info("reset connection...")
        tc.serverHandle.reset(True)
        tc.clientHandle.reset(True)

        api.Logger.info("setting profile...")
        profile_path = getProfilePath(tc)
        tc.serverHandle.load_profile(profile_path, getTunables(tc))
        tc.clientHandle.load_profile(profile_path, getTunables(tc))

    except Exception as e:
        #traceback.print_exc()
        api.Logger.info("Failed to setup TRex topology: %s" % e)
        #cleanup(tc)
        return api.types.status.FAILURE

    api.Logger.info("Clear hardware state before trex trigger...")
    flowutils.clearFlowTable(tc.workload_pairs)
    __clearVPPEntity("errors")
    flowutils.clearFlowTable(tc.workload_pairs)
    __clearVPPEntity("flow statistics")
    __clearVPPEntity("flow entries")
    __clearVPPEntity("runtime")

    return api.types.status.SUCCESS
Example #10
def __add_from_store(req, node_name, parent_inf, total):
    count = 0
    global __deleted_store_subifs
    __add_subifs_wl = []

    key = node_name + '-' + parent_inf
    lst_del_subif = __deleted_store_subifs.get(key, None)
    for wl in api.GetWorkloads():
        if wl.parent_interface != parent_inf:
            continue
        if node_name != wl.node_name:
            continue
        # exclude native interfaces
        if wl.parent_interface == wl.interface:
            continue
        # skip workloads already added to the request
        if any(wreq.workload_name == wl.workload_name
               for wreq in req.workloads):
            continue

        #assert(not api.IsWorkloadRunning(wl.workload_name))
        # remove the interface from deleted subifs
        is_deleted = False
        if lst_del_subif and wl.interface in lst_del_subif:
            lst_del_subif.remove(wl.interface)
            is_deleted = True

        if not is_deleted:
            continue

        api.Logger.info("adding to store again: %s" % wl.workload_name)
        wl_msg = req.workloads.add()
        intf = wl_msg.interfaces.add()
        intf.ip_prefix = wl.ip_prefix
        intf.ipv6_prefix = wl.ipv6_prefix
        intf.mac_address = wl.mac_address
        intf.encap_vlan = wl.encap_vlan
        intf.uplink_vlan = wl.uplink_vlan
        wl_msg.workload_name = wl.workload_name
        wl_msg.node_name = wl.node_name
        intf.pinned_port = wl.pinned_port
        intf.interface_type = wl.interface_type
        intf.interface = wl.interface
        intf.parent_interface = wl.parent_interface
        wl_msg.workload_type = wl.workload_type
        wl_msg.workload_image = wl.workload_image

        count += 1
        __add_subifs_wl.append(wl.interface)

        if count == total:
            break

    api.Logger.info(
        "expected: %d, available in store: %d, workloads: %s for parent intf %s on node %s"
        % (total, count, __add_subifs_wl, parent_inf, node_name))

    return count, __add_subifs_wl
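
__add_from_store drains __deleted_store_subifs, so the delete path presumably records into it under the same node-parent key. A hypothetical sketch of that counterpart bookkeeping:

def __record_deleted_subif(node_name, parent_inf, interface):
    # hypothetical: remember a deleted sub-interface so it can be re-added later
    key = node_name + '-' + parent_inf
    __deleted_store_subifs.setdefault(key, []).append(interface)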
Example #11
def do_vmotion(tc, dsc_to_dsc):
    factor_l2seg = True
    new_node = ''
    old_node = ''
    api.Logger.info("In do_vmotion for dsc_to_dsc {}".format(dsc_to_dsc))
    # get first naples node to move VM to
    if (dsc_to_dsc):
        assert (len(tc.Nodes) >= 2)
        # set old name as Node[0] and new node as Node[1]
        old_node = tc.Nodes[0]
        new_node = tc.Nodes[1]
    else:
        assert (len(tc.Nodes) >= 1)
        assert (len(tc.NonNaplesNodes) >= 1)
        old_node = tc.NonNaplesNodes[0]
        new_node = tc.Nodes[0]

    workloads = api.GetWorkloads(old_node)
    assert (len(workloads) != 0)
    api.Logger.info("moving workloads from {} to {}".format(
        old_node, new_node))
    update_move_info(tc, workloads, factor_l2seg, new_node)
    vm_threads = []
    trigger_node = None
    tc.trigger_wl = None
    for wl_info in tc.move_info:
        if (api.IsNaplesNode(wl_info.wl.node_name)):
            wl_info.sess_info_before = vm_utils.get_session_info(
                tc, wl_info.wl)
            vm_utils.get_sessions_info(tc, wl_info.old_node)
        api.Logger.info("moving wl {} from node {} to node {}".format(
            wl_info.wl.workload_name, wl_info.old_node, wl_info.new_node))
        if not trigger_node:
            if tc.trigger == 'port_flap':
                trigger_node = wl_info.new_node
            elif tc.trigger == 'mgmt_down' or tc.trigger == 'ep_delete':
                if tc.trigger_on == 'old':
                    trigger_node = wl_info.old_node
                else:
                    trigger_node = wl_info.new_node
                tc.trigger_wl = wl_info.wl
        vm_thread = threading.Thread(target=triggerVmotion,
                                     args=(
                                         tc,
                                         wl_info.wl,
                                         wl_info.new_node,
                                     ))
        vm_threads.append(vm_thread)
        vm_thread.start()
        create_ep_info(tc, wl_info.wl, wl_info.new_node, "START",
                       wl_info.old_node)

    if tc.trigger and trigger_node:
        if tc.trigger == 'port_flap':
            switchPortFlap2(tc, trigger_node)
        elif tc.trigger == 'mgmt_down':
            flapMgmtConnectivity(tc, trigger_node)
        elif tc.trigger == 'ep_delete':
            deleteEpTrigger(tc, trigger_node, tc.trigger_wl)

    dump_nodes = []
    for wl_info in tc.move_info:
        if (api.IsNaplesNode(wl_info.new_node)):
            if wl_info.new_node not in dump_nodes:
                vm_utils.get_sessions_info(tc, wl_info.new_node)
                dump_nodes.append(wl_info.new_node)

    # wait for vmotion thread to complete, meaning vmotion is done on vcenter
    for vm_thread in vm_threads:
        vm_thread.join()

    for wl_info in tc.move_info:
        vm_utils.update_ep_migr_status(tc, wl_info.wl, wl_info.new_node,
                                       "DONE")
    return api.types.status.SUCCESS
Example #12
def Trigger(tc):
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []

    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]

    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Configure IPsec on Node 1

    if api.IsNaplesNode(nodes[0]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

    else:
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]

        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    # Configure IPsec on Node 2

    if api.IsNaplesNode(nodes[1]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

    else:
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]

        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)

    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]
    bypass_test = 0

    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info(
            "Both workloads are Naples, %s is iperf client, %s is iperf server, bypassing test"
            % (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w2.node_name, w1.node_name))
        iperf_client_wl = w2
        iperf_server_wl = w1
    else:
        # Neither workload is Naples; default the roles so the vars are bound
        iperf_client_wl = w1
        iperf_server_wl = w2

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s port %s" %\
                   (iperf_server_wl.workload_name, iperf_server_wl.ip_address, iperf_client_wl.workload_name, iperf_client_wl.ip_address, tc.iterators.protocol, tc.iterators.port)

    api.Logger.info("Starting Iperf test over IPSec from %s" % (tc.cmd_descr))

    if bypass_test == 0:
        cmd_cookie = "Set rcv socket buffer size on %s" % (w1.workload_name)
        api.Trigger_AddCommand(
            req, w1.node_name, w1.workload_name,
            "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Set rcv socket buffer size on %s" % (w2.workload_name)
        api.Trigger_AddCommand(
            req, w2.node_name, w2.workload_name,
            "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Setting MTU to smaller value on %s" % (
            iperf_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, iperf_client_wl.node_name, iperf_client_wl.workload_name,
            "ifconfig %s mtu 1048" % iperf_client_wl.interface)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Running iperf server on %s" % (
            iperf_server_wl.workload_name)
        api.Trigger_AddCommand(req,
                               iperf_server_wl.node_name,
                               iperf_server_wl.workload_name,
                               "iperf -s -p %s" % (tc.iterators.port),
                               background=True)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Brief Sleep"
        api.Trigger_AddCommand(req, iperf_client_wl.node_name,
                               iperf_client_wl.workload_name, "sleep 1")
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Running iperf client on %s" % (
            iperf_client_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(
                req, iperf_client_wl.node_name, iperf_client_wl.workload_name,
                "iperf -c %s -p %s -M %s" %
                (iperf_server_wl.ip_address, tc.iterators.port,
                 tc.iterators.pktsize))
        else:
            api.Trigger_AddCommand(
                req, iperf_client_wl.node_name, iperf_client_wl.workload_name,
                "iperf --udp -c %s -p %s -M %s" %
                (iperf_server_wl.ip_address, tc.iterators.port,
                 tc.iterators.pktsize))
        tc.cmd_cookies.append(cmd_cookie)

    if w1.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(
            req, w1.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    if w2.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(
            req, w2.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
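
A minimal Verify sketch in the cookie/response pattern used above, assuming one cookie was appended per queued command so the two lists stay parallel:

def Verify(tc):
    if tc.resp is None:
        return api.types.status.FAILURE
    for idx, cmd in enumerate(tc.resp.commands):
        api.Logger.info("Results for %s" % tc.cmd_cookies[idx])
        api.PrintCommandResults(cmd)
        # background commands (the iperf server) may exit nonzero when
        # terminated; a real Verify would exempt those
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS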
Example #13
def Setup(tc):
    tc.tunnels = tunnel.GetTunnels()
    tc.Workloads = api.GetWorkloads()
    return api.types.status.SUCCESS
Example #14
def __create_subifs(subif_count=0, native_inf=None):
    for wl in api.GetWorkloads():
        if wl.parent_interface != wl.interface:
            continue
        if wl.IsNaples():
            subif_utils.Create_Subifs(subif_count, wl.interface, wl.node_name)
Example #15
def gen_plcy_cfg_e2e_wl_topo(tc):

    api.SetTestsuiteAttr("node1_dp_policy_json_path", api.GetTopDir() + \
                        E2E_NODE1_DP_PLCY_JSON_PATH)
    api.SetTestsuiteAttr("node2_dp_policy_json_path", api.GetTopDir() + \
                        E2E_NODE2_DP_PLCY_JSON_PATH)

    node1_dp_plcy_json_path = api.GetTestsuiteAttr("node1_dp_policy_json_path")
    node2_dp_plcy_json_path = api.GetTestsuiteAttr("node2_dp_policy_json_path")

    # Get list of workloads for nodes
    nodes = [pair[0] for pair in tc.wl_node_nic_pairs]
    workloads_node1 = api.GetWorkloads(nodes[0])
    workloads_node2 = api.GetWorkloads(nodes[1])

    # Read template policy.json file
    with open(tc.template_policy_json_path) as fd:
        t_plcy_obj = json.load(fd)

    t_vnics = t_plcy_obj['vnic']
    n1_plcy_obj = deepcopy(t_plcy_obj)
    n2_plcy_obj = deepcopy(t_plcy_obj)

    for idx, t_vnic in enumerate(t_vnics):

        # Use workloads on up0 for node1 and use workloads 
        # on up1 for node2 since they match switch vlan config
        node1_wl = workloads_node1[utils.get_wl_idx(0, idx+1)]
        node2_wl = workloads_node2[utils.get_wl_idx(1, idx+1)]

        #TODO: tmp fix. Need infra query api
        # total vlans = 36, so add 12 for vlan in 2nd grp 
        tc.encap_vlan_id = node1_wl.uplink_vlan + 12  
        api.Logger.info("idx %s vnic: encap vlan %s" % (
                        idx, tc.encap_vlan_id))

        node1_up0_mac = node1_wl.mac_address
        node2_up1_mac = node2_wl.mac_address

        for node in nodes:
            if node == 'node1':
                vnic = n1_plcy_obj['vnic'][idx]
            else:
                vnic = n2_plcy_obj['vnic'][idx]

            vnic_id = vnic['vnic_id']

            api.Logger.info('Setting up policy.json for vnic %s '
                            'on node %s' % (vnic_id, node))

            vlan_id, host_mac = None, None
            src_slot_id, dst_slot_id = None, None

            if node == 'node1':
                vlan_id = node1_wl.uplink_vlan
                src_slot_id = _get_slot_id('node1', int(vnic_id))
                dst_slot_id = _get_slot_id('node2', int(vnic_id))
                host_mac = node1_up0_mac
                
            else:
                vlan_id = node2_wl.uplink_vlan
                src_slot_id = _get_slot_id('node2', int(vnic_id))
                dst_slot_id = _get_slot_id('node1', int(vnic_id))
                host_mac = node2_up1_mac

            api.Logger.info("%s workload for vnic %s: vlan %s, "
                            "host mac %s" % (node, vnic_id, 
                            vlan_id, host_mac))

            # these keys need to be changed for both L2 and L3 with or without NAT.
            vnic['vlan_id'] = str(vlan_id)
            vnic['slot_id'] = str(src_slot_id)
            vnic['session']['to_switch']['host_mac'] = str(host_mac)
            vnic['rewrite_underlay']['vlan_id'] = str(tc.encap_vlan_id)

            if vnic['rewrite_underlay']['type'] == 'mplsoudp':
                vnic['rewrite_underlay']['mpls_label2'] = str(dst_slot_id)
            elif vnic['rewrite_underlay']['type'] == 'geneve':
                vnic['rewrite_underlay']['dst_slot_id'] = str(dst_slot_id)

            # only applicable to L3 vnics
            if not utils.is_L2_vnic(vnic):
                if node == 'node1':
                    vnic['rewrite_host']['smac'] = str(node2_up1_mac)
                    vnic['rewrite_host']['dmac'] = str(node1_up0_mac)
                else:
                    vnic['rewrite_host']['smac'] = str(node1_up0_mac)
                    vnic['rewrite_host']['dmac'] = str(node2_up1_mac)

            # only applicable to L2 vnics
            if utils.is_L2_vnic(vnic):
                if node == 'node1':
                    vnic['l2_flows_range']['h2s_mac_lo'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['h2s_mac_hi'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['s2h_mac_lo'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['s2h_mac_hi'] = str(node1_up0_mac)
                else:
                    vnic['l2_flows_range']['h2s_mac_lo'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['h2s_mac_hi'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['s2h_mac_lo'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['s2h_mac_hi'] = str(node2_up1_mac)

    
    # write modified plcy objects to file 
    with open(node1_dp_plcy_json_path, 'w+') as fd:
        json.dump(n1_plcy_obj, fd, indent=4)

    with open(node2_dp_plcy_json_path, 'w+') as fd:
        json.dump(n2_plcy_obj, fd, indent=4)

    # copy both policy.json files to respective nodes
    tmp_plcy_json_path = api.GetTopDir() + DP_PLCY_JSON_PATH 
    
    node, nic = tc.athena_node_nic_pairs[0]
    copyfile(node1_dp_plcy_json_path, tmp_plcy_json_path)
    api.CopyToNaples(node, [tmp_plcy_json_path], "", nic)

    node, nic = tc.athena_node_nic_pairs[1]
    copyfile(node2_dp_plcy_json_path, tmp_plcy_json_path)
    api.CopyToNaples(node, [tmp_plcy_json_path], "", nic)

    os.remove(tmp_plcy_json_path)
Example #16
def __delete_subifs(h_interface=None, node_name=None):
    for wl in api.GetWorkloads():
        if wl.parent_interface != wl.interface:
            continue
        if wl.IsNaples():
            subif_utils.Delete_Subifs(wl.interface, wl.node_name)
Example #17
def Setup(tc):

    result = api.types.status.SUCCESS
    tc.skip = False
    node_names = api.GetWorkloadNodeHostnames()

    if not api.RunningOnSameSwitch():
        tc.skip = True
        api.Logger.error(
            "Rx Mode MC : Setup -> Multi switch topology not supported yet - So skipping the TC"
        )
        return api.types.status.IGNORED

    if api.IsNaplesNode(node_names[0]):
        tc.naples_node = node_names[0]
        tc.peer_node = node_names[1]
    elif api.IsNaplesNode(node_names[1]):
        tc.naples_node = node_names[1]
        tc.peer_node = node_names[0]
    else:
        api.Logger.info("Skipping as there are no Naples nodes")
        tc.skip = True
        return api.types.status.IGNORED

    if tc.args.mode != "enable_allmulti" and tc.args.mode != "disable_allmulti":
        api.Logger.error("Unknown mode '%s'. Skipping testcase" %
                         (tc.args.mode))
        tc.skip = True
        return api.types.status.IGNORED

    if api.GetNodeOs(
            tc.naples_node) != "linux" and tc.args.mode == "enable_allmulti":
        api.Logger.info("Skipping testcase because allmulti cannot be set")
        tc.skip = True
        return api.types.status.IGNORED

    tc.expect_pkt = {}
    tc.on_host = {}

    tc.host_intfs = list(api.GetNaplesHostInterfaces(tc.naples_node))
    # Unknown Multicast Packets from uplink will reach host interface when allmulti is enabled
    for intf in tc.host_intfs:
        tc.expect_pkt[intf] = True
        tc.on_host[intf] = True

    # Mgmt interface on host for network connection to Naples over PCIE (Subset of tc.host_intfs)
    tc.host_int_intfs = naples_host_utils.GetHostInternalMgmtInterfaces(
        tc.naples_node)
    for intf in tc.host_int_intfs:
        # Host internal management should not receive unknown multicast packets from uplink regardless of its allmulti state
        tc.expect_pkt[intf] = False
        tc.on_host[intf] = True

    tc.inband_intfs = naples_host_utils.GetNaplesInbandInterfaces(
        tc.naples_node)
    # Unknown multicast packets from uplink will reach the inband interfaces when allmulti is enabled
    for intf in tc.inband_intfs:
        tc.expect_pkt[intf] = True
        tc.on_host[intf] = False

    tc.naples_int_mgmt_intfs = naples_host_utils.GetNaplesInternalMgmtInterfaces(
        tc.naples_node)
    # Packets from uplink should not reach naples internal management interfaces [int_mnic0] regardless of their allmulti state
    for intf in tc.naples_int_mgmt_intfs:
        tc.expect_pkt[intf] = False
        tc.on_host[intf] = False

    tc.naples_oob_mgmt_intfs = naples_host_utils.GetNaplesOobInterfaces(
        tc.naples_node)
    # Packets from uplink should not reach naples oob management interfaces [oob_mnic0] regardless of their allmulti state
    for intf in tc.naples_oob_mgmt_intfs:
        tc.expect_pkt[intf] = False
        tc.on_host[intf] = False

    tc.all_intfs = tc.host_intfs + tc.host_int_intfs + tc.inband_intfs + tc.naples_int_mgmt_intfs + tc.naples_oob_mgmt_intfs

    # With allmulti disabled, unknown multicast traffic shouldn't reach any interface
    if tc.args.mode == "disable_allmulti":
        for intf in tc.all_intfs:
            tc.expect_pkt[intf] = False

    api.Logger.debug("Test interfaces: ", tc.all_intfs)

    workloads = api.GetWorkloads()
    tc.peer_workloads = []

    # List of 'default vlan' workloads on peer node
    for workload in workloads:
        if workload.encap_vlan == 0 and workload.node_name == tc.peer_node:
            tc.peer_workloads.append(workload)

    # Random Multicast IP address
    tc.target_multicast_IP = "226.1.2.3"
    api.Logger.debug("Random Multicast IP = %s " % (tc.target_multicast_IP))

    # Move all interfaces to allmulti mode if tc.args.mode == 'enable_allmulti'
    if tc.args.mode == "enable_allmulti":
        for intf in tc.all_intfs:
            result = __Toggle_AllMulti(tc, intf, True)
            if result != api.types.status.SUCCESS:
                api.Logger.Error("Skipping testcase")
                break

    return result
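
__Toggle_AllMulti is referenced but not shown. A hypothetical sketch consistent with tc.on_host, flipping the flag with ifconfig on the host or on Naples as appropriate:

def __Toggle_AllMulti(tc, intf, enable):
    # hypothetical helper: 'allmulti' sets the flag, '-allmulti' clears it
    op = "allmulti" if enable else "-allmulti"
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if tc.on_host[intf]:
        api.Trigger_AddHostCommand(req, tc.naples_node,
                                   "ifconfig %s %s" % (intf, op))
    else:
        api.Trigger_AddNaplesCommand(req, tc.naples_node,
                                     "ifconfig %s %s" % (intf, op))
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS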