def testTopo():
    "Create an empty network and add nodes to it."

    net = Containernet(
        build=False,
        link=TCLink,
        xterms=False,
        autoSetMacs=True,
        autoStaticArp=True,
    )

    info("*** Adding Controller\n")
    controller = net.addController(
        "c0", controller=RemoteController, ip="127.0.0.1", port=6633
    )
    controller.start()

    info("*** Add switches\n")
    for i in range(4):
        sw = net.addSwitch("s%d" % (i + 1), dpid="%016x" % (i + 1))
        sw.start([controller])
        sw.cmdPrint("ovs-ofctl show s%d" % (i + 1))
        sw.cmdPrint("ovs-vsctl show")

    info("Add hosts\n")
    for i in range(4):
        net.addDockerHost(
            "h%d" % (i + 1), dimage="dev_test", ip="10.0.0.%d" % (i + 1)
        )

    info("*** Add links\n")
    http_link_config = {"bw": 1}
    video_link_config = {"bw": 10}
    net.addLinkNamedIfce("s1", "s2", **http_link_config)
    net.addLinkNamedIfce("s2", "s4", **http_link_config)
    net.addLinkNamedIfce("s1", "s3", **video_link_config)
    net.addLinkNamedIfce("s3", "s4", **video_link_config)
    net.addLinkNamedIfce("s1", "h1", bw=100, use_htb=True)
    net.addLinkNamedIfce("s1", "h2", bw=100, use_htb=True)
    net.addLinkNamedIfce("s4", "h3", bw=100, use_htb=True)
    net.addLinkNamedIfce("s4", "h4", bw=100, use_htb=True)

    net.build()

    info("*** Starting network\n")
    net.start()

    info("*** Enter CLI\n")
    info("Use help command to get CLI usages\n")
    CLI(net)

    info("*** Stopping network")
    net.stop()
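# ---------------------------------------------------------------------------
# Note: each snippet in this section comes from a separate example script, so
# the module preamble (imports, log level, entry point) is not shown. The
# sketch below illustrates the kind of preamble these examples typically
# assume; the import paths follow common ComNetsEmu/Mininet conventions and
# are an assumption, not copied from the original files.
# ---------------------------------------------------------------------------
from comnetsemu.cli import CLI, spawnXtermDocker
from comnetsemu.net import Containernet, VNFManager
from mininet.link import TCLink
from mininet.log import info, setLogLevel
from mininet.node import Controller, OVSKernelSwitch, OVSSwitch, RemoteController

if __name__ == "__main__":
    # Mininet-based emulation needs root privileges to create namespaces,
    # veth pairs, and OVS bridges.
    setLogLevel("info")
    testTopo()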
def testTopo():
    # xterms=True, spawn xterms for all nodes after net.start()
    net = Containernet(controller=Controller, link=TCLink, xterms=True)

    info("*** Adding controller\n")
    net.addController("c0")

    info("*** Adding hosts\n")
    h1 = net.addDockerHost(
        "h1",
        dimage="dev_test",
        ip="10.0.0.1/24",
        docker_args={"cpuset_cpus": "0", "nano_cpus": int(1e8), "hostname": "h1"},
    )
    h2 = net.addDockerHost(
        "h2",
        dimage="dev_test",
        ip="10.0.0.2/24",
        docker_args={"cpuset_cpus": "0", "nano_cpus": int(1e8), "hostname": "h2"},
    )
    h3 = net.addHost("h3", ip="10.0.0.3/24")
    h4 = net.addHost("h4", ip="10.0.0.4/24")

    info("*** Adding switch\n")
    s1 = net.addSwitch("s1")

    info("*** Creating links\n")
    net.addLinkNamedIfce(s1, h1, bw=10, delay="100ms")
    net.addLinkNamedIfce(s1, h2, bw=10, delay="100ms")
    net.addLinkNamedIfce(s1, h3, bw=10, delay="100ms")
    net.addLinkNamedIfce(s1, h4, bw=10, delay="100ms")

    info("*** Starting network\n")
    net.start()

    info("*** Enter CLI\n")
    info("Use help command to get CLI usages\n")
    CLI(net)

    info("*** Stopping network")
    net.stop()
net.start()

info("*** Run ping and UDP latency measurement with single flow.\n")
srv1_1 = mgr.addContainer(
    "srv1_1", "h1", "network_measurement", "sockperf server", docker_args={}
)
ret = h2.cmd("ping -c 10 -i 0.01 10.0.0.1")
print(f"- Result of ping: \n{ret}")
ret = h2.cmd("sockperf under-load -i 10.0.0.1 -t 3 --reply-every 1")
print(f"- Result of Sockperf: \n{ret}")
mgr.removeContainer("srv1_1")

# Run netserver in non-daemon mode to avoid container termination.
srv1_1 = mgr.addContainer(
    "srv1_1", "h1", "network_measurement", "netserver -D", docker_args={}
)
# Wait for netserver to start.
time.sleep(1)

print("** Run flent RRUL and ping tests (each 15 seconds).")
h2.cmd("flent rrul -p all_scaled -l 15 -H 10.0.0.1 -o /flent_data/rrul.png")
h2.cmd("flent rrul -p ping_cdf -l 15 -H 10.0.0.1 -o /flent_data/ping.png")
print("Generated plot and data are located in ./examples/.")

if not AUTOTEST_MODE:
    CLI(net)

net.stop()
mgr.stop()
def testTopo():
    """testTopo"""
    net = Containernet(
        controller=Controller,
        link=TCLink,
        autoSetMacs=True,
        autoStaticArp=True,
        xterms=True,
    )

    info("*** Adding controller\n")
    net.addController("c0")

    info("*** Adding hosts\n")
    client = net.addDockerHost(
        "client",
        dimage="yolov2",
        ip="10.0.0.11/24",
        docker_args={"cpuset_cpus": "0", "hostname": "client"},
    )
    vnf = net.addDockerHost(
        "vnf",
        dimage="yolov2",
        ip="10.0.0.12/24",
        docker_args={"cpuset_cpus": "0", "hostname": "vnf"},
    )
    # Run server on another vCPU since it is more compute-intensive than client
    # and vnf.
    server = net.addDockerHost(
        "server",
        dimage="yolov2",
        ip="10.0.0.21/24",
        docker_args={"cpuset_cpus": "1", "hostname": "server"},
    )

    info("*** Adding switch\n")
    s1 = net.addSwitch("s1")

    info("*** Creating links\n")
    net.addLinkNamedIfce(s1, client, bw=10, delay="150ms", use_htb=True)
    net.addLinkNamedIfce(s1, vnf, bw=10, delay="150ms", use_htb=True)
    net.addLinkNamedIfce(s1, server, bw=10, delay="150ms", use_htb=True)

    info("*** Starting network\n")
    net.start()
    net.pingAll()

    add_ovs_flows()
    ifces = ["s1-vnf"]
    disable_cksum_offload(ifces)

    info("*** Enter CLI\n")
    CLI(net)

    info("*** Stopping network")
    net.stop()
def myTopology():
    net = Containernet(
        switch=OVSKernelSwitch,
        build=False,
        autoSetMacs=True,
        autoStaticArp=True,
        link=TCLink,
    )
    mgr = VNFManager(net)
    setLogLevel("info")

    info("*** Add Switches\n")
    sconfig1 = {"dpid": "%016x" % 1}
    sconfig2 = {"dpid": "%016x" % 2}
    sconfig3 = {"dpid": "%016x" % 3}
    sconfig4 = {"dpid": "%016x" % 4}
    sconfig5 = {"dpid": "%016x" % 5}
    net.addSwitch("s1", **sconfig1)
    net.addSwitch("s2", **sconfig2)
    net.addSwitch("s3", **sconfig3)
    net.addSwitch("s4", **sconfig4)
    net.addSwitch("s5", **sconfig5)

    info("*** Add Hosts\n")
    host_config = dict(inNamespace=True)
    # net.addHost("h1", **host_config, ip="192.0.0.1")
    h1 = net.addDockerHost(
        "h1", dimage="dev_test", ip="192.0.0.1", docker_args={"hostname": "h1"}
    )
    h2 = net.addDockerHost(
        "h2", dimage="dev_test", ip="192.0.0.2", docker_args={"hostname": "h2"}
    )
    h3 = net.addDockerHost(
        "h3", dimage="dev_test", ip="192.0.0.3", docker_args={"hostname": "h3"}
    )
    h4 = net.addDockerHost(
        "h4", dimage="dev_test", ip="192.0.0.4", docker_args={"hostname": "h4"}
    )
    h5 = net.addDockerHost(
        "h5", dimage="dev_test", ip="192.0.0.5", docker_args={"hostname": "h5"}
    )
    h6 = net.addDockerHost(
        "h6", dimage="dev_test", ip="192.0.0.6", docker_args={"hostname": "h6"}
    )
    h7 = net.addDockerHost(
        "h7", dimage="dev_test", ip="192.0.0.7", docker_args={"hostname": "h7"}
    )
    h8 = net.addDockerHost(
        "h8", dimage="dev_test", ip="192.0.0.8", docker_args={"hostname": "h8"}
    )

    info("*** Add Links\n")
    net.addLink("h1", "s1", bw=B1)
    net.addLink("h2", "s1", bw=B1)
    net.addLink("h3", "s1", bw=B1)
    net.addLink("h4", "s5", bw=B2)
    net.addLink("h5", "s5", bw=B2)
    net.addLink("s1", "s2", bw=B1)
    net.addLink("s2", "s3", bw=B1, delay=DELAY)
    net.addLink("s3", "s4", bw=B1, delay=DELAY)
    net.addLink("s2", "s4", bw=B2)
    net.addLink("s1", "s4", bw=B1)
    net.addLink("s1", "s5", bw=B1)
    net.addLink("s4", "h6", bw=B1)
    net.addLink("s4", "h7", bw=B1)
    net.addLink("s4", "h8", bw=B1)

    info("*** Add controller\n")
    controller = RemoteController("c0", ip="127.0.0.1", port=6633)
    net.addController(controller)
    net.build()
    # controller.start()
    # s1.start([controller])
    # s2.start([controller])
    # s1.cmd("ovs-vsctl set port s1-eth4 qos=@newqos -- --id=@newqos create QoS type=linux-htb other-config:max-rate=1000000 queues:123=@1q queues:234=@2q -- --id=@1q create queue other-config:min-rate=100000 other-config:max-rate=700000 -- --id=@2q create queue other-config:min-rate=100000 other-config:max-rate=700000")
    # s1.cmd("ovs-ofctl add-flow s1 nw_src=192.0.0.2,nw_dst=192.0.0.7,actions=enqueue:4:123")
    net.start()

    srv1 = mgr.addContainer(
        "srv1", "h1", "servernew", "python /home/servernew.py", docker_args={}
    )  # autonomous
    srv2 = mgr.addContainer(
        "srv2", "h2", "dev_test", "bash", docker_args={}
    )  # UDP
    srv3 = mgr.addContainer(
        "srv3", "h3", "eclipse-mosquitto", "bash", docker_args={}
    )  # broker
    srv4 = mgr.addContainer(
        "srv4", "h4", "aksakalli/mqtt-client", "bash /home/clientnew.sh", docker_args={}
    )
    srv5 = mgr.addContainer(
        "srv5", "h5", "aksakalli/mqtt-client", "bash", docker_args={}
    )
    srv6 = mgr.addContainer("srv6", "h6", "dev_test", "bash", docker_args={})
    srv7 = mgr.addContainer("srv7", "h7", "dev_test", "bash", docker_args={})
    srv8 = mgr.addContainer("srv8", "h8", "dev_test", "bash", docker_args={})

    spawnXtermDocker("srv2")
    spawnXtermDocker("srv7")

    CLI(net)

    mgr.removeContainer("srv1")
    mgr.removeContainer("srv2")
    mgr.removeContainer("srv3")
    mgr.removeContainer("srv4")
    mgr.removeContainer("srv5")
    mgr.removeContainer("srv6")
    mgr.removeContainer("srv7")
    mgr.removeContainer("srv8")
    net.stop()
    mgr.stop()
def testTopo():
    # xterms=True, spawn xterms for all nodes after net.start()
    net = Containernet(controller=Controller, link=TCLink, xterms=True)

    info("*** Adding controller\n")
    net.addController("c0")

    info("*** Adding hosts\n")
    compressor = net.addDockerHost(
        "compressor",
        dimage="o2sc",
        ip="10.0.0.1/24",
        docker_args={"cpuset_cpus": "1", "cpu_quota": 25000, "hostname": "compressor"},
    )
    decompressor = net.addDockerHost(
        "decompressor",
        dimage="o2sc",
        ip="10.0.0.2/24",
        docker_args={
            "cpuset_cpus": "1",
            "cpu_quota": 25000,
            "hostname": "decompressor",
        },
    )
    source_1 = net.addDockerHost(
        "source_1",
        dimage="o2sc",
        ip="10.0.0.11/24",
        docker_args={"cpuset_cpus": "1", "cpu_quota": 25000, "hostname": "source_1"},
    )
    source_2 = net.addDockerHost(
        "source_2",
        dimage="o2sc",
        ip="10.0.0.12/24",
        docker_args={"cpuset_cpus": "1", "cpu_quota": 25000, "hostname": "source_2"},
    )
    source_3 = net.addDockerHost(
        "source_3",
        dimage="o2sc",
        ip="10.0.0.13/24",
        docker_args={"cpuset_cpus": "1", "cpu_quota": 25000, "hostname": "source_3"},
    )

    info("*** Adding switch\n")
    s1 = net.addSwitch("s1")

    info("*** Creating links\n")
    net.addLinkNamedIfce(s1, compressor, bw=10, delay="100ms")
    net.addLinkNamedIfce(s1, decompressor, bw=10, delay="100ms")
    net.addLinkNamedIfce(s1, source_1, bw=10, delay="100ms")
    net.addLinkNamedIfce(s1, source_2, bw=10, delay="100ms")
    net.addLinkNamedIfce(s1, source_3, bw=10, delay="100ms")

    info("*** Starting network\n")
    net.start()
    net.pingAll()

    info("*** Enter CLI\n")
    info("Use help command to get CLI usages\n")
    CLI(net)

    info("*** Stopping network")
    net.stop()
info("\n*** Starting network\n")
net.start()

print("- Run builder to build FFPP programs.")
builder = mgr.addContainer(
    "builder",
    "h2",
    "ffpp",
    "bash",
    docker_args={
        "volumes": FFPP_VOLS,
        "working_dir": "/ffpp_app",
    },
)
time.sleep(1)
spawnXtermDocker("builder")
CLI(net)

print("- Add the distributor VNF on host h2.")
distributor = mgr.addContainer(
    "distributor",
    "h2",
    "ffpp",
    "bash",
    docker_args={
        "volumes": FFPP_VOLS,
        "working_dir": "/ffpp_app",
        "nano_cpus": int(3e8),
    },
)
time.sleep(1)
spawnXtermDocker("distributor")
def myTopology():
    net = Containernet(
        switch=OVSKernelSwitch,
        build=False,
        autoSetMacs=True,
        autoStaticArp=True,
        link=TCLink,
    )
    mgr = VNFManager(net)
    setLogLevel("info")

    info("*** Add Switches\n")
    sconfig1 = {"dpid": "%016x" % 1}
    sconfig2 = {"dpid": "%016x" % 2}
    sconfig3 = {"dpid": "%016x" % 3}
    sconfig4 = {"dpid": "%016x" % 4}
    net.addSwitch("s1", **sconfig1)
    net.addSwitch("s2", **sconfig2)
    net.addSwitch("s3", **sconfig3)
    net.addSwitch("s4", **sconfig4)

    info("*** Add Hosts\n")
    host_config = dict(inNamespace=True)
    # net.addHost("h1", **host_config, ip="192.0.0.1")
    h1 = net.addDockerHost(
        "h1", dimage="dev_test", ip="192.0.0.1", docker_args={"hostname": "h1"}
    )
    h2 = net.addDockerHost(
        "h2", dimage="dev_test", ip="192.0.0.2", docker_args={"hostname": "h2"}
    )
    h3 = net.addDockerHost(
        "h3", dimage="dev_test", ip="192.0.0.3", docker_args={"hostname": "h3"}
    )
    h4 = net.addDockerHost(
        "h4", dimage="dev_test", ip="192.0.0.4", docker_args={"hostname": "h4"}
    )
    h5 = net.addDockerHost(
        "h5", dimage="dev_test", ip="192.0.0.5", docker_args={"hostname": "h5"}
    )
    h6 = net.addDockerHost(
        "h6", dimage="dev_test", ip="192.0.0.6", docker_args={"hostname": "h6"}
    )
    h7 = net.addDockerHost(
        "h7", dimage="dev_test", ip="192.0.0.7", docker_args={"hostname": "h7"}
    )

    info("*** Add Links\n")
    net.addLink("h1", "s1", bw=B1)
    net.addLink("h2", "s1", bw=B1)
    net.addLink("h3", "s1", bw=B1)
    net.addLink("h4", "s1", bw=B1)
    net.addLink("s1", "s2", bw=B1)
    net.addLink("s2", "s3", bw=B1, delay=DELAY)
    net.addLink("s3", "s4", bw=B1, delay=DELAY)
    net.addLink("s2", "s4", bw=B2)
    net.addLink("s1", "s4", bw=B1)
    net.addLink("s4", "h5", bw=B1)
    net.addLink("s4", "h6", bw=B1)
    net.addLink("s4", "h7", bw=B1)

    info("*** Add controller\n")
    controller = RemoteController("c1", ip="127.0.0.1", port=6633)
    net.addController(controller)
    net.build()
    net.start()

    srv1 = mgr.addContainer(
        "srv1", "h1", "echo_server", "python /home/server_ad.py", docker_args={}
    )
    srv2 = mgr.addContainer(
        "srv2", "h2", "echo_server", "python /home/server.py", docker_args={}
    )
    srv3 = mgr.addContainer(
        "srv3", "h3", "echo_server", "python /home/server.py", docker_args={}
    )
    srv4 = mgr.addContainer("srv4", "h4", "dev_test", "bash", docker_args={})
    srv5 = mgr.addContainer("srv5", "h5", "dev_test", "bash", docker_args={})
    srv6 = mgr.addContainer("srv6", "h6", "dev_test", "bash", docker_args={})
    srv7 = mgr.addContainer("srv7", "h7", "dev_test", "bash", docker_args={})

    spawnXtermDocker("srv5")
    spawnXtermDocker("srv1")

    CLI(net)

    mgr.removeContainer("srv1")
    mgr.removeContainer("srv2")
    mgr.removeContainer("srv3")
    mgr.removeContainer("srv4")
    mgr.removeContainer("srv5")
    mgr.removeContainer("srv6")
    mgr.removeContainer("srv7")
    net.stop()
    mgr.stop()
def testMuNF(nano_cpus):
    net = Containernet(controller=Controller, link=TCLink)
    mgr = VNFManager(net)
    start_ts = time.time()

    info("*** Adding controller\n")
    net.addController("c0")

    info("*** Adding Docker hosts\n")
    pktgen = net.addDockerHost(
        "pktgen",
        dimage=f"trex:{TREX_VER}",
        ip="10.0.0.1/24",
        docker_args={
            "cpuset_cpus": "0",
            "hostname": "pktgen",
            "volumes": {
                os.path.join(TREX_CONF_DIR, "trex_cfg.yaml"): {
                    "bind": "/etc/trex_cfg.yaml",
                    "mode": "rw",
                },
                TREX_CONF_DIR: {"bind": f"/trex/{TREX_VER}/local", "mode": "rw"},
            },
        },
    )
    dut = net.addDockerHost(
        "dut",
        dimage=f"ffpp:{FFPP_VER}",
        ip="10.0.0.2/24",
        docker_args={
            "cpuset_cpus": "1,2",
            "nano_cpus": int(nano_cpus),
            "hostname": "dut",
            "volumes": {
                "/sys/bus/pci/drivers": {"bind": "/sys/bus/pci/drivers", "mode": "rw"},
                "/sys/kernel/mm/hugepages": {
                    "bind": "/sys/kernel/mm/hugepages",
                    "mode": "rw",
                },
                "/sys/devices/system/node": {
                    "bind": "/sys/devices/system/node",
                    "mode": "rw",
                },
                "/dev": {"bind": "/dev", "mode": "rw"},
                FFPP_DIR: {"bind": "/ffpp", "mode": "rw"},
            },
        },
    )

    s1 = net.addSwitch("s1")

    # Control plane links.
    net.addLinkNamedIfce(s1, pktgen)
    net.addLinkNamedIfce(s1, dut)

    # Data plane links.
    net.addLink(
        dut, pktgen, bw=1000, delay="1ms", intfName1="vnf-in", intfName2="pktgen-out"
    )
    net.addLink(
        dut, pktgen, bw=1000, delay="1ms", intfName1="vnf-out", intfName2="pktgen-in"
    )
    pktgen.cmd("ip addr add 192.168.17.1/24 dev pktgen-out")
    pktgen.cmd("ip addr add 192.168.18.1/24 dev pktgen-in")
    dut.cmd("ip addr add 192.168.17.2/24 dev vnf-in")
    dut.cmd("ip addr add 192.168.18.2/24 dev vnf-out")

    # TODO: Deploy a chain of CNFs.
    cnfs = list()
    for n in range(1):
        cnf = mgr.addContainer(
            f"cnf{n}",
            "dut",
            f"ffpp:{FFPP_VER}",
            "/bin/bash",
            docker_args={
                "volumes": {
                    "/sys/bus/pci/drivers": {
                        "bind": "/sys/bus/pci/drivers",
                        "mode": "rw",
                    },
                    "/sys/kernel/mm/hugepages": {
                        "bind": "/sys/kernel/mm/hugepages",
                        "mode": "rw",
                    },
                    "/sys/devices/system/node": {
                        "bind": "/sys/devices/system/node",
                        "mode": "rw",
                    },
                    "/dev": {"bind": "/dev", "mode": "rw"},
                    FFPP_DIR: {"bind": "/ffpp", "mode": "rw"},
                }
            },
        )
        cnfs.append(cnf)

    net.start()

    # Avoid looping
    pktgen.cmd("ip addr flush dev pktgen-s1")
    pktgen.cmd("ip link set pktgen-s1 down")
    dut.cmd("ip addr flush dev dut-s1")
    dut.cmd("ip link set dut-s1 down")

    pktgen.cmd("ping -c 5 192.168.17.2")
    pktgen.cmd("ping -c 5 192.168.18.2")

    duration = time.time() - start_ts
    print(f"Setup duration: {duration:.2f} seconds.")

    CLI(net)

    info("*** Stopping network\n")
    net.stop()
    mgr.stop()
def run_benchmark(proto):
    net = Containernet(
        controller=Controller, link=TCLink, switch=OVSSwitch, autoStaticArp=False
    )

    info("*** Adding controller\n")
    net.addController("c0")

    info("*** Adding switch\n")
    s1 = net.addSwitch("s1")

    # MARK: The relay should run on a different CPU core than the client and
    # server, to avoid cache misses of the VNF running on the relay.
    info("*** Adding client and server.\n")
    client = net.addDockerHost(
        "client",
        dimage="lat_bm:latest",
        ip="10.0.0.100/24",
        docker_args={
            "cpuset_cpus": "0",
            "volumes": {"%s" % FFPP_DIR: {"bind": "/ffpp", "mode": "ro"}},
        },
    )
    net.addLinkNamedIfce(s1, client, delay="100ms")
    server = net.addDockerHost(
        "server",
        dimage="lat_bm:latest",
        ip="10.0.0.200/24",
        docker_args={"cpuset_cpus": "0"},
    )
    net.addLinkNamedIfce(s1, server, delay="100ms")

    if ADD_RELAY:
        cpus_relay = "1"
        if TEST_NF == "l2fwd-power":
            print(
                "*** [INFO] The l2fwd-power application requires at least one master and one slave core.\n"
                "The master core handles timers and the slave core handles the forwarding task."
            )
            cpus_relay = "0,1"
        info("*** Adding relay.\n")
        # Need additional mounts to run DPDK applications.
        # MARK: Just used for development, never use this in a production
        # container setup.
        relay = net.addDockerHost(
            "relay",
            dimage="ffpp:latest",
            ip="10.0.0.101/24",
            docker_args={
                "cpuset_cpus": cpus_relay,
                "nano_cpus": int(1.0 * 1e9),
                "volumes": {
                    "/sys/bus/pci/drivers": {
                        "bind": "/sys/bus/pci/drivers",
                        "mode": "rw",
                    },
                    "/sys/kernel/mm/hugepages": {
                        "bind": "/sys/kernel/mm/hugepages",
                        "mode": "rw",
                    },
                    "/sys/devices/system/node": {
                        "bind": "/sys/devices/system/node",
                        "mode": "rw",
                    },
                    "/dev": {"bind": "/dev", "mode": "rw"},
                    "%s" % FFPP_DIR: {"bind": "/ffpp", "mode": "rw"},
                },
            },
        )
        net.addLinkNamedIfce(s1, relay, delay="100ms")

    info("*** Starting network\n")
    net.start()
    net.pingAll()

    nodes = [n.name for n in net.hosts]
    sw_ifaces = [f"s1-{n}" for n in nodes]
    info("*** Disable kernel IP checksum offloading.\n")
    for iface in sw_ifaces:
        check_output(split(f"ethtool --offload {iface} rx off tx off"))

    node_portnum_map = {n: getOFPort(s1, f"s1-{n}") for n in nodes}

    if ADD_RELAY:
        info("*** Add OpenFlow rules for traffic redirection.\n")
        peer_map = {"client": "relay", "relay": "server", "server": "client"}
        for p in ["udp", "tcp"]:
            for peer in peer_map.keys():
                check_output(
                    split(
                        'ovs-ofctl add-flow s1 "{},in_port={},actions=output={}"'.format(
                            p, node_portnum_map[peer], node_portnum_map[peer_map[peer]]
                        )
                    )
                )

        if DEBUG:
            flow_table = s1.dpctl("dump-flows")
            print(f"*** Current flow table of s1: \n {flow_table}")

        info("*** Run DPDK helloworld\n")
        relay.cmd("cd $RTE_SDK/examples/helloworld && make")
        ret = relay.cmd("cd $RTE_SDK/examples/helloworld/build && ./helloworld")
        print(f"Output of helloworld app:\n{ret}")

        DISPATCHER[TEST_NF](relay)

    server.cmd("pkill sockperf")
    setup_server(server, proto)

    for mps in LAT_TEST_PARAS["client_mps_list"]:
        run_latency_test(server, client, proto, mps)
        time.sleep(3)

    if ENTER_CLI:
        info("*** Enter CLI\n")
        info("Use help command to get CLI usages\n")
        CLI(net)

    info("*** Stopping network")
    net.stop()
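# ---------------------------------------------------------------------------
# The benchmark above maps each switch interface to its OpenFlow port number
# via getOFPort(), which is defined elsewhere in the original script. Below is
# a minimal sketch of such a helper, assuming the standard ovs-vsctl CLI is
# available (an illustration, not the author's exact implementation):
# ---------------------------------------------------------------------------
def getOFPort(switch, ifce_name):
    """Return the OpenFlow port number of ifce_name on the given OVS switch."""
    # The OVSDB Interface table stores the assigned OpenFlow port number in
    # its "ofport" column.
    out = switch.cmd(f"ovs-vsctl get Interface {ifce_name} ofport")
    return int(out.strip())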
def main():
    if os.geteuid() != 0:
        print("Run this script with sudo.", file=sys.stderr)
        sys.exit(1)

    parser = argparse.ArgumentParser(description="Dumbbell topology to test COIN-DL")
    parser.add_argument(
        "-m",
        "--vnf_mode",
        type=str,
        default="",
        choices=["", "store_forward", "compute_forward"],
        help="Working mode of all VNF(s)",
    )
    parser.add_argument(
        "--tests",
        type=str,
        default="",
        help="Test names separated by commas. Empty string means no tests. "
        "Example: sockperf,store_forward",
    )
    parser.add_argument("--dev", action="store_true", help="Run in development mode")
    args = parser.parse_args()

    check_env()
    setLogLevel("info")

    tests = parse_tests(args.tests)
    if not tests:
        info("*** No tests are going to be performed.\n")

    # IPv6 is currently not used, disable it.
    subprocess.run(
        shlex.split("sysctl -w net.ipv6.conf.all.disable_ipv6=1"),
        check=True,
    )

    net = None
    nodes = None
    try:
        net, nodes, _ = create_dumbbell()  # Use the minimal dumbbell as the start
        assert len(nodes["vnfs"]) == 2
        if net:
            net.start()
        else:
            raise RuntimeError("Failed to create the dumbbell topology")
        disable_checksum_offload(nodes)
        start_controllers(nodes["controllers"])
        time.sleep(3)
        net.pingAll()
        deploy_vnfs(nodes["vnfs"], args.vnf_mode)
        deploy_servers(nodes["servers"])
        run_tests(nodes, tests)
        if args.dev:
            CLI(net)
    finally:
        if net:
            info("*** Stopping network\n")
            net.stop()
        info("*** Killing Ryu manager\n")
        subprocess.run(shlex.split("sudo killall ryu-manager"), check=False)
        info("*** Trying to clean up ComNetsEmu\n")
        subprocess.run(
            shlex.split("sudo ce -c"),
            check=True,
            capture_output=False,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        subprocess.run(
            shlex.split("sysctl -w net.ipv6.conf.all.disable_ipv6=0"),
            check=True,
        )
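# ---------------------------------------------------------------------------
# parse_tests() used in main() is defined elsewhere in the original script. A
# minimal sketch of how the comma-separated --tests argument could be parsed
# (an assumption, for illustration only):
# ---------------------------------------------------------------------------
def parse_tests(tests_arg: str) -> list:
    """Split the comma-separated test list and drop empty entries."""
    return [t.strip() for t in tests_arg.split(",") if t.strip()]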