def _setup_testbed(dut, creds, ptf, test_params, tbinfo):
    """
    Sets up the testbed to run the COPP tests.

    Configures the PTF host's nn-agent target port, relaxes the COPP policer
    rate limit, installs the RPC syncd image (single-asic only) or reloads
    config, and finally points syncd's RPC channel at the test port.

    Args:
        dut: DUT host object.
        creds: Credentials used to access the docker registry.
        ptf: PTF host object.
        test_params: COPP test parameters (nn target port/interface/namespace, swap_syncd flag).
        tbinfo: Testbed information (unused here; kept for fixture signature parity).
    """
    logging.info("Set up the PTF for COPP tests")
    copp_utils.configure_ptf(ptf, test_params.nn_target_port)

    logging.info("Update the rate limit for the COPP policer")
    copp_utils.limit_policer(dut, _TEST_RATE_LIMIT, test_params.nn_target_namespace)

    # Multi-asic will not support this mode as of now.
    if test_params.swap_syncd and not dut.is_multi_asic:
        logging.info("Swap out syncd to use RPC image...")
        docker.swap_syncd(dut, creds)
    else:
        # Set sysctl RCVBUF parameter for tests
        dut.command("sysctl -w net.core.rmem_max=609430500")

        # Set sysctl SENDBUF parameter for tests
        dut.command("sysctl -w net.core.wmem_max=609430500")

        # NOTE: Even if the rpc syncd image is already installed, we need to restart
        # SWSS for the COPP changes to take effect.
        logging.info("Reloading config and restarting swss...")
        config_reload(dut)

    logging.info("Configure syncd RPC for testing")
    copp_utils.configure_syncd(dut, test_params.nn_target_port,
                               test_params.nn_target_interface,
                               test_params.nn_target_namespace,
                               creds)
def test_config_reload_lc(duthosts, all_cfg_facts, nbrhosts, nbr_macs):
    """
    Tests the system after a config reload on a linecard, interfaces/neighbors should be in
    sync across the system.

    Args:
        duthosts: duthosts fixture
        all_cfg_facts: all_cfg_facts fixture
        nbrhosts: nbrhosts fixture
        nbr_macs: nbr_macs fixture
    """
    target_lc = duthosts.frontend_nodes[0]

    # Pre-reload sanity: interfaces, neighbors and forwarding must be healthy.
    logger.info("=" * 80)
    logger.info("Precheck")
    logger.info("-" * 80)
    check_intfs_and_nbrs(duthosts, all_cfg_facts, nbrhosts, nbr_macs)
    check_ip_fwd(duthosts, all_cfg_facts, nbrhosts)

    # Reload the first frontend linecard and wait for BGP to come back.
    logger.info("=" * 80)
    logger.info("Config reload on node: %s", target_lc.hostname)
    logger.info("-" * 80)
    config_reload(target_lc, config_source='config_db', wait=600)
    poll_bgp_restored(duthosts)

    # Post-reload: the same checks must still pass system-wide.
    logger.info("=" * 80)
    logger.info("Postcheck")
    logger.info("-" * 80)
    check_intfs_and_nbrs(duthosts, all_cfg_facts, nbrhosts, nbr_macs)
    check_ip_fwd(duthosts, all_cfg_facts, nbrhosts)
def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_addrs,
                          prefix_len, nexthop_devs, ipv6=False, config_reload_test=False):
    """Install a static route on the DUT and verify forwarding and BGP redistribution.

    Args:
        duthost: DUT host object.
        ptfadapter: PTF adapter for packet send/verify.
        ptfhost: PTF host object.
        tbinfo: Testbed information.
        prefix (str): Destination prefix, e.g. "1.1.1.0/24".
        nexthop_addrs (list[str]): Nexthop IP addresses to program.
        prefix_len (int): Prefix length for the PTF-side addresses.
        nexthop_devs (list): PTF port indices acting as nexthops.
        ipv6 (bool): Whether this is an IPv6 test.
        config_reload_test (bool): Also verify the route survives `config save` + reload.
    """
    # Add ipaddresses in ptf
    add_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6)

    try:
        # Add static route
        duthost.shell(
            "sonic-db-cli CONFIG_DB hmset 'STATIC_ROUTE|{}' nexthop {}".format(
                prefix, ",".join(nexthop_addrs)))
        time.sleep(5)

        # Check traffic get forwarded to the nexthop.
        # BUGFIX: the original used the Python-2-only builtin `unicode(prefix)`, which
        # raises NameError on Python 3. The u"" literal form is valid on both 2 and 3
        # and satisfies ipaddress' requirement for a text (not bytes) argument.
        ip_dst = str(ipaddress.ip_network(u"{}".format(prefix))[1])
        generate_and_verify_traffic(duthost, ptfadapter, tbinfo, ip_dst, nexthop_devs, ipv6=ipv6)

        # Check the route is advertised to the neighbors
        check_route_redistribution(duthost, prefix, ipv6)

        # Config save and reload if specified
        if config_reload_test:
            duthost.shell('config save -y')
            config_reload(duthost)
            generate_and_verify_traffic(duthost, ptfadapter, tbinfo, ip_dst, nexthop_devs, ipv6=ipv6)
            check_route_redistribution(duthost, prefix, ipv6)
    finally:
        # Remove static route
        duthost.shell(
            "sonic-db-cli CONFIG_DB del 'STATIC_ROUTE|{}'".format(prefix),
            module_ignore_errors=True)

        # Delete ipaddresses in ptf
        del_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6)

        # Check the advertised route get removed
        time.sleep(5)
        check_route_redistribution(duthost, prefix, ipv6, removed=True)

        # Config save if the saved config_db was updated
        if config_reload_test:
            duthost.shell('config save -y')
def config_reload_after_tests(duthosts, selected_rand_one_per_hwsku_hostname):
    """Restores the DuT.

    Args:
        duthosts: list of DUTs.
        selected_rand_one_per_hwsku_hostname: The fixture returns a dict of module to list
            of hostnames mapping

    Returns:
        None.
    """
    # Remember which BGP sessions were established per DUT before the test ran,
    # so the post-reload check only expects previously-healthy peers.
    up_bgp_neighbors = {}
    for name in selected_rand_one_per_hwsku_hostname:
        dut = duthosts[name]
        established = []
        for peer, info in dut.get_bgp_neighbors().items():
            if info["state"] == "established":
                established.append(peer.lower())
        up_bgp_neighbors[dut] = established

    yield

    # Teardown: reload each selected DUT and verify critical processes / BGP recover.
    for name in selected_rand_one_per_hwsku_hostname:
        dut = duthosts[name]
        logger.info("Reload config on DuT '{}' ...".format(dut.hostname))
        config_reload(dut)
        postcheck_critical_processes_status(dut, up_bgp_neighbors[dut])
def cleanup_read_mac(duthosts, enum_rand_one_per_hwsku_frontend_hostname, localhost):
    # Teardown-only fixture: records the currently-installed image before the test,
    # then restores it (and the minigraph) afterwards if the test changed it.
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    initImage = duthost.shell(
        'sonic-installer list | grep Current | cut -f2 -d " "')['stdout']

    yield

    """
    Recover the image to image2 which is the image before doing this case.
    """
    currentImage = duthost.shell(
        'sonic-installer list | grep Current | cut -f2 -d " "')['stdout']
    # Only re-install when the test left a different image active.
    if initImage != currentImage:
        logger.info("Re-install the image: {} to DUT".format(initImage))
        duthost.copy(src=BINARY_FILE_ON_LOCALHOST_2, dest=BINARY_FILE_ON_DUTHOST)
        duthost.shell(
            "sonic-installer install -y {}".format(BINARY_FILE_ON_DUTHOST))
        reboot(duthost, localhost, wait=120)
    # Clean up the image binaries copied to DUT and kept on the localhost.
    logger.info('Remove temporary images')
    duthost.shell("rm -rf {}".format(BINARY_FILE_ON_DUTHOST))
    localhost.shell("rm -rf {}".format(BINARY_FILE_ON_LOCALHOST_1))
    localhost.shell("rm -rf {}".format(BINARY_FILE_ON_LOCALHOST_2))
    # Restore the original minigraph if the test backed it up, then reload from it.
    backup_minigraph_exist = duthost.stat(
        path="/etc/sonic/minigraph.xml.backup")["stat"]["exists"]
    if backup_minigraph_exist:
        logger.info("Apply minigraph from backup")
        duthost.shell(
            "mv /etc/sonic/minigraph.xml.backup /etc/sonic/minigraph.xml")
        config_reload(duthost, config_source='minigraph')
def _setup_testbed(dut, ptf, test_params):
    """
    Sets up the testbed to run the COPP tests.

    Stops LLDP (so its traffic doesn't interfere with COPP counters), configures
    the PTF nn-agent port, relaxes the policer rate limit, swaps in the RPC syncd
    image (or config-reloads so COPP changes take effect), and configures syncd RPC.
    """
    logging.info("Disable LLDP for COPP tests")
    dut.command("docker exec lldp supervisorctl stop lldp-syncd")
    dut.command("docker exec lldp supervisorctl stop lldpd")

    logging.info("Set up the PTF for COPP tests")
    copp_utils.configure_ptf(ptf, test_params.nn_target_port)

    logging.info("Update the rate limit for the COPP policer")
    copp_utils.limit_policer(dut, _TEST_RATE_LIMIT)

    if test_params.swap_syncd:
        logging.info("Swap out syncd to use RPC image...")
        docker.swap_syncd(dut)
    else:
        # NOTE: Even if the rpc syncd image is already installed, we need to restart
        # SWSS for the COPP changes to take effect.
        logging.info("Reloading config and restarting swss...")
        config_reload(dut)

    logging.info("Configure syncd RPC for testing")
    copp_utils.configure_syncd(dut, test_params.nn_target_port)
def heal_testbed(duthost):
    """Teardown-only fixture: config-reload the DUT if the test left any
    critical process down."""
    # Nothing to do before test
    yield

    healthy, failure_details = get_critical_processes_status(duthost)
    if healthy:
        return
    # Some critical process crashed during the test; recover via config reload.
    logging.info("Restoring dut with critical process failure: {}".format(failure_details))
    config_reload(duthost, config_source='config_db', wait=120)
def restore_default_syncd(duthost, creds):
    """Replaces the running syncd with the default syncd that comes with the image.

    This will restart the swss service.

    Args:
        duthost (SonicHost): The target device.
        creds (dict): Credentials used to access the docker registry.
    """
    vendor_id = _get_vendor_id(duthost)
    docker_syncd_name = "docker-syncd-{}".format(vendor_id)

    # Stop swss and remove the (RPC) syncd container before retagging.
    duthost.stop_service("swss")
    duthost.delete_container("syncd")

    # Point the :latest tag back at the stock syncd image for this OS version.
    tag_image(duthost,
              "{}:latest".format(docker_syncd_name),
              docker_syncd_name,
              duthost.os_version)

    logger.info("Reloading config and restarting swss...")
    config_reload(duthost)

    # Remove the RPC image from the duthost
    docker_rpc_image = docker_syncd_name + "-rpc"
    registry = load_docker_registry_info(duthost, creds)
    duthost.command("docker rmi {}/{}:{}".format(registry.host, docker_rpc_image,
                                                 duthost.os_version),
                    module_ignore_errors=True)
def test_check_reset_status(construct_url, duthosts, rand_one_dut_hostname, localhost): duthost = duthosts[rand_one_dut_hostname] # Set reset status logger.info("Checking for RESTAPI reset status") r = restapi.get_reset_status(construct_url) pytest_assert(r.status_code == 200) logger.info(r.json()) response = r.json() pytest_assert(response['reset_status'] == "true") logger.info("Setting RESTAPI reset status") params = '{"reset_status":"false"}' r = restapi.post_reset_status(construct_url, params) pytest_assert(r.status_code == 200) r = restapi.get_reset_status(construct_url) pytest_assert(r.status_code == 200) logger.info(r.json()) response = r.json() pytest_assert(response['reset_status'] == "false") # Check reset status post config reload logger.info("Checking for RESTAPI reset status after config reload") config_reload(duthost) apply_cert_config(duthost) r = restapi.get_reset_status(construct_url) pytest_assert(r.status_code == 200) logger.info(r.json()) response = r.json() pytest_assert(response['reset_status'] == "true") # Check reset status post fast reboot check_reset_status_after_reboot('fast', "false", "true", duthost, localhost, construct_url) # Check reset status post cold reboot check_reset_status_after_reboot('cold', "false", "true", duthost, localhost, construct_url) # Check reset status post warm reboot check_reset_status_after_reboot('warm', "false", "false", duthost, localhost, construct_url)
def _setup_testbed(dut, creds, ptf, test_params, tbinfo):
    """
    Sets up the testbed to run the COPP tests.

    Backend-topology-aware variant: passes the full test_params (plus backend
    flag) to the PTF configuration, and uses a safe config reload that waits
    for interfaces to come up.
    """
    mg_facts = dut.get_extended_minigraph_facts(tbinfo)
    is_backend_topology = mg_facts.get(constants.IS_BACKEND_TOPOLOGY_KEY, False)

    logging.info("Set up the PTF for COPP tests")
    copp_utils.configure_ptf(ptf, test_params, is_backend_topology)

    logging.info("Update the rate limit for the COPP policer")
    copp_utils.limit_policer(dut, _TEST_RATE_LIMIT, test_params.nn_target_namespace)

    # Multi-asic will not support this mode as of now.
    if test_params.swap_syncd and not dut.is_multi_asic:
        logging.info("Swap out syncd to use RPC image...")
        docker.swap_syncd(dut, creds)
    else:
        # Set sysctl RCVBUF parameter for tests
        dut.command("sysctl -w net.core.rmem_max=609430500")

        # Set sysctl SENDBUF parameter for tests
        dut.command("sysctl -w net.core.wmem_max=609430500")

        # NOTE: Even if the rpc syncd image is already installed, we need to restart
        # SWSS for the COPP changes to take effect.
        logging.info("Reloading config and restarting swss...")
        config_reload(dut, safe_reload=True, check_intf_up_ports=True)

    logging.info("Configure syncd RPC for testing")
    copp_utils.configure_syncd(dut, test_params.nn_target_port,
                               test_params.nn_target_interface,
                               test_params.nn_target_namespace,
                               test_params.nn_target_vlanid,
                               creds)
def reload_dut(duthosts, enum_rand_one_per_hwsku_frontend_hostname, request):
    """Teardown-only fixture: config-reload the DUT when the test case failed."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]

    yield

    if not request.node.rep_call.failed:
        return
    #Issue a config_reload to clear statically added route table and ip addr
    logging.info("Reloading config..")
    config_reload(duthost)
def cleanup_mocked_configs(duthost, tbinfo):
    """Config reload to reset the mocked configs applied to DUT."""
    yield

    if not is_t0_mocked_dualtor(tbinfo):
        return
    # The mocked dualtor config diverges from minigraph; reload from minigraph to reset.
    logger.info("Load minigraph to reset the DUT %s", duthost.hostname)
    config_reload(duthost, config_source="minigraph")
def config_reload_after_tests(duthost):
    """Teardown fixture: reload config and verify critical processes plus the
    BGP sessions that were established before the test recover afterwards."""
    # Snapshot the peers that are up now; only these are expected back post-reload.
    established_peers = []
    for peer, info in duthost.get_bgp_neighbors().items():
        if info["state"] == "established":
            established_peers.append(peer.lower())

    yield

    config_reload(duthost)
    postcheck_critical_processes_status(duthost, established_peers)
def reload_dut_config(duthost):
    """Reload the DUT's configuration on teardown.

    Args:
        duthost: DUT host object.
    """
    config_reload(duthost)
def cleanup_mocked_configs(duthost, tbinfo):
    """Config reload to reset the mocked configs applied to DUT."""
    yield

    topo = tbinfo["topo"]
    # Only plain (non-dualtor) t0 topologies get the mocked config and need the reset.
    is_plain_t0 = topo["type"] == "t0" and 'dualtor' not in topo["name"]
    if is_plain_t0:
        logger.info("Load minigraph to reset the DUT %s", duthost.hostname)
        config_reload(duthost, config_source="minigraph")
def reload_testbed(duthosts, enum_rand_one_per_hwsku_frontend_hostname):
    """
    Reload dut after test function finished
    """
    dut = duthosts[enum_rand_one_per_hwsku_frontend_hostname]

    yield None

    # Teardown: bring the DUT back to a clean state and wait for services.
    logging.info("Reloading config and restarting swss...")
    config_reload(dut)
    wait_critical_processes(dut)
def reload_dut_config(duthost):
    """Reload the DUT config, restarting dependent services.

    Args:
        duthost (SonicHost): The target device.
    """
    logger.info("Reloading config and restarting other services ...")
    config_reload(duthost)
def teardown_test_class(duthost):
    """Reload the DUT configuration after the whole test suite has run.

    Args:
        duthost: DUT host object.
    """
    yield

    config_reload(duthost)
def test_interface_binding(duthosts, rand_one_dut_hostname, dut_dhcp_relay_data):
    """Verify dhcp6relay binds a listening socket per downlink VLAN interface.

    Reloads config, waits for critical processes and interfaces, then checks
    `ss -nlp` output inside the dhcp_relay container for each expected binding.
    """
    duthost = duthosts[rand_one_dut_hostname]
    skip_release(duthost, ["201811", "201911", "202106"])
    config_reload(duthost)
    wait_critical_processes(duthost)
    # BUGFIX: the original discarded wait_until's return value, so the test
    # continued (and failed confusingly, or passed spuriously) even if the
    # interfaces never came up. Assert it, matching the sibling dhcrelay test.
    assert wait_until(120, 5, 0, check_interface_status, duthosts, rand_one_dut_hostname), \
        "Interfaces did not come up after config reload"
    output = duthost.shell("docker exec -it dhcp_relay ss -nlp | grep dhcp6relay")["stdout"].encode("utf-8")
    logger.info(output)
    for dhcp_relay in dut_dhcp_relay_data:
        # dhcp6relay listens on the wildcard address for each downlink VLAN.
        assert "*:{}".format(dhcp_relay['downlink_vlan_iface']['name']) in output, \
            "{} is not found in {}".format(
                "*:{}".format(dhcp_relay['downlink_vlan_iface']['name']), output)
def config_dut_ports(duthost, ports, vlan):
    """Move the given ports out of `vlan` and assign them the test IP addresses.

    Args:
        duthost: DUT host object.
        ports: Test port names; paired positionally with var['dut_intf_ips'].
        vlan: VLAN id the ports are currently members of.
    """
    # https://github.com/Azure/sonic-buildimage/issues/2665
    # Introducing config vlan member add and remove for the test port due to above mentioned PR.
    # Even though port is deleted from vlan , the port shows its master as Bridge upon assigning ip address.
    # Hence config reload is done as workaround. ##FIXME
    # Idiom fix: iterate the port/IP pairs directly instead of indexing with range(len(...)).
    for port, intf_ip in zip(ports, var['dut_intf_ips']):
        duthost.command('config vlan member del %s %s' % (vlan, port))
        duthost.command('config interface ip add %s %s/24' % (port, intf_ip))
    duthost.command('config save -y')
    config_reload(duthost, config_source='config_db', wait=120)
    time.sleep(5)
def check_topo_and_restore(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo):
    """Skip the test when the topology has no portchannels (single-asic only),
    and restore the DUT with a config reload afterwards."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    minigraph = duthost.get_extended_minigraph_facts(tbinfo)
    has_portchannel = bool(minigraph['minigraph_portchannels'].keys())
    if not has_portchannel and not duthost.is_multi_asic:
        pytest.skip("Skip test due to there is no portchannel exists in current topology.")

    yield

    # Do config reload to restore everything back
    logging.info("Reloading config..")
    config_reload(duthost)
def test_monitoring_critical_processes(duthosts, rand_one_dut_hostname, tbinfo):
    """Tests the feature of monitoring critical processes with Supervisord.

    This function will check whether names of critical processes will appear
    in the syslog if the autorestart were disabled and these critical processes
    were stopped.

    Args:
        duthosts: list of DUTs.
        rand_one_dut_hostname: hostname of DUT.
        tbinfo: Testbed information.

    Returns:
        None.
    """
    duthost = duthosts[rand_one_dut_hostname]
    loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="monitoring_critical_processes")
    loganalyzer.expect_regex = []

    # Snapshot the currently-established BGP peers; used by the post-check.
    bgp_neighbors = duthost.get_bgp_neighbors()
    up_bgp_neighbors = [ k.lower() for k, v in bgp_neighbors.items() if v["state"] == "established" ]

    # Containers whose processes are not monitored in this test.
    skip_containers = []
    skip_containers.append("database")
    skip_containers.append("gbsyncd")
    # Skip 'radv' container on devices whose role is not T0.
    if tbinfo["topo"]["type"] != "t0":
        skip_containers.append("radv")

    containers_in_namespaces = get_containers_namespace_ids(duthost, skip_containers)

    # Build the expected syslog alert regexes before placing the log marker.
    expected_alerting_messages = get_expected_alerting_messages(duthost, containers_in_namespaces)
    loganalyzer.expect_regex.extend(expected_alerting_messages)
    marker = loganalyzer.init()

    stop_critical_processes(duthost, containers_in_namespaces)

    # Wait for 70 seconds such that Supervisord has a chance to write alerting message into syslog.
    logger.info("Sleep 70 seconds to wait for the alerting message...")
    time.sleep(70)

    logger.info("Checking the alerting messages from syslog...")
    loganalyzer.analyze(marker)
    logger.info("Found all the expected alerting messages from syslog!")

    # Recover the DUT: config reload restarts the stopped critical processes.
    logger.info("Executing the config reload...")
    config_reload(duthost)
    logger.info("Executing the config reload was done!")

    ensure_all_critical_processes_running(duthost, containers_in_namespaces)

    if not postcheck_critical_processes_status(duthost, up_bgp_neighbors):
        pytest.fail("Post-check failed after testing the container checker!")
    logger.info("Post-checking status of critical processes and BGP sessions was done!")
def common_setup_teardown(duthosts, rand_one_dut_hostname, ptfhost):
    """Pick two test interfaces, detach them from any portchannel, assign test
    IPs, and yield them; teardown restores the DUT via config reload."""
    duthost = duthosts[rand_one_dut_hostname]
    mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
    int_facts = duthost.interface_facts()['ansible_facts']

    # Sort ports numerically by their EthernetN index, not lexicographically.
    ports = list(
        sorted(mg_facts['minigraph_ports'].keys(),
               key=lambda item: int(item.replace('Ethernet', ''))))

    # Select port index 0 & 1 two interfaces for testing
    intf1 = ports[0]
    intf2 = ports[1]
    logger.info("Selected ints are {0} and {1}".format(intf1, intf2))

    intf1_indice = mg_facts['minigraph_port_indices'][intf1]
    intf2_indice = mg_facts['minigraph_port_indices'][intf2]

    # Portchannel (if any) each selected interface currently belongs to.
    po1 = get_po(mg_facts, intf1)
    po2 = get_po(mg_facts, intf2)

    try:
        # Make sure selected interfaces are not in portchannel
        if po1 is not None:
            duthost.shell('config portchannel member del {0} {1}'.format(
                po1, intf1))
            collect_info(duthost)
            duthost.shell('config interface startup {0}'.format(intf1))
            collect_info(duthost)

        if po2 is not None:
            duthost.shell('config portchannel member del {0} {1}'.format(
                po2, intf2))
            collect_info(duthost)
            duthost.shell('config interface startup {0}'.format(intf2))
            collect_info(duthost)

        # Change SONiC DUT interface IP to test IP address
        duthost.shell('config interface ip add {0} 10.10.1.2/28'.format(intf1))
        collect_info(duthost)
        duthost.shell(
            'config interface ip add {0} 10.10.1.20/28'.format(intf2))
        collect_info(duthost)

        # Give the system time to settle after portchannel membership changes.
        if (po1 is not None) or (po2 is not None):
            time.sleep(40)

        # Copy test files
        ptfhost.copy(src="ptftests", dest="/root")

        yield duthost, ptfhost, int_facts, intf1, intf2, intf1_indice, intf2_indice
    finally:
        # Recover DUT interface IP address
        config_reload(duthost, config_source='config_db', wait=120)
def reload_testbed_on_failed(request, duthosts, enum_rand_one_per_hwsku_frontend_hostname):
    """
    Reload dut after test function finished
    """
    dut = duthosts[enum_rand_one_per_hwsku_frontend_hostname]

    yield None

    if not request.node.rep_call.failed:
        return
    # if test case failed, means bgp session down or port channel status not recovered, execute config reload
    logging.info("Reloading config and restarting swss...")
    config_reload(dut)
    wait_critical_processes(dut)
def setup(duthosts, rand_one_dut_hostname, ptfhost, tbinfo, config_sflow_feature):
    """Populate the module-level `var` dict with sflow test state (ports, PTF
    indices, collectors), configure DUT ports and the PTF, and restore the DUT
    from minigraph on teardown."""
    duthost = duthosts[rand_one_dut_hostname]
    global var
    var = {}

    # Skip outright when the image doesn't have sflow enabled.
    feature_status, _ = duthost.get_feature_status()
    if 'sflow' not in feature_status or feature_status['sflow'] == 'disabled':
        pytest.skip("sflow feature is not eanbled")

    mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
    var['router_mac'] = duthost.facts['router_mac']
    vlan_dict = mg_facts['minigraph_vlans']
    var['test_ports'] = []
    var['ptf_test_indices'] = []
    var['sflow_ports'] = {}

    # Use the first three Vlan1000 members as test ports and record their PTF indices.
    for i in range(0, 3, 1):
        var['test_ports'].append(vlan_dict['Vlan1000']['members'][i])
        var['ptf_test_indices'].append(mg_facts['minigraph_ptf_indices'][
            vlan_dict['Vlan1000']['members'][i]])

    collector_ips = ['20.1.1.2', '30.1.1.2']
    var['dut_intf_ips'] = ['20.1.1.1', '30.1.1.1']
    var['mgmt_ip'] = mg_facts['minigraph_mgmt_interface']['addr']
    var['lo_ip'] = mg_facts['minigraph_lo_interfaces'][0]['addr']

    # First two test ports carry the collector subnets (paired with dut_intf_ips).
    config_dut_ports(duthost, var['test_ports'][0:2], vlan=1000)

    # One sampled port per portchannel: the first member interface.
    for port_channel, interfaces in mg_facts['minigraph_portchannels'].items():
        port = interfaces['members'][0]
        var['sflow_ports'][port] = {}
        var['sflow_ports'][port]['ifindex'] = get_ifindex(duthost, port)
        var['sflow_ports'][port]['port_index'] = get_port_index(duthost, port)
        var['sflow_ports'][port]['ptf_indices'] = mg_facts[
            'minigraph_ptf_indices'][interfaces['members'][0]]
        var['sflow_ports'][port]['sample_rate'] = 512
    var['portmap'] = json.dumps(var['sflow_ports'])

    # Two collectors on consecutive UDP ports starting at the sflow default 6343.
    udp_port = 6343
    for i in range(0, 2, 1):
        var['collector%s' % i] = {}
        var['collector%s' % i]['name'] = 'collector%s' % i
        var['collector%s' % i]['ip_addr'] = collector_ips[i]
        var['collector%s' % i]['port'] = udp_port
        udp_port += 1

    collector_ports = var['ptf_test_indices'][0:2]
    setup_ptf(ptfhost, collector_ports)

    # -------- Testing ----------
    yield
    # -------- Teardown ----------
    config_reload(duthost, config_source='minigraph', wait=120)
def test_interface_binding(duthosts, rand_one_dut_hostname, dut_dhcp_relay_data):
    """Verify dhcrelay binds port 67 on every downlink VLAN and uplink interface.

    Only config-reloads the DUT when interfaces are not already up, then checks
    `ss -nlp` output inside the dhcp_relay container for each expected binding.
    """
    duthost = duthosts[rand_one_dut_hostname]
    skip_release(duthost, ["201811", "201911", "202106"])
    # Recover interfaces first if any are down; otherwise skip the costly reload.
    if not check_interface_status(duthost):
        config_reload(duthost)
        wait_critical_processes(duthost)
        pytest_assert(wait_until(120, 5, 0, check_interface_status, duthost))
    output = duthost.shell("docker exec -it dhcp_relay ss -nlp | grep dhcrelay",
                           module_ignore_errors=True)["stdout"].encode("utf-8")
    logger.info(output)
    for dhcp_relay in dut_dhcp_relay_data:
        # dhcrelay must listen on BOOTP/DHCP port 67 on the downlink VLAN ...
        assert "{}:67".format(dhcp_relay['downlink_vlan_iface']['name']) in output, \
            "{} is not found in {}".format(
                "{}:67".format(dhcp_relay['downlink_vlan_iface']['name']), output)
        # ... and on each uplink interface.
        for iface in dhcp_relay['uplink_interfaces']:
            assert "{}:67".format(iface) in output, \
                "{} is not found in {}".format("{}:67".format(iface), output)
def test_neighbor_link_down(testbed_params, setup_counters, duthosts, rand_one_dut_hostname,
                            toggle_all_simulator_ports_to_rand_selected_tor, mock_server,
                            send_dropped_traffic, drop_reason, generate_dropped_packet, tbinfo):
    """
    Verifies counters that check for a neighbor link being down.

    Note:
        This test works by mocking a server within a VLAN, thus the T0
        topology is required.

    Args:
        drop_reason (str): The drop reason being tested.
    """
    duthost = duthosts[rand_one_dut_hostname]
    counter_type = setup_counters([drop_reason])

    # Inject traffic on any physical port other than the mocked server's port.
    rx_port = random.choice([
        port for port in testbed_params["physical_port_map"].keys()
        if port != mock_server["server_dst_port"]
    ])
    logging.info("Selected port %s to send traffic", rx_port)

    src_ip = MOCK_DEST_IP
    pkt = generate_dropped_packet(rx_port, src_ip, mock_server["server_dst_addr"])

    try:
        # Add a static fdb entry
        apply_fdb_config(duthost, testbed_params['vlan_interface']['attachto'],
                         mock_server['server_dst_intf'], mock_server['server_dst_mac'],
                         "SET", "static")
        # Take the fanout link to the mocked server down to trigger the drop.
        mock_server["fanout_neighbor"].shutdown(mock_server["fanout_intf"])
        time.sleep(3)
        verifyFdbArp(duthost, mock_server['server_dst_addr'],
                     mock_server['server_dst_mac'], mock_server['server_dst_intf'])
        send_dropped_traffic(counter_type, pkt, rx_port)
    finally:
        # Restore the link and clear the DUT's FDB/ARP state.
        mock_server["fanout_neighbor"].no_shutdown(mock_server["fanout_intf"])
        duthost.command("sonic-clear fdb all")
        duthost.command("sonic-clear arp")
        # Delete the static fdb entry
        apply_fdb_config(duthost, testbed_params['vlan_interface']['attachto'],
                         mock_server['server_dst_intf'], mock_server['server_dst_mac'],
                         "DEL", "static")
        # FIXME: Add config reload on t0-backend as a workaround to keep DUT healthy because the following
        # drop packet testcases will suffer from the brcm_sai_get_port_stats errors flooded in syslog
        if "backend" in tbinfo["topo"]["name"]:
            config_reload(duthost)
def backup_bgp_config(duthost):
    """
    Copy default bgp configuration to the DUT and apply default configuration
    on the bgp docker after test

    Args:
        duthost: DUT host object
    """
    # Stash the default BGP config on the DUT before the test mutates it.
    apply_default_bgp_config(duthost, copy=True)

    yield

    # Restore; if the direct re-apply fails, fall back to a full config reload
    # and then re-apply the defaults on the fresh config.
    try:
        apply_default_bgp_config(duthost)
    except Exception:
        config_reload(duthost)
        apply_default_bgp_config(duthost)
def cleanup_read_mac(duthosts, rand_one_dut_hostname, localhost):
    """Teardown-only fixture: remove the temporary image binaries and restore
    the backed-up minigraph (if present) via config reload."""
    duthost = duthosts[rand_one_dut_hostname]

    yield

    logger.info('Remove temporary images')
    for host, path in ((duthost, BINARY_FILE_ON_DUTHOST),
                       (localhost, BINARY_FILE_ON_LOCALHOST_1),
                       (localhost, BINARY_FILE_ON_LOCALHOST_2)):
        host.shell("rm -rf {}".format(path))

    stat_result = duthost.stat(path="/etc/sonic/minigraph.xml.backup")
    if stat_result["stat"]["exists"]:
        logger.info("Apply minigraph from backup")
        duthost.shell(
            "mv /etc/sonic/minigraph.xml.backup /etc/sonic/minigraph.xml")
        config_reload(duthost, config_source='minigraph')
def swap_syncd(duthost, creds):
    """Replaces the running syncd container with the RPC version of it.

    This will download a new Docker image to the duthost and restart the swss
    service.

    Args:
        duthost (SonicHost): The target device.
        creds (dict): Credentials used to access the docker registry.
    """
    vendor_id = _get_vendor_id(duthost)
    docker_syncd_name = "docker-syncd-{}".format(vendor_id)
    docker_rpc_image = docker_syncd_name + "-rpc"

    # Force image download to go through mgmt network
    duthost.command("config bgp shutdown all")
    duthost.stop_service("swss")
    duthost.delete_container("syncd")

    # Set sysctl RCVBUF parameter for tests
    duthost.command("sysctl -w net.core.rmem_max=609430500")

    # Set sysctl SENDBUF parameter for tests
    duthost.command("sysctl -w net.core.wmem_max=609430500")

    _perform_swap_syncd_shutdown_check(duthost)

    # Reuse a locally-present RPC image if possible; otherwise pull from the registry.
    is_syncdrpc_present_locally = duthost.command(
        'docker image inspect ' + docker_rpc_image,
        module_ignore_errors=True)['rc'] == 0
    if is_syncdrpc_present_locally:
        tag_image(duthost, "{}:latest".format(docker_syncd_name), docker_rpc_image, 'latest')
    else:
        registry = load_docker_registry_info(duthost, creds)
        download_image(duthost, registry, docker_rpc_image, duthost.os_version)
        # Retag the pulled RPC image as the stock syncd's :latest so swss uses it.
        tag_image(duthost,
                  "{}:latest".format(docker_syncd_name),
                  "{}/{}".format(registry.host, docker_rpc_image),
                  duthost.os_version)

    logger.info("Reloading config and restarting swss...")
    config_reload(duthost)

    _perform_syncd_liveness_check(duthost)