def change_pinned_cpu_and_redeploy(self):
    """Unpin CPU and redeploy cluster

    Scenario:
        1. Revert snapshot "cpu_pinning_on_two_compute"
        2. Unpin CPU on the first compute
        3. Deploy changes
        4. Verify changes were applied
        5. Check nova.conf doesn't contain pinned CPU at the first compute
        6. Run OSTF
        7. Boot VM with pinned CPU on the second compute

    Snapshot: change_pinned_cpu_and_redeploy
    """
    self.show_step(1)
    self.env.revert_snapshot("cpu_pinning_on_two_compute")
    cluster_id = self.fuel_web.get_last_created_cluster()

    self.show_step(2)
    first_compute = self.fuel_web.get_nailgun_node_by_name('slave-01')
    first_config = self.fuel_web.client.get_node_attributes(
        first_compute['id'])
    # Setting the pinned CPU count to zero disables pinning on the node
    first_config['cpu_pinning']['nova']['value'] = 0
    self.fuel_web.client.upload_node_attributes(
        first_config, first_compute['id'])

    self.show_step(3)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(4)
    compute_config = self.fuel_web.client.get_node_attributes(
        first_compute['id'])
    asserts.assert_equal(
        compute_config['cpu_pinning']['nova']['value'], 0,
        "CPU wasn't unpinned on '{0}': "
        "Expected value 0, actual '{1}'".format(
            first_compute['ip'],
            compute_config['cpu_pinning']['nova']['value']))

    self.show_step(5)
    nova_conf_path = "/etc/nova/nova.conf"
    with self.ssh_manager.open_on_remote(
            ip=first_compute['ip'], path=nova_conf_path) as f:
        nova_conf = utils.get_ini_config(f)
    # A value of None means the vcpu_pin_set option must be absent
    utils.check_config(nova_conf, nova_conf_path,
                       "DEFAULT", "vcpu_pin_set", None)

    self.show_step(6)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.show_step(7)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id))
    second_compute = self.fuel_web.get_nailgun_node_by_name('slave-02')
    meta = {'pinned': 'true'}
    self.create_pinned_instance(os_conn=os_conn,
                                cluster_id=cluster_id,
                                name='cpu_1_', vcpus=1,
                                hostname=second_compute['fqdn'],
                                meta=meta)

    self.env.make_snapshot("change_pinned_cpu_and_redeploy")
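# For reference, `get_node_attributes` returns a nested dict of node
# attributes; the only fragment these tests touch is the nova CPU
# pinning counter. A sketch of that fragment (sibling keys omitted,
# exact layout assumed from the accesses above):
#
#     {
#         'cpu_pinning': {
#             'nova': {'value': 0},  # number of host CPUs pinned for nova
#         },
#     }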
def check_config_on_remote(self, nodes, config):
    """Verify that configuration files on the given nodes contain
    the expected parameter values.

    :param nodes: a list of nailgun nodes
    :param config: a structured dictionary of config: maps the path of
        a configuration file to a list of parameters, where each
        parameter is a dict with 'section', 'option' and 'value' keys
    """
    node_ips = [node['ip'] for node in nodes]
    for ip in node_ips:
        with self.env.d_env.get_ssh_to_remote(ip) as remote:
            for configpath, params in config.items():
                result = remote.open(configpath)
                conf_for_check = utils.get_ini_config(result)
                for param in params:
                    utils.check_config(conf_for_check,
                                       configpath,
                                       param['section'],
                                       param['option'],
                                       param['value'])
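# Usage sketch for check_config_on_remote. The `config` argument has
# the same shape as the output of get_structured_config_dict used in
# the reconfiguration tests below: a mapping from config file path to
# a list of parameters to verify. The path and values here are
# hypothetical:
#
#     config = {
#         '/etc/neutron/neutron.conf': [
#             {'section': 'DEFAULT', 'option': 'debug', 'value': 'True'},
#         ],
#     }
#     self.check_config_on_remote(controllers, config)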
def cpu_pinning_with_other_role(self):
    """Check pinned CPU on compute, cinder nodes

    Scenario:
        1. Revert snapshot "basic_env_for_numa_cpu_pinning"
        2. Add cinder role for compute nodes
        3. Pin maximum CPU for nova on the computes
        4. Verify setting was successfully applied
        5. Deploy cluster
        6. Check new filters are enabled in nova.conf at controllers
        7. Check nova.conf contains pinned CPU at computes
        8. Run OSTF
        9. Boot VMs with pinned CPU on each compute, cinder node

    Snapshot: cpu_pinning_with_other_role
    """
    self.show_step(1)
    self.env.revert_snapshot("basic_env_for_numa_cpu_pinning")

    self.show_step(2)
    cluster_id = self.fuel_web.get_last_created_cluster()
    nodes = {'slave-01': ['compute', 'cinder'],
             'slave-02': ['compute', 'cinder']}
    self.fuel_web.update_nodes(cluster_id, nodes)

    self.show_step(3)
    target_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['compute', 'cinder'], role_status='pending_roles')
    for compute in target_nodes:
        # Pin all CPUs but one; one CPU stays unpinned for the host OS
        compute_cpu = compute['meta']['cpu']['total']
        compute_config = self.fuel_web.client.get_node_attributes(
            compute['id'])
        compute_config['cpu_pinning']['nova']['value'] = compute_cpu - 1
        self.fuel_web.client.upload_node_attributes(
            compute_config, compute['id'])

    self.show_step(4)
    for compute in target_nodes:
        compute_cpu = compute['meta']['cpu']['total']
        compute_config = self.fuel_web.client.get_node_attributes(
            compute['id'])
        asserts.assert_equal(
            compute_config['cpu_pinning']['nova']['value'],
            compute_cpu - 1,
            "CPU pinning wasn't applied on '{0}': "
            "Expected value '{1}', actual '{2}'".format(
                compute['ip'], compute_cpu - 1,
                compute_config['cpu_pinning']['nova']['value']))

    self.show_step(5)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(6)
    controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, roles=['controller'])
    nova_conf_path = "/etc/nova/nova.conf"
    for controller in controllers:
        with self.ssh_manager.open_on_remote(
                ip=controller['ip'], path=nova_conf_path) as f:
            nova_conf = utils.get_ini_config(f)
        self.assert_entry_in_config(nova_conf, nova_conf_path,
                                    "DEFAULT",
                                    "scheduler_default_filters",
                                    "NUMATopologyFilter")

    self.show_step(7)
    for compute in target_nodes:
        with self.ssh_manager.open_on_remote(
                ip=compute['ip'], path=nova_conf_path) as f:
            nova_conf = utils.get_ini_config(f)
        compute_cpu = compute['meta']['cpu']['total']
        self.assert_quantity_in_config(nova_conf, nova_conf_path,
                                       "DEFAULT", "vcpu_pin_set",
                                       compute_cpu - 1)

    self.show_step(8)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.show_step(9)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id))
    meta = {'pinned': 'true'}
    for compute in target_nodes:
        self.create_pinned_instance(os_conn=os_conn,
                                    cluster_id=cluster_id,
                                    name='cpu_role_', vcpus=2,
                                    hostname=compute['fqdn'],
                                    meta=meta)

    self.env.make_snapshot("cpu_pinning_with_other_role")
def cpu_pinning_on_two_compute(self):
    """Check different amounts of pinned CPU

    Scenario:
        1. Revert snapshot "basic_env_for_numa_cpu_pinning"
        2. Pin maximum CPU for nova on the first compute
        3. Pin minimum CPU for nova on the second compute
        4. Verify setting was successfully applied
        5. Deploy cluster
        6. Check new filters are enabled in nova.conf at controllers
        7. Check nova.conf contains pinned CPU at computes
        8. Run OSTF
        9. Boot VM with pinned CPU on the first compute
        10. Boot VM with pinned CPU on the second compute

    Snapshot: cpu_pinning_on_two_compute
    """
    snapshot_name = 'cpu_pinning_on_two_compute'
    self.check_run(snapshot_name)

    self.show_step(1)
    self.env.revert_snapshot("basic_env_for_numa_cpu_pinning")
    cluster_id = self.fuel_web.get_last_created_cluster()

    self.show_step(2)
    first_compute = self.fuel_web.get_nailgun_node_by_name('slave-01')
    first_compute_cpu = first_compute['meta']['cpu']['total']
    first_config = self.fuel_web.client.get_node_attributes(
        first_compute['id'])
    first_config['cpu_pinning']['nova']['value'] = first_compute_cpu - 1
    self.fuel_web.client.upload_node_attributes(
        first_config, first_compute['id'])

    self.show_step(3)
    second_compute = self.fuel_web.get_nailgun_node_by_name('slave-02')
    second_config = self.fuel_web.client.get_node_attributes(
        second_compute['id'])
    second_config['cpu_pinning']['nova']['value'] = 1
    self.fuel_web.client.upload_node_attributes(
        second_config, second_compute['id'])

    self.show_step(4)
    first_config = self.fuel_web.client.get_node_attributes(
        first_compute['id'])
    asserts.assert_equal(
        first_config['cpu_pinning']['nova']['value'],
        first_compute_cpu - 1,
        "CPU pinning wasn't applied on '{0}': "
        "Expected value '{1}', actual '{2}'".format(
            first_compute['ip'], first_compute_cpu - 1,
            first_config['cpu_pinning']['nova']['value']))

    second_config = self.fuel_web.client.get_node_attributes(
        second_compute['id'])
    asserts.assert_equal(
        second_config['cpu_pinning']['nova']['value'], 1,
        "CPU pinning wasn't applied on '{0}': "
        "Expected value '{1}', actual '{2}'".format(
            second_compute['ip'], 1,
            second_config['cpu_pinning']['nova']['value']))

    self.show_step(5)
    self.fuel_web.deploy_cluster_wait(cluster_id)

    self.show_step(6)
    controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, roles=['controller'])
    nova_conf_path = "/etc/nova/nova.conf"
    for controller in controllers:
        with self.ssh_manager.open_on_remote(
                ip=controller['ip'], path=nova_conf_path) as f:
            nova_conf = utils.get_ini_config(f)
        self.assert_entry_in_config(nova_conf, nova_conf_path,
                                    "DEFAULT",
                                    "scheduler_default_filters",
                                    "NUMATopologyFilter")

    self.show_step(7)
    with self.ssh_manager.open_on_remote(
            ip=first_compute['ip'], path=nova_conf_path) as f:
        nova_conf = utils.get_ini_config(f)
    self.assert_quantity_in_config(nova_conf, nova_conf_path,
                                   "DEFAULT", "vcpu_pin_set",
                                   first_compute_cpu - 1)

    with self.ssh_manager.open_on_remote(
            ip=second_compute['ip'], path=nova_conf_path) as f:
        nova_conf = utils.get_ini_config(f)
    self.assert_quantity_in_config(nova_conf, nova_conf_path,
                                   "DEFAULT", "vcpu_pin_set", 1)

    self.show_step(8)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.show_step(9)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id))
    meta = {'pinned': 'true'}
    self.create_pinned_instance(os_conn=os_conn,
                                cluster_id=cluster_id,
                                name='cpu_3_', vcpus=3,
                                hostname=first_compute['fqdn'],
                                meta=meta)

    self.show_step(10)
    self.create_pinned_instance(os_conn=os_conn,
                                cluster_id=cluster_id,
                                name='cpu_1_', vcpus=1,
                                hostname=second_compute['fqdn'],
                                meta=meta)

    self.env.make_snapshot(snapshot_name, is_make=True)
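# All of the CPU-pinning tests above boot guests through
# `create_pinned_instance`, which is defined elsewhere in this class.
# The sketch below shows what such a helper could look like; it is an
# assumption, not the real implementation: the flavor parameters, the
# 'TestVM' image name, and the `os_conn.nova`/`os_conn.get_image`
# attributes are illustrative.

def create_pinned_instance(self, os_conn, cluster_id, name, vcpus,
                           hostname, meta):
    # hw:cpu_policy=dedicated asks nova to pin every guest vCPU to a
    # dedicated host CPU; this relies on NUMATopologyFilter being
    # enabled on the controllers (verified in step 6 of the tests).
    flavor = os_conn.nova.flavors.create(name=name + 'flavor',
                                         ram=64, vcpus=vcpus, disk=1)
    flavor.set_keys({'hw:cpu_policy': 'dedicated'})
    # Boot on the requested compute node; 'nova' is the default
    # availability zone name.
    server = os_conn.nova.servers.create(
        name=name + 'vm',
        image=os_conn.get_image('TestVM'),  # assumed helper
        flavor=flavor,
        availability_zone='nova:{0}'.format(hostname),
        meta=meta)
    os_conn.verify_instance_status(server, 'ACTIVE')
    return server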
def reconfigure_ml2_vlan_range(self):
    """Reconfigure neutron ml2 VLAN range

    Scenario:
        1. Revert snapshot "deploy_neutron_vlan_ha"
        2. Upload a new openstack configuration
        3. Get uptime of process "neutron-server" on each controller
        4. Apply a new VLAN range (minimal range) to all nodes
        5. Wait for the configuration to be applied
        6. Verify ml2 plugin settings
        7. Create a new private network
        8. Try to create one more and verify that it is impossible

    Snapshot: reconfigure_ml2_vlan_range
    """
    self.show_step(1)
    self.env.revert_snapshot("deploy_neutron_vlan_ha")

    cluster_id = self.fuel_web.get_last_created_cluster()
    controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['controller'])
    controllers = [x['ip'] for x in controllers]

    self.show_step(2)
    config = utils.get_config_template('neutron')
    structured_config = get_structured_config_dict(config)
    self.fuel_web.client.upload_configuration(config, cluster_id)

    self.show_step(3)
    uptimes = {}
    for controller in controllers:
        with self.env.d_env.get_ssh_to_remote(controller) as remote:
            uptimes[controller] = \
                utils.get_process_uptime(remote, 'neutron-server')

    self.show_step(4)
    task = self.fuel_web.client.apply_configuration(cluster_id)

    self.show_step(5)
    self.fuel_web.assert_task_success(task, timeout=300, interval=5)

    self.show_step(6)
    for controller in controllers:
        with self.env.d_env.get_ssh_to_remote(controller) as remote:
            # A smaller uptime than before means the service restarted
            uptime = utils.get_process_uptime(remote, 'neutron-server')
            asserts.assert_true(uptime <= uptimes[controller],
                                'Service "neutron-server" was not '
                                'restarted on {0}'.format(controller))
            for configpath, params in structured_config.items():
                result = remote.open(configpath)
                conf_for_check = utils.get_ini_config(result)
                for param in params:
                    utils.check_config(conf_for_check,
                                       configpath,
                                       param['section'],
                                       param['option'],
                                       param['value'])

    self.show_step(7)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id))
    tenant = os_conn.get_tenant('admin')
    os_conn.create_network('net1', tenant_id=tenant.id)

    self.show_step(8)
    try:
        os_conn.create_network('net2', tenant_id=tenant.id)
    except Exception as e:
        if 'No tenant network is available' not in e.message:
            raise
    else:
        raise Exception("New configuration was not applied")

    self.env.make_snapshot("reconfigure_ml2_vlan_range", is_make=True)
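# The 'neutron' template applied above narrows the ml2 VLAN range to a
# minimal one, so only a single tenant network can be allocated; that
# is why creating 'net2' must fail with "No tenant network is
# available". A hypothetical structured_config illustrating the idea
# (the physnet name and range in the real template may differ):
#
#     structured_config = {
#         '/etc/neutron/plugins/ml2/ml2_conf.ini': [
#             {'section': 'ml2_type_vlan',
#              'option': 'network_vlan_ranges',
#              'value': 'physnet2:1000:1000'},  # room for one VLAN only
#         ],
#     }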
def reconfigure_overcommit_ratio(self):
    """Test reconfiguration of the nova CPU overcommit ratio.

    Scenario:
        1. Create cluster
        2. Add 1 node with compute role
        3. Add 3 nodes with controller role
        4. Deploy the cluster
        5. Verify network
        6. Run OSTF
        7. Upload a new CPU overcommit ratio configuration
        8. Apply the new CPU overcommit ratio for each controller
        9. Verify deployment task is finished
        10. Verify nova-scheduler services uptime and configuration
            files on each controller
        11. Boot instances with flavor that occupy all CPU
        12. Boot extra instance and catch the error

    Snapshot: reconfigure_overcommit_ratio
    """
    self.env.revert_snapshot("ready_with_5_slaves")

    self.show_step(1)
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=settings.DEPLOYMENT_MODE,
        settings={
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT_TYPE,
        }
    )

    self.show_step(2)
    self.show_step(3)
    self.fuel_web.update_nodes(
        cluster_id,
        {'slave-01': ['compute'],
         'slave-02': ['controller'],
         'slave-03': ['controller'],
         'slave-04': ['controller']})

    self.show_step(4)
    self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)

    self.show_step(5)
    self.fuel_web.verify_network(cluster_id)

    self.show_step(6)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.show_step(7)
    config = utils.get_config_template('nova_cpu')
    structured_config = get_structured_config_dict(config)
    self.fuel_web.client.upload_configuration(config, cluster_id)

    service_name = "nova-scheduler"
    controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        cluster_id, ['controller'])
    controllers = [x['ip'] for x in controllers]
    uptimes = {}
    for controller in controllers:
        with self.env.d_env.get_ssh_to_remote(controller) as remote:
            uptimes[controller] = \
                utils.get_process_uptime(remote, service_name)

    self.show_step(8)
    task = self.fuel_web.client.apply_configuration(cluster_id)

    self.show_step(9)
    self.fuel_web.assert_task_success(task, timeout=300, interval=5)

    self.show_step(10)
    for controller in controllers:
        with self.env.d_env.get_ssh_to_remote(controller) as remote:
            # A smaller uptime than before means the service restarted
            uptime = utils.get_process_uptime(remote, service_name)
            asserts.assert_true(uptime <= uptimes[controller],
                                "Service {0} was not restarted "
                                "on {1}".format(service_name, controller))
            for configpath, params in structured_config.items():
                result = remote.open(configpath)
                conf_for_check = utils.get_ini_config(result)
                for param in params:
                    utils.check_config(conf_for_check,
                                       configpath,
                                       param['section'],
                                       param['option'],
                                       param['value'])

    self.show_step(11)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id))
    net_name = self.fuel_web.get_cluster_predefined_networks_name(
        cluster_id)['private_net']
    server = os_conn.create_instance(neutron_network=True,
                                     label=net_name,
                                     server_name="Test_reconfig",
                                     vcpus=2)
    os_conn.verify_instance_status(server, 'ACTIVE')

    self.show_step(12)
    excessive_server = os_conn.create_instance(neutron_network=True,
                                               label=net_name,
                                               server_name="excessive_VM",
                                               flavor_name="overcommit")
    os_conn.verify_instance_status(excessive_server, 'ERROR')
    os_conn.delete_instance(excessive_server)

    self.env.make_snapshot("reconfigure_overcommit_ratio", is_make=True)
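# The 'nova_cpu' template lowers nova's CPU overcommit ratio so that
# the 2-vCPU "Test_reconfig" instance fills the schedulable capacity
# and the "overcommit" flavor no longer fits, producing the expected
# ERROR state. A hypothetical structured_config for such a template
# (the real template may use a different ratio):
#
#     structured_config = {
#         '/etc/nova/nova.conf': [
#             {'section': 'DEFAULT',
#              'option': 'cpu_allocation_ratio',
#              'value': '1.0'},
#         ],
#     }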