def test_retry_on_another_zone(self):
    """Volume creation should fall back to the one zone whose management
    server is reachable (zone_3) when all other zones are unavailable.

    Asserts the created volume id is prefixed with the zone it landed in.
    """
    only_zone_c_active = {
        "zones": {
            "zone_1": {"management": {"servers": "some-unavailable-server-1:4000"}},
            "zone_2": {"management": {"servers": "some-unavailable-server-2:4000"}},
            "zone_3": {"management": {"servers": self.active_mgmt_server}},
            "zone_4": {"management": {"servers": "some-unavailable-server-4:4000"}},
        }
    }

    # requisite == preferred: all four zones are allowed.
    zones_1_to_4 = [
        Topology(segments={Consts.TopologyKey.ZONE: 'zone_%d' % i})
        for i in range(1, 5)
    ]
    volume_topology = TopologyRequirement(
        requisite=zones_1_to_4, preferred=zones_1_to_4)

    config = {
        'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
        'TOPOLOGY': only_zone_c_active,
        'LOG_LEVEL': 'INFO',
        'SDK_LOG_LEVEL': 'INFO'
    }
    ConfigLoaderMock(config).load()

    driver_server = start_server(Consts.DriverType.Controller, config=config)
    # Register cleanup instead of calling stop() inline so the server is
    # stopped even when CreateVolume raises (the inline call leaked the
    # server on failure); this also matches the sibling tests.
    self.addCleanup(driver_server.stop)

    parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
    ctrl_client = ControllerClient()
    res = ctrl_client.CreateVolume(
        name=VOL_2_ID,
        capacity_in_bytes=1 * GB,
        parameters=parameters,
        topology_requirements=volume_topology)

    # The driver prefixes the volume id with the zone it was created in.
    self.assertTrue(res.volume.volume_id.startswith('zone_3:'))
def test_fail_available_zones_not_in_allowed_topology(self):
    """CreateVolume must fail when the only reachable zone (B) is not in
    the allowed topology (A and C), even though B itself is healthy."""
    all_inactive = {
        "zones": {
            "A": {"management": {"servers": "some-unavailable-server-1:4000"}},
            "B": {"management": {"servers": TestRetryOnAnotherZone.active_mgmt_server}},
            "C": {"management": {"servers": "some-unavailable-server-3:4000"}},
        }
    }

    # Only zones A and C are allowed; the reachable zone B is excluded.
    zones_a_and_c = [
        Topology(segments={Consts.TopologyKey.ZONE: 'A'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'C'})
    ]
    requirement = TopologyRequirement(
        requisite=zones_a_and_c, preferred=zones_a_and_c)

    config = {
        'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
        'TOPOLOGY': all_inactive,
        'LOG_LEVEL': 'DEBUG',
        'SDK_LOG_LEVEL': 'INFO'
    }
    ConfigLoaderMock(config).load()

    driver_server = start_server(Consts.DriverType.Controller, config=config)
    # Pass the bound method directly; the lambda wrapper was redundant.
    self.addCleanup(driver_server.stop)

    parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
    ctrl_client = ControllerClient()
    with self.assertRaises(_Rendezvous):
        # Return value intentionally discarded: the call must raise.
        ctrl_client.CreateVolume(
            name=VOL_2_ID,
            capacity_in_bytes=1 * GB,
            parameters=parameters,
            topology_requirements=requirement)
def test_disable_zone_single_allowed_zone(self):
    """When the single allowed zone is unreachable, CreateVolume must fail
    with RESOURCE_EXHAUSTED for every requested volume."""
    single_inactive = {
        "zones": {
            "A": {"management": {"servers": "some-unavailable-server-1:4000"}}
        }
    }

    single_zone = [Topology(segments={Consts.TopologyKey.ZONE: 'A'})]
    requirement = TopologyRequirement(
        requisite=single_zone, preferred=single_zone)

    config = {
        'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
        'TOPOLOGY': single_inactive,
        'LOG_LEVEL': 'DEBUG',
        'SDK_LOG_LEVEL': 'INFO'
    }
    ConfigLoaderMock(config).load()

    driver_server = start_server(Consts.DriverType.Controller, config=config)
    self.addCleanup(driver_server.stop)

    parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
    ctrl_client = ControllerClient()

    def assert_fail_to_create_volume(volume_id):
        # The expected path is the except branch; the else branch exists
        # because the original version silently PASSED when creation
        # unexpectedly succeeded — now that is an explicit test failure.
        try:
            res = ctrl_client.CreateVolume(
                name=volume_id,
                capacity_in_bytes=1 * GB,
                parameters=parameters,
                topology_requirements=requirement)
        except _Rendezvous as ex:
            # assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(ex._state.code, StatusCode.RESOURCE_EXHAUSTED)
            self.assertIn('Failed to create volume on all zones',
                          ex.debug_error_string())
        else:
            # Clean up the stray volume, then fail. CreateVolume responses
            # expose the id as volume.volume_id (see
            # test_retry_on_another_zone); the original used res.volume_id.
            self.addCleanup(
                lambda: ctrl_client.DeleteVolume(
                    volume_id=res.volume.volume_id))
            self.fail('Expected CreateVolume to fail for %s' % volume_id)

    assert_fail_to_create_volume(VOL_1_ID)
    assert_fail_to_create_volume(VOL_2_ID)
def test_fail_if_zone_not_in_topology_config(self):
    """CreateVolume must be rejected when the requested zone does not
    appear in the driver's topology config."""
    parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
    wrong_topology = Topology(
        segments={Consts.TopologyKey.ZONE: 'wrong_zone'})
    wrong_topology_req = TopologyRequirement(
        requisite=[wrong_topology], preferred=[wrong_topology])
    try:
        self.ctrl_client.CreateVolume(
            name=VOL_2_ID,
            capacity_in_bytes=1 * GB,
            parameters=parameters,
            topology_requirements=wrong_topology_req)
        # The original message said 'Expected ValueError exception', but
        # the exception this test catches is grpc's _Rendezvous.
        self.fail('Expected _Rendezvous exception')
    except _Rendezvous as ex:
        log.debug(ex)
        # assertIn reports both operands on failure, unlike
        # assertTrue(x in y).
        self.assertIn('Zone wrong_zone missing from Config.topology',
                      ex.details())
def create_volume_for_specific_zone(zone_name):
    """Create a volume restricted to *zone_name* and record the created
    id (without its zone prefix) in the enclosing ``expected_volumes``.

    Closure over ``ctrl_client``, ``parameters``, ``expected_volumes``
    and ``self`` from the enclosing test scope.
    """
    log.debug('create_volume_for_specific_zone %s' % zone_name)
    allowed_zones = Topology(
        segments={Consts.TopologyKey.ZONE: zone_name})
    allowed_topologies = TopologyRequirement(
        requisite=[allowed_zones], preferred=[allowed_zones])

    response = ctrl_client.CreateVolume(
        name='vol_zone_%s' % zone_name,
        capacity_in_bytes=5 * GB,
        parameters=parameters,
        topology_requirements=allowed_topologies)

    volume_id = response.volume.volume_id
    self.assertTrue(volume_id)

    # The driver returns '<zone>:<volume_id>'; keep only the bare id.
    volume_id_without_zone = volume_id.split(':')[1]
    # setdefault replaces the 'if not zone_name in expected_volumes'
    # initialization (which also used the non-idiomatic 'not x in y').
    expected_volumes.setdefault(zone_name, []).append(volume_id_without_zone)
from driver import config_map_api
from driver.csi.csi_pb2 import TopologyRequirement, Topology
from test.sanity.helpers.config_loader_mock import ConfigLoaderMock
from test.sanity.helpers.setup_and_teardown import start_server
import driver.consts as Consts
from test.sanity.helpers.test_case_with_server import TestCaseWithServerRunning
from test.sanity.clients.controller_client import ControllerClient
from test.sanity.nvmesh_cluster_simulator.simulate_cluster import create_clusters, get_config_topology_from_cluster_list

# 1 GiB in bytes.
GB = pow(1024, 3)

# Volume names reused across the tests in this module.
VOL_1_ID = "vol_1"
VOL_2_ID = "vol_2"

# Single-zone ('A') topology requirement used as the default in tests
# (requisite == preferred).
DEFAULT_TOPOLOGY = Topology(segments={Consts.TopologyKey.ZONE: 'A'})
DEFAULT_TOPOLOGY_REQUIREMENTS = TopologyRequirement(
    requisite=[DEFAULT_TOPOLOGY], preferred=[DEFAULT_TOPOLOGY])

# Requirement spanning zones A, B and C (requisite == preferred).
TOPO_REQ_MULTIPLE_TOPOLOGIES = TopologyRequirement(
    requisite=[
        Topology(segments={Consts.TopologyKey.ZONE: 'A'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'B'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'C'})
    ],
    preferred=[
        Topology(segments={Consts.TopologyKey.ZONE: 'A'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'B'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'C'})
    ])

# NOTE(review): 'os' is used here but not imported in this visible chunk —
# presumably imported earlier in the file; confirm.
os.environ['DEVELOPMENT'] = 'TRUE'
from test.sanity.helpers.config_loader_mock import ConfigLoaderMock
from test.sanity.helpers.sanity_test_config import SanityTestConfig
from test.sanity.helpers.setup_and_teardown import start_server
import driver.consts as Consts
from test.sanity.helpers.test_case_with_server import TestCaseWithServerRunning
from test.sanity.clients.controller_client import ControllerClient
from test.sanity.helpers.error_handlers import CatchRequestErrors
from test.sanity.nvmesh_cluster_simulator.simulate_cluster import NVMeshCluster, create_clusters, get_config_topology_from_cluster_list

# Size constants in bytes.
MB = pow(1024, 2)
GB = pow(1024, 3)

# Volume names reused across the tests in this module.
VOL_1_ID = "vol_1"
VOL_2_ID = "vol_2"

# Single-zone ('zone_1') topology requirement used as the default in tests
# (requisite == preferred).
DEFAULT_TOPOLOGY = Topology(segments={Consts.TopologyKey.ZONE: 'zone_1'})
DEFAULT_TOPOLOGY_REQUIREMENTS = TopologyRequirement(
    requisite=[DEFAULT_TOPOLOGY], preferred=[DEFAULT_TOPOLOGY])

# Requirement spanning zones 1-3 (requisite == preferred).
TOPO_REQ_MULTIPLE_TOPOLOGIES = TopologyRequirement(
    requisite=[
        Topology(segments={Consts.TopologyKey.ZONE: 'zone_1'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'zone_2'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'zone_3'})
    ],
    preferred=[
        Topology(segments={Consts.TopologyKey.ZONE: 'zone_1'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'zone_2'}),
        Topology(segments={Consts.TopologyKey.ZONE: 'zone_3'})
    ])

# NOTE(review): 'os', 'Topology' and 'TopologyRequirement' are used here but
# not imported in this visible chunk — presumably imported earlier; confirm.
os.environ['DEVELOPMENT'] = 'TRUE'
from test.sanity.helpers.error_handlers import CatchRequestErrors, CatchNodeDriverErrors

# 1 GiB in bytes.
GB = pow(1024, 3)

# Fixture identifiers reused across the node-service tests.
VOL_ID = "vol_1"
NODE_ID_1 = "node-1"
DEFAULT_POD_ID = "pod-ab12"

# Topology config dict with a single zone backed by a local management server.
TOPOLOGY_SINGLE_ZONE = {
    'zones': {
        'zone_1': {
            'management': {
                'servers': 'localhost:4000'
            }
        }
    }
}

# NOTE(review): despite the name, this is a single-zone CSI Topology message,
# not a multi-zone config — confirm whether the name or the value is intended.
TOPOLOGY_MULTIPLE_ZONES = Topology(
    segments={Consts.TopologyKey.ZONE: 'zone_1'})

os.environ['DEVELOPMENT'] = 'TRUE'

log = logging.getLogger('SanityTests')


class TestNodeService(TestCaseWithServerRunning):
    # Class-level handle to the running driver server, shared by static
    # helpers such as restart_server.
    driver_server = None

    def __init__(self, *args, **kwargs):
        TestCaseWithServerRunning.__init__(self, *args, **kwargs)
        # Shadows the class attribute per instance.
        self.driver_server = None

    @staticmethod
    def restart_server(new_config=None):
        # Stops the shared server; the restart logic presumably continues
        # beyond this visible chunk — confirm against the full file.
        TestNodeService.driver_server.stop()