Example #1
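    # Start a containerized Node driver, call NodeGetInfo from a background thread,
    # then stop the gRPC server and verify the in-flight call still returned a node_id.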
    def test_node_graceful_shutdown(self):
        topology = {
            'type': consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'zones': TOPOLOGY_SINGLE_ZONE['zones']
        }
        config = {'topology': json.dumps(topology)}
        driver_server = start_containerized_server(Consts.DriverType.Node,
                                                   config=config,
                                                   hostname='node-1')
        config['SOCKET_PATH'] = 'unix://%s' % driver_server.csi_socket_path
        ConfigLoaderMock(config).load()
        client = NodeClient()

        results = []

        def run_get_info(results):
            log.debug('Calling GetInfo')
            res = client.NodeGetInfo()
            results.append(res.node_id)

        thread = Thread(target=run_get_info, args=(results, ))
        thread.start()

        log.debug('Stopping the gRPC server')
        driver_server.stop()

        thread.join()

        self.assertEqual(len(results), 1)
Example #2
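	# Bring up a containerized Node driver with an empty topology and create an IdentityClient against its socket.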
	@classmethod
	def setUpClass(cls):
		super(TestIdentityService, cls).setUpClass()
		config = {'topology': '{}'}
		cls.driver_server = start_containerized_server(Consts.DriverType.Node, config=config, hostname=NODE_ID)
		config['SOCKET_PATH'] = 'unix://%s' % cls.driver_server.csi_socket_path
		ConfigLoaderMock(config).load()
		cls.identityClient = IdentityClient()
Example #3
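    # Start three NVMesh clusters, build a zone topology from them,
    # and launch a Controller driver configured with that topology.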
    @classmethod
    def setUpClass(cls):
        super(TestControllerServiceWithZoneTopology, cls).setUpClass()

        cls.clusters = create_clusters(num_of_clusters=3,
                                       num_of_client_per_cluster=3,
                                       name_prefix='zone_')

        for c in cls.clusters:
            c.start()

        for c in cls.clusters:
            c.wait_until_is_alive()

        cls.topology = get_config_topology_from_cluster_list(cls.clusters)

        config = {
            'MANAGEMENT_SERVERS': None,
            'MANAGEMENT_PROTOCOL': None,
            'MANAGEMENT_USERNAME': None,
            'MANAGEMENT_PASSWORD': None,
            'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'TOPOLOGY': cls.topology,
            'LOG_LEVEL': 'DEBUG',
            'SDK_LOG_LEVEL': 'DEBUG'
        }

        ConfigLoaderMock(config).load()
        cls.driver_server = start_server(Consts.DriverType.Controller,
                                         config=config)
        cls.ctrl_client = ControllerClient()
Example #4
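    # Restart the containerized Node driver, optionally with a new config.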
    @staticmethod
    def restart_server(new_config=None):
        TestNodeService.driver_server.stop()

        config = new_config or TestNodeService.driver_server.config
        ConfigLoaderMock(config).load()
        TestNodeService.driver_server = start_containerized_server(
            Consts.DriverType.Node, config=config, hostname='node-1')
Example #5
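# Helper: load the mocked config, start the driver in a container and wait for its gRPC server.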
def start_containerized_server(driver_type, config, hostname=None):
    ConfigLoaderMock(config).load()
    driver_server = ContainerServerManager(driver_type,
                                           config,
                                           node_id=hostname)
    driver_server.start()
    driver_server.wait_for_grpc_server_to_be_alive()
    return driver_server
Example #6
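# Helper: load the mocked config and start the driver in-process, optionally waiting for the gRPC server.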
def start_server(driverType, config, mock_node_id=None, wait_for_grpc=True):
    ConfigLoaderMock(config).load()
    driver_server = ServerManager(driverType,
                                  config=config,
                                  mock_node_id=mock_node_id)
    driver_server.start()
    if wait_for_grpc:
        wait_for_grpc_server_to_be_up()
    return driver_server
Example #7
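    # Install the mocked config loader on driver.config, then build and serve the CSI driver.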
    def init_and_start_csi_server(self, config):
        import driver.config as config_module
        config_module.config_loader = ConfigLoaderMock(config)
        self.make_sure_socket_dir_exists()
        server = NVMeshCSIDriverServer(self.driverType)
        if self.mock_node_id and self.driverType == consts.DriverType.Node:
            server.node_service.node_id = self.mock_node_id
        setup_logging_level()
        server.serve()
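Note: the assignment to driver.config.config_loader above is the hook every example on this page relies on to inject test settings before the driver starts. As a rough sketch of that pattern only (the real ConfigLoaderMock belongs to the project's test utilities and its internals are not shown here), a mock loader might simply copy the supplied dict onto the config module when load() is called; the class below and its setattr approach are assumptions, not the project's actual implementation:

# Hypothetical sketch only -- NOT the project's ConfigLoaderMock.
# Assumes the driver reads its settings as attributes of the driver.config module.
class FakeConfigLoader(object):
    def __init__(self, values):
        self._values = dict(values)  # copy so later mutations by the test do not leak in

    def load(self):
        import driver.config as config_module
        for key, value in self._values.items():
            # e.g. config_module.MANAGEMENT_SERVERS, config_module.TOPOLOGY, ...
            setattr(config_module, key, value)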
Example #8
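    # Only zone_3 has a reachable management server; CreateVolume should retry other zones and end up on zone_3.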
    def test_retry_on_another_zone(self):
        only_zone_c_active = {
            "zones": {
                "zone_1": {
                    "management": {
                        "servers": "some-unavailable-server-1:4000"
                    }
                },
                "zone_2": {
                    "management": {
                        "servers": "some-unavailable-server-2:4000"
                    }
                },
                "zone_3": {
                    "management": {
                        "servers": self.active_mgmt_server
                    }
                },
                "zone_4": {
                    "management": {
                        "servers": "some-unavailable-server-4:4000"
                    }
                },
            }
        }

        zones_1_to_4 = [
            Topology(segments={Consts.TopologyKey.ZONE: 'zone_1'}),
            Topology(segments={Consts.TopologyKey.ZONE: 'zone_2'}),
            Topology(segments={Consts.TopologyKey.ZONE: 'zone_3'}),
            Topology(segments={Consts.TopologyKey.ZONE: 'zone_4'})
        ]
        volume_topology = TopologyRequirement(requisite=zones_1_to_4,
                                              preferred=zones_1_to_4)
        config = {
            'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'TOPOLOGY': only_zone_c_active,
            'LOG_LEVEL': 'INFO',
            'SDK_LOG_LEVEL': 'INFO'
        }

        ConfigLoaderMock(config).load()
        driver_server = start_server(Consts.DriverType.Controller,
                                     config=config)

        parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
        ctrl_client = ControllerClient()
        res = ctrl_client.CreateVolume(name=VOL_2_ID,
                                       capacity_in_bytes=1 * GB,
                                       parameters=parameters,
                                       topology_requirements=volume_topology)

        driver_server.stop()

        self.assertTrue(res.volume.volume_id.startswith('zone_3:'))
Example #9
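    # Start a containerized Node driver with a single-zone topology and create a NodeClient for the tests.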
    @classmethod
    def setUpClass(cls):
        super(TestNodeService, cls).setUpClass()
        topology = {
            'type': consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'zones': TOPOLOGY_SINGLE_ZONE['zones']
        }
        config = {'topology': json.dumps(topology)}
        cls.driver_server = start_containerized_server(Consts.DriverType.Node,
                                                       config=config,
                                                       hostname='node-1')
        config['SOCKET_PATH'] = 'unix://%s' % cls.driver_server.csi_socket_path
        ConfigLoaderMock(config).load()
        cls._client = NodeClient()
Example #10
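    # The only reachable zone (B) is not in the allowed topology, so CreateVolume is expected to fail.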
    def test_fail_available_zones_not_in_allowed_topology(self):
        all_inactive = {
            "zones": {
                "A": {
                    "management": {
                        "servers": "some-unavailable-server-1:4000"
                    }
                },
                "B": {
                    "management": {
                        "servers": TestRetryOnAnotherZone.active_mgmt_server
                    }
                },
                "C": {
                    "management": {
                        "servers": "some-unavailable-server-3:4000"
                    }
                }
            }
        }

        requirement = TopologyRequirement(
            requisite=[
                Topology(segments={Consts.TopologyKey.ZONE: 'A'}),
                Topology(segments={Consts.TopologyKey.ZONE: 'C'})
            ],
            preferred=[
                Topology(segments={Consts.TopologyKey.ZONE: 'A'}),
                Topology(segments={Consts.TopologyKey.ZONE: 'C'})
            ])

        config = {
            'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'TOPOLOGY': all_inactive,
            'LOG_LEVEL': 'DEBUG',
            'SDK_LOG_LEVEL': 'INFO'
        }

        ConfigLoaderMock(config).load()
        driver_server = start_server(Consts.DriverType.Controller,
                                     config=config)

        self.addCleanup(lambda: driver_server.stop())

        with self.assertRaises(_Rendezvous):
            parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
            ctrl_client = ControllerClient()
            res = ctrl_client.CreateVolume(name=VOL_2_ID,
                                           capacity_in_bytes=1 * GB,
                                           parameters=parameters,
                                           topology_requirements=requirement)
Example #11
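    # A single allowed zone whose management server is unreachable: CreateVolume should fail with RESOURCE_EXHAUSTED.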
    def test_disable_zone_single_allowed_zone(self):
        single_inactive = {
            "zones": {
                "A": {
                    "management": {
                        "servers": "some-unavailable-server-1:4000"
                    }
                }
            }
        }

        single_zone = [Topology(segments={Consts.TopologyKey.ZONE: 'A'})]
        requirement = TopologyRequirement(requisite=single_zone,
                                          preferred=single_zone)

        config = {
            'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'TOPOLOGY': single_inactive,
            'LOG_LEVEL': 'DEBUG',
            'SDK_LOG_LEVEL': 'INFO'
        }

        ConfigLoaderMock(config).load()
        driver_server = start_server(Consts.DriverType.Controller,
                                     config=config)

        self.addCleanup(lambda: driver_server.stop())

        parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
        ctrl_client = ControllerClient()

        def assert_fail_to_create_volume(volume_id):
            try:
                res = ctrl_client.CreateVolume(
                    name=volume_id,
                    capacity_in_bytes=1 * GB,
                    parameters=parameters,
                    topology_requirements=requirement)

                self.addCleanup(lambda: ctrl_client.DeleteVolume(
                    volume_id=res.volume.volume_id))
            except _Rendezvous as ex:
                self.assertEqual(ex._state.code,
                                 StatusCode.RESOURCE_EXHAUSTED)
                self.assertIn('Failed to create volume on all zones',
                              ex.debug_error_string())

        assert_fail_to_create_volume(VOL_1_ID)
        assert_fail_to_create_volume(VOL_2_ID)
Example #12
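	# A zone with several comma-separated management servers should yield a VolumeAPI without raising ValueError.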
	def test_5_multiple_management_servers_per_zone(self):
		multi_mgmt_servers_topology = {
			'zones': {
				'A': {'management': {'servers': 'unavailable:4000,localhost:4000,localhost:4010'}},
			}
		}

		config = DEFAULT_MOCK_CONFIG.copy()
		config['TOPOLOGY'] = multi_mgmt_servers_topology
		ConfigLoaderMock(config).load()

		try:
			zone_a_api = VolumeAPIPool.get_volume_api_for_zone('A', logger)
		except ValueError as ex:
			self.fail('Got ValueError exception: %s' % ex)
Example #13
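    # Start a single NVMesh cluster and a Controller driver with no topology configured.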
    @classmethod
    def setUpClass(cls):
        super(TestControllerServiceWithoutTopology, cls).setUpClass()
        cls.cluster1 = NVMeshCluster('cluster_' + cls.__name__)
        cls.cluster1.start()

        config = {
            'MANAGEMENT_SERVERS': cls.cluster1.get_mgmt_server_string(),
            'MANAGEMENT_PROTOCOL': 'https',
            'MANAGEMENT_USERNAME': '******',
            'MANAGEMENT_PASSWORD': '******',
            'TOPOLOGY_TYPE': None,
            'TOPOLOGY': None,
            'SDK_LOG_LEVEL': 'DEBUG'
        }

        ConfigLoaderMock(config).load()
        cls.driver_server = start_server(Consts.DriverType.Controller,
                                         config=config)
        cls.ctrl_client = ControllerClient()
Example #14
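    # Create a volume on a deliberately slow cluster from a background thread, stop the server mid-request,
    # and verify the request still completed with a volume_id.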
    def test_abort_during_request(self):
        cluster = NVMeshCluster('cluster1',
                                options={'volumeCreationDelayMS': 5000})
        cluster.start()

        self.addCleanup(lambda: cluster.stop())

        config = {
            'MANAGEMENT_SERVERS': 'localhost:4000',
            'MANAGEMENT_PROTOCOL': 'https',
            'MANAGEMENT_USERNAME': '******',
            'MANAGEMENT_PASSWORD': '******',
            'TOPOLOGY_TYPE': Consts.TopologyType.SINGLE_ZONE_CLUSTER,
            'TOPOLOGY': None
        }

        ConfigLoaderMock(config).load()

        driver_server = start_server(Consts.DriverType.Controller,
                                     config=config)
        response_bucket = {'volume_id': None}

        def create_volume(response_bucket):
            ctrl_client = ControllerClient()
            parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
            res = ctrl_client.CreateVolume(
                name='pvc-test-graceful-shutdown',
                capacity_in_bytes=5 * GB,
                parameters=parameters,
                topology_requirements=TOPO_REQ_MULTIPLE_TOPOLOGIES)

            log.debug('create_volume returned %s' % res)
            response_bucket['volume_id'] = res.volume.volume_id

        t = threading.Thread(target=create_volume, args=(response_bucket, ))
        t.start()
        time.sleep(2)  # give the background CreateVolume request time to start before stopping the server
        log.debug('shutting the server down')
        driver_server.stop()

        # if volume_id is None, the thread terminated prematurely
        self.assertTrue(response_bucket['volume_id'])
Example #15
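    # No zone has a reachable management server, so CreateVolume is expected to fail.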
    def test_fail_to_create_all_mgmts_not_available(self):
        all_inactive = {
            "zones": {
                "zone_1": {
                    "management": {
                        "servers": "some-unavailable-server-1:4000"
                    }
                },
                "zone_2": {
                    "management": {
                        "servers": "some-unavailable-server-2:4000"
                    }
                },
                "zone_3": {
                    "management": {
                        "servers": "some-unavailable-server-3:4000"
                    }
                }
            }
        }

        config = {
            'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'TOPOLOGY': all_inactive,
            'LOG_LEVEL': 'DEBUG',
            'SDK_LOG_LEVEL': 'INFO'
        }

        ConfigLoaderMock(config).load()
        driver_server = start_server(Consts.DriverType.Controller,
                                     config=config)

        self.addCleanup(lambda: driver_server.stop())

        with self.assertRaises(_Rendezvous):
            parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}
            ctrl_client = ControllerClient()
            res = ctrl_client.CreateVolume(
                name=VOL_2_ID,
                capacity_in_bytes=1 * GB,
                parameters=parameters,
                topology_requirements=TOPO_REQ_MULTIPLE_TOPOLOGIES)
Example #16
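    # Scale test: 15 clusters, one volume per zone; verify each volume landed on the management server of its zone.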
    def test_scale(self):

        os.environ['DEVELOPMENT'] = 'TRUE'

        self._delete_topology_config_map_if_exists()

        clusters = self._create_multiple_clusters(num_of_clusters=15,
                                                  num_of_client_per_cluster=5)

        config_topology = get_config_topology_from_cluster_list(clusters)

        config = {
            'TOPOLOGY_TYPE': Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'TOPOLOGY': config_topology,
            'LOG_LEVEL': 'DEBUG',
            'SDK_LOG_LEVEL': 'DEBUG'
        }
        import driver.config as Config
        Config.config_loader = ConfigLoaderMock(config)
        Config.config_loader.load()

        self._start_csi_controller(config)

        log.debug('Creating volumes')
        ctrl_client = ControllerClient()

        parameters = {'vpg': 'DEFAULT_CONCATENATED_VPG'}

        expected_volumes = {}

        def create_volume_for_specific_zone(zone_name):
            log.debug('create_volume_for_specific_zone %s' % zone_name)
            allowed_zones = Topology(
                segments={Consts.TopologyKey.ZONE: zone_name})
            allowed_topologies = TopologyRequirement(requisite=[allowed_zones],
                                                     preferred=[allowed_zones])

            response = ctrl_client.CreateVolume(
                name='vol_zone_%s' % zone_name,
                capacity_in_bytes=5 * GB,
                parameters=parameters,
                topology_requirements=allowed_topologies)

            volume_id = response.volume.volume_id
            self.assertTrue(volume_id)

            if zone_name not in expected_volumes:
                expected_volumes[zone_name] = []

            volume_id_without_zone = volume_id.split(':')[1]
            expected_volumes[zone_name].append(volume_id_without_zone)

        for cluster in clusters:
            create_volume_for_specific_zone(cluster.name)

        # Verify volumes created in the correct zones
        volumes_from_clusters = {}
        for cluster in clusters:
            # Fetch volumes from the mgmt server and compare
            mgmt_srv = cluster.get_mgmt_server_string()
            api = VolumeAPI(managementServers=mgmt_srv,
                            managementProtocol='https')
            err, volumes = api.get()
            volume_ids = [v._id for v in volumes]
            volumes_from_clusters[cluster.name] = volume_ids

        for cluster in clusters:
            expected_volume_ids = expected_volumes[cluster.name]
            actual_volumes = volumes_from_clusters[cluster.name]
            self.assertItemsEqual(
                expected_volume_ids, actual_volumes,
                'expected: {}\n\nfound: {}'.format(
                    pretty_json(expected_volumes),
                    pretty_json(volumes_from_clusters)))

        log.info('test finished successfully')
Example #17
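    # Run the TopologyService against two clusters and wait until every client is mapped to its zone.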
    def test_all_zones_are_accessible(self):
        clusters = TestTopologyFetcherThread.clusters
        LoggerUtils.add_stdout_handler(logging.getLogger('topology-service'))

        os.environ['DEVELOPMENT'] = 'TRUE'
        topology = {
            "zones": {
                "A": {
                    "management": {
                        "servers": clusters[0].get_mgmt_server_string(),
                        "ws_port": clusters[0].ws_port
                    }
                },
                "B": {
                    "management": {
                        "servers": clusters[1].get_mgmt_server_string(),
                        "ws_port": clusters[1].ws_port
                    }
                }
            }
        }

        config = {
            'TOPOLOGY_TYPE': consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS,
            'TOPOLOGY': topology,
            'LOG_LEVEL': 'DEBUG',
            'SDK_LOG_LEVEL': 'DEBUG'
        }

        ConfigLoaderMock(config).load()

        from driver.topology_service import TopologyService

        topology_service = TopologyService()
        topology_service.run()
        self.addCleanup(lambda: topology_service.stop())

        # calc expected topology
        expected_topology = {}

        for client in clusters[0].clients:
            expected_topology[client] = 'A'

        for client in clusters[1].clients:
            expected_topology[client] = 'B'

        success = False
        max_waiting_time_seconds = 15
        for i in range(max_waiting_time_seconds):
            nodes_topology = topology_service.topology.nodes
            all_topology_discovered = (
                json.dumps(expected_topology, sort_keys=True) ==
                json.dumps(nodes_topology, sort_keys=True))
            if all_topology_discovered:
                success = True
                break

            time.sleep(1)

        self.assertTrue(success,
                        "Timed-out waiting for all topology to be discovered")
Example #18
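# Tests for VolumeAPIPool: per-zone API creation, handling of unreachable or unknown zones, and lock behaviour.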
class TestVolumeAPIPool(unittest.TestCase):
	ConfigLoaderMock(DEFAULT_CONFIG).load()
	cluster1 = None

	@classmethod
	def setUpClass(cls):
		super(TestVolumeAPIPool, cls).setUpClass()
		cls.cluster1 = NVMeshCluster('cluster1', http_port=4000, ws_port=4001)
		cls.cluster1.start()
		cls.cluster1.wait_until_is_alive()

		cls.cluster2 = NVMeshCluster('cluster2', http_port=4010, ws_port=4011)
		cls.cluster2.start()
		cls.cluster2.wait_until_is_alive()

	@classmethod
	def tearDownClass(cls):
		cls.cluster1.stop()
		cls.cluster2.stop()

	def tearDown(self):
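		# Reset the pool's private (name-mangled) caches so each test starts clean.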
		topology_utils.VolumeAPIPool._VolumeAPIPool__api_dict = {}
		topology_utils.VolumeAPIPool._VolumeAPIPool__zone_locks = {}

	def check_lock_released(self):
		self.assertFalse(VolumeAPIPool.isLocked())

	def test_1_init(self):
		VolumeAPIPool.get_volume_api_for_zone('A', logger)

	def test_2_get_volume_api(self):
		api = VolumeAPIPool.get_volume_api_for_zone('A', logger)
		self.check_lock_released()

	def test_3_get_api_mgmt_not_responding(self):
		with self.assertRaises(ManagementTimeout):
			VolumeAPIPool.get_volume_api_for_zone('B', logger)

		self.check_lock_released()

	def test_3_get_api_zone_missing(self):
		with self.assertRaises(ValueError):
			VolumeAPIPool.get_volume_api_for_zone('D', logger)

		self.check_lock_released()

	def test_5_multiple_management_servers_per_zone(self):
		multi_mgmt_servers_topology = {
			'zones': {
				'A': {'management': {'servers': 'unavailable:4000,localhost:4000,localhost:4010'}},
			}
		}

		config = DEFAULT_MOCK_CONFIG.copy()
		config['TOPOLOGY'] = multi_mgmt_servers_topology
		ConfigLoaderMock(config).load()

		try:
			zone_a_api = VolumeAPIPool.get_volume_api_for_zone('A', logger)
		except ValueError as ex:
			self.fail('Got ValueError exception: %s' % ex)

	def test_4_api_creation_delayed_does_not_block_other_zones(self):
		original_volume_api = topology_utils.VolumeAPI

		def restore_original_api():
			topology_utils.VolumeAPI = original_volume_api

		self.addCleanup(restore_original_api)

		class MockConn(object):
			def __init__(self, servers, protocol):
				self.managementServers = [protocol + '://' + servers]

		class MockVolumeAPI(object):
			def __init__(self, user=None, password=None, logger=None, managementServers=None, managementProtocol='https', dbUUID=None):
				self.managementConnection = MockConn(managementServers, managementProtocol)
				if managementServers == 'localhost:4000':
					# Delay the request for zone A
					time.sleep(3)

		e = Event()
		apis = {}

		topology_utils.VolumeAPI = MockVolumeAPI

		def create_api_in_thread():
			e.set()
			print("A: %s" % topology_utils.VolumeAPI)
			a = VolumeAPIPool.get_volume_api_for_zone('A', logger)
			print("got A = %s" % a)
			apis['A'] = a

		t = Thread(target=create_api_in_thread)
		t.start()

		e.wait()
		print("getting C")
		time.sleep(1)
		print("C: %s" % topology_utils.VolumeAPI)
		b = VolumeAPIPool.get_volume_api_for_zone('C', logger)
		print("got C = %s" % b)

		# Make sure we returned before A
		self.assertNotIn('A', apis, "A's VolumeAPI is already available; this probably means that the slow creation of A blocked us from getting C")

		t.join()
		self.check_lock_released()