def test_is_unicode_enabled(self):
    """Unicode naming is enabled only when BOTH conditions hold:

    1. the naming_scheme_unicode option is True (which is the default)
    2. the BCF capability list includes 'display-name'
    """
    # The test fixture patches is_unicode_enabled(); stop that patch so
    # the real implementation is exercised here.
    self.is_unicode_enabled_p.stop()

    def _caps_with_display_name():
        return ['dummy', 'display-name']

    def _caps_without_display_name():
        return ['dummy']

    # Create a server pool with the default naming_scheme_unicode and
    # verify the default value is True.
    pool = servermanager.ServerPool()
    self.assertTrue(cfg.CONF.RESTPROXY.naming_scheme_unicode)

    # Config enabled, and unicode is supported on BCF.
    with mock.patch(POOL_GET_CAPABILITIES,
                    side_effect=_caps_with_display_name):
        self.assertTrue(pool.is_unicode_enabled())

    # Config enabled, but unicode is not supported on BCF.
    with mock.patch(POOL_GET_CAPABILITIES,
                    side_effect=_caps_without_display_name):
        self.assertFalse(pool.is_unicode_enabled())

    # The option is read during initialization, so the pool must be
    # recreated after overriding the config.
    cfg.CONF.set_override('naming_scheme_unicode', False, 'RESTPROXY')
    pool = servermanager.ServerPool()

    # Config disabled: the result is False regardless of BCF support.
    with mock.patch(POOL_GET_CAPABILITIES,
                    side_effect=_caps_with_display_name):
        self.assertFalse(pool.is_unicode_enabled())
    with mock.patch(POOL_GET_CAPABILITIES,
                    side_effect=_caps_without_display_name):
        self.assertFalse(pool.is_unicode_enabled())
def test_ipv6_server_address(self):
    """A bracketed IPv6 server address is stored without the brackets."""
    cfg.CONF.set_override(
        'servers',
        ['[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]:80'],
        'RESTPROXY')
    pool = servermanager.ServerPool()
    self.assertEqual('ABCD:EF01:2345:6789:ABCD:EF01:2345:6789',
                     pool.servers[0].server)
def initialize(self):
    """Set up config, backend connections, topo sync and RPC callbacks."""
    LOG.debug('Initializing driver')

    # Register plugin config opts.
    pl_config.register_config()
    self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)

    # Init network controller connections.
    self.servers = servermanager.ServerPool()
    self.servers.get_topo_function = self._get_all_data
    self.servers.get_topo_function_args = {
        'get_ports': True,
        'get_floating_ips': True,
        'get_routers': True,
        'get_sgs': True,
    }

    # Perform one forced topo_sync after 60 seconds, delayed to let
    # plugin initialization complete.
    eventlet.spawn_after(60, self.servers.force_topo_sync, check_ts=True)

    self.segmentation_types = ', '.join(cfg.CONF.ml2.type_drivers)

    # If os-net-config is present, attempt to read the physnet
    # bridge_mappings from openvswitch_agent.ini.
    self.bridge_mappings = {}
    if os.path.isfile(RH_NET_CONF_PATH):
        self.bridge_mappings = _read_ovs_bridge_mappings()

    # Track hosts running IVS to avoid excessive calls to the backend.
    self.ivs_host_cache = {}

    self.setup_rpc_callbacks()
    LOG.debug("Initialization done")
def test_connect_failures(self):
    """rest_call returns (0, None, None, None) when no connection is made."""
    pool = servermanager.ServerPool()
    server = pool.servers[0]

    with mock.patch(HTTPCON, return_value=None):
        result = server.rest_call('GET', '/')
    self.assertEqual((0, None, None, None), result)

    # Verify the same behavior on the SSL connection class.
    server.currentcon = False
    server.ssl = True
    with mock.patch(HTTPSCON, return_value=None):
        result = server.rest_call('GET', '/')
    self.assertEqual((0, None, None, None), result)
def test_auth_token_header(self):
    """A token-style server_auth is sent as a session cookie, not Basic auth."""
    cfg.CONF.set_override('server_auth', 'fake_token', 'RESTPROXY')
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        conn = conmock.return_value
        conn.getresponse.return_value.getheader.return_value = 'HASHHEADER'
        pool.rest_create_network('tenant', 'network')
        # Headers are the 4th positional argument of HTTPConnection.request.
        sent_headers = conn.request.mock_calls[0][1][3]
        self.assertIn('Cookie', sent_headers)
        self.assertNotIn('Authorization', sent_headers)
        self.assertEqual(sent_headers['Cookie'],
                         'session_cookie="fake_token"')
def test_auth_header(self):
    """A user:pass server_auth is sent as an HTTP Basic Authorization header."""
    cfg.CONF.set_override('server_auth', 'username:pass', 'RESTPROXY')
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        conn = conmock.return_value
        conn.getresponse.return_value.getheader.return_value = 'HASHHEADER'
        pool.rest_create_network('tenant', 'network')
        # Headers are the 4th positional argument of HTTPConnection.request.
        sent_headers = conn.request.mock_calls[0][1][3]
        self.assertIn('Authorization', sent_headers)
        # base64("username:pass")
        self.assertEqual(sent_headers['Authorization'],
                         'Basic dXNlcm5hbWU6cGFzcw==')
def test_capabilities_retrieval_failure(self):
    """Unparseable capability payloads degrade to empty capability sets."""
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        response = conmock.return_value.getresponse.return_value
        response.getheader.return_value = 'HASHHEADER'
        # A failure to parse should result in an empty capability set.
        response.read.return_value = 'XXXXX'
        self.assertEqual([], pool.servers[0].get_capabilities())
        # One broken server should affect all capabilities.
        response.read.side_effect = ['{"a": "b"}', '["b","c","d"]']
        self.assertEqual(set(), pool.get_capabilities())
def test_header_add(self):
    """Caller-supplied headers are merged without clobbering the defaults."""
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        conn = conmock.return_value
        conn.getresponse.return_value.getheader.return_value = 'HASHHEADER'
        pool.servers[0].rest_call('GET', '/',
                                  headers={'EXTRA-HEADER': 'HI'})
        sent_headers = conn.request.mock_calls[0][1][3]
        # Verify the normal headers weren't mangled ...
        self.assertIn('Content-type', sent_headers)
        self.assertEqual(sent_headers['Content-type'], 'application/json')
        # ... and that the new header made it in.
        self.assertIn('EXTRA-HEADER', sent_headers)
        self.assertEqual(sent_headers['EXTRA-HEADER'], 'HI')
def test_reconnect_on_timeout_change(self):
    """A timeout change triggers a reconnect even with keep-alive enabled."""
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        conn = conmock.return_value
        conn.getresponse.return_value.getheader.return_value = 'HASHHEADER'
        server = pool.servers[0]
        server.capabilities = ['keep-alive']
        server.rest_call('GET', '/', timeout=10)
        # Even with keep-alive enabled, a change in timeout will
        # trigger a reconnect.
        server.rest_call('GET', '/', timeout=75)
        conmock.assert_has_calls([
            mock.call('localhost', 9000, timeout=10),
            mock.call('localhost', 9000, timeout=75),
        ], any_order=True)
def test_capabilities_retrieval(self):
    """Pool capabilities are the per-server intersection, cached after one fetch."""
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        response = conmock.return_value.getresponse.return_value
        response.getheader.return_value = 'HASHHEADER'
        # Each server will get different capabilities.
        response.read.side_effect = ['["a","b","c"]', '["b","c","d"]']
        # Pool capabilities is the intersection between both.
        self.assertEqual(set(['b', 'c']), pool.get_capabilities())
        self.assertEqual(2, response.read.call_count)
        # The pool should cache after the first call, so no more HTTP
        # calls should be made.
        response.read.side_effect = ['["w","x","y"]', '["x","y","z"]']
        self.assertEqual(set(['b', 'c']), pool.get_capabilities())
        self.assertEqual(2, response.read.call_count)
def test_no_reconnect_recurse_to_infinity(self):
    """The reconnect retry recurses at most once before raising."""
    self.skipTest("cached connections are currently disabled because "
                  "their assignment to the servermanager object is not "
                  "thread-safe")
    # retry uses recursion when a reconnect is necessary; this test
    # makes sure it stops after 1 recursive call
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        conn = conmock.return_value
        # hash header must be a string instead of a mock object
        conn.getresponse.return_value.getheader.return_value = 'HASH'
        pool.servers[0].capabilities = ['keep-alive']
        pool.servers[0].rest_call('GET', '/first')
        # after retrying once, the rest call should raise the
        # exception up
        conn.request.side_effect = httplib.ImproperConnectionState()
        self.assertRaises(httplib.ImproperConnectionState,
                          pool.servers[0].rest_call,
                          'GET', '/second')
        # 1 for the first call, 2 for the second with retry
        self.assertEqual(conn.request.call_count, 3)
def test_capabilities_retrieval(self):
    """Pool capabilities are the per-server union, cached after one fetch.

    NOTE(review): a test with this same name but intersection semantics
    also appears in this file — if both live in the same class, Python
    keeps only the later definition; confirm they belong to different
    test classes.
    """
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        response = conmock.return_value.getresponse.return_value
        response.getheader.return_value = 'HASHHEADER'
        # Each server will get different capabilities.
        response.read.side_effect = ['["a","b","c"]', '["b","c","d"]']
        # Pool capabilities is the union of both. Normally capabilities
        # should be the same across all servers; they differ only when:
        # 1. a server is down
        # 2. during upgrade/downgrade
        self.assertEqual(set(['a', 'b', 'c', 'd']), pool.get_capabilities())
        self.assertEqual(2, response.read.call_count)
        # The pool should cache after the first call, so no more HTTP
        # calls should be made.
        response.read.side_effect = ['["w","x","y"]', '["x","y","z"]']
        self.assertEqual(set(['a', 'b', 'c', 'd']), pool.get_capabilities())
        self.assertEqual(2, response.read.call_count)
def initialize(self):
    """Set up config, consistency hash seed, backend pool and SG RPC."""
    LOG.debug('Initializing driver')

    # Register plugin config opts.
    pl_config.register_config()
    self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)

    # An empty consistency hash means no sync has ever happened; seed it
    # so the first topology sync is forced.
    hash_handler = cdb.HashHandler()
    if hash_handler.is_db_hash_empty():
        LOG.debug("Forcing topology sync as consistency hash is empty")
        hash_handler.read_for_update()
        hash_handler.put_hash('initial:hash,code')

    # Init network controller connections.
    self.servers = servermanager.ServerPool()
    self.servers.get_topo_function = self._get_all_data_auto

    self.segmentation_types = ', '.join(cfg.CONF.ml2.type_drivers)

    # Track hosts running IVS to avoid excessive calls to the backend.
    self.vswitch_host_cache = {}

    self.setup_sg_rpc_callbacks()
    LOG.debug("Initialization done")
def test_reconnect_cached_connection(self):
    """A stale cached connection is re-established and the call retried."""
    self.skipTest("cached connections are currently disabled because "
                  "their assignment to the servermanager object is not "
                  "thread-safe")
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        conn = conmock.return_value
        conn.getresponse.return_value.getheader.return_value = 'HASH'
        pool.servers[0].capabilities = ['keep-alive']
        pool.servers[0].rest_call('GET', '/first')
        # Raise an error on re-use to verify the reconnect; return okay
        # the second time so the reconnect works.
        conn.request.side_effect = [
            httplib.ImproperConnectionState(),
            mock.MagicMock()
        ]
        pool.servers[0].rest_call('GET', '/second')
        requested_uris = [call[1][1] for call in conn.request.mock_calls]
        expected = [
            pool.base_uri + '/first',
            pool.base_uri + '/second',
            pool.base_uri + '/second',
        ]
        self.assertEqual(requested_uris, expected)
def test_socket_error(self):
    """A socket timeout maps to the (0, None, None, None) failure tuple."""
    pool = servermanager.ServerPool()
    with mock.patch(HTTPCON) as conmock:
        conmock.return_value.request.side_effect = socket.timeout()
        result = pool.servers[0].rest_call('GET', '/')
    self.assertEqual((0, None, None, None), result)