def test_get_udp_listener_pool_status(self, mock_check_output):
    """Pool status is derived correctly from the kernel LVS table.

    Exercises both the IPv4 and the IPv6 sample of /proc/net/ip_vs.
    """
    cases = (
        (KERNAL_FILE_SAMPLE_V4, self.listener_id_v4, self.pool_id_v4,
         (self.member_id1_v4, self.member_id2_v4,
          self.member_id3_v4, self.member_id4_v4)),
        (KERNAL_FILE_SAMPLE_V6, self.listener_id_v6, self.pool_id_v6,
         (self.member_id1_v6, self.member_id2_v6,
          self.member_id3_v6, self.member_id4_v6)),
    )
    for sample, listener_id, pool_id, members in cases:
        mock_check_output.return_value = sample
        res = lvs_query.get_udp_listener_pool_status(listener_id)
        expected = {
            'lvs': {
                'uuid': pool_id,
                'status': constants.UP,
                'members': {
                    members[0]: constants.UP,
                    members[1]: constants.UP,
                    members[2]: constants.DOWN,
                    members[3]: constants.MAINT,
                },
            }
        }
        self.assertEqual(expected, res)
def test_get_udp_listener_pool_status_when_no_members(
        self, mock_get_resource_ipports):
    # IPv4 only; the IPv6 case behaves identically.
    # When the resource/ipport mapping contains no 'Members' entry the
    # pool has no enabled members, making it unusable, so its status
    # must be reported as DOWN with an empty member map.
    mock_get_resource_ipports.return_value = (
        {
            'Listener': {'id': self.listener_id_v4,
                         'ipport': '10.0.0.37:7777'},
            'Pool': {'id': self.pool_id_v4},
        },
        constants.AMPHORA_NAMESPACE,
    )
    res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4)
    self.assertEqual(
        {'lvs': {'uuid': self.pool_id_v4,
                 'status': constants.DOWN,
                 'members': {}}},
        res)
def get_udp_listener_status(self, listener_id):
    """Gets the status of a UDP listener

    This method will consult the stats socket
    so calling this method will interfere with
    the health daemon with the risk of the amphora
    shut down

    :param listener_id: The id of the listener
    :returns: a webob.Response whose JSON body carries the listener
        status and, when ACTIVE, the LVS pool status
    """
    self._check_udp_listener_exists(listener_id)

    status = self._check_udp_listener_status(listener_id)
    # Build the payload once; the original constructed this identical
    # dict twice (once per branch).
    stats = dict(status=status, uuid=listener_id, type='UDP')

    if status != consts.ACTIVE:
        return webob.Response(json=stats)

    try:
        pool = keepalivedlvs_query.get_udp_listener_pool_status(
            listener_id)
    except subprocess.CalledProcessError as e:
        # Reading the kernel LVS table shells out; surface its output
        # to the caller as a 500 instead of crashing the agent.
        return webob.Response(json=dict(
            message="Error getting kernel lvs status for udp listener "
                    "{}".format(listener_id),
            details=e.output), status=500)
    stats['pools'] = [pool]
    return webob.Response(json=stats)
def build_stats_message():
    """Assemble the health/stats message for this amphora.

    Covers both HAProxy (TCP/HTTP) listeners, read from their stats
    sockets, and UDP listeners, read from the keepalived/LVS kernel
    state.
    """
    global SEQ
    msg = {
        'id': CONF.amphora_agent.amphora_id,
        'seq': SEQ,
        'listeners': {},
        'ver': MSG_VER,
    }
    SEQ += 1

    for listener_id, sock_file in list_sock_stat_files().items():
        # Every known listener is reported; default to DOWN with zeroed
        # counters in case it is not running.
        msg['listeners'][listener_id] = {
            'pools': {},
            'status': 'DOWN',
            'stats': {'tx': 0, 'rx': 0, 'conns': 0, 'totconns': 0,
                      'ereq': 0},
        }
        if not util.is_listener_running(listener_id):
            continue
        stats, pool_status = get_stats(sock_file)
        listener_dict = msg['listeners'][listener_id]
        for row in stats:
            if row['svname'] != 'FRONTEND':
                continue
            counters = listener_dict['stats']
            counters['tx'] = int(row['bout'])
            counters['rx'] = int(row['bin'])
            counters['conns'] = int(row['scur'])
            counters['totconns'] = int(row['stot'])
            counters['ereq'] = int(row['ereq'])
            listener_dict['status'] = row['status']
        for oid, pool in pool_status.items():
            # The listener's own id also appears in pool_status; skip it.
            if oid != listener_id:
                listener_dict['pools'][oid] = {
                    "status": pool['status'],
                    "members": pool['members'],
                }

    # UDP listener part
    if util.get_udp_listeners():
        listeners_stats = keepalivedlvs_query.get_udp_listeners_stats()
        if listeners_stats:
            for listener_id, listener_stats in listeners_stats.items():
                pool_status = (
                    keepalivedlvs_query.get_udp_listener_pool_status(
                        listener_id))
                entry = {
                    'status': listener_stats['status'],
                    'stats': {
                        'tx': listener_stats['stats']['bout'],
                        'rx': listener_stats['stats']['bin'],
                        'conns': listener_stats['stats']['scur'],
                        'totconns': listener_stats['stats']['stot'],
                        'ereq': listener_stats['stats']['ereq'],
                    },
                    'pools': {},
                }
                if pool_status:
                    entry['pools'] = {
                        pool_status['lvs']['uuid']: {
                            "status": pool_status['lvs']['status'],
                            "members": pool_status['lvs']['members']}}
                msg['listeners'][listener_id] = entry
    return msg
def test_get_udp_listener_pool_status_when_not_get_realserver_result(
        self, mock_get_mapping, mock_os_stat):
    # Simulates the kernel LVS file (/proc/net/ip_vs) losing its
    # content: even though the pool and members are still present in
    # the keepalived config, the pool and its members must be reported
    # DOWN (members administratively disabled stay MAINT).
    mock_os_stat.side_effect = (
        mock.Mock(st_mtime=1234),
        mock.Mock(st_mtime=1234),
    )
    mock_get_mapping.return_value = (False, {})
    res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4)
    member_statuses = {
        self.member_id1_v4: constants.DOWN,
        self.member_id2_v4: constants.DOWN,
        self.member_id3_v4: constants.DOWN,
        self.member_id4_v4: constants.MAINT,
    }
    self.assertEqual(
        {'lvs': {'uuid': self.pool_id_v4,
                 'status': constants.DOWN,
                 'members': member_statuses}},
        res)
def test_get_udp_listener_pool_status_when_no_pool(
        self, mock_get_resource_ipports):
    # IPv4 only; the IPv6 case behaves identically.
    # Without a 'Pool' entry in the resource/ipport mapping the
    # listener has no pool at all, so an empty dict is returned.
    mock_get_resource_ipports.return_value = (
        {'Listener': {'id': self.listener_id_v4,
                      'ipport': '10.0.0.37:7777'}},
        constants.AMPHORA_NAMESPACE,
    )
    self.assertEqual(
        {},
        lvs_query.get_udp_listener_pool_status(self.listener_id_v4))
def test_get_udp_listener_pool_status_restarting(self, mock_check_output,
                                                 mock_os_stat):
    # Config file newer than the pid file means keepalived is
    # restarting, so a DOWN member is reported RESTARTING instead.
    mock_os_stat.side_effect = (
        mock.Mock(st_mtime=1234),  # config file
        mock.Mock(st_mtime=1220),  # pid file
    )
    mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4
    res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4)
    member_statuses = {
        self.member_id1_v4: constants.UP,
        self.member_id2_v4: constants.UP,
        self.member_id3_v4: constants.RESTARTING,
        self.member_id4_v4: constants.MAINT,
    }
    self.assertEqual(
        {'lvs': {'uuid': self.pool_id_v4,
                 'status': constants.UP,
                 'members': member_statuses}},
        res)
def get_udp_listener_status(self, listener_id):
    """Gets the status of a UDP listener

    This method will consult the stats socket
    so calling this method will interfere with
    the health daemon with the risk of the amphora
    shut down

    :param listener_id: The id of the listener
    :returns: a webob.Response whose JSON body carries the listener
        status and, when ACTIVE, the LVS pool status
    """
    self._check_udp_listener_exists(listener_id)

    status = self._check_udp_listener_status(listener_id)
    # The original built this identical dict in both branches; build it
    # a single time instead.
    stats = dict(
        status=status,
        uuid=listener_id,
        type='UDP'
    )

    if status != consts.ACTIVE:
        return webob.Response(json=stats)

    try:
        pool = keepalivedlvs_query.get_udp_listener_pool_status(
            listener_id)
    except subprocess.CalledProcessError as e:
        # Querying the kernel LVS table shells out; report failure as a
        # 500 with the captured output rather than crashing the agent.
        return webob.Response(json=dict(
            message="Error getting kernel lvs status for udp listener "
                    "{}".format(listener_id),
            details=e.output), status=500)
    stats['pools'] = [pool]
    return webob.Response(json=stats)
def build_stats_message():
    """Build the version-2 health/stats message.

    Listeners are keyed directly under 'listeners'; pools live in a
    top-level 'pools' map (HAProxy pool ids are '<pool_id>:<listener_id>',
    UDP pool ids are the bare LVS uuid).
    """
    global SEQ
    msg = {
        'id': CONF.amphora_agent.amphora_id,
        'seq': SEQ,
        'listeners': {},
        'pools': {},
        'ver': MSG_VER
    }
    SEQ += 1

    # TODO(rm_work) There should only be one of these in the new config system
    for lb_id, sock_file in list_sock_stat_files().items():
        if not util.is_lb_running(lb_id):
            continue
        stats, pool_status = get_stats(sock_file)
        for row in stats:
            if row['svname'] != 'FRONTEND':
                continue
            # The proxy name of a FRONTEND row is the listener id.
            msg['listeners'][row['pxname']] = {
                'status': row['status'],
                'stats': {
                    'tx': int(row['bout']),
                    'rx': int(row['bin']),
                    'conns': int(row['scur']),
                    'totconns': int(row['stot']),
                    'ereq': int(row['ereq'])
                }
            }
        for pool_id, pool in pool_status.items():
            msg['pools'][pool_id] = {
                "status": pool['status'],
                "members": pool['members']
            }

    # UDP listener part
    if util.get_udp_listeners():
        listeners_stats = keepalivedlvs_query.get_udp_listeners_stats()
        if listeners_stats:
            for listener_id, listener_stats in listeners_stats.items():
                pool_status = (
                    keepalivedlvs_query.get_udp_listener_pool_status(
                        listener_id))
                entry = {
                    'status': listener_stats['status'],
                    'stats': {
                        'tx': listener_stats['stats']['bout'],
                        'rx': listener_stats['stats']['bin'],
                        'conns': listener_stats['stats']['scur'],
                        'totconns': listener_stats['stats']['stot'],
                        'ereq': listener_stats['stats']['ereq']
                    }
                }
                if pool_status:
                    lvs = pool_status['lvs']
                    msg['pools'][lvs['uuid']] = {
                        "status": lvs['status'],
                        "members": lvs['members']
                    }
                msg['listeners'][listener_id] = entry
    return msg
def build_stats_message():
    """Build the (unversioned) health/stats message.

    Each listener entry nests its own 'pools' map; a UDP listener entry
    only gains a 'pools' key when LVS pool status is available.
    """
    global SEQ
    msg = {
        'id': CONF.amphora_agent.amphora_id,
        'seq': SEQ,
        'listeners': {}
    }
    SEQ += 1

    for listener_id, sock_file in list_sock_stat_files().items():
        # Report every listener; default to DOWN with zeroed counters.
        msg['listeners'][listener_id] = {
            'pools': {},
            'status': 'DOWN',
            'stats': {'tx': 0, 'rx': 0, 'conns': 0, 'totconns': 0,
                      'ereq': 0}
        }
        if not util.is_listener_running(listener_id):
            continue
        stats, pool_status = get_stats(sock_file)
        listener_dict = msg['listeners'][listener_id]
        for row in stats:
            if row['svname'] != 'FRONTEND':
                continue
            counters = listener_dict['stats']
            counters['tx'] = int(row['bout'])
            counters['rx'] = int(row['bin'])
            counters['conns'] = int(row['scur'])
            counters['totconns'] = int(row['stot'])
            counters['ereq'] = int(row['ereq'])
            listener_dict['status'] = row['status']
        for oid, pool in pool_status.items():
            # The listener's own id also appears in pool_status; skip it.
            if oid != listener_id:
                listener_dict['pools'][oid] = {
                    "status": pool['status'],
                    "members": pool['members']
                }

    # UDP listener part
    if util.get_udp_listeners():
        listeners_stats = keepalivedlvs_query.get_udp_listeners_stats()
        if listeners_stats:
            for listener_id, listener_stats in listeners_stats.items():
                pool_status = (
                    keepalivedlvs_query.get_udp_listener_pool_status(
                        listener_id))
                entry = {
                    'status': listener_stats['status'],
                    'stats': {
                        'tx': listener_stats['stats']['bout'],
                        'rx': listener_stats['stats']['bin'],
                        'conns': listener_stats['stats']['scur'],
                        'totconns': listener_stats['stats']['stot'],
                        'ereq': listener_stats['stats']['ereq']
                    }
                }
                # NOTE: unlike other variants, no 'pools' key is added
                # at all when pool status is unavailable.
                if pool_status:
                    entry['pools'] = {
                        pool_status['lvs']['uuid']: {
                            "status": pool_status['lvs']['status'],
                            "members": pool_status['lvs']['members']
                        }
                    }
                msg['listeners'][listener_id] = entry
    return msg
def build_stats_message():
    """Build a stats message based on retrieved listener statistics.

    Example version 3 message without UDP (note that values are deltas,
    not absolutes)::

        {"id": "<amphora_id>",
         "seq": 67,
         "listeners": {
           "<listener_id>": {
             "status": "OPEN",
             "stats": {
               "tx": 0, "rx": 0, "conns": 0, "totconns": 0, "ereq": 0
             }
           }
         },
         "pools": {
           "<pool_id>:<listener_id>": {
             "status": "UP",
             "members": {"<member_id>": "no check"}
           }
         },
         "ver": 3
        }
    """
    global SEQ
    msg = {
        'id': CONF.amphora_agent.amphora_id,
        'seq': SEQ,
        'listeners': {},
        'pools': {},
        'ver': MSG_VER
    }
    SEQ += 1

    # TODO(rm_work) There should only be one of these in the new config system
    for lb_id, sock_file in list_sock_stat_files().items():
        if not util.is_lb_running(lb_id):
            continue
        stats, pool_status = get_stats(sock_file)
        for row in stats:
            if row['svname'] != 'FRONTEND':
                continue
            # The proxy name of a FRONTEND row is the listener id.
            listener_id = row['pxname']
            deltas = calculate_stats_deltas(listener_id, row)
            msg['listeners'][listener_id] = {
                'status': row['status'],
                'stats': {
                    'tx': deltas['bout'],
                    'rx': deltas['bin'],
                    'conns': int(row['scur']),  # gauge, not a delta
                    'totconns': deltas['stot'],
                    'ereq': deltas['ereq']
                }
            }
        for pool_id, pool in pool_status.items():
            msg['pools'][pool_id] = {
                "status": pool['status'],
                "members": pool['members']
            }

    # UDP listener part
    if util.get_udp_listeners():
        listeners_stats = keepalivedlvs_query.get_udp_listeners_stats()
        if listeners_stats:
            for listener_id, listener_stats in listeners_stats.items():
                deltas = calculate_stats_deltas(listener_id,
                                                listener_stats['stats'])
                pool_status = (
                    keepalivedlvs_query.get_udp_listener_pool_status(
                        listener_id))
                entry = {
                    'status': listener_stats['status'],
                    'stats': {
                        'tx': deltas['bout'],
                        'rx': deltas['bin'],
                        'conns': listener_stats['stats']['scur'],
                        'totconns': deltas['stot'],
                        'ereq': deltas['ereq']
                    }
                }
                if pool_status:
                    lvs = pool_status['lvs']
                    msg['pools'][lvs['uuid']] = {
                        "status": lvs['status'],
                        "members": lvs['members']
                    }
                msg['listeners'][listener_id] = entry
    # Save the counter baselines so the next run computes fresh deltas.
    persist_counters()
    return msg