def setUp(self):
    """Build IPv4 and IPv6 keepalived LVS config fixtures for the tests."""
    super(LvsQueryTestCase, self).setUp()
    # One listener / pool / two members per IP version, all random UUIDs.
    for attr in ('listener_id_v4', 'pool_id_v4', 'member_id1_v4',
                 'member_id2_v4', 'listener_id_v6', 'pool_id_v6',
                 'member_id1_v6', 'member_id2_v6'):
        setattr(self, attr, uuidutils.generate_uuid())
    rendered_v4 = CFG_FILE_TEMPLATE_v4 % {
        'listener_id': self.listener_id_v4,
        'ns_name': constants.AMPHORA_NAMESPACE,
        'pool_id': self.pool_id_v4,
        'member_id1': self.member_id1_v4,
        'member_id2': self.member_id2_v4
    }
    rendered_v6 = CFG_FILE_TEMPLATE_v6 % {
        'listener_id': self.listener_id_v6,
        'ns_name': constants.AMPHORA_NAMESPACE,
        'pool_id': self.pool_id_v6,
        'member_id1': self.member_id1_v6,
        'member_id2': self.member_id2_v6
    }
    # Back each listener's expected config path with an in-memory file.
    for listener_id, rendered in ((self.listener_id_v4, rendered_v4),
                                  (self.listener_id_v6, rendered_v6)):
        self.useFixture(test_utils.OpenFixture(
            util.keepalived_lvs_cfg_path(listener_id), rendered))
def setUp(self):
    """Render v4, v6 and disabled-listener keepalived LVS config fixtures."""
    super().setUp()
    self.listener_id_v4 = uuidutils.generate_uuid()
    self.pool_id_v4 = uuidutils.generate_uuid()
    self.member_id1_v4 = uuidutils.generate_uuid()
    self.member_id2_v4 = uuidutils.generate_uuid()
    self.member_id3_v4 = uuidutils.generate_uuid()
    self.member_id4_v4 = uuidutils.generate_uuid()
    self.listener_id_v6 = uuidutils.generate_uuid()
    self.pool_id_v6 = uuidutils.generate_uuid()
    self.member_id1_v6 = uuidutils.generate_uuid()
    self.member_id2_v6 = uuidutils.generate_uuid()
    self.member_id3_v6 = uuidutils.generate_uuid()
    self.member_id4_v6 = uuidutils.generate_uuid()
    self.member_id5_v6 = uuidutils.generate_uuid()
    self.disabled_listener_id = uuidutils.generate_uuid()
    cfg_content_v4 = CFG_FILE_TEMPLATE_v4 % {
        'listener_id': self.listener_id_v4,
        'ns_name': constants.AMPHORA_NAMESPACE,
        'pool_id': self.pool_id_v4,
        'member_id1': self.member_id1_v4,
        'member_id2': self.member_id2_v4,
        'member_id3': self.member_id3_v4,
        'member_id4': self.member_id4_v4,
    }
    cfg_content_v6 = CFG_FILE_TEMPLATE_v6 % {
        'listener_id': self.listener_id_v6,
        'ns_name': constants.AMPHORA_NAMESPACE,
        'pool_id': self.pool_id_v6,
        'member_id1': self.member_id1_v6,
        'member_id2': self.member_id2_v6,
        'member_id3': self.member_id3_v6,
        'member_id4': self.member_id4_v6,
        'member_id5': self.member_id5_v6
    }
    cfg_content_disabled_listener = (
        CFG_FILE_TEMPLATE_DISABLED_LISTENER % {
            # Fix: this config belongs to the disabled listener -- it was
            # previously rendered with self.listener_id_v6, so the fixture
            # registered under disabled_listener_id carried the wrong id.
            'listener_id': self.disabled_listener_id,
            'ns_name': constants.AMPHORA_NAMESPACE,
        })
    self.useFixture(
        test_utils.OpenFixture(
            util.keepalived_lvs_cfg_path(self.listener_id_v4),
            cfg_content_v4))
    self.useFixture(
        test_utils.OpenFixture(
            util.keepalived_lvs_cfg_path(self.listener_id_v6),
            cfg_content_v6))
    self.useFixture(
        test_utils.OpenFixture(
            util.keepalived_lvs_cfg_path(self.disabled_listener_id),
            cfg_content_disabled_listener))
def _check_udp_listener_exists(self, listener_id):
    """Raise a 404 HTTPException when the listener has no LVS config file."""
    cfg_path = util.keepalived_lvs_cfg_path(listener_id)
    if os.path.exists(cfg_path):
        return
    raise exceptions.HTTPException(
        response=webob.Response(
            json=dict(
                message='UDP Listener Not Found',
                details="No UDP listener with UUID: {0}".format(
                    listener_id)),
            status=404))
def test_upload_lvs_listener_config_with_vrrp_check_dir(
        self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod,
        m_os_sysinit, m_copy2, mock_netns, mock_install_netns,
        mock_systemctl, mock_get_lbs, mock_get_lvs_listeners):
    """PUT an LVS listener config in ACTIVE_STANDBY topology.

    Verifies the agent creates the lvs dir and backend check-script dir,
    installs/enables the netns and per-listener systemd services, writes
    the config, systemd unit and VRRP check script with the expected open
    flags/modes, and removes the haproxy check script.
    """
    # Scripted answers for the os.path.exists calls, in call order.
    # NOTE(review): order must match the handler's exists() sequence --
    # confirm against upload_lvs_listener_config if that changes.
    m_exists.side_effect = [False, False, True, True, False, False, False]
    mock_get_lbs.return_value = []
    mock_get_lvs_listeners.return_value = [self.FAKE_ID]
    cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID)
    m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open
    conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
    # ACTIVE_STANDBY makes the handler install the VRRP check script.
    conf.config(group='controller_worker',
                loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY)
    with mock.patch('os.open') as m_open, mock.patch.object(
            os, 'fdopen', m) as m_fdopen:
        # Fake fds returned for the three os.open calls (cfg, unit, script).
        m_open.side_effect = [
            'TEST-WRITE-CFG',
            'TEST-WRITE-SYSINIT',
            'TEST-WRITE-UDP-VRRP-CHECK'
        ]
        res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID),
                              data=self.NORMAL_CFG_CONTENT)
        os_mkdir_calls = [
            mock.call(util.keepalived_lvs_dir()),
            mock.call(util.keepalived_backend_check_script_dir())
        ]
        m_os_mkdir.assert_has_calls(os_mkdir_calls)
        mock_install_netns.assert_called_once()
        systemctl_calls = [
            mock.call(consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX),
            mock.call(consts.ENABLE,
                      'octavia-keepalivedlvs-%s' % str(self.FAKE_ID)),
        ]
        mock_systemctl.assert_has_calls(systemctl_calls)
        m_os_chmod.assert_called_with(
            util.keepalived_backend_check_script_path(), stat.S_IEXEC)
        # Config and unit files are written 0644; check script 0100 (IEXEC).
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        systemd_cfg_path = util.keepalived_lvs_init_path(
            consts.INIT_SYSTEMD, self.FAKE_ID)
        script_path = os.path.join(
            util.keepalived_check_scripts_dir(),
            keepalivedlvs.KEEPALIVED_CHECK_SCRIPT_NAME)
        m_open_calls = [
            mock.call(cfg_path, flags, mode),
            mock.call(systemd_cfg_path, flags, mode),
            mock.call(script_path, flags, stat.S_IEXEC)
        ]
        m_open.assert_has_calls(m_open_calls)
        m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb')
        m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w')
        m_fdopen.assert_any_call('TEST-WRITE-UDP-VRRP-CHECK', 'w')
        m_os_rm.assert_called_once_with(util.haproxy_check_script_path())
    self.assertEqual(200, res.status_code)
def test_keepalived_lvs_cfg_path(self):
    """The LVS config path is <base>/lvs/octavia-keepalivedlvs-<id>.conf."""
    fake_path = '/fake/path'
    self.CONF.config(group="haproxy_amphora", base_path=fake_path)
    expected = '{base}/lvs/octavia-keepalivedlvs-{lid}.conf'.format(
        base=fake_path, lid=self.listener_id)
    self.assertEqual(expected,
                     util.keepalived_lvs_cfg_path(self.listener_id))
def get_udp_listener_config(self, listener_id):
    """Return the raw keepalivedlvs config of a listener as text/plain.

    :param listener_id: the id of the listener
    """
    self._check_udp_listener_exists(listener_id)
    cfg_path = util.keepalived_lvs_cfg_path(listener_id)
    with open(cfg_path, 'r') as cfg_file:
        contents = cfg_file.read()
    return webob.Response(contents, content_type='text/plain')
def get_lvs_listener_config(self, listener_id):
    """Return the raw keepalivedlvs config of a listener as text/plain.

    :param listener_id: the id of the listener
    """
    self._check_lvs_listener_exists(listener_id)
    cfg_path = util.keepalived_lvs_cfg_path(listener_id)
    with open(cfg_path, 'r') as cfg_file:
        contents = cfg_file.read()
    return webob.Response(contents, content_type='text/plain')
def _check_udp_listener_status(self, listener_id):
    """Derive a listener status from keepalived's pid files and config.

    Returns OFFLINE when no pid file exists (or the listener is rendered
    disabled, i.e. without a virtual_server block), ERROR when the pid
    file exists but the process does not, ACTIVE otherwise.
    """
    if not os.path.exists(util.keepalived_lvs_pids_path(listener_id)[0]):
        return consts.OFFLINE
    proc_path = os.path.join('/proc',
                             util.get_keepalivedlvs_pid(listener_id))
    if not os.path.exists(proc_path):
        return consts.ERROR
    # A disabled listener's config contains no virtual_server block.
    with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as cfg_file:
        cfg = cfg_file.read()
    return consts.ACTIVE if 'virtual_server' in cfg else consts.OFFLINE
def test_upload_udp_listener_config_with_vrrp_check_dir(
        self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod,
        m_os_sysinit, m_copy2, mock_netns, mock_install_netns,
        mock_systemctl):
    """PUT a UDP listener config when the VRRP check dir already exists.

    Verifies directory creation, netns + per-listener service enablement,
    and that the config, systemd unit and VRRP check script are opened
    with the expected flags and modes.
    """
    # Scripted answers for the handler's os.path.exists calls, in order.
    m_exists.side_effect = [False, False, True, True, True, False, False]
    cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID)
    m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open
    with mock.patch('os.open') as m_open, mock.patch.object(os,
                                                            'fdopen',
                                                            m) as m_fdopen:
        # Fake fds for the three os.open calls (cfg, unit, check script).
        m_open.side_effect = ['TEST-WRITE-CFG',
                              'TEST-WRITE-SYSINIT',
                              'TEST-WRITE-UDP-VRRP-CHECK']
        res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID),
                              data=self.NORMAL_CFG_CONTENT)
        os_mkdir_calls = [
            mock.call(util.keepalived_lvs_dir()),
            mock.call(util.keepalived_backend_check_script_dir())
        ]
        m_os_mkdir.assert_has_calls(os_mkdir_calls)
        mock_install_netns.assert_called_once()
        systemctl_calls = [
            mock.call(consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX),
            mock.call(consts.ENABLE,
                      'octavia-keepalivedlvs-%s' % str(self.FAKE_ID)),
        ]
        mock_systemctl.assert_has_calls(systemctl_calls)
        m_os_chmod.assert_called_with(
            util.keepalived_backend_check_script_path(), stat.S_IEXEC)
        # Config/unit files are 0644; the check script is opened IEXEC.
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        systemd_cfg_path = util.keepalived_lvs_init_path(
            consts.INIT_SYSTEMD, self.FAKE_ID)
        script_path = os.path.join(
            util.keepalived_check_scripts_dir(),
            keepalivedlvs.KEEPALIVED_CHECK_SCRIPT_NAME)
        m_open_calls = [
            mock.call(cfg_path, flags, mode),
            mock.call(systemd_cfg_path, flags, mode),
            mock.call(script_path, flags, stat.S_IEXEC)
        ]
        m_open.assert_has_calls(m_open_calls)
        m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb')
        m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w')
        m_fdopen.assert_any_call('TEST-WRITE-UDP-VRRP-CHECK', 'w')
    self.assertEqual(200, res.status_code)
def test_upload_udp_listener_config_no_vrrp_check_dir(
        self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod,
        m_os_sysinit, m_copy2, mock_netns, mock_install_netns,
        mock_systemctl):
    """PUT a UDP listener config when no VRRP check dir is present.

    Only the config file and the systemd unit should be written -- no
    VRRP check script is opened in this path.
    """
    # Scripted answers for the handler's os.path.exists calls, in order.
    m_exists.side_effect = [False, False, True, True, False, False]
    cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID)
    m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open
    with mock.patch('os.open') as m_open, mock.patch.object(
            os, 'fdopen', m) as m_fdopen:
        # Only two os.open calls are expected: config file + systemd unit.
        m_open.side_effect = ['TEST-WRITE-CFG', 'TEST-WRITE-SYSINIT']
        res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID),
                              data=self.NORMAL_CFG_CONTENT)
        mock_install_netns.assert_called_once()
        systemctl_calls = [
            mock.call(consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX),
            mock.call(consts.ENABLE,
                      'octavia-keepalivedlvs-%s' % str(self.FAKE_ID)),
        ]
        mock_systemctl.assert_has_calls(systemctl_calls)
        os_mkdir_calls = [
            mock.call(util.keepalived_lvs_dir()),
            mock.call(util.keepalived_backend_check_script_dir())
        ]
        m_os_mkdir.assert_has_calls(os_mkdir_calls)
        m_os_chmod.assert_called_with(
            util.keepalived_backend_check_script_path(), stat.S_IEXEC)
        # Both files are written 0644.
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        systemd_cfg_path = util.keepalived_lvs_init_path(
            consts.INIT_SYSTEMD, self.FAKE_ID)
        m_open_calls = [
            mock.call(cfg_path, flags, mode),
            mock.call(systemd_cfg_path, flags, mode)
        ]
        m_open.assert_has_calls(m_open_calls)
        m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb')
        m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w')
    self.assertEqual(200, res.status_code)
def upload_lvs_listener_config(self, listener_id):
    """Install a new keepalivedlvs configuration for an LVS listener.

    Streams the config file to disk, renders and installs the init script
    for the host's init system, enables the per-listener service on boot
    and, for ACTIVE_STANDBY topology, injects the keepalived VRRP check
    script.

    :param listener_id: UUID of the LVS listener
    :returns: a 200 webob.Response carrying an ETag of the uploaded body,
        or a 500 response when enabling the sysvinit service fails
    :raises exceptions.Conflict: when the backend check script cannot be
        located for installation
    """
    stream = loadbalancer.Wrapped(flask.request.stream)
    NEED_CHECK = True

    if not os.path.exists(util.keepalived_lvs_dir()):
        os.makedirs(util.keepalived_lvs_dir())
    if not os.path.exists(util.keepalived_backend_check_script_dir()):
        current_file_dir, _ = os.path.split(os.path.abspath(__file__))
        script_dir = os.path.join(
            os.path.abspath(os.path.join(current_file_dir, '../..')),
            'utils')
        # Explicit existence checks instead of ``assert``: asserts are
        # stripped when Python runs with -O, which would have silently
        # skipped this validation.
        if not (os.path.exists(script_dir) and
                os.path.exists(os.path.join(script_dir,
                                            CHECK_SCRIPT_NAME))):
            raise exceptions.Conflict(
                description='%(file_name)s not Found for '
                            'UDP Listener %(listener_id)s' % {
                                'file_name': CHECK_SCRIPT_NAME,
                                'listener_id': listener_id})
        os.makedirs(util.keepalived_backend_check_script_dir())
        shutil.copy2(os.path.join(script_dir, CHECK_SCRIPT_NAME),
                     util.keepalived_backend_check_script_path())
        os.chmod(util.keepalived_backend_check_script_path(), stat.S_IEXEC)
    # Based on current topology setting, only the amphora instances in
    # Active-Standby topology will create the directory below. So for
    # Single topology, it should not create the directory and the check
    # scripts for status change.
    if (CONF.controller_worker.loadbalancer_topology !=
            consts.TOPOLOGY_ACTIVE_STANDBY):
        NEED_CHECK = False

    conf_file = util.keepalived_lvs_cfg_path(listener_id)
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    # mode 00644
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    with os.fdopen(os.open(conf_file, flags, mode), 'wb') as f:
        b = stream.read(BUFFER)
        while b:
            f.write(b)
            b = stream.read(BUFFER)

    init_system = util.get_os_init_system()
    file_path = util.keepalived_lvs_init_path(init_system, listener_id)
    if init_system == consts.INIT_SYSTEMD:
        template = SYSTEMD_TEMPLATE
        # Render and install the network namespace systemd service
        util.install_netns_systemd_service()
        util.run_systemctl_command(consts.ENABLE,
                                   consts.AMP_NETNS_SVC_PREFIX)
    elif init_system == consts.INIT_UPSTART:
        template = UPSTART_TEMPLATE
    elif init_system == consts.INIT_SYSVINIT:
        template = SYSVINIT_TEMPLATE
    else:
        raise util.UnknownInitError()

    # Render and install the keepalivedlvs init script
    if init_system == consts.INIT_SYSTEMD:
        # mode 00644
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    else:
        # mode 00755
        mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
    keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path(
        listener_id)
    if not os.path.exists(file_path):
        with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file:
            text = template.render(
                keepalived_pid=keepalived_pid,
                vrrp_pid=vrrp_pid,
                check_pid=check_pid,
                keepalived_cmd=consts.KEEPALIVED_CMD,
                keepalived_cfg=util.keepalived_lvs_cfg_path(listener_id),
                amphora_nsname=consts.AMPHORA_NAMESPACE,
                amphora_netns=consts.AMP_NETNS_SVC_PREFIX,
                administrative_log_facility=(
                    CONF.amphora_agent.administrative_log_facility),
            )
            text_file.write(text)

    # Make sure the keepalivedlvs service is enabled on boot
    if init_system == consts.INIT_SYSTEMD:
        util.run_systemctl_command(
            consts.ENABLE, "octavia-keepalivedlvs-%s" % str(listener_id))
    elif init_system == consts.INIT_SYSVINIT:
        init_enable_cmd = "insserv {file}".format(file=file_path)
        try:
            subprocess.check_output(init_enable_cmd.split(),
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.debug(
                'Failed to enable '
                'octavia-keepalivedlvs service: '
                '%(err)s', {'err': str(e)})
            return webob.Response(json=dict(
                message="Error enabling "
                        "octavia-keepalivedlvs service",
                details=e.output), status=500)

    if NEED_CHECK:
        # inject the check script for keepalived process
        script_path = os.path.join(util.keepalived_check_scripts_dir(),
                                   KEEPALIVED_CHECK_SCRIPT_NAME)
        if not os.path.exists(script_path):
            if not os.path.exists(util.keepalived_check_scripts_dir()):
                os.makedirs(util.keepalived_check_scripts_dir())
            with os.fdopen(os.open(script_path, flags, stat.S_IEXEC),
                           'w') as script_file:
                text = check_script_file_template.render(
                    consts=consts,
                    init_system=init_system,
                    keepalived_lvs_pid_dir=util.keepalived_lvs_dir())
                script_file.write(text)
        util.vrrp_check_script_update(None, consts.AMP_ACTION_START)

    res = webob.Response(json={'message': 'OK'}, status=200)
    res.headers['ETag'] = stream.get_md5()
    return res
def delete_lvs_listener(self, listener_id):
    """Remove an LVS listener's keepalived service, pid files and config.

    Stops the per-listener keepalivedlvs service if it is still running,
    removes stale pid files, disables the service in the init system and
    deletes the rendered init script and configuration file.

    :param listener_id: UUID of the LVS listener
    :returns: 200/'OK' webob.Response (also when the listener is already
        gone -- deletion is idempotent); 500 when stopping or disabling
        the service fails
    """
    try:
        self._check_lvs_listener_exists(listener_id)
    except exceptions.HTTPException:
        return webob.Response(json={'message': 'OK'})

    # check if that keepalived is still running and if stop it
    keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path(
        listener_id)
    if os.path.exists(keepalived_pid) and os.path.exists(
            os.path.join('/proc',
                         util.get_keepalivedlvs_pid(listener_id))):
        cmd = ("/usr/sbin/service "
               "octavia-keepalivedlvs-{0} stop".format(listener_id))
        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error("Failed to stop keepalivedlvs service: %s", str(e))
            return webob.Response(json=dict(
                message="Error stopping keepalivedlvs",
                details=e.output), status=500)

    # The lvs check script decides liveness from the keepalived pid file,
    # so after stopping the service make sure no stale pid files remain.
    # Fix: remove each file individually -- only a subset may exist, and
    # a blind os.remove() on a missing one raised FileNotFoundError.
    for pid_file in (keepalived_pid, vrrp_pid, check_pid):
        if os.path.exists(pid_file):
            os.remove(pid_file)

    # disable the service
    init_system = util.get_os_init_system()
    init_path = util.keepalived_lvs_init_path(init_system, listener_id)
    if init_system == consts.INIT_SYSTEMD:
        util.run_systemctl_command(
            consts.DISABLE, "octavia-keepalivedlvs-%s" % str(listener_id))
    elif init_system == consts.INIT_SYSVINIT:
        init_disable_cmd = "insserv -r {file}".format(file=init_path)
    elif init_system != consts.INIT_UPSTART:
        raise util.UnknownInitError()

    if init_system == consts.INIT_SYSVINIT:
        try:
            subprocess.check_output(init_disable_cmd.split(),
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error(
                "Failed to disable "
                "octavia-keepalivedlvs-%(list)s service: "
                "%(err)s", {
                    'list': listener_id,
                    'err': str(e)
                })
            return webob.Response(json=dict(
                message=("Error disabling octavia-keepalivedlvs-"
                         "{0} service".format(listener_id)),
                details=e.output), status=500)

    # delete init script ,config file and log file for that listener
    if os.path.exists(init_path):
        os.remove(init_path)
    if os.path.exists(util.keepalived_lvs_cfg_path(listener_id)):
        os.remove(util.keepalived_lvs_cfg_path(listener_id))

    return webob.Response(json={'message': 'OK'})
def get_udp_listener_resource_ipports_nsname(listener_id):
    """Parse a listener's keepalived LVS config into resource ip:ports.

    Returns a tuple of (resource_ipport_mapping, ns_name). The mapping is
    empty (only ns_name usable) when the config contains no virtual_server
    entry, i.e. the default pool has no enabled member yet.
    """
    # resource_ipport_mapping = {'Listener': {'id': listener-id,
    #                                         'ipport': ipport},
    #                            'Pool': {'id': pool-id},
    #                            'Members': [{'id': member-id-1,
    #                                         'ipport': ipport},
    #                                        {'id': member-id-2,
    #                                         'ipport': ipport}],
    #                            'HealthMonitor': {'id': healthmonitor-id}}
    resource_ipport_mapping = {}
    with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as f:
        cfg = f.read()
        ns_name = NS_REGEX.findall(cfg)[0]
        # Try IPv4 virtual_server first, then IPv6.
        listener_ip_port = V4_VS_REGEX.findall(cfg)
        if not listener_ip_port:
            listener_ip_port = V6_VS_REGEX.findall(cfg)
        listener_ip_port = listener_ip_port[0] if listener_ip_port else []
        if not listener_ip_port:
            # If not get listener_ip_port from the lvs config file,
            # that means the udp listener's default pool have no enabled
            # member yet. But at this moment, we can get listener_id and
            # ns_name, so for this function, we will just return ns_name
            return resource_ipport_mapping, ns_name
        # Collect each real_server's (ip, port) pair, preserving config
        # order; this order is assumed to match the Member comment order
        # parsed below -- TODO confirm against the config template.
        cfg_line = cfg.split('\n')
        rs_ip_port_list = []
        for line in cfg_line:
            if 'real_server' in line:
                res = V4_RS_REGEX.findall(line)
                if not res:
                    res = V6_RS_REGEX.findall(line)
                rs_ip_port_list.append(res[0])
        # The template writes "# Configuration for <Type> <uuid>" comments;
        # fold them into the mapping ('Member' entries become a 'Members'
        # list).
        resource_type_ids = CONFIG_COMMENT_REGEX.findall(cfg)
        for resource_type, resource_id in resource_type_ids:
            value = {'id': resource_id}
            if resource_type == 'Member':
                resource_type = '%ss' % resource_type
                if resource_type not in resource_ipport_mapping:
                    value = [value]
            if resource_type not in resource_ipport_mapping:
                resource_ipport_mapping[resource_type] = value
            elif resource_type == 'Members':
                resource_ipport_mapping[resource_type].append(value)
        # Attach "ip:port" strings; IPv6 addresses get bracketed first.
        if rs_ip_port_list:
            rs_ip_port_count = len(rs_ip_port_list)
            for index in range(rs_ip_port_count):
                if ipaddress.ip_address(
                        six.text_type(
                            rs_ip_port_list[index][0])).version == 6:
                    rs_ip_port_list[index] = (
                        '[' + rs_ip_port_list[index][0] + ']',
                        rs_ip_port_list[index][1])
                resource_ipport_mapping['Members'][index]['ipport'] = (
                    rs_ip_port_list[index][0] + ':' +
                    rs_ip_port_list[index][1])
        if ipaddress.ip_address(
                six.text_type(listener_ip_port[0])).version == 6:
            listener_ip_port = (
                '[' + listener_ip_port[0] + ']', listener_ip_port[1])
        resource_ipport_mapping['Listener']['ipport'] = (
            listener_ip_port[0] + ':' + listener_ip_port[1])
    return resource_ipport_mapping, ns_name
def get_udp_listener_pool_status(listener_id):
    """Compute the pool and per-member status for a UDP listener.

    Returns {} when the listener has no pool, and a
    {'lvs': {'uuid', 'status', 'members'}} dict otherwise. Member status
    is derived from the kernel's realserver table: DOWN when missing,
    DRAIN when weight is 0, otherwise the reported status; without any
    realserver data, members are DOWN (HM enabled) or NO_CHECK.
    """
    (resource_ipport_mapping,
     ns_name) = get_udp_listener_resource_ipports_nsname(listener_id)
    if 'Pool' not in resource_ipport_mapping:
        return {}
    if 'Members' not in resource_ipport_mapping:
        # Pool exists but has no (enabled) members.
        return {
            'lvs': {
                'uuid': resource_ipport_mapping['Pool']['id'],
                'status': constants.DOWN,
                'members': {}
            }
        }
    with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as f:
        cfg = f.read()
        # A health monitor renders a checker block in the config.
        hm_enabled = len(CHECKER_REGEX.findall(cfg)) > 0
    _, realserver_result = get_listener_realserver_mapping(
        ns_name, resource_ipport_mapping['Listener']['ipport'],
        hm_enabled)
    pool_status = constants.UP
    member_results = {}
    if realserver_result:
        member_ip_port_list = [
            member['ipport'] for member in
            resource_ipport_mapping['Members']
        ]
        # Members present in the config but absent from the kernel's
        # realserver table are considered DOWN.
        down_member_ip_port_set = set(member_ip_port_list) - set(
            list(realserver_result.keys()))
        for member_ip_port in member_ip_port_list:
            member_id = None
            for member in resource_ipport_mapping['Members']:
                if member['ipport'] == member_ip_port:
                    member_id = member['id']
            if member_ip_port is None:
                status = constants.MAINT
            elif member_ip_port in down_member_ip_port_set:
                status = constants.DOWN
            elif int(realserver_result[member_ip_port]['Weight']) == 0:
                status = constants.DRAIN
            else:
                status = realserver_result[member_ip_port]['status']
            if member_id:
                member_results[member_id] = status
    else:
        # No realserver data at all: with a health monitor this means the
        # whole pool is down; without one, status cannot be checked.
        if hm_enabled:
            pool_status = constants.DOWN
        for member in resource_ipport_mapping['Members']:
            if member['ipport'] is None:
                member_results[member['id']] = constants.MAINT
            elif hm_enabled:
                member_results[member['id']] = constants.DOWN
            else:
                member_results[member['id']] = constants.NO_CHECK
    return {
        'lvs': {
            'uuid': resource_ipport_mapping['Pool']['id'],
            'status': pool_status,
            'members': member_results
        }
    }
def get_udp_listener_resource_ipports_nsname(listener_id):
    """Parse a listener's keepalived LVS config into resource ip:ports.

    Returns (resource_ipport_mapping, ns_name). Disabled members appear
    in the mapping with 'ipport': None. The mapping is empty (only
    ns_name usable) when there is no virtual_server entry, i.e. the
    default pool has no enabled member yet.
    """
    # resource_ipport_mapping = {'Listener': {'id': listener-id,
    #                                         'ipport': ipport},
    #                            'Pool': {'id': pool-id},
    #                            'Members': [{'id': member-id-1,
    #                                         'ipport': ipport},
    #                                        {'id': member-id-2,
    #                                         'ipport': ipport}],
    #                            'HealthMonitor': {'id': healthmonitor-id}}
    resource_ipport_mapping = {}
    with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as f:
        cfg = f.read()
        ns_name = NS_REGEX.findall(cfg)[0]
        # Try IPv4 virtual_server first, then IPv6.
        listener_ip_port = V4_VS_REGEX.findall(cfg)
        if not listener_ip_port:
            listener_ip_port = V6_VS_REGEX.findall(cfg)
        listener_ip_port = listener_ip_port[0] if listener_ip_port else []
        if not listener_ip_port:
            # If not get listener_ip_port from the lvs config file,
            # that means the udp listener's default pool have no enabled
            # member yet. But at this moment, we can get listener_id and
            # ns_name, so for this function, we will just return ns_name
            return resource_ipport_mapping, ns_name
        # Collect each real_server's (ip, port) pair in config order;
        # this order is assumed to match the Member comment order parsed
        # below -- TODO confirm against the config template.
        cfg_line = cfg.split('\n')
        rs_ip_port_list = []
        for line in cfg_line:
            if 'real_server' in line:
                res = V4_RS_REGEX.findall(line)
                if not res:
                    res = V6_RS_REGEX.findall(line)
                rs_ip_port_list.append(res[0])
        # "# Configuration for <Type> <uuid>" comments drive the mapping;
        # 'Member' entries are folded into a 'Members' list.
        resource_type_ids = CONFIG_COMMENT_REGEX.findall(cfg)
        for resource_type, resource_id in resource_type_ids:
            value = {'id': resource_id}
            if resource_type == 'Member':
                resource_type = '%ss' % resource_type
                if resource_type not in resource_ipport_mapping:
                    value = [value]
            if resource_type not in resource_ipport_mapping:
                resource_ipport_mapping[resource_type] = value
            elif resource_type == 'Members':
                resource_ipport_mapping[resource_type].append(value)
        # Disabled members have a dedicated comment and no real_server
        # line; record them with ipport None so status code can mark them
        # MAINT.
        disabled_member_ids = DISABLED_MEMBER_COMMENT_REGEX.findall(cfg)
        resource_type = 'Members'
        for member_id in disabled_member_ids:
            value = {'id': member_id, 'ipport': None}
            if resource_type not in resource_ipport_mapping:
                resource_ipport_mapping[resource_type] = []
            resource_ipport_mapping[resource_type].append(value)
        # Attach "ip:port" strings; IPv6 addresses get bracketed first.
        if rs_ip_port_list:
            rs_ip_port_count = len(rs_ip_port_list)
            for index in range(rs_ip_port_count):
                if ipaddress.ip_address(
                        six.text_type(
                            rs_ip_port_list[index][0])).version == 6:
                    rs_ip_port_list[index] = (
                        '[' + rs_ip_port_list[index][0] + ']',
                        rs_ip_port_list[index][1])
                resource_ipport_mapping['Members'][index]['ipport'] = (
                    rs_ip_port_list[index][0] + ':' +
                    rs_ip_port_list[index][1])
        if ipaddress.ip_address(six.text_type(
                listener_ip_port[0])).version == 6:
            listener_ip_port = ('[' + listener_ip_port[0] + ']',
                                listener_ip_port[1])
        resource_ipport_mapping['Listener']['ipport'] = (
            listener_ip_port[0] + ':' + listener_ip_port[1])
    return resource_ipport_mapping, ns_name
def get_lvs_listener_pool_status(listener_id):
    """Compute the pool and per-member status for an LVS listener.

    Returns {} when the listener has no pool, and a
    {'lvs': {'uuid', 'status', 'members'}} dict otherwise. While the
    keepalived config is newer than the check pid file the service is
    considered restarting, and would-be-DOWN members report RESTARTING
    instead.
    """
    (resource_ipport_mapping,
     ns_name) = get_lvs_listener_resource_ipports_nsname(listener_id)
    if 'Pool' not in resource_ipport_mapping:
        return {}
    if 'Members' not in resource_ipport_mapping:
        # Pool exists but has no members rendered in the config.
        return {
            'lvs': {
                'uuid': resource_ipport_mapping['Pool']['id'],
                'status': constants.UP,
                'members': {}
            }
        }
    config_path = util.keepalived_lvs_cfg_path(listener_id)
    pids_pathes = util.keepalived_lvs_pids_path(listener_id)
    config_stat = os.stat(config_path)
    check_pid_stat = os.stat(pids_pathes[2])
    # Indicates that keepalived configuration has been updated but the
    # service has yet to be restarted.
    # NOTE: It only works if we are doing a RESTART on configuration
    # change, Iaa34db6cb1dfed98e96a585c5d105e263c7efa65 forces a RESTART
    # instead of a RELOAD, we need to be careful if we want to switch back
    # to RELOAD after updating to a recent keepalived release.
    restarting = config_stat.st_mtime > check_pid_stat.st_mtime
    with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as f:
        cfg = f.read()
        # A health monitor renders a checker block in the config.
        hm_enabled = len(CHECKER_REGEX.findall(cfg)) > 0
    _, realserver_result = get_listener_realserver_mapping(
        ns_name, resource_ipport_mapping['Listener']['ipport'],
        hm_enabled)
    pool_status = constants.UP
    member_results = {}
    if realserver_result:
        member_ip_port_list = [
            member['ipport'] for member in
            resource_ipport_mapping['Members']
        ]
        # Members present in the config but absent from the kernel's
        # realserver table are down (or restarting).
        down_member_ip_port_set = set(member_ip_port_list) - set(
            list(realserver_result.keys()))
        for member_ip_port in member_ip_port_list:
            member_id = None
            for member in resource_ipport_mapping['Members']:
                if member['ipport'] == member_ip_port:
                    member_id = member['id']
            if member_ip_port is None:
                # Disabled members (no real_server entry) are MAINT.
                status = constants.MAINT
            elif member_ip_port in down_member_ip_port_set:
                status = (constants.RESTARTING
                          if restarting else constants.DOWN)
            elif int(realserver_result[member_ip_port]['Weight']) == 0:
                status = constants.DRAIN
            else:
                status = realserver_result[member_ip_port]['status']
            if member_id:
                member_results[member_id] = status
    else:
        # No realserver data at all: with a health monitor this means the
        # whole pool is down; without one, status cannot be checked.
        if hm_enabled:
            pool_status = constants.DOWN
        for member in resource_ipport_mapping['Members']:
            if member['ipport'] is None:
                member_results[member['id']] = constants.MAINT
            elif hm_enabled:
                member_results[member['id']] = (constants.RESTARTING
                                                if restarting
                                                else constants.DOWN)
            else:
                member_results[member['id']] = constants.NO_CHECK
    return {
        'lvs': {
            'uuid': resource_ipport_mapping['Pool']['id'],
            'status': pool_status,
            'members': member_results
        }
    }
def delete_udp_listener(self, listener_id):
    """Remove a UDP listener's keepalived service, pid files and config.

    Stops the per-listener keepalivedlvs service if it is still running,
    removes stale pid files, disables the service in the init system and
    deletes the rendered init script and configuration file.

    :param listener_id: UUID of the UDP listener
    :returns: 200/'OK' webob.Response (also when the listener is already
        gone -- deletion is idempotent); 500 when stopping or disabling
        the service fails
    """
    try:
        self._check_udp_listener_exists(listener_id)
    except exceptions.HTTPException:
        return webob.Response(json={'message': 'OK'})

    # check if that keepalived is still running and if stop it
    keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path(
        listener_id)
    if os.path.exists(keepalived_pid) and os.path.exists(
            os.path.join('/proc',
                         util.get_keepalivedlvs_pid(listener_id))):
        cmd = ("/usr/sbin/service "
               "octavia-keepalivedlvs-{0} stop".format(listener_id))
        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error("Failed to stop keepalivedlvs service: %s", e)
            return webob.Response(json=dict(
                message="Error stopping keepalivedlvs",
                details=e.output), status=500)

    # The lvs check script decides liveness from the keepalived pid file,
    # so after stopping the service make sure no stale pid files remain.
    # Fix: remove each file individually -- only a subset may exist, and
    # a blind os.remove() on a missing one raised FileNotFoundError.
    for pid_file in (keepalived_pid, vrrp_pid, check_pid):
        if os.path.exists(pid_file):
            os.remove(pid_file)

    # disable the service
    init_system = util.get_os_init_system()
    init_path = util.keepalived_lvs_init_path(init_system, listener_id)
    if init_system == consts.INIT_SYSTEMD:
        util.run_systemctl_command(
            consts.DISABLE, "octavia-keepalivedlvs-%s" % str(listener_id))
    elif init_system == consts.INIT_SYSVINIT:
        init_disable_cmd = "insserv -r {file}".format(file=init_path)
    elif init_system != consts.INIT_UPSTART:
        raise util.UnknownInitError()

    if init_system == consts.INIT_SYSVINIT:
        try:
            subprocess.check_output(init_disable_cmd.split(),
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error("Failed to disable "
                      "octavia-keepalivedlvs-%(list)s service: "
                      "%(err)s", {'list': listener_id, 'err': e})
            return webob.Response(json=dict(
                message=(
                    "Error disabling octavia-keepalivedlvs-"
                    "{0} service".format(listener_id)),
                details=e.output), status=500)

    # delete init script ,config file and log file for that listener
    if os.path.exists(init_path):
        os.remove(init_path)
    if os.path.exists(util.keepalived_lvs_cfg_path(listener_id)):
        os.remove(util.keepalived_lvs_cfg_path(listener_id))

    return webob.Response(json={'message': 'OK'})
def upload_udp_listener_config(self, listener_id):
    """Install a new keepalivedlvs configuration for a UDP listener.

    Streams the config file to disk, renders and installs the init script
    for the host's init system, enables the per-listener service on boot
    and, when the check-scripts directory exists, injects the keepalived
    VRRP check script.

    :param listener_id: UUID of the UDP listener
    :returns: a 200 webob.Response carrying an ETag of the uploaded body,
        or a 500 response when enabling the sysvinit service fails
    :raises exceptions.Conflict: when the backend check script cannot be
        located for installation
    """
    stream = listener.Wrapped(flask.request.stream)
    NEED_CHECK = True
    if not os.path.exists(util.keepalived_lvs_dir()):
        os.makedirs(util.keepalived_lvs_dir())
    if not os.path.exists(util.keepalived_backend_check_script_dir()):
        current_file_dir, _ = os.path.split(os.path.abspath(__file__))
        try:
            # Locate the bundled check script relative to this module.
            # NOTE(review): ``assert`` is stripped under ``python -O``,
            # which would silently skip this validation -- consider
            # explicit checks.
            script_dir = os.path.join(os.path.abspath(
                os.path.join(current_file_dir, '../..')), 'utils')
            assert True is os.path.exists(script_dir)
            assert True is os.path.exists(os.path.join(
                script_dir, CHECK_SCRIPT_NAME))
        except Exception:
            raise exceptions.Conflict(
                description='%(file_name)s not Found for '
                            'UDP Listener %(listener_id)s' %
                            {'file_name': CHECK_SCRIPT_NAME,
                             'listener_id': listener_id})
        os.makedirs(util.keepalived_backend_check_script_dir())
        shutil.copy2(os.path.join(script_dir, CHECK_SCRIPT_NAME),
                     util.keepalived_backend_check_script_path())
        os.chmod(util.keepalived_backend_check_script_path(), stat.S_IEXEC)
    # Based on current topology setting, only the amphora instances in
    # Active-Standby topology will create the directory below. So for
    # Single topology, it should not create the directory and the check
    # scripts for status change.
    if not os.path.exists(util.keepalived_check_scripts_dir()):
        NEED_CHECK = False
    conf_file = util.keepalived_lvs_cfg_path(listener_id)
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    # mode 00644
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    # Stream the uploaded config to disk in BUFFER-sized chunks.
    with os.fdopen(os.open(conf_file, flags, mode), 'wb') as f:
        b = stream.read(BUFFER)
        while b:
            f.write(b)
            b = stream.read(BUFFER)
    init_system = util.get_os_init_system()
    file_path = util.keepalived_lvs_init_path(init_system, listener_id)
    if init_system == consts.INIT_SYSTEMD:
        template = SYSTEMD_TEMPLATE
        # Render and install the network namespace systemd service
        util.install_netns_systemd_service()
        util.run_systemctl_command(
            consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX)
    elif init_system == consts.INIT_UPSTART:
        template = UPSTART_TEMPLATE
    elif init_system == consts.INIT_SYSVINIT:
        template = SYSVINIT_TEMPLATE
    else:
        raise util.UnknownInitError()
    # Render and install the keepalivedlvs init script
    if init_system == consts.INIT_SYSTEMD:
        # mode 00644
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    else:
        # mode 00755
        mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
    keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path(
        listener_id)
    if not os.path.exists(file_path):
        with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file:
            text = template.render(
                keepalived_pid=keepalived_pid,
                vrrp_pid=vrrp_pid,
                check_pid=check_pid,
                keepalived_cmd=consts.KEEPALIVED_CMD,
                keepalived_cfg=util.keepalived_lvs_cfg_path(listener_id),
                amphora_nsname=consts.AMPHORA_NAMESPACE,
                amphora_netns=consts.AMP_NETNS_SVC_PREFIX
            )
            text_file.write(text)
    # Make sure the keepalivedlvs service is enabled on boot
    if init_system == consts.INIT_SYSTEMD:
        util.run_systemctl_command(
            consts.ENABLE, "octavia-keepalivedlvs-%s" % str(listener_id))
    elif init_system == consts.INIT_SYSVINIT:
        init_enable_cmd = "insserv {file}".format(file=file_path)
        try:
            subprocess.check_output(init_enable_cmd.split(),
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.debug('Failed to enable '
                      'octavia-keepalivedlvs service: '
                      '%(err)s', {'err': e})
            return webob.Response(json=dict(
                message="Error enabling "
                        "octavia-keepalivedlvs service",
                details=e.output), status=500)
    if NEED_CHECK:
        # inject the check script for keepalived process
        script_path = os.path.join(util.keepalived_check_scripts_dir(),
                                   KEEPALIVED_CHECK_SCRIPT_NAME)
        if not os.path.exists(script_path):
            with os.fdopen(os.open(script_path, flags, stat.S_IEXEC),
                           'w') as script_file:
                text = check_script_file_template.render(
                    consts=consts,
                    init_system=init_system,
                    keepalived_lvs_pid_dir=util.keepalived_lvs_dir()
                )
                script_file.write(text)
    res = webob.Response(json={'message': 'OK'}, status=200)
    res.headers['ETag'] = stream.get_md5()
    return res