def upload_keepalived_config():
    """Install a keepalived configuration pushed by the controller.

    Streams the request body into the keepalived config file, renders the
    init script (first upload only) and the check script, marks both
    executable via ``chmod +x``, and returns a 200 response whose ``ETag``
    header is the MD5 of the uploaded body (via the stream wrapper's
    ``get_md5()``).
    """
    # Wrap the WSGI stream so the body's MD5 is available afterwards.
    stream = listener.Wrapped(flask.request.stream)

    # First upload on this amphora: create the keepalived directory tree.
    if not os.path.exists(util.keepalived_dir()):
        os.makedirs(util.keepalived_dir())
        os.makedirs(util.keepalived_check_scripts_dir())

    conf_file = util.keepalived_cfg_path()
    # Copy the request body to the config file in BUFFER-sized chunks.
    with open(conf_file, 'w') as f:
        b = stream.read(BUFFER)
        while b:
            f.write(b)
            b = stream.read(BUFFER)

    # The init script is rendered only once; later uploads leave it as-is.
    if not os.path.exists(util.keepalived_init_path()):
        with open(util.keepalived_init_path(), 'w') as text_file:
            text = template.render(
                keepalived_pid=util.keepalived_pid_path(),
                keepalived_cmd=consts.KEEPALIVED_CMD,
                keepalived_cfg=util.keepalived_cfg_path(),
                keepalived_log=util.keepalived_log_path()
            )
            text_file.write(text)

    # Make the init script executable; a failure aborts with a 500.
    cmd = "chmod +x {file}".format(file=util.keepalived_init_path())
    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        LOG.debug("Failed to upload keepalived configuration. "
                  "Unable to chmod init script.")
        return flask.make_response(flask.jsonify(dict(
            message="Failed to upload keepalived configuration. "
                    "Unable to chmod init script.",
            details=e.output)), 500)

    # Renders the Keepalived check script
    with open(util.keepalived_check_script_path(), 'w') as text_file:
        text = check_script_template.render(
            check_scripts_dir=util.keepalived_check_scripts_dir()
        )
        text_file.write(text)

    # Make the check script executable as well.
    cmd = ("chmod +x {file}".format(
        file=util.keepalived_check_script_path()))
    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        LOG.debug("Failed to upload keepalived configuration. "
                  "Unable to chmod check script.")
        return flask.make_response(flask.jsonify(dict(
            message="Failed to upload keepalived configuration. "
                    "Unable to chmod check script.",
            details=e.output)), 500)

    res = flask.make_response(flask.jsonify({
        'message': 'OK'}), 200)
    res.headers['ETag'] = stream.get_md5()
    return res
def upload_keepalived_config():
    """Install a keepalived configuration pushed by the controller.

    Streams the request body into the keepalived config file, renders the
    init script (first upload only) and the check script, marks both
    executable, and returns a 200 response whose ``ETag`` header carries
    the MD5 of the uploaded body.
    """
    wrapped_stream = listener.Wrapped(flask.request.stream)

    # First upload creates the keepalived directory tree.
    if not os.path.exists(util.keepalived_dir()):
        os.makedirs(util.keepalived_dir())
        os.makedirs(util.keepalived_check_scripts_dir())

    # Copy the request body to the configuration file in BUFFER chunks.
    with open(util.keepalived_cfg_path(), 'w') as cfg_fh:
        chunk = wrapped_stream.read(BUFFER)
        while chunk:
            cfg_fh.write(chunk)
            chunk = wrapped_stream.read(BUFFER)

    # Render the init script once; subsequent uploads keep the existing one.
    if not os.path.exists(util.keepalived_init_path()):
        with open(util.keepalived_init_path(), 'w') as init_fh:
            init_fh.write(template.render(
                keepalived_pid=util.keepalived_pid_path(),
                keepalived_cmd=consts.KEEPALIVED_CMD,
                keepalived_cfg=util.keepalived_cfg_path(),
                keepalived_log=util.keepalived_log_path()))

    chmod_init = "chmod +x {file}".format(file=util.keepalived_init_path())
    try:
        subprocess.check_output(chmod_init.split(),
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        LOG.debug("Failed to upload keepalived configuration. "
                  "Unable to chmod init script.")
        return flask.make_response(
            flask.jsonify(
                dict(message="Failed to upload keepalived configuration. "
                             "Unable to chmod init script.",
                     details=err.output)), 500)

    # Render the keepalived check script (refreshed on every upload).
    with open(util.keepalived_check_script_path(), 'w') as check_fh:
        check_fh.write(check_script_template.render(
            check_scripts_dir=util.keepalived_check_scripts_dir()))

    chmod_check = "chmod +x {file}".format(
        file=util.keepalived_check_script_path())
    try:
        subprocess.check_output(chmod_check.split(),
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        LOG.debug("Failed to upload keepalived configuration. "
                  "Unable to chmod check script.")
        return flask.make_response(
            flask.jsonify(
                dict(message="Failed to upload keepalived configuration. "
                             "Unable to chmod check script.",
                     details=err.output)), 500)

    response = flask.make_response(flask.jsonify({'message': 'OK'}), 200)
    response.headers['ETag'] = wrapped_stream.get_md5()
    return response
def test_upload_keepalived_config(self, mock_remove, mock_rename,
                                  mock_makedirs, mock_exists):
    """Verify the VRRP upload endpoint opens files with safe flags/modes.

    Exercises PUT /<version>/vrrp/upload twice: once with the paths
    reported as existing (config file, mode 0644) and once with them
    missing (check script, mode 0755), asserting the os.open flags/mode
    and that os.fdopen received the fd returned by os.open.
    """
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC

    mock_exists.return_value = True
    cfg_path = util.keepalived_cfg_path()
    m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open

    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123

        rv = self.app.put('/' + api_server.VERSION + '/vrrp/upload',
                          data='test')
        # mode 0644 for the configuration file
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        mock_open.assert_called_with(cfg_path, flags, mode)
        # BUG FIX: the original called mock_fdopen(123, 'w'), which merely
        # invokes the mock and verifies nothing; assert the call instead.
        mock_fdopen.assert_called_with(123, 'w')
        self.assertEqual(200, rv.status_code)

    mock_exists.return_value = False
    script_path = util.keepalived_check_script_path()
    m = self.useFixture(test_utils.OpenFixture(script_path)).mock_open
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123

        rv = self.app.put('/' + api_server.VERSION + '/vrrp/upload',
                          data='test')
        # mode 0755 for the executable check script
        mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
        mock_open.assert_called_with(script_path, flags, mode)
        mock_fdopen.assert_called_with(123, 'w')
        self.assertEqual(200, rv.status_code)
def test_upload_keepalived_config(self, mock_remove, mock_rename,
                                  mock_makedirs, mock_exists):
    """Verify the VRRP upload endpoint opens files with safe flags/modes.

    Exercises PUT /<version>/vrrp/upload twice: once with the paths
    reported as existing (config file, mode 0644) and once with them
    missing (check script, mode 0755), asserting the os.open flags/mode
    and that os.fdopen received the fd returned by os.open.
    """
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC

    mock_exists.return_value = True
    cfg_path = util.keepalived_cfg_path()
    m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open

    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123

        rv = self.app.put('/' + api_server.VERSION + '/vrrp/upload',
                          data='test')
        # mode 0644 for the configuration file
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        mock_open.assert_called_with(cfg_path, flags, mode)
        mock_fdopen.assert_called_with(123, 'w')
        self.assertEqual(200, rv.status_code)

    mock_exists.return_value = False
    script_path = util.keepalived_check_script_path()
    m = self.useFixture(test_utils.OpenFixture(script_path)).mock_open
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123

        rv = self.app.put('/' + api_server.VERSION + '/vrrp/upload',
                          data='test')
        # mode 0755 for the executable check script
        mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
        mock_open.assert_called_with(script_path, flags, mode)
        mock_fdopen.assert_called_with(123, 'w')
        self.assertEqual(200, rv.status_code)
def build_keepalived_config(self, loadbalancer, amphora, vip_cidr):
    """Render the keepalived configuration for an Active/Standby amphora.

    :param loadbalancer: A load balancer object
    :param amphora: The amphora this configuration is rendered for
    :param vip_cidr: The VIP subnet cidr (string, IPv4 or IPv6)
    :returns: The rendered keepalived configuration text
    :raises ValueError: if the VIP address or CIDR fails ipaddress parsing
    """
    # Note on keepalived configuration: The current base configuration
    # enforced Master election whenever a high priority VRRP instance
    # start advertising its presence. Accordingly, the fallback behavior
    # - which I described in the blueprint - is the default behavior.
    # Although this is a stable behavior, this can be undesirable for
    # several backend services. To disable the fallback behavior, we need
    # to add the "nopreempt" flag in the backup instance section.
    peers_ips = []

    # Validate the VIP address and see if it is IPv6
    vip = loadbalancer.vip.ip_address
    vip_addr = ipaddress.ip_address(vip)
    vip_ipv6 = vip_addr.version == 6

    # Normalize and validate the VIP subnet CIDR
    vip_network_cidr = None
    if vip_ipv6:
        vip_network_cidr = ipaddress.IPv6Network(vip_cidr).with_prefixlen
    else:
        vip_network_cidr = ipaddress.IPv4Network(vip_cidr).with_prefixlen

    # Peers are the VRRP IPs of every other ALLOCATED amphora on this LB.
    for amp in filter(
            lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
            loadbalancer.amphorae):
        if amp.vrrp_ip != amphora.vrrp_ip:
            peers_ips.append(amp.vrrp_ip)
    return self.get_template(self.keepalived_template).render(
        {
            'vrrp_group_name': loadbalancer.vrrp_group.vrrp_group_name,
            'amp_role': amphora.role,
            'amp_intf': amphora.vrrp_interface,
            'amp_vrrp_id': amphora.vrrp_id,
            'amp_priority': amphora.vrrp_priority,
            'vrrp_garp_refresh':
                CONF.keepalived_vrrp.vrrp_garp_refresh_interval,
            'vrrp_garp_refresh_repeat':
                CONF.keepalived_vrrp.vrrp_garp_refresh_count,
            'vrrp_auth_type': loadbalancer.vrrp_group.vrrp_auth_type,
            'vrrp_auth_pass': loadbalancer.vrrp_group.vrrp_auth_pass,
            'amp_vrrp_ip': amphora.vrrp_ip,
            'peers_vrrp_ips': peers_ips,
            'vip_ip_address': vip,
            'advert_int': loadbalancer.vrrp_group.advert_int,
            'check_script_path': util.keepalived_check_script_path(),
            'vrrp_check_interval':
                CONF.keepalived_vrrp.vrrp_check_interval,
            'vrrp_fail_count': CONF.keepalived_vrrp.vrrp_fail_count,
            'vrrp_success_count':
                CONF.keepalived_vrrp.vrrp_success_count,
            'vip_network_cidr': vip_network_cidr,
            'vip_ipv6': vip_ipv6
        }, constants=constants)
def start_stop_listener(self, listener_id, action):
    """Start, stop or reload the haproxy service for a listener.

    Returns a webob response: 400 for an unknown action, 500 when the
    service command fails (unless haproxy reports the job is already
    running), otherwise 202.
    """
    action = action.lower()
    valid_actions = (consts.AMP_ACTION_START, consts.AMP_ACTION_STOP,
                     consts.AMP_ACTION_RELOAD)
    if action not in valid_actions:
        return webob.Response(json=dict(
            message='Invalid Request',
            details="Unknown action: {0}".format(action)), status=400)

    self._check_listener_exists(listener_id)

    # Since this script should be created at LB create time
    # we can check for this path to see if VRRP is enabled
    # on this amphora and not write the file if VRRP is not in use
    if os.path.exists(util.keepalived_check_script_path()):
        self.vrrp_check_script_update(listener_id, action)

    # HAProxy does not start the process when given a reload
    # so start it if haproxy is not already running
    if (action == consts.AMP_ACTION_RELOAD and
            consts.OFFLINE == self._check_haproxy_status(listener_id)):
        action = consts.AMP_ACTION_START

    service_cmd = "/usr/sbin/service haproxy-{listener_id} {action}".format(
        listener_id=listener_id, action=action)
    try:
        subprocess.check_output(service_cmd.split(),
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        if 'Job is already running' not in err.output:
            LOG.debug(
                "Failed to %(action)s haproxy-%(list)s service: %(err)s "
                "%(out)s", {'action': action, 'list': listener_id,
                            'err': err, 'out': err.output})
            return webob.Response(json=dict(
                message="Error {0}ing haproxy".format(action),
                details=err.output), status=500)

    if action in (consts.AMP_ACTION_STOP, consts.AMP_ACTION_RELOAD):
        return webob.Response(json=dict(
            message='OK',
            details='Listener {listener_id} {action}ed'.format(
                listener_id=listener_id, action=action)), status=202)

    return webob.Response(json=dict(
        message='OK',
        details='Configuration file is valid\n'
                'haproxy daemon for {0} started'.format(listener_id)),
        status=202)
def start_stop_listener(self, listener_id, action):
    """Start, stop or reload the haproxy service for a listener.

    :param listener_id: listener whose haproxy service is controlled
    :param action: one of the AMP_ACTION_* constants (case-insensitive)
    :returns: webob.Response — 400 for an unknown action, 500 when the
        service command fails (unless haproxy reports the job is already
        running), otherwise 202
    """
    action = action.lower()
    if action not in [consts.AMP_ACTION_START,
                      consts.AMP_ACTION_STOP,
                      consts.AMP_ACTION_RELOAD]:
        return webob.Response(json=dict(
            message='Invalid Request',
            details="Unknown action: {0}".format(action)), status=400)

    self._check_listener_exists(listener_id)

    # Since this script should be created at LB create time
    # we can check for this path to see if VRRP is enabled
    # on this amphora and not write the file if VRRP is not in use
    if os.path.exists(util.keepalived_check_script_path()):
        self.vrrp_check_script_update(listener_id, action)

    # HAProxy does not start the process when given a reload
    # so start it if haproxy is not already running
    if action == consts.AMP_ACTION_RELOAD:
        if consts.OFFLINE == self._check_haproxy_status(listener_id):
            action = consts.AMP_ACTION_START

    cmd = ("/usr/sbin/service haproxy-{listener_id} {action}".format(
        listener_id=listener_id, action=action))
    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # "Job is already running" is treated as success, not an error.
        if 'Job is already running' not in e.output:
            LOG.debug(
                "Failed to %(action)s haproxy-%(list)s service: %(err)s "
                "%(out)s", {'action': action, 'list': listener_id,
                            'err': e, 'out': e.output})
            return webob.Response(json=dict(
                message="Error {0}ing haproxy".format(action),
                details=e.output), status=500)

    if action in [consts.AMP_ACTION_STOP, consts.AMP_ACTION_RELOAD]:
        return webob.Response(json=dict(
            message='OK',
            details='Listener {listener_id} {action}ed'.format(
                listener_id=listener_id, action=action)), status=202)

    details = (
        'Configuration file is valid\n'
        'haproxy daemon for {0} started'.format(listener_id)
    )
    return webob.Response(json=dict(message='OK', details=details),
                          status=202)
def upload_keepalived_config():
    """Install a keepalived configuration pushed by the controller.

    Streams the request body into the keepalived config file (created
    mode 0644), renders the init script once and the check script on
    every upload (both created mode 0755), and returns a 200 response
    whose ``ETag`` header carries the MD5 of the uploaded body.
    """
    wrapped_stream = listener.Wrapped(flask.request.stream)

    # First upload creates the keepalived directory tree.
    if not os.path.exists(util.keepalived_dir()):
        os.makedirs(util.keepalived_dir())
        os.makedirs(util.keepalived_check_scripts_dir())

    open_flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    # mode 00644
    rw_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    cfg_fd = os.open(util.keepalived_cfg_path(), open_flags, rw_mode)
    with os.fdopen(cfg_fd, 'w') as cfg_fh:
        chunk = wrapped_stream.read(BUFFER)
        while chunk:
            cfg_fh.write(chunk)
            chunk = wrapped_stream.read(BUFFER)

    init_path = util.keepalived_init_path()
    # mode 00755
    exec_mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                 stat.S_IROTH | stat.S_IXOTH)
    # The init script is rendered only once per amphora.
    if not os.path.exists(init_path):
        init_fd = os.open(init_path, open_flags, exec_mode)
        with os.fdopen(init_fd, 'w') as init_fh:
            init_fh.write(template.render(
                keepalived_pid=util.keepalived_pid_path(),
                keepalived_cmd=consts.KEEPALIVED_CMD,
                keepalived_cfg=util.keepalived_cfg_path(),
                keepalived_log=util.keepalived_log_path(),
                amphora_nsname=consts.AMPHORA_NAMESPACE))

    # Renders the Keepalived check script
    check_fd = os.open(util.keepalived_check_script_path(), open_flags,
                       exec_mode)
    with os.fdopen(check_fd, 'w') as check_fh:
        check_fh.write(check_script_template.render(
            check_scripts_dir=util.keepalived_check_scripts_dir()))

    response = flask.make_response(flask.jsonify({'message': 'OK'}), 200)
    response.headers['ETag'] = wrapped_stream.get_md5()
    return response
def start_stop_listener(listener_id, action):
    """Start, stop or reload the haproxy service for a listener.

    :param listener_id: listener whose haproxy service is controlled
    :param action: 'start', 'stop' or 'reload' (case-insensitive)
    :returns: flask response — 400 for an unknown action, 500 when the
        service command fails (unless haproxy reports the job is already
        running), otherwise 202
    """
    action = action.lower()
    if action not in ['start', 'stop', 'reload']:
        return flask.make_response(
            flask.jsonify(
                dict(message='Invalid Request',
                     details="Unknown action: {0}".format(action))), 400)

    _check_listener_exists(listener_id)

    # Since this script should be created at LB create time
    # we can check for this path to see if VRRP is enabled
    # on this amphora and not write the file if VRRP is not in use
    if os.path.exists(util.keepalived_check_script_path()):
        vrrp_check_script_update(listener_id, action)

    cmd = ("/usr/sbin/service haproxy-{listener_id} {action}".format(
        listener_id=listener_id, action=action))
    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # "Job is already running" is treated as success, not an error.
        if 'Job is already running' not in e.output:
            LOG.debug("Failed to %(action)s HAProxy service: %(err)s",
                      {'action': action, 'err': e})
            return flask.make_response(
                flask.jsonify(
                    dict(message="Error {0}ing haproxy".format(action),
                         details=e.output)), 500)

    if action in ['stop', 'reload']:
        return flask.make_response(
            flask.jsonify(
                dict(message='OK',
                     details='Listener {listener_id} {action}ed'.format(
                         listener_id=listener_id, action=action))), 202)

    details = ('Configuration file is valid\nhaproxy daemon for {0} '.format(
        listener_id) + 'started')
    return flask.make_response(
        flask.jsonify(dict(message='OK', details=details)), 202)
def upload_keepalived_config():
    """Install a keepalived configuration pushed by the controller.

    Streams the request body into the keepalived config file (created
    mode 0644), renders the init script once and the check script on
    every upload (both created mode 0755), and returns a 200 response
    whose ``ETag`` header is the MD5 of the uploaded body.
    """
    # Wrap the WSGI stream so the body's MD5 is available afterwards.
    stream = listener.Wrapped(flask.request.stream)

    # First upload on this amphora: create the keepalived directory tree.
    if not os.path.exists(util.keepalived_dir()):
        os.makedirs(util.keepalived_dir())
        os.makedirs(util.keepalived_check_scripts_dir())

    conf_file = util.keepalived_cfg_path()
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    # mode 00644
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    # os.open + fdopen so the file is created with a controlled mode.
    with os.fdopen(os.open(conf_file, flags, mode), 'w') as f:
        b = stream.read(BUFFER)
        while b:
            f.write(b)
            b = stream.read(BUFFER)

    file_path = util.keepalived_init_path()
    # mode 00755
    mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
            stat.S_IROTH | stat.S_IXOTH)
    # The init script is rendered only once per amphora.
    if not os.path.exists(file_path):
        with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file:
            text = template.render(
                keepalived_pid=util.keepalived_pid_path(),
                keepalived_cmd=consts.KEEPALIVED_CMD,
                keepalived_cfg=util.keepalived_cfg_path(),
                keepalived_log=util.keepalived_log_path(),
                amphora_nsname=consts.AMPHORA_NAMESPACE)
            text_file.write(text)

    # Renders the Keepalived check script
    keepalived_path = util.keepalived_check_script_path()
    open_obj = os.open(keepalived_path, flags, mode)
    with os.fdopen(open_obj, 'w') as text_file:
        text = check_script_template.render(
            check_scripts_dir=util.keepalived_check_scripts_dir())
        text_file.write(text)

    res = flask.make_response(flask.jsonify({'message': 'OK'}), 200)
    res.headers['ETag'] = stream.get_md5()
    return res
def build_keepalived_config(self, loadbalancer, amphora): """Renders the loadblanacer keepalived configuration for Active/Standby :param loadbalancer: A lodabalancer object :param amp: An amphora object """ # Note on keepalived configuration: The current base configuration # enforced Master election whenever a high priority VRRP instance # start advertising its presence. Accordingly, the fallback behavior # - which I described in the blueprint - is the default behavior. # Although this is a stable behavior, this can be undesirable for # several backend services. To disable the fallback behavior, we need # to add the "nopreempt" flag in the backup instance section. peers_ips = [] for amp in six.moves.filter( lambda amp: amp.status == constants.AMPHORA_ALLOCATED, loadbalancer.amphorae): if amp.vrrp_ip != amphora.vrrp_ip: peers_ips.append(amp.vrrp_ip) return self.get_template(self.keepalived_template).render( {'vrrp_group_name': loadbalancer.vrrp_group.vrrp_group_name, 'amp_role': amphora.role, 'amp_intf': amphora.vrrp_interface, 'amp_vrrp_id': amphora.vrrp_id, 'amp_priority': amphora.vrrp_priority, 'vrrp_garp_refresh': CONF.keepalived_vrrp.vrrp_garp_refresh_interval, 'vrrp_garp_refresh_repeat': CONF.keepalived_vrrp.vrrp_garp_refresh_count, 'vrrp_auth_type': loadbalancer.vrrp_group.vrrp_auth_type, 'vrrp_auth_pass': loadbalancer.vrrp_group.vrrp_auth_pass, 'amp_vrrp_ip': amphora.vrrp_ip, 'peers_vrrp_ips': peers_ips, 'vip_ip_address': loadbalancer.vip.ip_address, 'advert_int': loadbalancer.vrrp_group.advert_int, 'check_script_path': util.keepalived_check_script_path(), 'vrrp_check_interval': CONF.keepalived_vrrp.vrrp_check_interval, 'vrrp_fail_count': CONF.keepalived_vrrp.vrrp_fail_count, 'vrrp_success_count': CONF.keepalived_vrrp.vrrp_success_count}, constants=constants)
def start_stop_listener(listener_id, action):
    """Start, stop or reload the haproxy service for a listener.

    Returns a flask response: 400 for an unknown action, 500 when the
    service command fails (unless haproxy reports the job is already
    running), otherwise 202.
    """
    action = action.lower()
    if action not in ('start', 'stop', 'reload'):
        return flask.make_response(
            flask.jsonify(
                dict(message='Invalid Request',
                     details="Unknown action: {0}".format(action))), 400)

    _check_listener_exists(listener_id)

    # Since this script should be created at LB create time
    # we can check for this path to see if VRRP is enabled
    # on this amphora and not write the file if VRRP is not in use
    if os.path.exists(util.keepalived_check_script_path()):
        vrrp_check_script_update(listener_id, action)

    service_cmd = "/usr/sbin/service haproxy-{listener_id} {action}".format(
        listener_id=listener_id, action=action)
    try:
        subprocess.check_output(service_cmd.split(),
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        if 'Job is already running' not in err.output:
            LOG.debug("Failed to %(action)s HAProxy service: %(err)s",
                      {'action': action, 'err': err})
            return flask.make_response(
                flask.jsonify(
                    dict(message="Error {0}ing haproxy".format(action),
                         details=err.output)), 500)

    if action in ('stop', 'reload'):
        return flask.make_response(
            flask.jsonify(
                dict(message='OK',
                     details='Listener {listener_id} {action}ed'.format(
                         listener_id=listener_id, action=action))), 202)

    success_details = (
        'Configuration file is valid\n'
        'haproxy daemon for {0} started'.format(listener_id))
    return flask.make_response(
        flask.jsonify(dict(message='OK', details=success_details)), 202)
def delete_listener(self, listener_id):
    """Tear down a listener: stop haproxy and remove its artifacts.

    Stops the haproxy service if it is running, best-effort removes the
    stats socket and cert directory, updates the VRRP check script if
    VRRP is in use, disables the service in the detected init system,
    and finally deletes the listener's directory and init script.

    :param listener_id: listener to delete
    :returns: webob.Response — 200 OK (also for an already-absent
        listener), or 500 if stopping/disabling the service fails
    :raises util.UnknownInitError: for an unrecognized init system
    """
    try:
        self._check_listener_exists(listener_id)
    except exceptions.HTTPException:
        # Listener already gone: deleting is idempotent.
        return webob.Response(json={'message': 'OK'})

    # check if that haproxy is still running and if stop it
    if os.path.exists(util.pid_path(listener_id)) and os.path.exists(
            os.path.join('/proc', util.get_haproxy_pid(listener_id))):
        cmd = "/usr/sbin/service haproxy-{0} stop".format(listener_id)
        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error("Failed to stop haproxy-%s service: %s %s",
                      listener_id, e, e.output)
            return webob.Response(json=dict(
                message="Error stopping haproxy",
                details=e.output), status=500)

    # parse config and delete stats socket (best effort)
    try:
        cfg = self._parse_haproxy_file(listener_id)
        os.remove(cfg['stats_socket'])
    except Exception:
        pass

    # Since this script should be deleted at LB delete time
    # we can check for this path to see if VRRP is enabled
    # on this amphora and not write the file if VRRP is not in use
    if os.path.exists(util.keepalived_check_script_path()):
        self.vrrp_check_script_update(listener_id,
                                      action=consts.AMP_ACTION_STOP)

    # delete the ssl files (best effort)
    try:
        shutil.rmtree(self._cert_dir(listener_id))
    except Exception:
        pass

    # disable the service
    init_system = util.get_os_init_system()
    init_path = util.init_path(listener_id, init_system)

    if init_system == consts.INIT_SYSTEMD:
        util.run_systemctl_command(
            consts.DISABLE, "haproxy-{list}".format(list=listener_id))
    elif init_system == consts.INIT_SYSVINIT:
        # Command is run below; upstart needs no explicit disable step.
        init_disable_cmd = "insserv -r {file}".format(file=init_path)
    elif init_system != consts.INIT_UPSTART:
        raise util.UnknownInitError()

    if init_system == consts.INIT_SYSVINIT:
        try:
            subprocess.check_output(init_disable_cmd.split(),
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error(
                "Failed to disable haproxy-%(list)s service: "
                "%(err)s %(out)s", {'list': listener_id, 'err': e,
                                    'out': e.output})
            return webob.Response(json=dict(
                message="Error disabling haproxy-{0} service".format(
                    listener_id), details=e.output), status=500)

    # delete the directory + init script for that listener
    shutil.rmtree(util.haproxy_dir(listener_id))
    if os.path.exists(init_path):
        os.remove(init_path)

    return webob.Response(json={'message': 'OK'})
def upload_keepalived_config(self):
    """Install a keepalived configuration pushed by the controller.

    Streams the request body into the keepalived config file (mode 0644),
    selects the init template for the detected init system (systemd,
    upstart or sysvinit), renders the init script once (systemd: 0644,
    others: 0755), refreshes the check script (0755), enables the
    service on boot where applicable, and returns a 200 response whose
    ``ETag`` header is the MD5 of the uploaded body.

    :raises util.UnknownInitError: for an unrecognized init system
    """
    # Wrap the WSGI stream so the body's MD5 is available afterwards.
    stream = listener.Wrapped(flask.request.stream)

    # First upload on this amphora: create the keepalived directory tree.
    if not os.path.exists(util.keepalived_dir()):
        os.makedirs(util.keepalived_dir())
        os.makedirs(util.keepalived_check_scripts_dir())

    conf_file = util.keepalived_cfg_path()
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    # mode 00644
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    # Binary mode: the wrapped stream yields raw request bytes.
    with os.fdopen(os.open(conf_file, flags, mode), 'wb') as f:
        b = stream.read(BUFFER)
        while b:
            f.write(b)
            b = stream.read(BUFFER)

    init_system = util.get_os_init_system()

    file_path = util.keepalived_init_path(init_system)

    if init_system == consts.INIT_SYSTEMD:
        template = SYSTEMD_TEMPLATE
        init_enable_cmd = "systemctl enable octavia-keepalived"

        # Render and install the network namespace systemd service
        util.install_netns_systemd_service()
        util.run_systemctl_command(consts.ENABLE,
                                   consts.AMP_NETNS_SVC_PREFIX)
    elif init_system == consts.INIT_UPSTART:
        # Upstart picks the job up automatically; no enable command.
        template = UPSTART_TEMPLATE
    elif init_system == consts.INIT_SYSVINIT:
        template = SYSVINIT_TEMPLATE
        init_enable_cmd = "insserv {file}".format(file=file_path)
    else:
        raise util.UnknownInitError()

    # systemd unit files are plain config; others are executable scripts.
    if init_system == consts.INIT_SYSTEMD:
        # mode 00644
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    else:
        # mode 00755
        mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
    # The init script/unit is rendered only once per amphora.
    if not os.path.exists(file_path):
        with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file:
            text = template.render(
                keepalived_pid=util.keepalived_pid_path(),
                keepalived_cmd=consts.KEEPALIVED_CMD,
                keepalived_cfg=util.keepalived_cfg_path(),
                keepalived_log=util.keepalived_log_path(),
                amphora_nsname=consts.AMPHORA_NAMESPACE,
                amphora_netns=consts.AMP_NETNS_SVC_PREFIX,
                administrative_log_facility=(
                    CONF.amphora_agent.administrative_log_facility),
            )
            text_file.write(text)

    # Renders the Keepalived check script
    keepalived_path = util.keepalived_check_script_path()
    # mode 00755
    mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
            stat.S_IROTH | stat.S_IXOTH)
    open_obj = os.open(keepalived_path, flags, mode)
    with os.fdopen(open_obj, 'w') as text_file:
        text = check_script_template.render(
            check_scripts_dir=util.keepalived_check_scripts_dir())
        text_file.write(text)

    # Make sure the new service is enabled on boot
    if init_system != consts.INIT_UPSTART:
        try:
            subprocess.check_output(init_enable_cmd.split(),
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.debug('Failed to enable octavia-keepalived service: '
                      '%(err)s %(output)s', {'err': e, 'output': e.output})
            return webob.Response(json=dict(
                message="Error enabling octavia-keepalived service",
                details=e.output), status=500)

    res = webob.Response(json={'message': 'OK'}, status=200)
    res.headers['ETag'] = stream.get_md5()
    return res
def delete_listener(self, listener_id):
    """Tear down a listener: stop haproxy and remove its artifacts.

    Stops the haproxy service if it is running, best-effort removes the
    stats socket and cert directory, updates the VRRP check script if
    VRRP is in use, disables the service in the detected init system,
    and finally deletes the listener's directory and init script.

    :param listener_id: listener to delete
    :returns: webob.Response — 200 OK (also for an already-absent
        listener), or 500 if stopping/disabling the service fails
    :raises util.UnknownInitError: for an unrecognized init system
    """
    try:
        self._check_listener_exists(listener_id)
    except exceptions.HTTPException:
        # Listener already gone: deleting is idempotent.
        return webob.Response(json={'message': 'OK'})

    # check if that haproxy is still running and if stop it
    if os.path.exists(util.pid_path(listener_id)) and os.path.exists(
            os.path.join('/proc', util.get_haproxy_pid(listener_id))):
        cmd = "/usr/sbin/service haproxy-{0} stop".format(listener_id)
        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error("Failed to stop haproxy-%s service: %s %s",
                      listener_id, e, e.output)
            return webob.Response(json=dict(
                message="Error stopping haproxy",
                details=e.output), status=500)

    # parse config and delete stats socket (best effort)
    try:
        cfg = self._parse_haproxy_file(listener_id)
        os.remove(cfg['stats_socket'])
    except Exception:
        pass

    # Since this script should be deleted at LB delete time
    # we can check for this path to see if VRRP is enabled
    # on this amphora and not write the file if VRRP is not in use
    if os.path.exists(util.keepalived_check_script_path()):
        self.vrrp_check_script_update(
            listener_id, action=consts.AMP_ACTION_STOP)

    # delete the ssl files (best effort)
    try:
        shutil.rmtree(self._cert_dir(listener_id))
    except Exception:
        pass

    # disable the service
    init_system = util.get_os_init_system()
    init_path = util.init_path(listener_id, init_system)

    if init_system == consts.INIT_SYSTEMD:
        util.run_systemctl_command(
            consts.DISABLE, "haproxy-{list}".format(
                list=listener_id))
    elif init_system == consts.INIT_SYSVINIT:
        # Command is run below; upstart needs no explicit disable step.
        init_disable_cmd = "insserv -r {file}".format(file=init_path)
    elif init_system != consts.INIT_UPSTART:
        raise util.UnknownInitError()

    if init_system == consts.INIT_SYSVINIT:
        try:
            subprocess.check_output(init_disable_cmd.split(),
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error("Failed to disable haproxy-%(list)s service: "
                      "%(err)s %(out)s", {'list': listener_id, 'err': e,
                                          'out': e.output})
            return webob.Response(json=dict(
                message="Error disabling haproxy-{0} service".format(
                    listener_id), details=e.output), status=500)

    # delete the directory + init script for that listener
    shutil.rmtree(util.haproxy_dir(listener_id))
    if os.path.exists(init_path):
        os.remove(init_path)

    return webob.Response(json={'message': 'OK'})
def build_keepalived_config(self, loadbalancer, amphora, vip_cidr):
    """Render the keepalived configuration for an Active/Standby amphora.

    :param loadbalancer: A load balancer object
    :param amphora: The amphora this configuration is rendered for
    :param vip_cidr: The VIP subnet cidr (str or bytes; coerced to text
        for the ipaddress module, which requires unicode on Python 2)
    :returns: The rendered keepalived configuration text
    :raises ValueError: if the VIP address or CIDR fails ipaddress parsing
    """
    # Note on keepalived configuration: The current base configuration
    # enforced Master election whenever a high priority VRRP instance
    # start advertising its presence. Accordingly, the fallback behavior
    # - which I described in the blueprint - is the default behavior.
    # Although this is a stable behavior, this can be undesirable for
    # several backend services. To disable the fallback behavior, we need
    # to add the "nopreempt" flag in the backup instance section.
    peers_ips = []

    # Validate the VIP address and see if it is IPv6
    vip = loadbalancer.vip.ip_address
    vip_addr = ipaddress.ip_address(
        vip if isinstance(vip, six.text_type) else six.u(vip))
    vip_ipv6 = vip_addr.version == 6

    # Normalize and validate the VIP subnet CIDR
    vip_network_cidr = None
    vip_cidr = (vip_cidr if isinstance(vip_cidr, six.text_type)
                else six.u(vip_cidr))
    if vip_ipv6:
        vip_network_cidr = ipaddress.IPv6Network(vip_cidr).with_prefixlen
    else:
        vip_network_cidr = ipaddress.IPv4Network(vip_cidr).with_prefixlen

    # Peers are the VRRP IPs of every other ALLOCATED amphora on this LB.
    for amp in six.moves.filter(
            lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
            loadbalancer.amphorae):
        if amp.vrrp_ip != amphora.vrrp_ip:
            peers_ips.append(amp.vrrp_ip)
    return self.get_template(self.keepalived_template).render(
        {'vrrp_group_name': loadbalancer.vrrp_group.vrrp_group_name,
         'amp_role': amphora.role,
         'amp_intf': amphora.vrrp_interface,
         'amp_vrrp_id': amphora.vrrp_id,
         'amp_priority': amphora.vrrp_priority,
         'vrrp_garp_refresh':
             CONF.keepalived_vrrp.vrrp_garp_refresh_interval,
         'vrrp_garp_refresh_repeat':
             CONF.keepalived_vrrp.vrrp_garp_refresh_count,
         'vrrp_auth_type': loadbalancer.vrrp_group.vrrp_auth_type,
         'vrrp_auth_pass': loadbalancer.vrrp_group.vrrp_auth_pass,
         'amp_vrrp_ip': amphora.vrrp_ip,
         'peers_vrrp_ips': peers_ips,
         'vip_ip_address': vip,
         'advert_int': loadbalancer.vrrp_group.advert_int,
         'check_script_path': util.keepalived_check_script_path(),
         'vrrp_check_interval':
             CONF.keepalived_vrrp.vrrp_check_interval,
         'vrrp_fail_count': CONF.keepalived_vrrp.vrrp_fail_count,
         'vrrp_success_count':
             CONF.keepalived_vrrp.vrrp_success_count,
         'vip_network_cidr': vip_network_cidr,
         'vip_ipv6': vip_ipv6},
        constants=constants)