def _insert_iptables_rules(self):
    """Ensure an ACCEPT rule for inbound TCP 3306 (MySQL's default port)
    when iptables management is enabled."""
    # NOTE(review): a stray ''' trailed the original definition; it opened an
    # unterminated string literal (syntax error) and has been removed.
    if iptables.enabled():
        iptables.FIREWALL.ensure([
            {"jump": "ACCEPT", "protocol": "tcp", "match": "tcp", "dport": "3306"},
        ])
def on_start(self):
    """Entry point run on daemon start: choose a startup path from saved
    flags, command-line options and configuration state."""
    # Persist the currently loaded firewall rules so scalarizr-managed
    # rules survive the (re)start.
    if iptables.enabled():
        iptables.save()
    optparser = bus.optparser
    if Flag.exists(Flag.REBOOT) or Flag.exists(Flag.HALT):
        # A REBOOT/HALT flag was left behind by a previous shutdown:
        # resume instead of re-initializing, then clear both flags.
        self._logger.info("Scalarizr resumed after reboot")
        Flag.clear(Flag.REBOOT)
        Flag.clear(Flag.HALT)
        self._check_control_ports()
        self._start_after_reboot()
    elif optparser and optparser.values.import_server:
        # Explicit --import-server on the command line.
        self._logger.info('Server will be imported into Scalr')
        self._start_import()
    elif self._cnf.state == ScalarizrState.IMPORTING:
        # Import already in progress; just wait for the Rebundle message.
        self._logger.info('Server import resumed. Awaiting Rebundle message')
    elif self._cnf.state == ScalarizrState.BOOTSTRAPPING:
        self._logger.info("Starting initialization")
        self._start_init()
    else:
        self._logger.info("Normal start")
        self._check_control_ports()
def on_start(self):
    """Entry point run on daemon start: dispatch to the appropriate
    startup routine based on persisted flags and configuration state.

    Reboot and halt are handled as separate branches here
    (`_start_after_reboot` vs `_start_after_stop`).
    """
    # Persist current firewall rules across the restart.
    if iptables.enabled():
        iptables.save()
    optparser = bus.optparser
    if self._flag_exists(self.FLAG_REBOOT):
        self._logger.info("Scalarizr resumed after reboot")
        self._clear_flag(self.FLAG_REBOOT)
        self._start_after_reboot()
    elif self._flag_exists(self.FLAG_HALT):
        self._logger.info("Scalarizr resumed after stop")
        self._clear_flag(self.FLAG_HALT)
        self._start_after_stop()
    elif optparser.values.import_server:
        self._logger.info('Server will be imported into Scalr')
        self._start_import()
    elif self._cnf.state == ScalarizrState.IMPORTING:
        self._logger.info('Server import resumed. Awaiting Rebundle message')
    elif self._cnf.state == ScalarizrState.BOOTSTRAPPING:
        self._logger.info("Starting initialization")
        self._start_init()
    else:
        self._logger.info("Normal start")
def _close_port(port):
    """Remove the firewall ACCEPT rule for TCP *port*.

    A missing rule (LinuxError from the remove call) is silently ignored:
    closing an already-closed port is a no-op.
    """
    if not iptables.enabled():
        return
    accept_rule = {
        "jump": "ACCEPT",
        "protocol": "tcp",
        "match": "tcp",
        "dport": str(port),
    }
    try:
        iptables.FIREWALL.remove(accept_rule)
    except LinuxError:
        # Rule was not present — nothing to close.
        pass
def _insert_iptables_rules(self):
    """Ensure ACCEPT rules for the scalarizr control ports
    (TCP 8008/8012/8013, UDP 8014)."""
    self._logger.debug('Adding iptables rules for scalarizr ports')
    if iptables.enabled():
        port_specs = (
            ("tcp", "8008"),
            ("tcp", "8012"),
            ("tcp", "8013"),
            ("udp", "8014"),
        )
        iptables.FIREWALL.ensure([
            {"jump": "ACCEPT", "protocol": proto, "match": proto, "dport": dport}
            for proto, dport in port_specs
        ])
def _ensure_security(self):
    """Open the configured redis port range in the firewall.

    When passwords are in use on a running server, first drop the legacy
    farm-wide DROP rule (backward compatibility; scheduled for removal),
    then ensure an ACCEPT rule covering the whole range. Otherwise fall
    back to farm-level security initialization.
    """
    ports = "{0}:{1}".format(__redis__['ports_range'][0], __redis__['ports_range'][-1])
    if self.use_passwords and iptables.enabled():
        if __node__['state'] == 'running':
            # TODO: deprecate and remove it in 2015
            # Fix to enable access outside farm when use_passwords=True
            try:
                iptables.FIREWALL.remove({
                    "protocol": "tcp", "match": "tcp", "dport": ports, "jump": "DROP"
                })
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. We still silently ignore a
                # non-existent-rule error here on purpose.
                pass
            iptables.FIREWALL.ensure([{
                "jump": "ACCEPT", "protocol": "tcp", "match": "tcp", "dport": ports
            }])
    else:
        self.init_farm_security([ports])
def _ensure_security(self):
    """Open the configured redis port range in the firewall.

    When passwords are in use on a running server, first drop the legacy
    farm-wide DROP rule (backward compatibility; scheduled for removal),
    then ensure an ACCEPT rule covering the whole range. Otherwise fall
    back to farm-level security initialization.
    """
    ports = "{0}:{1}".format(
        __redis__['ports_range'][0], __redis__['ports_range'][-1])
    if self.use_passwords and iptables.enabled():
        if __node__['state'] == 'running':
            # TODO: deprecate and remove it in 2015
            # Fix to enable access outside farm when use_passwords=True
            try:
                iptables.FIREWALL.remove({
                    "protocol": "tcp", "match": "tcp",
                    "dport": ports, "jump": "DROP"
                })
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. We still silently ignore a
                # non-existent-rule error here on purpose.
                pass
            iptables.FIREWALL.ensure([{
                "jump": "ACCEPT", "protocol": "tcp",
                "match": "tcp", "dport": ports
            }])
    else:
        self.init_farm_security([ports])
def remove_proxy(self, port):
    """
    Removes listen and backend sections from haproxy.cfg and restarts service
    """
    backend_name = self.naming_mgr.get_pattern({
        'port': port,
        'type': 'backend'
    })
    # BUG FIX: the listener pattern was built with type 'backend'
    # (copy-paste), so the 'listen' section lookup below searched for the
    # backend's name instead of the listener's.
    listener_name = self.naming_mgr.get_pattern({
        'port': port,
        'type': 'listen'
    })
    listener_xpath = self.cfg.find_one_xpath('listen', 'name', listener_name)
    backend_xpath = self.cfg.find_one_xpath('backend', 'name', backend_name)
    self.cfg.remove(listener_xpath)
    self.cfg.remove(backend_xpath)
    self.cfg.save()
    self.svc.reload()
    # Also close the listener port in the firewall.
    if iptables.enabled():
        close_port(port)
def _insert_iptables_rules(self):
    """Ensure an ACCEPT rule for inbound TCP on the default PostgreSQL
    port when iptables management is enabled."""
    # NOTE(review): a stray closing triple-quote trailed the original
    # definition; it opened an unterminated string literal (syntax error)
    # and has been removed.
    if iptables.enabled():
        iptables.FIREWALL.ensure([
            {"jump": "ACCEPT", "protocol": "tcp", "match": "tcp",
             "dport": str(POSTGRESQL_DEFAULT_PORT)},
        ])
def _insert_iptables_rules(self):
    """Ensure ACCEPT rules for the RabbitMQ-related TCP ports, including
    the clustering port."""
    if iptables.enabled():
        tcp_ports = ['5672', '15672', '55672', '4369',
                     str(RABBITMQ_CLUSTERING_PORT)]
        iptables.FIREWALL.ensure([
            {"jump": "ACCEPT", "protocol": "tcp", "match": "tcp", "dport": dport}
            for dport in tcp_ports
        ])
def on_start(self):
    """Entry point run on daemon start: choose a startup path from saved
    flags, command-line options and configuration state."""
    # Persist the currently loaded firewall rules across the restart.
    if iptables.enabled():
        iptables.save()
    optparser = bus.optparser
    if self._flag_exists(self.FLAG_REBOOT) or self._flag_exists(
            self.FLAG_HALT):
        # A reboot/halt flag was left behind by a previous shutdown:
        # resume instead of re-initializing, then clear both flags.
        self._logger.info("Scalarizr resumed after reboot")
        self._clear_flag(self.FLAG_REBOOT)
        self._clear_flag(self.FLAG_HALT)
        self._check_control_ports()
        self._start_after_reboot()
    elif optparser.values.import_server:
        self._logger.info('Server will be imported into Scalr')
        self._start_import()
    elif self._cnf.state == ScalarizrState.IMPORTING:
        # Import already in progress; just wait for the Rebundle message.
        self._logger.info(
            'Server import resumed. Awaiting Rebundle message')
    elif self._cnf.state == ScalarizrState.BOOTSTRAPPING:
        self._logger.info("Starting initialization")
        self._start_init()
    else:
        self._logger.info("Normal start")
        self._check_control_ports()
def _insert_iptables_rules(self):
    """Open the whole configured redis port range in the firewall, but
    only when password authentication is enabled."""
    if self.use_passwords and iptables.enabled():
        first_port = __redis__['ports_range'][0]
        last_port = __redis__['ports_range'][-1]
        iptables.FIREWALL.ensure([{
            "jump": "ACCEPT",
            "protocol": "tcp",
            "match": "tcp",
            "dport": "{0}:{1}".format(first_port, last_port),
        }])
def _insert_iptables_rules(self):
    """Ensure ACCEPT rules for the RabbitMQ-related TCP ports."""
    if iptables.enabled():
        rules = []
        for dport in ('5672', '15672', '55672', '4369'):
            rules.append({"jump": "ACCEPT", "protocol": "tcp",
                          "match": "tcp", "dport": dport})
        iptables.FIREWALL.ensure(rules)
def _insert_iptables_rules(self):
    """Ensure ACCEPT rules for TCP ports 8080 and 8443."""
    if iptables.enabled():
        # Build all rules first and apply them in a single ensure() call
        # instead of invoking the firewall once per port.
        iptables.FIREWALL.ensure([
            {"jump": "ACCEPT", "protocol": "tcp", "match": "tcp",
             "dport": str(port)}
            for port in (8080, 8443)
        ])
def _insert_iptables_rules(self):
    """Ensure an ACCEPT rule covering the configured redis port range."""
    if iptables.enabled():
        port_range = "{0}:{1}".format(__redis__['ports_range'][0],
                                      __redis__['ports_range'][-1])
        iptables.FIREWALL.ensure([{
            "jump": "ACCEPT",
            "protocol": "tcp",
            "match": "tcp",
            "dport": port_range,
        }])
def _insert_iptables_rules(self):
    """Ensure an ACCEPT rule for the configured PostgreSQL port."""
    if iptables.enabled():
        accept_rule = {"jump": "ACCEPT", "protocol": "tcp", "match": "tcp",
                       "dport": str(__postgresql__['port'])}
        iptables.FIREWALL.ensure([accept_rule])
def _insert_iptables_rules(self):
    """Ensure ACCEPT rules for the scalarizr control ports
    (TCP 8008/8012/8013, UDP 8014)."""
    self._logger.debug('Adding iptables rules for scalarizr ports')
    if iptables.enabled():
        rules = []
        for proto, dport in (("tcp", "8008"), ("tcp", "8012"),
                             ("tcp", "8013"), ("udp", "8014")):
            rules.append({"jump": "ACCEPT", "protocol": proto,
                          "match": proto, "dport": dport})
        iptables.FIREWALL.ensure(rules)
def _open_ports(self, ports):
    """Allow inbound TCP on each of *ports*, skipping ports already
    tracked in ``self.current_open_ports``."""
    if not iptables.enabled():
        LOG.warning("Cannot open ports %s: IPtables disabled" % str(ports))
        return
    new_rules = []
    for port in ports:
        if port in self.current_open_ports:
            continue
        self.current_open_ports.append(port)
        new_rules.append({"jump": "ACCEPT", "protocol": "tcp",
                          "match": "tcp", "dport": str(port)})
    if new_rules:
        LOG.info("Ensuring ports %s are allowed in IPtables" % str(ports))
        iptables.FIREWALL.ensure(new_rules)
def _insert_iptables_rules(self, *args, **kwargs):
    """Ensure ACCEPT rules for the scalarizr control ports
    (TCP 8008/8010/8012/8013, UDP 8014)."""
    self._logger.debug("Adding iptables rules for scalarizr ports")
    if iptables.enabled():
        # Scalarizr ports
        control_ports = (("tcp", "8008"), ("tcp", "8010"), ("tcp", "8012"),
                         ("tcp", "8013"), ("udp", "8014"))
        iptables.FIREWALL.ensure([
            {"jump": "ACCEPT", "protocol": proto, "match": proto, "dport": dport}
            for proto, dport in control_ports
        ])
def _launch(self, ports=None, passwords=None, op=None):
    """Launch redis processes on *ports* with matching *passwords*.

    :param ports: ports to start redis on (default: none)
    :param passwords: passwords zipped with `ports`; when omitted each
        port is paired with None
    :param op: optional operation object used for step reporting
    :returns: tuple (launched_ports, effective_passwords)
    :raises BaseException: when a redis process already runs on a port
    """
    # BUG FIX: the defaults were mutable lists ([]), shared across calls
    # and mutable by callers; use None sentinels instead.
    ports = ports if ports is not None else []
    passwords = passwords if passwords is not None else []
    LOG.debug('Launching redis processes on ports %s with passwords %s'
              % (ports, passwords))
    is_replication_master = self.is_replication_master
    primary_ip = self.get_primary_ip()
    assert primary_ip is not None

    new_passwords = []
    new_ports = []

    for port, password in zip(ports, passwords or [None for port in ports]):
        if op:
            op.step('Launch Redis %s on port %s'
                    % ('Master' if is_replication_master else 'Slave', port))
        try:
            if op:
                op.__enter__()
            # Open the port before starting the process.
            if iptables.enabled():
                iptables.FIREWALL.ensure({
                    "jump": "ACCEPT", "protocol": "tcp",
                    "match": "tcp", "dport": port
                })
            redis_service.create_redis_conf_copy(port)
            redis_process = redis_service.Redis(
                is_replication_master, self.persistence_type, port, password)
            if not redis_process.service.running:
                LOG.debug('Launch Redis %s on port %s'
                          % ('Master' if is_replication_master else 'Slave', port))
                if is_replication_master:
                    current_password = redis_process.init_master(STORAGE_PATH)
                else:
                    current_password = redis_process.init_slave(
                        STORAGE_PATH, primary_ip, port)
                new_passwords.append(current_password)
                new_ports.append(port)
                LOG.debug('Redis process has been launched on port %s with password %s'
                          % (port, current_password))
            else:
                raise BaseException(
                    'Cannot launch redis on port %s: the process is already running' % port)
        except:
            # NOTE(review): on error op.__exit__ runs here AND again in
            # finally (double exit) — preserved as-is; confirm op tolerates
            # a second __exit__(None) call.
            if op:
                op.__exit__(sys.exc_info())
            raise
        finally:
            if op:
                op.__exit__(None)
    return (new_ports, new_passwords)
def _launch(self, ports=None, passwords=None, op=None):
    """Launch redis processes on *ports* with matching *passwords*.

    :param ports: ports to start redis on (default: none)
    :param passwords: passwords zipped with `ports`; when omitted each
        port is paired with None
    :param op: optional operation object; its logger replaces the module
        logger when present
    :returns: tuple (launched_ports, effective_passwords)
    :raises BaseException: when a redis process already runs on a port
    """
    log = op.logger if op else LOG
    ports = ports or []
    passwords = passwords or []
    log.debug('Launching redis processes on ports %s with passwords %s',
              ports, passwords)
    primary_ip = self.get_primary_ip()
    assert primary_ip is not None

    new_passwords = []
    new_ports = []

    for port, password in zip(ports, passwords or [None for port in ports]):
        log.info('Launch Redis %s on port %s',
                 'Master' if __redis__["replication_master"] else 'Slave', port)
        # Open the port in the firewall before starting the process.
        if iptables.enabled():
            iptables.FIREWALL.ensure({
                "jump": "ACCEPT", "protocol": "tcp",
                "match": "tcp", "dport": port
            })
        redis_service.create_redis_conf_copy(port)
        redis_process = redis_service.Redis(port, password)
        if not redis_process.service.running:
            if __redis__["replication_master"]:
                current_password = redis_process.init_master(STORAGE_PATH)
            else:
                current_password = redis_process.init_slave(
                    STORAGE_PATH, primary_ip, port)
            new_passwords.append(current_password)
            new_ports.append(port)
            log.debug(
                'Redis process has been launched on port %s with password %s'
                % (port, current_password))
        else:
            raise BaseException(
                'Cannot launch redis on port %s: the process is already running'
                % port)
    return new_ports, new_passwords
def _insert_iptables_rules(self, *args, **kwargs):
    """Ensure ACCEPT rules for TCP ports 80 and 443."""
    if iptables.enabled():
        web_rules = []
        for dport in ("80", "443"):
            web_rules.append({
                "jump": "ACCEPT",
                "protocol": "tcp",
                "match": "tcp",
                "dport": dport,
            })
        iptables.FIREWALL.ensure(web_rules)
def on_start(self):
    """Entry point run on daemon start.

    On non-Windows hosts first detects a reboot by comparing the kernel
    boot id with the previously saved one, then dispatches to the
    appropriate startup routine based on flags and configuration state.
    """
    # Persist the currently loaded firewall rules across the restart.
    if iptables.enabled():
        iptables.save()
    optparser = bus.optparser
    if os_dist['family'] != 'Windows':
        if os.path.exists(self.saved_boot_id_file):
            saved_boot_id = None
            current_boot_id = None
            with open(self.boot_id_file, 'r') as fp:
                current_boot_id = fp.read()
            with open(self.saved_boot_id_file, 'r') as fp:
                saved_boot_id = fp.read()
            # A changed boot id means the machine rebooted since the
            # daemon last recorded it.
            if saved_boot_id and saved_boot_id != current_boot_id:
                Flag.set(Flag.REBOOT)
        # Remember the current boot id for the next start.
        with open(self.boot_id_file, 'r') as fp:
            current_boot_id = fp.read()
            with open(self.saved_boot_id_file, 'w') as saved_fp:
                saved_fp.write(current_boot_id)
    if Flag.exists(Flag.REBOOT) or Flag.exists(Flag.HALT):
        # Resume after reboot/halt instead of re-initializing.
        self._logger.info("Scalarizr resumed after reboot")
        Flag.clear(Flag.REBOOT)
        Flag.clear(Flag.HALT)
        self._check_control_ports()
        self._start_after_reboot()
    elif optparser and optparser.values.import_server:
        self._logger.info('Server will be imported into Scalr')
        self._start_import()
    elif self._cnf.state == ScalarizrState.IMPORTING:
        self._logger.info(
            'Server import resumed. Awaiting Rebundle message')
    elif self._cnf.state == ScalarizrState.BOOTSTRAPPING:
        self._logger.info("Starting initialization")
        self._start_init()
    else:
        self._logger.info("Normal start")
        self._check_control_ports()
def _open_ports(self, ports):
    """Allow inbound TCP on each of *ports*, skipping ports already
    tracked in ``self.current_open_ports``."""
    if iptables.enabled():
        fresh_rules = []
        for port in ports:
            if port not in self.current_open_ports:
                self.current_open_ports.append(port)
                fresh_rules.append({
                    "jump": "ACCEPT",
                    "protocol": "tcp",
                    "match": "tcp",
                    "dport": str(port),
                })
        if fresh_rules:
            LOG.info("Ensuring ports %s are allowed in IPtables" % str(ports))
            iptables.FIREWALL.ensure(fresh_rules)
    else:
        LOG.warning("Cannot open ports %s: IPtables disabled" % str(ports))
def on_start(self):
    """Entry point run on daemon start.

    On non-Windows hosts first detects a reboot by comparing the kernel
    boot id with the previously saved one, then dispatches to the
    appropriate startup routine based on flags and configuration state.
    """
    # Persist the currently loaded firewall rules across the restart.
    if iptables.enabled():
        iptables.save()
    optparser = bus.optparser
    if os_dist['family'] != 'Windows':
        if os.path.exists(self.saved_boot_id_file):
            saved_boot_id = None
            current_boot_id = None
            with open(self.boot_id_file, 'r') as fp:
                current_boot_id = fp.read()
            with open(self.saved_boot_id_file, 'r') as fp:
                saved_boot_id = fp.read()
            # A changed boot id means the machine rebooted since the
            # daemon last recorded it.
            if saved_boot_id and saved_boot_id != current_boot_id:
                Flag.set(Flag.REBOOT)
        # Remember the current boot id for the next start.
        with open(self.boot_id_file, 'r') as fp:
            current_boot_id = fp.read()
            with open(self.saved_boot_id_file, 'w') as saved_fp:
                saved_fp.write(current_boot_id)
    if Flag.exists(Flag.REBOOT) or Flag.exists(Flag.HALT):
        # Resume after reboot/halt instead of re-initializing.
        self._logger.info("Scalarizr resumed after reboot")
        Flag.clear(Flag.REBOOT)
        Flag.clear(Flag.HALT)
        self._check_control_ports()
        self._start_after_reboot()
    elif optparser and optparser.values.import_server:
        self._logger.info('Server will be imported into Scalr')
        self._start_import()
    elif self._cnf.state == ScalarizrState.IMPORTING:
        self._logger.info('Server import resumed. Awaiting Rebundle message')
    elif self._cnf.state == ScalarizrState.BOOTSTRAPPING:
        self._logger.info("Starting initialization")
        self._start_init()
    else:
        self._logger.info("Normal start")
        self._check_control_ports()
def delete_listener(self, port=None, protocol=None):
    """
    Removes listen section(s) by port (and)or protocol.

    Also removes the listener's default backend when no other listener
    references it, and drops the firewall ACCEPT rule for the port.

    :raises exceptions.NotFound: when no matching listen section exists,
        or when removing the firewall rule fails.
    """
    ln = haproxy.naming('listen', protocol, port)
    if not self.cfg.sections(ln):
        raise exceptions.NotFound('Listen `%s` not found can`t remove it' % ln)
    try:
        default_backend = self.cfg.listener[ln]['default_backend']
    except:
        # Listener has no default_backend entry.
        default_backend = None

    # NOTE(review): every iteration deletes the same key `ln`; presumably
    # sections(ln) yields a single path — confirm, otherwise the second
    # delete would fail.
    for path in self.cfg.sections(ln):
        del self.cfg['listen'][ln]
        LOG.debug('HAProxyAPI.delete_listener: removed listener `%s`' % ln)

    if default_backend:
        # Keep the backend if any remaining listener still points at it.
        has_ref = False
        for ln in self.cfg.listener:
            try:
                if self.cfg.listener[ln][
                        'default_backend'] == default_backend:
                    has_ref = True
                    break
            except:
                pass
        if not has_ref:
            #it not used in other section, so will be deleting
            del self.cfg.backends[default_backend]

    try:
        if iptables.enabled():
            iptables.FIREWALL.remove({
                "jump": "ACCEPT", "protocol": "tcp",
                "match": "tcp", "dport": port
            })
    except Exception, e:
        raise exceptions.NotFound(e)
def delete_listener(self, port=None, protocol=None):
    """
    Removes listen section(s) by port (and)or protocol.

    Also removes the listener's default backend when no other listener
    references it, and drops the firewall ACCEPT rule for the port.

    :raises exceptions.NotFound: when no matching listen section exists,
        or when removing the firewall rule fails.
    """
    ln = haproxy.naming('listen', protocol, port)
    if not self.cfg.sections(ln):
        raise exceptions.NotFound('Listen `%s` not found can`t remove it' % ln)
    try:
        default_backend = self.cfg.listener[ln]['default_backend']
    except:
        # Listener has no default_backend entry.
        default_backend = None

    # NOTE(review): every iteration deletes the same key `ln`; presumably
    # sections(ln) yields a single path — confirm, otherwise the second
    # delete would fail.
    for path in self.cfg.sections(ln):
        del self.cfg['listen'][ln]
        LOG.debug('HAProxyAPI.delete_listener: removed listener `%s`' % ln)

    if default_backend:
        # Keep the backend if any remaining listener still points at it.
        has_ref = False
        for ln in self.cfg.listener:
            try:
                if self.cfg.listener[ln]['default_backend'] == default_backend:
                    has_ref = True
                    break
            except:
                pass
        if not has_ref:
            #it not used in other section, so will be deleting
            del self.cfg.backends[default_backend]

    try:
        if iptables.enabled():
            iptables.FIREWALL.remove({
                "jump": "ACCEPT", "protocol": "tcp",
                "match": "tcp", "dport": port
            })
    except Exception, e:
        raise exceptions.NotFound(e)
def _launch(self, ports=None, passwords=None, op=None):
    """Launch redis processes on *ports* with matching *passwords*.

    :param ports: ports to start redis on (default: none)
    :param passwords: passwords zipped with `ports`; when omitted each
        port is paired with None
    :param op: optional operation object; its logger replaces the module
        logger when present
    :returns: tuple (launched_ports, effective_passwords)
    :raises BaseException: when a redis process already runs on a port
    """
    log = op.logger if op else LOG
    ports = ports or []
    passwords = passwords or []
    log.debug('Launching redis processes on ports %s with passwords %s',
              ports, passwords)
    primary_ip = self.get_primary_ip()
    assert primary_ip is not None

    new_passwords = []
    new_ports = []

    for port, password in zip(ports, passwords or [None for port in ports]):
        log.info('Launch Redis %s on port %s',
                 'Master' if __redis__["replication_master"] else 'Slave', port)
        # Open the port in the firewall before starting the process.
        if iptables.enabled():
            iptables.FIREWALL.ensure({
                "jump": "ACCEPT", "protocol": "tcp",
                "match": "tcp", "dport": port
            })
        redis_service.create_redis_conf_copy(port)
        redis_process = redis_service.Redis(port, password)
        if not redis_process.service.running:
            if __redis__["replication_master"]:
                current_password = redis_process.init_master(STORAGE_PATH)
            else:
                current_password = redis_process.init_slave(STORAGE_PATH,
                                                            primary_ip, port)
            new_passwords.append(current_password)
            new_ports.append(port)
            log.debug('Redis process has been launched on port %s with password %s'
                      % (port, current_password))
        else:
            raise BaseException('Cannot launch redis on port %s: the process is already running'
                                % port)
    return new_ports, new_passwords
def make_proxy(self, port, backend_port=None, backends=None,
               check_timeout=None, maxconn=None, **default_server_params):
    """
    Add listener and backend sections to the haproxy conf and restart
    the service.

    :param port: listener port
    :type port: int
    :param backend_port: port for backend server to listen on
    :type backend_port: int
    :param backends: list of dicts, each dict represents role or server
    :type backends: list
    # :param roles: role ids (ints) or dicts with "id" key
    # :type roles: list
    # :param servers: server ips
    # :type servers: list
    :param check_timeout: ``timeout check`` - additional read timeout, e.g. "3s"
    :type check_timeout: str
    :param maxconn: set ``maxconn`` of the frontend
    :type maxconn: str
    :param **default_server_params: following kwargs will be applied to
        the ``default-server`` key of the backend
    :param check_interval: value for ``inter``, e.g. "3s"
    :type check_interval: str
    :param fall_threshold: value for ``fall``
    :type fall_threshold: int
    :param rise_threshold: value for ``rise``
    :type rise_threshold: int
    :param server_maxconn: value for ``maxconn``, not to confuse with the
        frontend's ``maxconn``
    :type server_maxconn: str
    :param down: value for ``disabled``
    :type down: bool
    :param backup: value for ``backup``
    :type backup: bool

    :returns: ?

    .. note:: official documentation on the global parameters and server \
    options can be found at \
    http://cbonte.github.com/haproxy-dconv/configuration-1.4.html
    """
    # args preprocessing: default values and short forms
    if not backend_port:
        backend_port = port
    # new: backends instead of separate roles/hosts args
    if not backends:
        backends = []
    # Split the mixed `backends` list into role specs and host specs, and
    # normalize short forms (bare int -> role dict, bare str -> host dict).
    roles = filter(lambda spec: "farm_role_id" in spec, backends)
    servers = filter(lambda spec: "host" in spec, backends)
    roles = map(lambda x: {"farm_role_id": x} if isinstance(x, int) else dict(x),
                roles)
    servers = map(lambda x: {"host": x} if isinstance(x, str) else dict(x),
                  servers)

    # create a single server list with proper params for each server
    # 1. extract servers from the roles and apply role params to them
    roles_servers = []
    for role in roles:
        role_id, role_params = role.pop("farm_role_id"), role
        role_servers = map(lambda ip: {"host": ip}, get_role_servers(role_id))
        LOG.debug("get_role_servers response: %s", pformat(role_servers))
        # for testing on a single machine purposes / get_servers retunrs "host:port"
        for server in role_servers:
            if ':' in server["host"]:
                host, port_ = server["host"].split(':')
                server["host"] = host
                server["port"] = port_
        #/
        [server.update(role_params) for server in role_servers]
        roles_servers.extend(role_servers)
    # 2. get all servers together, enable healthchecks, ensure `port` and
    # convert some keys
    servers.extend(roles_servers)
    [server.setdefault("check", True) for server in servers]
    [server.setdefault("port", backend_port) for server in servers]
    servers = map(rename, servers)
    LOG.debug(" Backend servers:\n" + pformat(servers))

    # construct listener and backend sections for the conf
    listener_name = haproxy.naming('listen', "tcp", port)
    backend_name = haproxy.naming('backend', "tcp", port)
    listener = {
        'mode': "tcp",
        'balance': 'roundrobin',
        'bind': '*:%s' % port,
        'default_backend': backend_name,
    }
    if maxconn:
        listener["maxconn"] = maxconn
    backend = {
        "mode": "tcp",
        "server": {},
    }
    backend.update(HEALTHCHECK_DEFAULTS)
    if check_timeout:
        backend["timeout"]["check"] = check_timeout
    backend["default-server"].update(rename(default_server_params))
    for server in servers:
        backend['server'][self._server_name(server)] = server

    # update the cfg
    self.cfg['listen'][listener_name] = listener
    # Reuse an existing backend section for this port if one is present.
    if not self.cfg.backend or not backend_name in self.cfg.backend:
        self.cfg['backend'][backend_name] = backend

    # Open the listener port in the firewall.
    if iptables.enabled():
        iptables.FIREWALL.ensure(
            [{"jump": "ACCEPT", "protocol": "tcp", "match": "tcp", "dport": port}]
        )
    self.cfg.save()
    # Reload only when the service is currently running (status 0).
    if self.svc.status() == 0:
        self.svc.reload()
def _insert_iptables_rules(self):
    """Ensure an ACCEPT rule for the configured PostgreSQL port."""
    if iptables.enabled():
        pg_port = str(__postgresql__['port'])
        iptables.FIREWALL.ensure([{
            "jump": "ACCEPT",
            "protocol": "tcp",
            "match": "tcp",
            "dport": pg_port,
        }])
class HAProxyAPI(BehaviorAPI):
    """
    Basic API for configuring HAProxy settings and controlling service status.

    Namespace::

        haproxy
    """
    __metaclass__ = Singleton

    behavior = 'haproxy'

    def __init__(self, path=None):
        # `path` is the haproxy.cfg location; None lets the helpers use
        # their default.
        self.path_cfg = path
        self.cfg = haproxy.HAProxyCfg(path)
        self.svc = haproxy.HAProxyInitScript(path)

    def _server_name(self, server):
        # Build a config-safe server name: dots become dashes; a dict is
        # first flattened to "host:port".
        if isinstance(server, basestring):
            return server.replace('.', '-')
        elif isinstance(server, dict):
            return self._server_name(':'.join(
                [server["host"], str(server["port"])]))
        else:
            raise TypeError("server must be a dict or a string")

    @rpc.command_method
    def start_service(self):
        """
        Starts HAProxy service.

        Example::

            api.haproxy.start_service()
        """
        self.svc.start()

    @rpc.command_method
    def stop_service(self):
        """
        Stops HAProxy service.

        Example::

            api.haproxy.stop_service()
        """
        self.svc.stop()

    @rpc.command_method
    def reload_service(self):
        """
        Reloads HAProxy configuration.

        Example::

            api.haproxy.reload_service()
        """
        self.svc.reload()

    @rpc.command_method
    def restart_service(self):
        """
        Restarts HAProxy service.

        Example::

            api.haproxy.restart_service()
        """
        self.svc.restart()

    @rpc.command_method
    def get_service_status(self):
        """
        Checks HAProxy service status.

        RUNNING = 0
        DEAD_PID_FILE_EXISTS = 1
        DEAD_VAR_LOCK_EXISTS = 2
        NOT_RUNNING = 3
        UNKNOWN = 4

        :return: Status num.
        :rtype: int

        Example::

            >>> api.haproxy.get_service_status()
            0
        """
        return self.svc.status()

    def make_proxy(self, port, backend_port=None, backends=None,
                   check_timeout=None, maxconn=None, **default_server_params):
        """
        Add listener and backend sections to the haproxy conf and restart
        the service.

        :param port: listener port
        :type port: int
        :param backend_port: port for backend server to listen on
        :type backend_port: int
        :param backends: list of dicts, each dict represents role or server
        :type backends: list
        # :param roles: role ids (ints) or dicts with "id" key
        # :type roles: list
        # :param servers: server ips
        # :type servers: list
        :param check_timeout: ``timeout check`` - additional read timeout, e.g. "3s"
        :type check_timeout: str
        :param maxconn: set ``maxconn`` of the frontend
        :type maxconn: str
        :param **default_server_params: following kwargs will be applied to
            the ``default-server`` key of the backend
        :param check_interval: value for ``inter``, e.g. "3s"
        :type check_interval: str
        :param fall_threshold: value for ``fall``
        :type fall_threshold: int
        :param rise_threshold: value for ``rise``
        :type rise_threshold: int
        :param server_maxconn: value for ``maxconn``, not to confuse with
            the frontend's ``maxconn``
        :type server_maxconn: str
        :param down: value for ``disabled``
        :type down: bool
        :param backup: value for ``backup``
        :type backup: bool

        :returns: ?

        .. note:: official documentation on the global parameters and server \
        options can be found at \
        http://cbonte.github.com/haproxy-dconv/configuration-1.4.html
        """
        # args preprocessing: default values and short forms
        if not backend_port:
            backend_port = port
        # new: backends instead of separate roles/hosts args
        if not backends:
            backends = []
        # Split mixed `backends` into role specs and host specs, then
        # normalize short forms (bare int -> role dict, bare str -> host dict).
        roles = filter(lambda spec: "farm_role_id" in spec, backends)
        servers = filter(lambda spec: "host" in spec, backends)
        roles = map(
            lambda x: {"farm_role_id": x} if isinstance(x, int) else dict(x),
            roles)
        servers = map(lambda x: {"host": x} if isinstance(x, str) else dict(x),
                      servers)

        # create a single server list with proper params for each server
        # 1. extract servers from the roles and apply role params to them
        roles_servers = []
        for role in roles:
            role_id, role_params = role.pop("farm_role_id"), role
            role_servers = map(lambda ip: {"host": ip},
                               get_role_servers(role_id))
            LOG.debug("get_role_servers response: %s", pformat(role_servers))
            # for testing on a single machine purposes / get_servers retunrs "host:port"
            for server in role_servers:
                if ':' in server["host"]:
                    host, port_ = server["host"].split(':')
                    server["host"] = host
                    server["port"] = port_
            #/
            [server.update(role_params) for server in role_servers]
            roles_servers.extend(role_servers)
        # 2. get all servers together, enable healthchecks, ensure `port` and
        # convert some keys
        servers.extend(roles_servers)
        [server.setdefault("check", True) for server in servers]
        [server.setdefault("port", backend_port) for server in servers]
        servers = map(rename, servers)
        LOG.debug(" Backend servers:\n" + pformat(servers))

        # construct listener and backend sections for the conf
        listener_name = haproxy.naming('listen', "tcp", port)
        backend_name = haproxy.naming('backend', "tcp", port)
        listener = {
            'mode': "tcp",
            'balance': 'roundrobin',
            'bind': '*:%s' % port,
            'default_backend': backend_name,
        }
        if maxconn:
            listener["maxconn"] = maxconn
        backend = {
            "mode": "tcp",
            "server": {},
        }
        backend.update(HEALTHCHECK_DEFAULTS)
        if check_timeout:
            backend["timeout"]["check"] = check_timeout
        backend["default-server"].update(rename(default_server_params))
        for server in servers:
            backend['server'][self._server_name(server)] = server

        # update the cfg
        self.cfg['listen'][listener_name] = listener
        # Reuse an existing backend section for this port if present.
        if not self.cfg.backend or not backend_name in self.cfg.backend:
            self.cfg['backend'][backend_name] = backend

        # Open the listener port in the firewall.
        if iptables.enabled():
            iptables.FIREWALL.ensure([{
                "jump": "ACCEPT", "protocol": "tcp",
                "match": "tcp", "dport": port
            }])
        self.cfg.save()
        # Reload only when the service is currently running (status 0).
        if self.svc.status() == 0:
            self.svc.reload()

    def recreate_conf(self):
        """Replace haproxy.cfg with a minimal config containing only
        default timeouts."""
        LOG.debug("Recreating haproxy conf at %s", self.cfg.cnf_path)
        with open(self.cfg.cnf_path, 'w') as f:
            f.write("defaults\n")
        self.cfg.reload()
        self.cfg.defaults['timeout']['connect'] = '5000ms'
        self.cfg.defaults['timeout']['client'] = '5000ms'
        self.cfg.defaults['timeout']['server'] = '5000ms'
        self.cfg.save()
        self.cfg.reload()

    def reset_conf(self):
        """Remove every server from every backend referenced by the
        current listeners."""
        self.cfg.reload()
        # TODO: remove all iptables rules as well?
        backends = map(lambda listener: listener["backend"],
                       self.list_listeners())
        for backend in backends:
            for server in self.list_servers(backend=backend):
                self.remove_server(server=server, backend=backend)

    @rpc.command_method
    def add_server(self, server=None, backend=None):
        """
        Adds server with ipaddr to backend section.

        :param server: Server configuration.
        :type server: dict

        :param backend: Backend name.
        :type backend: str

        :raises exceptions.NotFound: named backend does not exist.
        :raises exceptions.Empty: no backend sections at all.

        Example::

            TBD.
        """
        self.cfg.reload()
        if backend:
            backend = backend.strip()
        LOG.debug('HAProxyAPI.add_server')
        LOG.debug(' %s' % haproxy.naming('backend', backend=backend))
        bnds = self.cfg.sections(haproxy.naming('backend', backend=backend))
        if not bnds:
            if backend:
                raise exceptions.NotFound('Backend not found: %s' % (backend, ))
            else:
                raise exceptions.Empty('No listeners to add server to')
        #with self.svc.trans(exit='running'):
        #with self.cfg.trans(exit='working'):
        server.setdefault("check", True)
        server = rename(server)
        for bnd in bnds:
            self.cfg.backends[bnd]['server'][self._server_name(
                server)] = server
        self.cfg.save()
        self.svc.reload()

    @rpc.command_method
    def remove_server(self, server, backend=None):
        """
        Removes server with ipaddr from backend section.

        :param server: Server configuration.
        :type server: dict

        :param backend: Backend name.
        :type backend: str

        Example::

            TBD.
        """
        if backend:
            backend = backend.strip()
        srv_name = self._server_name(server)
        for bd in self.cfg.sections(haproxy.naming('backend', backend=backend)):
            if ':' in srv_name:
                # Exact "host:port" name: remove the single match.
                if srv_name in self.cfg.backends[bd]['server']:
                    del self.cfg.backends[bd]['server'][srv_name]
            else:
                # Bare host name: remove every server whose name starts
                # with it (any port).
                for srv_name_ in list(self.cfg.backends[bd]['server']):
                    if srv_name_.startswith(srv_name):
                        del self.cfg.backends[bd]['server'][srv_name_]
        self.cfg.save()
        if self.svc.status() == 0:
            self.svc.reload()

    def health(self):
        """Return per-server health rows from the HAProxy stats socket,
        enabling the stats socket in the config first if needed."""
        try:
            # if self.cfg.defaults['stats'][''] == 'enable' and \
            if self.cfg.globals['stats'][
                    'socket'] == '/var/run/haproxy-stats.sock':
                pass
        except:
            # Stats socket not configured yet: set it up and reload.
            self.cfg.globals['stats']['socket'] = '/var/run/haproxy-stats.sock'
            self.cfg.globals['spread-checks'] = 5
            self.cfg.save()
            self.svc.reload()

        stats = haproxy.StatSocket().show_stat()
        # filter the stats
        relevant_keys = [
            "pxname",
            "svname",
            "status",
            "act",
            "bck",
            "chkfail",
            "chkdown",
            "downtime",
            "check_status",
            "check_duration",
        ]
        # Drop the aggregate FRONTEND/BACKEND rows, keep only servers.
        stats = filter(
            lambda health: health["svname"] not in ("FRONTEND", "BACKEND"),
            stats)
        for health in stats:
            for key in health.keys():
                if key not in relevant_keys:
                    del health[key]

        # TODO: return data in different format
        return stats

    # ---

    @rpc.command_method
    @validate.param('port', 'server_port', type=int)
    @validate.param('protocol', required=_rule_protocol)
    @validate.param('server_port', optional=True, type=int)
    @validate.param('backend', optional=_rule_backend)
    def create_listener(self, port=None, protocol=None, server_port=None,
                        server_protocol=None, backend=None):
        """
        APIDOC TBD.
        """
        LOG.debug('create_listener: %s, %s, %s, %s, %s, %s', self, port,
                  protocol, server_port, server_protocol, backend)
        if protocol:
            protocol = protocol.lower()
        ln = haproxy.naming('listen', protocol, port)
        bnd = haproxy.naming('backend', server_protocol or protocol,
                             server_port or port, backend=backend)
        # NOTE(review): this early return makes everything below
        # unreachable — looks like debug residue; confirm intent.
        return ln, bnd
        listener = backend = None
        LOG.debug(
            'HAProxyAPI.create_listener: listener = `%s`, backend = `%s`',
            ln, bnd)

        # NOTE(review): raising a string literal is invalid in modern
        # Python 2 and `'Duplicate' in e` tests membership on an exception
        # instance — both look broken; confirm before relying on this path.
        try:
            if self.cfg.listener[ln]:
                raise 'Duplicate'
        except Exception, e:
            if 'Duplicate' in e:
                raise exceptions.Duplicate('Listener %s:%s already exists' %
                                           (protocol, port))
        if protocol == 'tcp':
            listener = {'balance': 'roundrobin'}
        elif protocol == 'http':
            listener = {'option': {'forwardfor': True}}
        else:
            raise ValueError('Unexpected protocol: %s' % (protocol, ))
            #TODO: not correct for https or ssl...

        # listen config:
        listener.update({
            'bind': '*:%s' % port,
            'mode': protocol,
            'default_backend': bnd
        })

        backend_protocol = server_protocol or protocol
        if backend_protocol == 'tcp':
            backend = {}
        elif backend_protocol == 'http':
            backend = {'option': {'httpchk': True}}
        else:
            raise ValueError('Unexpected protocol: %s' % (backend_protocol, ))
            #TODO: not correct for https or ssl...

        # backend config:
        backend.update({'mode': backend_protocol})
        backend.update(HEALTHCHECK_DEFAULTS)

        # apply changes
        #with self.svc.trans(exit='running'):
        #    with self.cfg.trans(enter='reload', exit='working'):
        #TODO: change save() and reload(),`if True` condition to `with...` enter, exit
        if True:
            self.cfg['listen'][ln] = listener
            if not self.cfg.backend or not bnd in self.cfg.backend:
                self.cfg['backend'][bnd] = backend

            try:
                if iptables.enabled():
                    iptables.FIREWALL.ensure({
                        "jump": "ACCEPT", "protocol": "tcp",
                        "match": "tcp", "dport": port
                    })
            except Exception, e:
                raise exceptions.Duplicate(e)
            self.cfg.save()
            self.svc.reload()

            return listener
def _insert_iptables_rules(self, *args, **kwargs):
    """Ensure ACCEPT rules for TCP ports 80 and 443."""
    if iptables.enabled():
        iptables.FIREWALL.ensure([
            {"jump": "ACCEPT", "protocol": "tcp", "match": "tcp", "dport": dport}
            for dport in ("80", "443")
        ])
def _open_port(port):
    """Ensure a firewall ACCEPT rule exists for TCP *port*."""
    if not iptables.enabled():
        return
    iptables.FIREWALL.ensure([{
        "jump": "ACCEPT",
        "protocol": "tcp",
        "match": "tcp",
        "dport": str(port),
    }])
def make_proxy(self, port, backend_port=None, backends=None,
               check_timeout=None, maxconn=None, **default_server_params):
    """
    Add listener and backend sections to the haproxy conf and restart
    the service.

    :param port: listener port
    :type port: int
    :param backend_port: port for backend server to listen on
    :type backend_port: int
    :param backends: list of dicts, each dict represents role or server
    :type backends: list

    # :param roles: role ids (ints) or dicts with "id" key
    # :type roles: list
    # :param servers: server ips
    # :type servers: list

    :param check_timeout: ``timeout check`` - additional read timeout, e.g. "3s"
    :type check_timeout: str
    :param maxconn: set ``maxconn`` of the frontend
    :type maxconn: str

    :param **default_server_params: following kwargs will be applied to the
        ``default-server`` key of the backend

    :param check_interval: value for ``inter``, e.g. "3s"
    :type check_interval: str
    :param fall_threshold: value for ``fall``
    :type fall_threshold: int
    :param rise_threshold: value for ``rise``
    :type rise_threshold: int
    :param server_maxconn: value for ``maxconn``, not to confuse with the
        frontend's ``maxconn``
    :type server_maxconn: str
    :param down: value for ``disabled``
    :type down: bool
    :param backup: value for ``backup``
    :type backup: bool

    :returns: ?

    .. note:: official documentation on the global parameters and server \
    options can be found at \
    http://cbonte.github.com/haproxy-dconv/configuration-1.4.html
    """
    # args preprocessing: default values and short forms
    if not backend_port:
        backend_port = port
    # new: backends instead of separate roles/hosts args
    if not backends:
        backends = []
    # Split the mixed `backends` list into role specs and server specs.
    # NOTE: relies on Python 2 filter()/map() returning lists (extend() is
    # called on `servers` below).
    roles = filter(lambda spec: "farm_role_id" in spec, backends)
    servers = filter(lambda spec: "host" in spec, backends)
    # Normalize short forms: a bare int is a farm role id, a bare str is a host.
    roles = map(
        lambda x: {"farm_role_id": x} if isinstance(x, int) else dict(x),
        roles)
    servers = map(lambda x: {"host": x} if isinstance(x, str) else dict(x),
                  servers)

    # create a single server list with proper params for each server
    # 1. extract servers from the roles and apply role params to them
    roles_servers = []
    for role in roles:
        # After pop(), the remaining keys of `role` are per-role server params.
        role_id, role_params = role.pop("farm_role_id"), role
        role_servers = map(lambda ip: {"host": ip}, get_role_servers(role_id))
        LOG.debug("get_role_servers response: %s", pformat(role_servers))
        # for testing on a single machine purposes / get_servers returns "host:port"
        for server in role_servers:
            if ':' in server["host"]:
                host, port_ = server["host"].split(':')
                server["host"] = host
                # NOTE: port stays a string here — presumably consumed as text
                # when the haproxy conf is rendered; confirm downstream.
                server["port"] = port_
        #/
        # side-effect comprehension: apply the role-level params to each server
        [server.update(role_params) for server in role_servers]
        roles_servers.extend(role_servers)

    # 2. get all servers together, enable healthchecks, ensure `port` and
    # convert some keys
    servers.extend(roles_servers)
    [server.setdefault("check", True) for server in servers]
    [server.setdefault("port", backend_port) for server in servers]
    # `rename` converts the friendly kwarg names (check_interval, ...) to
    # haproxy option names — defined elsewhere in this module.
    servers = map(rename, servers)
    LOG.debug(" Backend servers:\n" + pformat(servers))

    # construct listener and backend sections for the conf
    listener_name = haproxy.naming('listen', "tcp", port)
    backend_name = haproxy.naming('backend', "tcp", port)
    listener = {
        'mode': "tcp",
        'balance': 'roundrobin',
        'bind': '*:%s' % port,
        'default_backend': backend_name,
    }
    if maxconn:
        listener["maxconn"] = maxconn
    backend = {
        "mode": "tcp",
        "server": {},
    }
    backend.update(HEALTHCHECK_DEFAULTS)
    if check_timeout:
        backend["timeout"]["check"] = check_timeout
    backend["default-server"].update(rename(default_server_params))
    for server in servers:
        backend['server'][self._server_name(server)] = server

    # update the cfg
    self.cfg['listen'][listener_name] = listener
    # only create the backend section if it doesn't already exist
    if not self.cfg.backend or not backend_name in self.cfg.backend:
        self.cfg['backend'][backend_name] = backend
    # open the listener port in the firewall
    if iptables.enabled():
        iptables.FIREWALL.ensure([{
            "jump": "ACCEPT", "protocol": "tcp", "match": "tcp", "dport": port
        }])
    self.cfg.save()
    # NOTE(review): status() == 0 appears to mean "running" — confirm against
    # the service wrapper's status codes.
    if self.svc.status() == 0:
        self.svc.reload()
def _insert_iptables_rules(self):
    """Open the MySQL port (3306) in the firewall when iptables is enabled."""
    if not iptables.enabled():
        return
    mysql_rule = {
        "jump": "ACCEPT",
        "protocol": "tcp",
        "match": "tcp",
        "dport": "3306",
    }
    iptables.FIREWALL.ensure([mysql_rule])
def make_proxy(self, port, backend_port=None, backends=None, template=None,
               check_timeout=None, maxconn=None, **default_server_params):
    """
    Add listen and backend sections to the haproxy conf and restart
    the service.

    :param port: listener port
    :type port: int
    :param backend_port: port for backend server to listen on
    :type backend_port: int
    :param backends: list of dicts, each dict represents role or server
    :type backends: list

    # :param roles: role ids (ints) or dicts with "id" key
    # :type roles: list
    # :param servers: server ips
    # :type servers: list

    :param check_timeout: ``timeout check`` - additional read timeout, e.g. "3s"
    :type check_timeout: str
    :param maxconn: set ``maxconn`` of the frontend
    :type maxconn: str

    :param **default_server_params: following kwargs will be applied to the
        ``default-server`` key of the backend

    :param check_interval: value for ``inter``, e.g. "3s"
    :type check_interval: str
    :param fall_threshold: value for ``fall``
    :type fall_threshold: int
    :param rise_threshold: value for ``rise``
    :type rise_threshold: int
    :param server_maxconn: value for ``maxconn``, not to confuse with the
        frontend's ``maxconn``
    :type server_maxconn: str
    :param down: value for ``disabled``
    :type down: bool
    :param backup: value for ``backup``
    :type backup: bool

    :returns: ?

    .. note:: official documentation on the global parameters and server \
    options can be found at \
    http://cbonte.github.com/haproxy-dconv/configuration-1.4.html
    """
    # args preprocessing: default values and short forms
    if not backend_port:
        backend_port = port
    # new: backends instead of separate roles/hosts args
    if not backends:
        backends = []
    # Split the mixed `backends` list into role specs and server specs.
    # NOTE: relies on Python 2 filter()/map() returning lists (extend() is
    # called on `servers` below).
    roles = filter(lambda spec: "farm_role_id" in spec, backends)
    servers = filter(lambda spec: "host" in spec, backends)
    # Normalize short forms: a bare int is a farm role id, a bare str is a host.
    roles = map(
        lambda x: dict(farm_role_id=x) if isinstance(x, int) else dict(x),
        roles)
    servers = map(
        lambda x: dict(host=x) if isinstance(x, str) else dict(x),
        servers)
    role_ids = [role['farm_role_id'] for role in roles]
    # explicit servers get no name yet; "host" is renamed to "address"
    for server in servers:
        server['name'] = None
        server['address'] = server.pop('host')

    # create a single server list with proper params for each server
    # 1. extract servers from the roles and apply role params to them
    roles_servers = []
    for role in roles:
        # After pop(), the remaining keys of `role` are per-role server params;
        # "farm_role_alias" and "network" are consumed and not forwarded.
        role_id, role_params = role.pop("farm_role_id"), role
        role.pop('farm_role_alias', None)
        role_servers = map(
            lambda ip: {"address": ip},
            get_role_servers(role_id,
                             network=role_params.pop('network', None)))
        LOG.debug("get_role_servers response: %s", pformat(role_servers))
        # for testing on a single machine purposes / get_servers returns "address:port"
        for server in role_servers:
            if ':' in server["address"]:
                address, port_ = server["address"].split(':')
                server["address"] = address
                # NOTE: port stays a string here — presumably consumed as text
                # when the haproxy conf is rendered; confirm downstream.
                server["port"] = port_
        #/
        # apply the role-level params to each of the role's servers
        for server in role_servers:
            server.update(role_params)
        roles_servers.extend(role_servers)

    # 2. get all servers together, enable healthchecks, ensure `port` and
    # convert some keys
    servers.extend(roles_servers)
    for server in servers:
        server.setdefault('check', True)
        server.setdefault('port', backend_port)
    # `normalize_params` converts the friendly kwarg names (check_interval, ...)
    # to haproxy option names — defined elsewhere in this module.
    servers = map(normalize_params, servers)
    LOG.debug(" Backend servers:\n" + pformat(servers))

    # construct listener and backend sections for the conf
    # listener_name = haproxy.naming('listen', "tcp", port)
    backend_name = self.naming_mgr.make_name(port, 'backend', role_ids)
    # OrderedDicts keep key order stable in the rendered haproxy conf
    bind_addr = OrderedDict((('address', '*'), ('port', str(port))))
    listener = OrderedDict((
        ('name', self.naming_mgr.make_name(port, 'listen', role_ids)),
        ('mode', 'tcp'),
        ('balance', {
            'algorithm': 'roundrobin'
        }),
        ('bind', {
            'bind_addr': bind_addr
        }),
        ('default_backend', backend_name),
    ))
    if maxconn:
        listener['maxconn'] = maxconn
    backend = OrderedDict((
        ('name', backend_name),
        ('mode', 'tcp'),
        ('server', {}),
    ))
    backend.update(HEALTHCHECK_DEFAULTS)
    if check_timeout:
        backend["timeout_check"] = check_timeout
    backend["default-server"].update(
        normalize_params(default_server_params))
    # assign generated names and fix per-server key ordering
    _servers = []
    for server in servers:
        server['name'] = self._server_name(server)
        _servers.append(self._ordered_server_params(server))
    backend['server'] = _servers

    # update the cfg: replace an existing listen section in place,
    # create a backend section only if one isn't there yet
    listener_xpath = self.cfg.find_one_xpath('listen', 'name',
                                             listener['name'])
    if listener_xpath is None:
        self.cfg.add('listen', listener)
    else:
        self.cfg.set(listener_xpath, listener)
    backend_xpath = self.cfg.find_one_xpath('backend', 'name',
                                            backend['name'])
    if backend_xpath is None:
        self.cfg.add('backend', backend)
    self.cfg.save()
    if template:
        # strip a single trailing newline, then wrap the user-supplied
        # template in delimiting marker comments before appending it to
        # the listen section
        if template.endswith('\n'):
            template = template[:-1]
        template = '##### listen template start #####\n' + \
                   template + \
                   '\n##### listen template end #####'
        self.cfg.extend_section(template, listener['name'])
    # open the listener port in the firewall
    if iptables.enabled():
        open_port(port)
    if self.svc.status() == Status.RUNNING:
        self.svc.reload()