def __init__(self, config_id, config, runner, labels, log=None):
    """Store the builder's configuration and wire up logging.

    :param config_id: unique ID of the config being applied
    :param config: container configuration data
    :param runner: runner object used to execute container commands
    :param labels: labels to apply to created containers
    :param log: optional pre-configured logger; one is created when omitted
    """
    self.config_id = config_id
    self.config = config
    self.labels = labels
    self.runner = runner
    # Reuse the caller's logger when one was handed in.
    self.log = log if log else common.configure_logging(__name__)
def service_create(container, cconfig, sysdir=constants.SYSTEMD_DIR, log=None):
    """Create a service in systemd

    Writes a `tripleo_<container>.service` unit file into *sysdir*, then
    reloads systemd and enables/starts the unit.

    :param container: container name
    :type container: String
    :param cconfig: container configuration
    :type cconfig: Dictionary
    :param sysdir: systemd unit files directory
    :type sysdir: String
    :param log: optional pre-defined logger for messages
    :type log: logging.RootLogger
    """
    log = log or common.configure_logging(__name__)
    # We prefix the SystemD service so we can identify them better:
    # e.g. systemctl list-unit-files | grep paunch
    # It'll help to not conflict when rpms are installed on the host and
    # have the same service name as their container name.
    # For example haproxy rpm and haproxy container would have the same
    # service name so the prefix will help to not having this conflict
    # when removing the rpms during a cleanup by the operator.
    service = 'tripleo_' + container

    # Space-separated list of units this service Wants=, derived from the
    # config's 'depends_on' entries.
    wants = " ".join(
        str(x) + '.service' for x in cconfig.get('depends_on', []))

    restart = cconfig.get('restart', 'always')
    stop_grace_period = cconfig.get('stop_grace_period', '10')
    # SystemD doesn't have the equivalent of docker unless-stopped.
    # Let's force 'always' so containers aren't restarted when stopped by
    # systemd, but restarted when in failure. Also this code is only for
    # podman now, so nothing changed for Docker deployments.
    if restart == 'unless-stopped':
        restart = 'always'

    # NOTE(review): plain string concatenation — assumes sysdir carries a
    # trailing slash (constants.SYSTEMD_DIR presumably does; confirm).
    sysd_unit_f = sysdir + service + '.service'
    log.debug('Creating systemd unit file: %s' % sysd_unit_f)
    s_config = {
        'name': container,
        'wants': wants,
        'restart': restart,
        'stop_grace_period': stop_grace_period,
    }
    with open(sysd_unit_f, 'w') as unit_file:
        # World-readable unit file, as systemd expects.
        os.chmod(unit_file.name, 0o644)
        unit_file.write("""[Unit]
Description=%(name)s container
After=paunch-container-shutdown.service
Wants=%(wants)s
[Service]
Restart=%(restart)s
ExecStart=/usr/bin/podman start -a %(name)s
ExecStop=/usr/bin/podman stop -t %(stop_grace_period)s %(name)s
KillMode=process
[Install]
WantedBy=multi-user.target""" % s_config)
    # NOTE(review): systemctl return codes are ignored here (best-effort).
    subprocess.call(['systemctl', 'daemon-reload'])
    subprocess.call(['systemctl', 'enable', '--now', service])
def delete(config_ids, managed_by, cont_cmd='podman', default_runtime=None,
           log_level=None, log_file=None):
    """Delete containers with the specified config IDs.

    :param list config_ids: List of config IDs to delete the containers
        for.
    :param str managed_by: Name of the tool managing the containers. Only
        containers labelled with this will be modified.
    :param str cont_cmd: Optional override to the container command to run.
    :param str default_runtime: (deprecated) does nothing.
    :param int log_level: optional log level for loggers
    :param str log_file: optional log file for messages
    """
    log = common.configure_logging(__name__, log_level, log_file)
    if default_runtime:
        log.warning("DEPRECATION: 'default_runtime' does nothing, "
                    "use 'cont_cmd' instead")
    if not config_ids:
        # BUG FIX: Logger.warn() is a deprecated alias; use warning() as
        # the rest of this module does.
        log.warning('No config IDs specified')
    if cont_cmd == 'podman':
        r = runner.PodmanRunner(managed_by, cont_cmd=cont_cmd, log=log)
        log.warning("paunch cleanup is partially supported with podman")
    else:
        r = runner.DockerRunner(managed_by, cont_cmd=cont_cmd, log=log)
    for conf_id in config_ids:
        r.remove_containers(conf_id)
def __init__(self, module, results):
    """Initialize the manager from Ansible module params and dispatch.

    Copies every module parameter into an attribute, configures logging
    (DEBUG when 'debug' is set, WARNING otherwise), loads the config file
    when one is given, then immediately runs the requested action
    ('apply' or 'cleanup').
    """
    super(PaunchManager, self).__init__()
    self.module = module
    self.results = results
    self.config = self.module.params['config']
    self.config_id = self.module.params['config_id']
    self.action = self.module.params['action']
    self.healthcheck_disabled = \
        self.module.params['healthcheck_disabled']
    self.container_cli = self.module.params['container_cli']
    self.container_log_stdout_path = \
        self.module.params['container_log_stdout_path']
    self.managed_by = self.module.params['managed_by']
    self.debug = self.module.params['debug']
    self.log_file = self.module.params['log_file']

    if self.debug:
        self.log_level = 3
    else:
        # if debug is disabled, only show WARNING level
        self.log_level = 1

    self.log = putils_common.configure_logging('paunch-ansible',
                                               level=self.log_level,
                                               log_file=self.log_file)

    if self.config:
        self.config_yaml = putils_common.load_config(self.config)

    # Dispatch straight from the constructor; any other action is a no-op.
    if self.action == 'apply':
        self.paunch_apply()
    elif self.action == 'cleanup':
        self.paunch_cleanup()
def list(managed_by, cont_cmd='podman', default_runtime=None,
         log_level=None, log_file=None):
    """List all containers associated with all config IDs.

    :param str managed_by: Name of the tool managing the containers. Only
        containers labelled with this will be modified.
    :param str cont_cmd: Optional override to the container command to run.
    :param str default_runtime: (deprecated) does nothing.
    :param int log_level: optional log level for loggers
    :param int log_file: optional log file for messages

    :returns a dict where the key is the config ID and the value is a list
             of 'podman inspect' dicts for each container.
    :rtype: defaultdict(list)
    """
    log = common.configure_logging(__name__, log_level, log_file)
    if default_runtime:
        log.warning("DEPRECATION: 'default_runtime' does nothing, "
                    "use 'cont_cmd' instead")
    # Pick the runner implementation matching the requested engine.
    if cont_cmd == 'podman':
        runner_cls = runner.PodmanRunner
    else:
        runner_cls = runner.DockerRunner
    engine = runner_cls(managed_by, cont_cmd=cont_cmd, log=log)
    return engine.list_configs()
def service_delete(container, sysdir=constants.SYSTEMD_DIR, log=None):
    """Delete a service in systemd

    Stops, disables and removes the `tripleo_<container>` service unit
    plus its healthcheck service and timer, when their unit files exist.

    :param container: container name
    :type container: String
    :param sysdir: systemd unit files directory
    :type sysdir: string
    :param log: optional pre-defined logger for messages
    :type log: logging.RootLogger
    """
    log = log or common.configure_logging(__name__)
    # prefix is explained in the service_create().
    service = 'tripleo_' + container

    sysd_unit_f = service + '.service'
    sysd_health_f = service + '_healthcheck.service'
    sysd_timer_f = service + '_healthcheck.timer'
    for sysd_f in sysd_unit_f, sysd_health_f, sysd_timer_f:
        if os.path.isfile(sysdir + sysd_f):
            log.debug('Stopping and disabling systemd service for %s' %
                      service)
            # NOTE(review): systemctl return codes are ignored here
            # (best-effort teardown).
            subprocess.call(['systemctl', 'stop', sysd_f])
            subprocess.call(['systemctl', 'disable', sysd_f])
            log.debug('Removing systemd unit file %s' % sysd_f)
            os.remove(sysdir + sysd_f)
            subprocess.call(['systemctl', 'daemon-reload'])
        else:
            log.warning('No systemd unit file was found for %s' % sysd_f)
def cleanup(config_ids, managed_by, cont_cmd='podman', default_runtime=None,
            log_level=None, log_file=None):
    """Delete containers no longer applied, rename others to preferred name.

    :param list config_ids: List of config IDs still applied. All
        containers managed by this tool will be deleted if their config ID
        is not specified in this list.
    :param str managed_by: Name of the tool managing the containers. Only
        containers labelled with this will be modified.
    :param str cont_cmd: Optional override to the container command to run.
    :param str default_runtime: (deprecated) does nothing.
    :param int log_level: optional log level for loggers
    :param int log_file: optional log file for messages
    """
    log = common.configure_logging(__name__, log_level, log_file)
    if default_runtime:
        log.warning("DEPRECATION: 'default_runtime' does nothing, "
                    "use 'cont_cmd' instead")
    # Build the engine-specific runner; podman support is still partial.
    if cont_cmd == 'podman':
        engine = runner.PodmanRunner(managed_by, cont_cmd=cont_cmd, log=log)
        log.warning("paunch cleanup is partially supported with podman")
    else:
        engine = runner.DockerRunner(managed_by, cont_cmd=cont_cmd, log=log)
    engine.delete_missing_configs(config_ids)
    engine.rename_containers()
def apply(config_id, config, managed_by, labels=None, cont_cmd='podman',
          default_runtime=None, log_level=None, log_file=None,
          cont_log_path=None, healthcheck_disabled=False):
    """Execute supplied container configuration.

    :param str config_id: Unique config ID, should not be re-used until any
        running containers with that config ID have been deleted.
    :param dict config: Configuration data describing container actions to
        apply.
    :param str managed_by: Name of the tool managing the containers. Only
        containers labelled with this will be modified.
    :param dict labels: Optional keys/values of labels to apply to
        containers created with this invocation.
    :param str cont_cmd: Optional override to the container command to run.
    :param str default_runtime: (deprecated) does nothing.
    :param int log_level: optional log level for loggers
    :param str log_file: optional log file for messages
    :param str cont_log_path: optional log path for containers. Works only
        for podman engine. Must be an absolute path.
    :param bool healthcheck_disabled: optional boolean to disable container
        healthcheck.

    :returns (list, list, int) lists of stdout and stderr for each
        execution, and a single return code representing the overall
        success of the apply.
    :rtype: tuple
    """
    log = common.configure_logging(__name__, log_level, log_file)
    if default_runtime:
        log.warning("DEPRECATION: 'default_runtime' does nothing, "
                    "use 'cont_cmd' instead")
    # Pair the engine-specific runner with its matching builder.
    if cont_cmd == 'podman':
        engine = runner.PodmanRunner(managed_by, cont_cmd=cont_cmd, log=log)
        builder = podman.PodmanBuilder(
            config_id=config_id,
            config=config,
            runner=engine,
            labels=labels,
            log=log,
            cont_log_path=cont_log_path,
            healthcheck_disabled=healthcheck_disabled
        )
    else:
        engine = runner.DockerRunner(managed_by, cont_cmd=cont_cmd, log=log)
        builder = compose1.ComposeV1Builder(
            config_id=config_id,
            config=config,
            runner=engine,
            labels=labels,
            log=log
        )
    return builder.apply()
def __init__(self, module, results):
    """Initialize the manager from Ansible module params and dispatch.

    Refuses to run when a tripleo-ansible sentinel file marks the host as
    no longer paunch-managed. Otherwise copies every module parameter into
    an attribute, configures logging, loads the (possibly overridden)
    config, then immediately runs the requested action.
    """
    super(PaunchManager, self).__init__()
    self.module = module
    self.results = results

    # Fail early if containers were not deployed by Paunch before.
    if os.path.isfile('/var/lib/tripleo-config/.ansible-managed'):
        msg = ('Containers were previously deployed with '
               'tripleo-ansible, paunch module can not be used. '
               'Make sure EnablePaunch is set to False.')
        self.module.fail_json(msg=msg, stdout='', stderr='', rc=1)

    self.config = self.module.params['config']
    # A single-element list is unwrapped to its only element.
    if (isinstance(self.module.params['config_id'], list)
            and len(self.module.params['config_id']) == 1):
        self.config_id = self.module.params['config_id'][0]
    else:
        self.config_id = self.module.params['config_id']
    self.action = self.module.params['action']
    self.healthcheck_disabled = \
        self.module.params['healthcheck_disabled']
    self.container_cli = self.module.params['container_cli']
    self.cleanup = self.module.params['cleanup']
    self.config_overrides = self.module.params['config_overrides']
    self.container_log_stdout_path = \
        self.module.params['container_log_stdout_path']
    self.managed_by = self.module.params['managed_by']
    self.debug = self.module.params['debug']
    self.log_file = self.module.params['log_file']

    if self.debug:
        self.log_level = 3
    else:
        # if debug is disabled, only show WARNING level
        self.log_level = 1

    self.log = putils_common.configure_logging('paunch-ansible',
                                               level=self.log_level,
                                               log_file=self.log_file)

    if self.config:
        # A single JSON config file disables cleanup: there is no full
        # config set to reconcile against.
        if self.config.endswith('.json'):
            self.module.warn('Only one config was given, cleanup disabled')
            self.cleanup = False
        self.config_yaml = putils_common.load_config(
            self.config, overrides=self.config_overrides)

    # Dispatch straight from the constructor; any other action is a no-op.
    if self.action == 'apply':
        self.paunch_apply()
    elif self.action == 'cleanup':
        self.paunch_cleanup()
def healthcheck_timer_create(container, cconfig,
                             sysdir='/etc/systemd/system/', log=None):
    """Create a systemd timer for a healthcheck

    Writes a `tripleo_<container>_healthcheck.timer` unit file, enables
    and starts it, and ties it to the container's service via
    `systemctl add-requires`.

    :param container: container name
    :type container: String
    :param cconfig: container configuration
    :type cconfig: Dictionary
    :param sysdir: systemd unit files directory
    :type sysdir: string
    :param log: optional pre-defined logger for messages
    :type log: logging.RootLogger
    """
    log = log or common.configure_logging(__name__)
    service = 'tripleo_' + container
    healthcheck_timer = service + '_healthcheck.timer'
    sysd_timer_f = sysdir + healthcheck_timer
    log.debug('Creating systemd timer file: %s' % sysd_timer_f)
    interval = cconfig.get('check_interval', 60)
    s_config = {
        'name': container,
        'service': service,
        'interval': interval,
        # Spread timer firings over 3/4 of the interval.
        # NOTE(review): true division yields a float in Python 3
        # (e.g. 45.0) — systemd appears to accept this; confirm.
        'randomize': int(interval) * 3 / 4
    }
    with open(sysd_timer_f, 'w') as timer_file:
        # World-readable unit file, as systemd expects.
        os.chmod(timer_file.name, 0o644)
        timer_file.write("""[Unit]
Description=%(name)s container healthcheck
PartOf=%(service)s.service
[Timer]
OnActiveSec=120
OnUnitActiveSec=%(interval)s
RandomizedDelaySec=%(randomize)s
[Install]
WantedBy=timers.target""" % s_config)
    try:
        subprocess.check_call(['systemctl', 'enable', '--now',
                               healthcheck_timer])
        subprocess.check_call(['systemctl', 'add-requires',
                               service + '.service',
                               healthcheck_timer])
        subprocess.check_call(['systemctl', 'daemon-reload'])
    except subprocess.CalledProcessError:
        # Log with traceback, then let the caller see the failure.
        log.exception("systemctl failed")
        raise
def execute(cmd, log=None, quiet=False):
    """Run a command and capture its output.

    :param cmd: command and arguments to execute
    :type cmd: List
    :param log: optional pre-defined logger for messages
    :type log: logging.RootLogger
    :param quiet: when True, skip debug-logging the command and output
    :type quiet: Boolean
    :returns: (stdout, stderr, returncode), streams decoded as UTF-8
    :rtype: Tuple
    """
    log = log or common.configure_logging(__name__)
    if not quiet:
        log.debug('$ %s' % ' '.join(cmd))

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if not quiet:
        log.debug(out)
        log.debug(err)
    return (out.decode('utf-8'), err.decode('utf-8'), proc.returncode)
def __init__(self, module, results):
    """Initialize the manager from Ansible module params and dispatch.

    Copies every module parameter into an attribute, configures logging,
    loads container configs either from a directory of `hashed-*.json`
    files or from a single YAML/JSON file, then immediately runs the
    requested action.
    """
    super(PaunchManager, self).__init__()
    self.module = module
    self.results = results
    self.config = self.module.params['config']
    self.config_id = self.module.params['config_id']
    self.action = self.module.params['action']
    self.healthcheck_disabled = \
        self.module.params['healthcheck_disabled']
    self.container_cli = self.module.params['container_cli']
    self.container_log_stdout_path = \
        self.module.params['container_log_stdout_path']
    self.managed_by = self.module.params['managed_by']
    self.debug = self.module.params['debug']
    self.log_file = self.module.params['log_file']

    if self.debug:
        self.log_level = 3
    else:
        # if debug is disabled, only show WARNING level
        self.log_level = 1

    self.log = putils_common.configure_logging('paunch-ansible',
                                               level=self.log_level,
                                               log_file=self.log_file)

    if self.config:
        if os.path.isdir(self.config):
            # Directory mode: merge every hashed-<name>.json file into a
            # dict keyed by <name> (the 'hashed-' prefix is stripped).
            container_configs = {}
            config_files = [c_json for c_json in os.listdir(self.config)
                            if c_json.startswith('hashed-')
                            and c_json.endswith('.json')]
            for cf in config_files:
                with open(os.path.join(self.config, cf), 'r') as f:
                    c = re.sub('^hashed-', '', os.path.splitext(cf)[0])
                    container_configs[c] = {}
                    container_configs[c].update(yaml.safe_load(f))
            self.config_yaml = container_configs
        else:
            # Single-file mode: the file is parsed as YAML (JSON is a
            # YAML subset, so .json files work too).
            with open(self.config, 'r') as f:
                self.config_yaml = yaml.safe_load(f)

    # Dispatch straight from the constructor; any other action is a no-op.
    if self.action == 'apply':
        self.paunch_apply()
    elif self.action == 'cleanup':
        self.paunch_cleanup()
def __init__(self, managed_by, cont_cmd, log=None, cont_log_path=None,
             healthcheck_disabled=False):
    """Record runner settings and warn about the deprecated docker engine.

    :param managed_by: name of the tool managing the containers
    :param cont_cmd: container command ('podman' or 'docker')
    :param log: optional pre-configured logger; one is created when omitted
    :param cont_log_path: optional container log path (podman only)
    :param healthcheck_disabled: whether container healthchecks are off
    """
    self.managed_by = managed_by
    self.cont_cmd = cont_cmd
    # Reuse the caller's logger when one was handed in.
    self.log = log if log else common.configure_logging(__name__)
    self.cont_log_path = cont_log_path
    self.healthcheck_disabled = healthcheck_disabled
    # Surface the docker deprecation once, at construction time.
    if self.cont_cmd == 'docker':
        self.log.warning('docker runtime is deprecated in Stein '
                         'and will be removed in Train.')
def __init__(self, config_id, config, runner, labels, log=None,
             cont_log_path=None, healthcheck_disabled=False):
    """Store the builder's configuration and wire up logging.

    :param config_id: unique ID of the config being applied
    :param config: container configuration data
    :param runner: runner object used to execute container commands
    :param labels: labels to apply to created containers
    :param log: optional pre-configured logger; one is created when omitted
    :param cont_log_path: optional container log path
    :param healthcheck_disabled: whether container healthchecks are off
    """
    self.config_id = config_id
    self.config = config
    self.labels = labels
    self.runner = runner
    # Reuse the caller's logger when one was handed in.
    self.log = log if log else common.configure_logging(__name__)
    self.cont_log_path = cont_log_path
    self.healthcheck_disabled = healthcheck_disabled
def service_delete(container, sysdir=constants.SYSTEMD_DIR, log=None):
    """Delete a service in systemd

    Stops, disables and removes the `tripleo_<container>` service unit,
    its healthcheck service/timer, the timer symlink inside the service's
    `.requires` directory, and finally the `.requires` directory itself.

    :param container: container name
    :type container: String
    :param sysdir: systemd unit files directory
    :type sysdir: string
    :param log: optional pre-defined logger for messages
    :type log: logging.RootLogger
    """
    log = log or common.configure_logging(__name__)
    # prefix is explained in the service_create().
    service = 'tripleo_' + container

    sysd_unit_f = service + '.service'
    sysd_health_f = service + '_healthcheck.service'
    sysd_timer_f = service + '_healthcheck.timer'
    sysd_health_req_d = sysd_unit_f + '.requires'
    # Path of the timer link inside the .requires directory; processed
    # first so the directory can be emptied before removal below.
    sysd_health_req_f = sysd_health_req_d + '/' + sysd_timer_f
    for sysd_f in sysd_health_req_f, sysd_unit_f, sysd_health_f, sysd_timer_f:
        if os.path.isfile(sysdir + sysd_f):
            log.debug('Stopping and disabling systemd service for %s' %
                      service)
            # systemctl takes the bare unit name, not the relative path.
            sysd_unit = os.path.basename(sysd_f)
            try:
                subprocess.check_call(['systemctl', 'stop', sysd_unit])
                subprocess.check_call(['systemctl', 'disable', sysd_unit])
            except subprocess.CalledProcessError:
                log.exception("systemctl failed")
                raise
            log.debug('Removing systemd unit file %s' % sysd_f)
            if os.path.exists(sysdir + sysd_f):
                os.remove(sysdir + sysd_f)
            try:
                subprocess.check_call(['systemctl', 'daemon-reload'])
            except subprocess.CalledProcessError:
                log.exception("systemctl failed")
                raise
        else:
            log.info('No systemd unit file was found for %s' % sysd_f)
    # The .requires directory should now be empty; rmdir fails otherwise.
    if os.path.exists(os.path.join(sysdir, sysd_health_req_d)):
        log.info('Removing %s.requires' % service)
        os.rmdir(os.path.join(sysdir, sysd_health_req_d))
def healthcheck_create(container, sysdir='/etc/systemd/system/', log=None,
                       test='/openstack/healthcheck'):
    """Create a healthcheck for a service in systemd

    Writes a oneshot `tripleo_<container>_healthcheck.service` unit that
    runs *test* inside the container via `podman exec`. The unit is only
    written, not enabled or started here.

    :param container: container name
    :type container: String
    :param sysdir: systemd unit files directory
    :type sysdir: String
    :param log: optional pre-defined logger for messages
    :type log: logging.RootLogger
    :param test: optional test full command
    :type test: String
    """
    log = log or common.configure_logging(__name__)
    service = 'tripleo_' + container
    healthcheck = service + '_healthcheck.service'
    sysd_unit_f = sysdir + healthcheck
    log.debug('Creating systemd unit file: %s' % sysd_unit_f)
    s_config = {
        'name': container,
        'service': service,
        # NOTE(review): 'restart' is set but not referenced in the unit
        # template below — confirm whether it is intentionally unused.
        'restart': 'restart',
        'test': test,
    }
    with open(sysd_unit_f, 'w') as unit_file:
        # World-readable unit file, as systemd expects.
        os.chmod(unit_file.name, 0o644)
        unit_file.write("""[Unit]
Description=%(name)s healthcheck
After=paunch-container-shutdown.service %(service)s.service
Requisite=%(service)s.service
[Service]
Type=oneshot
ExecStart=/usr/bin/podman exec %(name)s %(test)s
[Install]
WantedBy=multi-user.target
""" % s_config)
def healthcheck_timer_create(container, cconfig,
                             sysdir='/etc/systemd/system/', log=None):
    """Create a systemd timer for a healthcheck

    Writes a `tripleo_<container>_healthcheck.timer` unit file, reloads
    systemd, then enables and starts the timer.

    :param container: container name
    :type container: String
    :param cconfig: container configuration
    :type cconfig: Dictionary
    :param sysdir: systemd unit files directory
    :type sysdir: string
    :param log: optional pre-defined logger for messages
    :type log: logging.RootLogger
    """
    log = log or common.configure_logging(__name__)
    service = 'tripleo_' + container
    healthcheck_timer = service + '_healthcheck.timer'
    sysd_timer_f = sysdir + healthcheck_timer
    log.debug('Creating systemd timer file: %s' % sysd_timer_f)
    interval = cconfig.get('check_interval', 30)
    s_config = {'name': container, 'service': service, 'interval': interval}
    with open(sysd_timer_f, 'w') as timer_file:
        # World-readable unit file, as systemd expects.
        os.chmod(timer_file.name, 0o644)
        timer_file.write("""[Unit]
Description=%(name)s container healthcheck
Requires=%(service)s_healthcheck.service
[Timer]
OnUnitActiveSec=90
OnCalendar=*-*-* *:*:00/%(interval)s
[Install]
WantedBy=timers.target""" % s_config)
    # BUG FIX: reload systemd *before* enabling/starting the freshly
    # written timer — systemd has not loaded the new unit file yet.
    # This also matches the ordering used by service_create().
    subprocess.call(['systemctl', 'daemon-reload'])
    subprocess.call(['systemctl', 'enable', '--now', healthcheck_timer])
def __init__(self, managed_by, cont_cmd, log=None):
    """Record runner identity and wire up logging.

    :param managed_by: name of the tool managing the containers
    :param cont_cmd: container command to run
    :param log: optional pre-configured logger; one is created when omitted
    """
    self.managed_by = managed_by
    self.cont_cmd = cont_cmd
    # Reuse the caller's logger when one was handed in.
    self.log = log if log else common.configure_logging(__name__)
def debug(config_id, container_name, action, config, managed_by, labels=None,
          cont_cmd='podman', default_runtime=None, log_level=None,
          log_file=None):
    """Execute supplied container configuration.

    :param str config_id: Unique config ID, should not be re-used until any
        running containers with that config ID have been deleted.
    :param str container_name: Name of the container in the config you
        wish to manipulate.
    :param str action: Action to take.
    :param dict config: Configuration data describing container actions to
        apply.
    :param str managed_by: Name of the tool managing the containers. Only
        containers labeled with this will be modified.
    :param dict labels: Optional keys/values of labels to apply to
        containers created with this invocation.
    :param str cont_cmd: Optional override to the container command to run.
    :param str default_runtime: (deprecated) does nothing.
    :param int log_level: optional log level for loggers
    :param int log_file: optional log file for messages

    :returns integer return value from running command or failure for any
        other reason.
    :rtype: int

    :raises ValueError: when *action* is not one of the supported actions.
    """
    log = common.configure_logging(__name__, log_level, log_file)
    if default_runtime:
        log.warning("DEPRECATION: 'default_runtime' does nothing, "
                    "use 'cont_cmd' instead")
    # Pair the engine-specific runner with its matching builder.
    if cont_cmd == 'podman':
        r = runner.PodmanRunner(managed_by, cont_cmd=cont_cmd, log=log)
        builder = podman.PodmanBuilder(
            config_id=config_id,
            config=config,
            runner=r,
            labels=labels,
            log=log
        )
    else:
        r = runner.DockerRunner(managed_by, cont_cmd=cont_cmd, log=log)
        builder = compose1.ComposeV1Builder(
            config_id=config_id,
            config=config,
            runner=r,
            labels=labels,
            log=log
        )
    if action == 'print-cmd':
        cmd = [
            r.cont_cmd,
            'run',
            '--name',
            r.unique_container_name(container_name)
        ]
        builder.container_run_args(cmd, container_name)
        print(' '.join(cmd))
    elif action == 'run':
        cmd = [
            r.cont_cmd,
            'run',
            '--name',
            r.unique_container_name(container_name)
        ]
        builder.container_run_args(cmd, container_name)
        return r.execute_interactive(cmd, log)
    elif action == 'dump-yaml':
        print(yaml.safe_dump(config, default_flow_style=False))
    elif action == 'dump-json':
        print(json.dumps(config, indent=4))
    else:
        # BUG FIX: the original passed TWO strings to ValueError (a stray
        # comma split the message), so the exception carried a tuple of
        # fragments instead of one readable message.
        raise ValueError('action should be one of: "dump-json", '
                         '"dump-yaml", "print-cmd", or "run"')
def execute_interactive(cmd, log=None):
    """Run a command attached to the caller's stdin/stdout/stderr.

    :param cmd: command and arguments to execute
    :type cmd: List
    :param log: optional pre-defined logger for messages
    :type log: logging.RootLogger
    :returns: the command's return code
    :rtype: int
    """
    log = log or common.configure_logging(__name__)
    rendered = ' '.join(cmd)
    log.debug('$ %s' % rendered)
    return subprocess.call(cmd)