def serve(etcd_discovery=None, size=3, *args, **kwargs):
    """Configure and (re)start an etcd2 cluster under Upstart.

    :param etcd_discovery: existing discovery URL; generated by
        ``shared_serve`` when ``None``
    :param size: desired cluster size, forwarded to ``shared_serve``
    :keyword cluster_name: optional suffix for the Upstart job name
    :return: the (possibly newly generated) etcd discovery URL
    :raises RuntimeError: when a running cluster refuses to stop
    """
    # `filter(None, ...)` drops an absent/empty cluster_name suffix;
    # `.get` avoids the KeyError the old `kwargs['cluster_name']` raised
    # when the caller supplied no cluster_name.
    cluster_name = '-'.join(filter(None, ('etcd2', kwargs.get('cluster_name'))))
    status_cmd = 'status {cluster_name}'.format(cluster_name=cluster_name)
    status = run(status_cmd, warn_only=True, quiet=True)
    if status.succeeded and status.endswith('start/running'):
        sudo('stop {cluster_name}'.format(cluster_name=cluster_name))
        # re-query to confirm the stop actually took effect
        if 'start/running' in run(status_cmd, warn_only=True, quiet=True):
            raise RuntimeError('Cluster hasn\'t stopped')
    etcd_discovery = shared_serve(etcd_discovery, size, kwargs)
    append('/etc/environment',
           'ETCD_DISCOVERY={etcd_discovery}'.format(etcd_discovery=etcd_discovery),
           use_sudo=True)
    data_dir = '/var/{cluster_name}_data_dir'.format(cluster_name=cluster_name)
    sudo('mkdir -p "{data_dir}"'.format(data_dir=data_dir))
    upload_template(
        path.join(path.dirname(resource_filename('offregister_etcd', '__init__.py')),
                  'data', 'etcd2.upstart.conf'),
        '/etc/init/{cluster_name}.conf'.format(cluster_name=cluster_name),
        # Upstart templates want string values; caller kwargs win over locals
        context={k: str(v) for k, v in update_d(kwargs, locals()).iteritems()},
        use_sudo=True
    )
    sudo('initctl reload-configuration')
    sudo('start {cluster_name}'.format(cluster_name=cluster_name), warn_only=True)
    return etcd_discovery
def __init__(self, env, node, node_name, dns_name, **kwargs):
    """Wire up the Ansible runtime objects for this node.

    :param env: fabric-style environment object (hosts, password, ...)
    :param node: provider node record
    :param node_name: logical name of the node
    :param dns_name: DNS name used to address the node
    :keyword ansible_settings: optional dict with ``options`` and
        ``passwords`` overrides for the defaults below
    """
    super(OffAnsible, self).__init__(env, node, node_name, dns_name)
    self.env = env

    def _exposed(attr):
        # public, non-special attributes of `env` worth forwarding
        return (not attr.startswith("_")
                and attr not in ("count", "index")
                and not isinstance(getattr(env, attr), property))

    self.driver_node_env = dict(
        (attr, getattr(env, attr)) for attr in dir(env) if _exposed(attr)
    )

    settings = kwargs.get("ansible_settings", {})

    # Core Ansible plumbing: variable store, YAML/JSON loader, CLI options
    self.variable_manager = VariableManager()
    self.loader = DataLoader()
    base_options = {
        "connection": "ssh",
        "module_path": "/path/to/mymodules",
        "forks": 100,
        "become": None,
        "become_method": None,
        "become_user": None,
        "check": False,
    }
    self.options = Options(**update_d(base_options, settings.get("options")))
    self.passwords = update_d(
        dict(vault_pass="******"),
        settings.get("passwords"),
        conn_pass=self.env_namedtuple.password
        if self.env_namedtuple.password else None,
    )

    # Instantiate our ResultCallback for handling results as they come in
    self.results_callback = ResultCallback()

    # create inventory and pass to var manager
    self.inventory = Inventory(
        loader=self.loader,
        variable_manager=self.variable_manager,
        host_list=env.hosts,
    )
    self.variable_manager.set_inventory(self.inventory)
def setup_nginx_init1(*args, **kwargs):
    """Install an init configuration for nginx and (re)start it.

    On systemd hosts this only ensures ``sites-enabled`` is included in
    ``nginx.conf``; on upstart hosts it uploads a job file from a template.

    :keyword nginx-init-name: upstart job filename (default ``nginx.conf``)
    :keyword nginx-init-dir: upstart job directory (default ``/etc/init``)
    :keyword nginx-upstart-filename: local template to upload
    :keyword nginx-init-context: extra template context overrides
    :return: service status output, or a short message when nothing changed
    """
    if exists("/run/systemd/system"):
        already_included = not run(
            "grep -qF sites-enabled /etc/nginx/nginx.conf", warn_only=True
        ).failed
        if already_included:
            return "nginx already configured for sites-enabled"
        sudo("mkdir -p /etc/nginx/sites-enabled")
        # splice the include in just before the last line of nginx.conf
        sudo(
            "sed -i '$i\ \ \ include /etc/nginx/sites-enabled/*;' /etc/nginx/nginx.conf",
            shell_escape=True,
        )
        sudo("systemctl stop nginx", warn_only=True, quiet=True)
        return sudo("systemctl start nginx")

    default_conf = {
        "AUTHOR": __author__,
        "DESCRIPTION": "nginx http daemon",
        "DAEMON": "/usr/sbin/nginx",
        "PID": "/var/run/nginx.pid",
    }
    init_name = kwargs.get("nginx-init-name", "nginx.conf")
    init_dir = kwargs.get("nginx-init-dir", "/etc/init")
    template_path = kwargs.get(
        "nginx-upstart-filename",
        resource_filename("offregister_nginx", path.join("conf", "nginx.upstart.conf")),
    )
    service = init_name.partition(".")[0]
    upload_template(
        template_path,
        "{init_dir}/{init_name}".format(init_dir=init_dir, init_name=init_name),
        context=update_d(default_conf, kwargs.get("nginx-init-context")),
        use_sudo=True,
    )
    status_cmd = "status {service}".format(service=service)
    running = "start/running" in run(status_cmd)
    action = "reload {service}" if running else "start {service}"
    sudo(action.format(service=service))
    return run(status_cmd)
def prepare_cluster_obj(self, cluster, res):
    """Build a ``PreparedClusterObj`` for an inline-source Ansible play.

    :param cluster: cluster spec dict; recognised keys: ``args``, ``kwargs``,
        ``cluster_name``, ``module`` and ``source`` (the inline play source)
    :param res: result accumulator, passed through unchanged
    :return: ``PreparedClusterObj(cluster_path, cluster_type, cluster_args,
        cluster_kwargs, res, tag)``
    :raises NotImplementedError: when no inline play source was supplied
    """
    cluster_args = cluster["args"] if "args" in cluster else tuple()
    cluster_kwargs = update_d(
        {
            "domain": self.dns_name,
            "node_name": self.node_name,
            "public_ipv4": self.node.public_ips[-1],
            "cache": {},
            "cluster_name": cluster.get("cluster_name"),
        },
        cluster["kwargs"] if "kwargs" in cluster else {},
    )
    if "source" in cluster:
        cluster_kwargs.update(play_source=cluster["source"])
    cluster_type = cluster["module"].replace("-", "_")
    cluster_path = "/".join(
        _f for _f in (cluster_type, cluster_kwargs["cluster_name"]) if _f)
    cluster_kwargs.update(cluster_path=cluster_path)
    if "cache" not in cluster_kwargs:
        cluster_kwargs["cache"] = {}
    # `module:tag` syntax, e.g. `some_module:master`
    if ":" in cluster_type:
        cluster_type, _, tag = cluster_type.rpartition(":")
        del _
    else:
        tag = None
    cluster_kwargs.update(tag=tag)
    # create play with tasks
    if "play_source" not in cluster_kwargs:
        raise NotImplementedError(
            "Ansible from module/playbook not implemented yet. Inline your source."
        )
    cluster_kwargs["play_source"].update(hosts=self.env_namedtuple.hosts)
    extra_vars = {
        "ansible_host": self.env_namedtuple.host,
        "ansible_ssh_host": self.env_namedtuple.host,
        "ansible_user": self.env_namedtuple.user,
        "ansible_ssh_user": self.env_namedtuple.user,
        "ansible_port": self.env_namedtuple.port,
        "ansible_ssh_port": self.env_namedtuple.port,
    }
    if self.env.password is not None:
        extra_vars.update(
            {"ansible_ssh_pass": self.env_namedtuple.password})
    # BUGFIX: was `is None`, which forwarded the private key file only
    # when there was none to forward (mirror the password branch above)
    if self.env.key_filename is not None:
        extra_vars.update({
            "ansible_ssh_private_key_file": self.env_namedtuple.key_filename
        })
    if (self.env.ssh_config is not None
            and "StrictHostKeyChecking" in self.env.ssh_config):
        host_key_checking = (
            self.env_namedtuple.ssh_config["StrictHostKeyChecking"] == "yes")
        extra_vars.update({
            "ansible_ssh_extra_args": "-o " + " -o ".join(
                '{k}="{v}"'.format(k=k, v=v)
                for k, v in iteritems(self.env.ssh_config)
                if k not in frozenset(("Port", "User", "Host"))),
            "ansible_host_key_checking": host_key_checking,
        })
    self.variable_manager.extra_vars = extra_vars
    cluster_kwargs["play"] = Play().load(
        cluster_kwargs["play_source"],
        variable_manager=self.variable_manager,
        loader=self.loader,
    )
    return PreparedClusterObj(
        cluster_path=cluster_path,
        cluster_type=cluster_type,
        cluster_args=cluster_args,
        cluster_kwargs=cluster_kwargs,
        res=res,
        tag=tag,
    )
def setup_git_pull_upstart2(*args, **kwargs):
    """Install an upstart job that force-syncs a git checkout via inotify.

    :keyword git_pull-init-context: template context; must provide
        ``GIT_DIR`` and ``GIT_REPO`` (validated below)
    :keyword git_pull-init-name: upstart job filename (default ``git_pull.conf``)
    :keyword git_pull-init-dir: upstart job directory (default ``/etc/init``)
    :keyword git_pull-upstart-filename: local template to upload
    :keyword hookserve-init-name: used to derive the hookserve log filename
    :return: the service status output
    :raises NotImplementedError: on systemd hosts
    """
    if exists("/run/systemd/system"):
        raise NotImplementedError("SystemD not implemented yet")
    apt_depends("inotify-tools")

    # log file is named after the *hookserve* service, not this job
    hookserve_service = kwargs.get(
        "hookserve-init-name", "hookserve.conf"
    ).partition(path.extsep)[0]
    default_conf = {
        "AUTHOR": __author__,
        "DESCRIPTION": "git pull (force update to whatever is on git repo)",
        "GIT_BRANCH": "master",
        "GIT_REMOTE": "origin",
        "HOOKSERVE_LOGFILE": "/var/log/upstart/{service}.log".format(
            service=hookserve_service
        ),
    }

    # SERVER_LOCATION doubles as the checkout directory when supplied
    if "SERVER_LOCATION" in kwargs.get("git_pull-init-context", {}):
        kwargs["git_pull-init-context"]["GIT_DIR"] = kwargs[
            "git_pull-init-context"]["SERVER_LOCATION"]

    required = (
        ("GIT_DIR", "/var/www/somegitrepo"),
        ("GIT_REPO", "https://github.com/foo/bar.git"),
    )
    validate_conf(
        kwargs.get("git_pull-init-context", {}),
        required,
        logger=logger,
        name="git_pull-init-context",
    )

    if not exists(kwargs["git_pull-init-context"]["GIT_DIR"]):
        sudo(
            "git clone {git_repo} {to_dir}".format(
                git_repo=kwargs["git_pull-init-context"]["GIT_REPO"],
                to_dir=kwargs["git_pull-init-context"]["GIT_DIR"],
            )
        )

    job_name = kwargs.get("git_pull-init-name", "git_pull.conf")
    job_dir = kwargs.get("git_pull-init-dir", "/etc/init")
    template_filename = kwargs.get(
        "git_pull-upstart-filename",
        resource_filename(
            "offregister_githook", path.join("conf", "git_pull.upstart.job.conf")
        ),
    )
    service = job_name.partition(".")[0]
    upload_template(
        template_filename,
        "{init_dir}/{init_name}".format(init_dir=job_dir, init_name=job_name),
        context=update_d(default_conf, kwargs.get("git_pull-init-context", {})),
        use_sudo=True,
    )

    # restart cycle: stop if running, then start and log the result
    status_cmd = "status {service}".format(service=service)
    if "start/running" in run(status_cmd):
        sudo("stop {service}".format(service=service))
    logger.info(sudo("start {service}".format(service=service)))
    return run(status_cmd)
def setup_hookserve1(*args, **kwargs):
    """Install and start the ``hookserve`` git-hook server.

    Chooses upstart (< Ubuntu 15.04) or systemd (>= 15.04) service files.

    :keyword hookserve-init-context: template context overrides
    :keyword hookserve-init-name: base name for the init/unit file
    :keyword hookserve-init-dir: upstart job directory (default ``/etc/init``)
    :keyword hookserve-upstart-filename: local template to upload
    :return: the service status output
    """
    os_version = ubuntu_version()
    default_conf = {
        "AUTHOR": __author__,
        "DESCRIPTION": "hookserve git hook server and git pull task",
        "DAEMON": "/usr/local/bin/hookserve",
        "DAEMON_ARGS": "--port={DAEMON_PORT:d} echo",
        "DAEMON_PORT": 8888,
        "PID": "/var/run/hookserve.pid",
    }
    context = update_d(default_conf, kwargs.get("hookserve-init-context", {}))
    context["DAEMON_ARGS"] = context["DAEMON_ARGS"].format(
        DAEMON_PORT=context["DAEMON_PORT"]
    )
    if os_version < 15.04:
        # --- upstart ---
        init_dir = kwargs.get("hookserve-init-dir", "/etc/init")
        # BUGFIX: upstart jobs in /etc/init must end in `.conf`;
        # the old code appended `.service` to a custom init name
        init_name = (
            "{}.conf".format(kwargs["hookserve-init-name"])
            if "hookserve-init-name" in kwargs
            else "hookserve.conf"
        )
        init_local_filename = kwargs.get(
            "hookserve-upstart-filename",
            resource_filename(
                "offregister_githook", path.join("conf", "hookserve.upstart.conf")
            ),
        )
        service = init_name.partition(".")[0]
        upload_template(
            init_local_filename,
            "{init_dir}/{init_name}".format(init_dir=init_dir, init_name=init_name),
            context=context,
            use_sudo=True,
        )
        status_cmd = "status {service}".format(service=service)
        # `status` exits non-zero for unknown jobs; don't abort on that
        if "start/running" in run(status_cmd, warn_only=True):
            sudo("stop {service}".format(service=service))
        sudo("start {service}".format(service=service))
        return run(status_cmd)
    else:
        # --- systemd ---
        # BUGFIX: systemd only loads units with a recognised suffix;
        # `.unit` is not one — use `.service`
        unit_name = (
            "{}.service".format(kwargs["hookserve-init-name"])
            if "hookserve-init-name" in kwargs
            else "hookserve.service"
        )
        unit_local_filename = kwargs.get(
            "hookserve-upstart-filename",
            resource_filename(
                "offregister_githook", path.join("conf", "hookserve.systemd.unit")
            ),
        )
        service = unit_name.partition(".")[0]
        unit_file = "/etc/systemd/system/{init_name}".format(init_name=unit_name)
        upload_template(unit_local_filename, unit_file, context=context, use_sudo=True)
        sudo("chmod 664 {unit_file}".format(unit_file=unit_file))
        sudo("systemctl daemon-reload")
        status_cmd = "systemctl status {service}".format(service=service)
        # BUGFIX: systemctl reports "active (running)" — the old check for
        # upstart's "start/running" could never match; systemctl status also
        # exits non-zero when the unit is inactive, hence warn_only
        if "active (running)" in run(status_cmd, warn_only=True):
            sudo("systemctl stop {service}".format(service=service))
        sudo("systemctl start {service}".format(service=service))
        return run(status_cmd, warn_only=True)
def setup_nginx_conf2(*args, **kwargs):
    """Upload an nginx server-block config and reload/start nginx (upstart).

    :keyword nginx-init-name: upstart job filename (default ``nginx.conf``)
    :keyword nginx-init-dir: upstart job directory (default ``/etc/init``)
    :keyword nginx-conf-file: local config template to upload
    :keyword nginx-conf-filename: remote filename under sites-enabled
    :keyword nginx-init-context: base template context overrides
    :keyword nginx-conf-context: context overrides; may carry ``LISTEN_PORT``
    :return: the service status output
    :raises NotImplementedError: on systemd hosts, or when the init conf
        is missing from the expected location
    :raises EnvironmentError: when a unix-socket upstream doesn't exist yet
    """
    if exists("/run/systemd/system"):
        raise NotImplementedError("SystemD not implemented yet")
    init_name = kwargs.get("nginx-init-name", "nginx.conf")
    init_dir = kwargs.get("nginx-init-dir", "/etc/init")
    init_filename = "{init_dir}/{init_name}".format(init_dir=init_dir,
                                                    init_name=init_name)
    service = init_name.partition(".")[0]
    status_cmd = "status {service}".format(service=service)
    conf_local_filepath = kwargs.get(
        "nginx-conf-file",
        resource_filename("offregister_nginx",
                          path.join("conf", "nginx.proxy_pass.conf")),
    )
    conf_remote_filename = "/etc/nginx/sites-enabled/{}".format(
        kwargs.get("nginx-conf-filename", path.basename(conf_local_filepath)))
    conf = update_d(
        {
            "SERVER_NAME": "forum.*",
            "ROUTE_BLOCK": "location / { try_files $uri @proxy_to_app; }",
            "exit": "forum_app_server",
            "LISTEN": "",
        },
        kwargs.get("nginx-init-context"),
        **kwargs.get("nginx-conf-context", {}))
    required = (
        ("SERVER_LOCATION", "unix:/edx/var/forum/forum.sock"),
        ("SERVER_NAME", "forum.*"),
    )
    validate_conf(conf, required, logger)
    if "LISTEN_PORT" in kwargs.get("nginx-conf-context", {}):
        # BUGFIX: `!d` is not a valid conversion (only !r/!s/!a exist) and
        # always raised ValueError; `:d` is the intended format spec
        conf["LISTEN"] = "listen {LISTEN_PORT:d};".format(
            LISTEN_PORT=int(conf["LISTEN_PORT"]))
    if conf["SERVER_LOCATION"].startswith("unix"):
        # BUGFIX: strip the `unix:` scheme prefix before testing the path;
        # `exists("unix:/...")` can never succeed
        sock_path = conf["SERVER_LOCATION"].partition(":")[2]
        if not exists(sock_path):
            raise EnvironmentError("{server_location} doesn't exist".format(
                server_location=conf["SERVER_LOCATION"]))
    # http/https is okay, those can be started asynchronously
    if exists(init_filename):
        sio = StringIO()
        get(init_filename, sio)
        sio.seek(0)  # BUGFIX: `get` leaves the cursor at EOF; rewind first
        offset = 0
        for l in sio:
            if "include /etc/nginx/sites-enabled/*;" in l:
                if "#" in l:
                    # uncomment in place; '#' -> ' ' keeps line length stable
                    sio.seek(offset)
                    sio.write(l.replace("#", " "))
                    sio.seek(0)
                    put(sio, init_filename, use_sudo=True)
                break
            offset = sio.tell()
    else:
        logger.warn("{init_name} not found in {init_dir}".format(
            init_name=init_name, init_dir=init_dir))
        raise NotImplementedError("init conf in a weird place; erring")
    upload_template(conf_local_filepath, conf_remote_filename,
                    context=conf, use_sudo=True)
    if "start/running" in run(status_cmd):
        sudo("reload {service}".format(service=service))
    else:
        sudo("start {service}".format(service=service))
    return run(status_cmd)
def add_to_cluster(self, cluster, res):
    """
    Specification:
      0. Search and handle `master` tag in `cluster_name`
      1. Imports `cluster_name`, seeks and sets (`install` xor `setup`) and (serve` or `start`) callables
      2. Installs `cluster_name`
      3. Serves `cluster_name`

    :param cluster: cluster spec dict (keys: `module`, optionally `args`,
        `kwargs`, `cluster_name`, `run_cmds`)
    :param res: mutable result dict, keyed by this node's dns_name
    """
    args = cluster['args'] if 'args' in cluster else tuple()
    # defaults first; caller-supplied `kwargs` win via update_d
    kwargs = update_d({
        'domain': self.dns_name,
        'node_name': self.node_name,
        'public_ipv4': self.node.public_ips[-1],
        'cache': {},
        'cluster_name': cluster.get('cluster_name')
    }, cluster['kwargs'] if 'kwargs' in cluster else {})
    cluster_type = cluster['module'].replace('-', '_')
    # drop a missing/empty cluster_name component from the path
    cluster_path = '/'.join(ifilter(None, (cluster_type, kwargs['cluster_name'])))
    kwargs.update(cluster_path=cluster_path)
    # `module:tag` syntax, e.g. `some_module:master`
    if ':' in cluster_type:
        cluster_type, _, tag = cluster_type.rpartition(':')
        del _
    else:
        tag = None
    kwargs.update(tag=tag)
    if tag == 'master':
        kwargs.update(master=True)
    if hasattr(self.node, 'private_ips') and len(self.node.private_ips):
        kwargs.update(private_ipv4=self.node.private_ips[-1])
    guessed_os = self.guess_os()
    # import `cluster_type`
    try:
        # grab the submodule named after the guessed OS, e.g. pkg.ubuntu
        # (level=-1: Python 2 implicit-relative import semantics)
        setattr(self, 'fab',
                getattr(__import__(cluster_type, globals(), locals(), [guessed_os], -1),
                        guessed_os))
    except AttributeError as e:
        # `e.message` is Python-2-only; re-raise anything other than the
        # expected missing-attribute error
        if e.message != "'module' object has no attribute '{os}'".format(os=guessed_os):
            raise
        raise ImportError('Cannot `import {os} from {cluster_type}`'.format(
            os=guessed_os, cluster_type=cluster_type))
    fab_dir = dir(self.fab)
    # Sort functions like so: `step0`, `step1`
    # (key extracts the trailing digit-run; names without one sort as -1)
    func_names = sorted(
        (j for j in fab_dir if not j.startswith('_') and str.isdigit(j[-1])),
        key=lambda s: int(''.join(takewhile(str.isdigit, s[::-1]))[::-1] or -1)
    )
    if 'run_cmds' in cluster:
        # comparison operators selectable from the spec
        mapping = {'>=': operator.ge, '<': operator.lt,
                   '>': operator.gt, '<=': operator.le}  # TODO: There must be a full list somewhere!

        def dict_type(run_cmds, func_names):
            # keep only steps whose trailing number satisfies `op val`
            op = mapping[run_cmds['op']]
            return [func_name for func_name in func_names
                    if op(int(''.join(takewhile(str.isdigit, func_name[::-1]))[::-1]),
                          int(run_cmds['val']))]

        run_cmds_type = type(cluster['run_cmds'])
        if 'exclude' in cluster['run_cmds']:
            func_names = tuple(ifilter(lambda func: func not in cluster['run_cmds']['exclude'],
                                       func_names))
        func_names = dict_type(cluster['run_cmds'], func_names)
        # commented-out dispatch-by-type alternative, kept for reference:
        '''{ DictType: dict_type(cluster['run_cmds'], func_names) }.get(run_cmds_type, raise_f(NotImplementedError, '{!s} unexpected for run_cmds'.format(run_cmds_type)))'''
    if not func_names:
        # legacy fallback: modules without numbered steps expose
        # install/setup + serve/start
        try:
            get_attr = lambda a, b: a if hasattr(self.fab, a) else b if hasattr(self.fab, b) else raise_f(
                AttributeError, '`{a}` nor `{b}`'.format(a=a, b=b))
            func_names = (
                get_attr('install', 'setup'),
                get_attr('serve', 'start')
            )
        except AttributeError as e:
            logger.error('{e} found in {cluster_type}'.format(e=e, cluster_type=cluster_type))
            raise AttributeError(
                'Function names in {cluster_type} must end in a number'.format(cluster_type=cluster_type)
            )  # 'must'!
        logger.warn('Deprecation: Function names in {cluster_type} should end in a number'.format(
            cluster_type=cluster_type)
        )
    self.handle_deprecations(func_names)
    for idx, step in enumerate(func_names):
        # run this step on the remote host; execute() returns {host: result}
        exec_output = execute(getattr(self.fab, step), *args, **kwargs)[self.dns_name]
        if idx == 0:
            res[self.dns_name] = {cluster_path: {step: exec_output}}
            if tag == 'master':
                save_node_info('master', [self.node_name], folder=cluster_type, marshall=json)
        else:
            res[self.dns_name][cluster_path][step] = exec_output
    save_node_info(self.node_name, node_to_dict(self.node), folder=cluster_path, marshall=json)
def prepare_cluster_obj(self, cluster, res):
    """Resolve a cluster spec into a ``PreparedClusterObj``.

    Imports the cluster module's OS-specific submodule into ``self.fab``,
    determines the ordered step functions to run, and packages everything
    the runner needs.

    :param cluster: cluster spec dict (keys: `module`, optionally `args`,
        `kwargs`, `cluster_name`, and filtering options for `filter_funcs`)
    :param res: result accumulator, passed through unchanged
    :return: PreparedClusterObj with path, type, args, kwargs, res and tag
    """
    cluster_args = cluster["args"] if "args" in cluster else tuple()
    # defaults first; caller-supplied `kwargs` win via update_d
    cluster_kwargs = update_d(
        {
            "domain": self.dns_name,
            "node_name": self.node_name,
            "public_ipv4": self.node.public_ips[-1],
            "cache": {},
            "cluster_name": cluster.get("cluster_name"),
        },
        cluster["kwargs"] if "kwargs" in cluster else {},
    )
    cluster_type = cluster["module"].replace("-", "_")
    # drop a missing/empty cluster_name component from the path
    cluster_path = "/".join(
        _f for _f in (cluster_type, cluster_kwargs["cluster_name"]) if _f)
    cluster_kwargs.update(cluster_path=cluster_path)
    if "cache" not in cluster_kwargs:
        cluster_kwargs["cache"] = {}
    # `module:tag` syntax, e.g. `some_module:master`
    if ":" in cluster_type:
        cluster_type, _, tag = cluster_type.rpartition(":")
        del _
    else:
        tag = None
    cluster_kwargs.update(tag=tag)
    if tag == "master":
        cluster_kwargs.update(master=True)
    if hasattr(self.node, "private_ips") and len(self.node.private_ips):
        cluster_kwargs.update(private_ipv4=self.node.private_ips[-1])
    guessed_os = guess_os(node=self.node)
    # import `cluster_type`
    try:
        # grab the submodule named after the guessed OS, e.g. pkg.ubuntu
        setattr(
            self,
            "fab",
            getattr(
                __import__(cluster_type, globals(), locals(), [guessed_os]),
                guessed_os,
            ),
        )
    except AttributeError as e:
        # `e.message` is Python-2-only; re-raise anything other than the
        # expected missing-attribute error
        if e.message != "'module' object has no attribute '{os}'".format(
                os=guessed_os):
            raise
        raise ImportError(
            "Cannot `import {os} from {cluster_type}`".format(
                os=guessed_os, cluster_type=cluster_type))
    fab_dir = dir(self.fab)
    # Sort functions like so: `step0`, `step1`
    func_names = get_sorted_strnum(fab_dir)
    func_names = self.filter_funcs(cluster, func_names)
    if not func_names:
        # legacy fallback: modules without numbered steps expose
        # install/setup + serve/start
        try:
            get_attr = (
                lambda a, b: a if hasattr(self.fab, a) else b
                if hasattr(self.fab, b) else raise_f(
                    AttributeError, "`{a}` nor `{b}`".format(a=a, b=b)))
            func_names = (get_attr("install", "setup"),
                          get_attr("serve", "start"))
        except AttributeError as e:
            root_logger.error("{e} found in {cluster_type}".format(
                e=e, cluster_type=cluster_type))
            raise AttributeError(
                "Function names in {cluster_type} must end in a number".
                format(cluster_type=cluster_type))  # 'must'!
        root_logger.warn(
            "Deprecation: Function names in {cluster_type} should end in a number"
            .format(cluster_type=cluster_type))
    self.handle_deprecations(func_names)
    self.func_names = func_names
    return PreparedClusterObj(
        cluster_path=cluster_path,
        cluster_type=cluster_type,
        cluster_args=cluster_args,
        cluster_kwargs=cluster_kwargs,
        res=res,
        tag=tag,
    )