def do_deploy_async(self, values, sender=None): log.debug9("TRACE: do_deploy_async") # Run whatever series of actions are needed to deploy # this role in a meaningful way. # # Return a dictionary with the following structure: # target = {'Role': 'emptyrole', # 'Instance': self.get_name(), # 'Description': Empty Example Role - %s" % # self.get_name(), # 'Wants': ['wanted.service'], # 'Requires': ['required.service'] # 'After': ['syslog.target', 'network.target']} # the 'Wants', 'Requires', etc. are systemd unit file directives # See http://www.freedesktop.org/software/systemd/man/systemd.unit.html#%5BUnit%5D%20Section%20Options # # In case of error raise an appropriate RolekitError exception # If you need to call out to a long-running routine, use an asynchronous function # and yield until it returns. # Example: # result = yield async.subprocess_future(forked_process_args) # We're done! # Since this is an asynchronous function, we need to 'yield' the final # result. #yield target # Remove this line for real roles raise NotImplementedError()
def do_deploy_async(self, values, sender=None): log.debug9("TRACE: do_deploy_async") # Run whatever series of actions are needed to deploy # this role in a meaningful way. # # Return a dictionary with the following structure: # target = {'Role': 'emptyrole', # 'Instance': self.get_name(), # 'Description': Empty Example Role - %s" % # self.get_name(), # 'Wants': ['wanted.service'], # 'Requires': ['required.service'] # 'After': ['syslog.target', 'network.target']} # the 'Wants', 'Requires', etc. are systemd unit file directives # See http://www.freedesktop.org/software/systemd/man/systemd.unit.html#%5BUnit%5D%20Section%20Options # # In case of error raise an appropriate RolekitError exception # If you need to call out to a long-running routine, use an asynchronous function # and yield until it returns. # Example: # result = yield async.subprocess_future(forked_process_args) # We're done! # Since this is an asynchronous function, we need to 'yield' the final # result. #yield target # Remove this line for real roles raise NotImplementedError()
def create_target(self, target):
    """Write the systemd target unit for *target* and reload systemd.

    Records the unit name in self._settings['target_unit'] so that
    cleanup can find it later, writes the unit file to disk, then asks
    systemd to re-read its configuration.
    """
    unit_name = target.get_unit_name()
    self._settings['target_unit'] = unit_name

    log.debug9("Creating target unit %s" % unit_name)
    target.write()

    # systemd must re-scan its unit directories before the freshly
    # written unit becomes usable.
    log.debug9("Reloading systemd units\n")
    with SystemdJobHandler() as handler:
        handler.manager.Reload()
def create_target(self, target):
    """Create the systemd target unit for this role instance.

    :param target: unit object exposing ``get_unit_name()`` and
        ``write()`` (presumably a RoleDeploymentValues-backed unit —
        TODO confirm against callers).
    """
    # Remember the unit name so cleanup_targets() can remove it later
    self._settings['target_unit'] = target.get_unit_name()
    log.debug9("Creating target unit %s" % self._settings['target_unit'])
    target.write()
    # Tell systemd to reload the daemon configuration so the new
    # unit file is picked up
    log.debug9("Reloading systemd units\n")
    with SystemdJobHandler() as job_handler:
        job_handler.manager.Reload()
def create_target(self, target):
    """Write all systemd units for a dict-style *target* description.

    Creates the role target unit, a failure-notification unit, and one
    extension unit per dependency listed under the SYSTEMD_DEPS keys of
    *target*, then reloads the systemd daemon configuration.
    """
    role = target['Role']
    instance = target['Instance']

    target['targetname'] = 'role-%s-%s.target' % (role, instance)
    log.debug9("Creating target file {0}".format(target['targetname']))

    target['failurename'] = 'role-fail-%s-%s.service' % (role, instance)
    log.debug9("Creating failure file {0}".format(target['failurename']))

    # Target unit first, then the failure-notification unit
    target['extensions'] = SystemdTargetUnit(target).write()
    SystemdFailureUnit(target).write()

    # One extension unit for each required component
    ext_units = SystemdExtensionUnits(target)
    for dep in SYSTEMD_DEPS:
        if dep not in target:
            continue
        for unit in target[dep]:
            log.debug9("Creating extension unit for {0}".format(unit))
            ext_units.write(unit)

    # Tell systemd to reload the daemon configuration
    log.debug9("Reloading systemd units\n")
    with SystemdJobHandler() as job_handler:
        job_handler.manager.Reload()
def __redeploy_async(self, values, sender): values = dbus_to_python(values) # Make sure we are in the proper state self.assert_state(READY_TO_START, ERROR) # Log log.debug1("%s.redeploy(%s)", self._log_prefix, values) # Check values try: self.check_values(values) except Exception as e: # checking of values failed, set state to error self.change_state(ERROR, error=str(e), write=True) raise try: # Change to redeploying state self.change_state(REDEPLOYING) # Uninstall firewall self.uninstallFirewall() # Copy _DEFAULTS to self._settings self.copy_defaults() # Install package groups and packages log.debug9("TRACE: Installing packages") yield async .call_future(self.installPackages()) # Install firewall self.installFirewall() # Call do_redeploy_async log.debug9("TRACE: Performing role-specific redeployment") yield async .call_future(self.do_redeploy_async(values, sender)) # Continue only after successful deployment: # Apply values to self._settings log.debug9( "TRACE: role-specific redeployment complete, applying values") self.apply_values(values) # Change to ready to start state self.change_state(READY_TO_START, write=True) # Attempt to start the newly-deployed role # We do this because many role-installers will conclude by # starting anyway and we want to ensure that our role mechanism # is in sync with them. log.debug9("TRACE: Starting %s" % self.get_name()) yield async .call_future(self.__start_async(sender)) except Exception as e: # Something failed, set state to error self.change_state(ERROR, error=str(e), write=True) raise
def __redeploy_async(self, values, sender): values = dbus_to_python(values) # Make sure we are in the proper state self.assert_state(READY_TO_START, ERROR) # Log log.debug1("%s.redeploy(%s)", self._log_prefix, values) # Check values try: self.check_values(values) except Exception as e: # checking of values failed, set state to error self.change_state(ERROR, error=str(e), write=True) raise try: # Change to redeploying state self.change_state(REDEPLOYING) # Uninstall firewall self.uninstallFirewall() # Copy _DEFAULTS to self._settings self.copy_defaults() # Install package groups and packages log.debug9("TRACE: Installing packages") yield async.call_future(self.installPackages()) # Install firewall self.installFirewall() # Call do_redeploy_async log.debug9("TRACE: Performing role-specific redeployment") yield async.call_future(self.do_redeploy_async(values, sender)) # Continue only after successful deployment: # Apply values to self._settings log.debug9("TRACE: role-specific redeployment complete, applying values") self.apply_values(values) # Change to ready to start state self.change_state(READY_TO_START, write=True) # Attempt to start the newly-deployed role # We do this because many role-installers will conclude by # starting anyway and we want to ensure that our role mechanism # is in sync with them. log.debug9("TRACE: Starting %s" % self.get_name()) yield async.call_future(self.__start_async(sender)) except Exception as e: # Something failed, set state to error self.change_state(ERROR, error=str(e), write=True) raise
def cleanup_targets(self):
    """Delete the target unit created for this instance, if one exists,
    then reload the systemd daemon configuration.

    Removal failures are logged as warnings rather than raised — this
    is best-effort cleanup.
    """
    # Nothing to do when no target unit was ever recorded
    if "target_unit" not in self._settings:
        return

    unit = self._settings['target_unit']
    unit_path = os.path.join(SYSTEMD_UNITS, unit)

    try:
        os.unlink(unit_path)
    except Exception as e:
        log.warning("Couldn't remove unit '{}': {!s}\n".format(unit, e))
    else:
        log.debug9("Removed unit '{}'\n".format(unit))

    # tell systemd about it
    with SystemdJobHandler() as handler:
        handler.manager.Reload()
def cleanup_targets(self):
    """Remove this instance's systemd target unit (best effort).

    No-op when self._settings has no 'target_unit' entry; otherwise
    unlinks the unit file from SYSTEMD_UNITS (logging, not raising, on
    failure) and reloads systemd's configuration.
    """
    # remove created target units
    if "target_unit" not in self._settings:
        return

    try:
        os.unlink(
            os.path.join(SYSTEMD_UNITS, self._settings['target_unit']))
    except Exception as e:
        # Best-effort: log and carry on so the reload still happens
        log.warning("Couldn't remove unit '{}': {!s}\n".format(
            self._settings['target_unit'], e))
    else:
        log.debug9("Removed unit '{}'\n".format(
            self._settings['target_unit']))

    # tell systemd about it
    with SystemdJobHandler() as job_handler:
        job_handler.manager.Reload()
def stop_services_async(self):
    """Stop this instance's systemd target unit, then disable it
    (coroutine).

    :raises RolekitError: COMMAND_FAILED when any stop job ends in a
        state other than "skipped" or "done".
    """
    log.debug1("%s.stop_services_async()", self._log_prefix)

    with SystemdJobHandler() as handler:
        unit = get_target_unit_name(self.get_type(), self.get_name())

        log.debug9("Stopping %s" % unit)
        handler.register_job(handler.manager.StopUnit(unit, "replace"))

        results = yield handler.all_jobs_done_future()

        bad_states = [state for state in results.values()
                      if state not in ("skipped", "done")]
        if bad_states:
            details = ", ".join("%s: %s" % item for item in results.items())
            raise RolekitError(COMMAND_FAILED,
                               "Stopping services failed: %s" % details)

        # Only disable once every stop job has completed cleanly
        log.debug9("Disabling %s" % unit)
        disable_units([unit])
def stop_services_async(self):
    """stop_services_async

    Disable and then stop this instance's systemd target unit
    (coroutine).  Raises RolekitError(COMMAND_FAILED) when any stop
    job finishes in a state other than "skipped" or "done".

    NOTE(review): this variant disables the unit *before* issuing the
    stop job (the sibling variant stops first and disables only after
    success) — confirm which ordering is intended.
    """
    log.debug1("%s.stop_services_async()", self._log_prefix)

    with SystemdJobHandler() as job_handler:
        target_unit = get_target_unit_name(self.get_type(), self.get_name())

        log.debug9("Disabling %s" % target_unit)
        disable_units([target_unit])

        log.debug9("Stopping %s" % target_unit)
        job_path = job_handler.manager.StopUnit(target_unit, "replace")
        job_handler.register_job(job_path)

        job_results = yield job_handler.all_jobs_done_future()

        # Any result other than "skipped"/"done" means the stop failed
        if any(
                [x for x in job_results.values()
                 if x not in ("skipped", "done")]):
            details = ", ".join(
                ["%s: %s" % item for item in job_results.items()])
            raise RolekitError(COMMAND_FAILED,
                               "Stopping services failed: %s" % details)
def do_deploy_async(self, values, sender=None): log.debug9("TRACE: do_deploy_async") # Do the magic # # In case of error raise an exception # Get the domain name from the passed-in settings # or set it to the instance name if ommitted if 'domain_name' not in values: values['domain_name'] = self.get_name() if not self._valid_fqdn(values['domain_name']): raise RolekitError( INVALID_VALUE, "Invalid domain name: %s" % values['domain_name']) if "host_name" not in values: # Let's construct a new host name. host_part = self._get_hostname() if host_part.startswith("localhost"): # We'll assign a random hostname starting with "dc-" random_part = ''.join( random.choice(string.ascii_lowercase) for _ in range(16)) host_part = "dc-%s" % random_part values['host_name'] = "%s.%s" % (host_part, values['domain_name']) if not self._valid_fqdn(values['host_name']): raise RolekitError(INVALID_VALUE, "Invalid host name: %s" % values['host_name']) # Change the hostname with the hostnamectl API yield set_hostname(values['host_name']) # If left unspecified, default the realm to the # upper-case version of the domain name if 'realm_name' not in values: values['realm_name'] = values['domain_name'].upper() # If left unspecified, assign a random password for # the administrative user if 'admin_password' not in values: admin_pw_provided = False values['admin_password'] = generate_password() else: admin_pw_provided = True # If left unspecified, assign a random password for # the directory manager if 'dm_password' not in values: dm_pw_provided = False values['dm_password'] = generate_password() else: dm_pw_provided = True # Call ipa-server-install with the requested arguments ipa_install_args = [ 'ipa-server-install', '-U', '-r', values['realm_name'], '-d', values['domain_name'], '-p', values['dm_password'], '-a', values['admin_password'], ] # If the user has requested the DNS server, enable it if 'serve_dns' not in values: values['serve_dns'] = self._settings['serve_dns'] if values['serve_dns']: 
ipa_install_args.append('--setup-dns') # Pass the primary IP address if 'primary_ip' in values: ipa_install_args.append('--ip-address=%s' % values['primary_ip']) # if the user has requested DNS forwarders, add them if 'dns_forwarders' in values: [ ipa_install_args.append("--forwarder=%s" % x) for x in values['dns_forwarders']['ipv4'] ] [ ipa_install_args.append("--forwarder=%s" % x) for x in values['dns_forwarders']['ipv6'] ] else: ipa_install_args.append('--no-forwarders') # If the user has requested the reverse zone add it if 'reverse_zone' in values: for zone in values['reverse_zone']: ipa_install_args.append('--reverse-zone=%s' % zone) else: ipa_install_args.append('--no-reverse') # If the user has requested a specified ID range, # set up the argument to ipa-server-install if 'id_start' in values or 'id_max' in values: if ('id_start' not in values or 'id_max' not in values or not values['id_start'] or not values['id_max']): raise RolekitError( INVALID_VALUE, "Must specify id_start and id_max together") if (values['id_start'] and values['id_max'] <= values['id_start']): raise RolekitError(INVALID_VALUE, "id_max must be greater than id_start") ipa_install_args.append('--idstart=%d' % values['id_start']) ipa_install_args.append('--idmax=%d' % values['id_max']) # TODO: If the user has specified a root CA file, # set up the argument to ipa-server-install # Remove the passwords from the values so # they won't be saved to the settings if admin_pw_provided: values.pop('admin_password', None) if dm_pw_provided: values.pop('dm_password', None) result = yield async .subprocess_future(ipa_install_args) if result.status: # If the subprocess returned non-zero, raise an exception raise RolekitError(COMMAND_FAILED, "%d" % result.status) # Create the systemd target definition target = RoleDeploymentValues(self.get_type(), self.get_name(), "Domain Controller") target.add_required_units(['ipa.service']) # We're done! yield target
def deploy_async(self, values, sender=None):
    """Deploy this role instance (coroutine).

    Valid only from the NASCENT state.  Validates *values*, installs
    packages and firewall rules, delegates to the role's
    do_deploy_async(), writes the systemd target units, cleans up any
    deferred (nextboot) deployment artifacts, and finally starts the
    role.  On failure the instance moves to ERROR; if the failure was
    an INVALID_VALUE from do_deploy_async() (or values failed checking
    up front) the instance is removed as well.
    """
    remove_instance = False
    values = dbus_to_python(values)

    # Make sure we are in the proper state
    self.assert_state(NASCENT)

    # Log
    log.debug1("%s.deploy(%s)", self._log_prefix, values)

    # Check values
    try:
        self.check_values(values)
    except Exception as e:
        # Check values failed, remove the instance again if verification
        # failed, set state to error, save it (will be visible in the
        # .old backup file).
        self.change_state(ERROR, error=str(e), write=True)
        # cleanup
        self.__remove_instance()
        raise

    try:
        # Change to deploying state
        self.change_state(DEPLOYING)

        # Copy _DEFAULTS to self._settings
        self.copy_defaults()

        # Install package groups and packages
        log.debug9("TRACE: Installing packages")
        yield async.call_future(self.installPackages())

        # Install firewall
        self.installFirewall()

        # Call do_deploy
        log.debug9("TRACE: Performing role-specific deployment")
        try:
            target = yield async.call_future(
                self.do_deploy_async(values, sender))
        except RolekitError as e:
            if e.code == INVALID_VALUE:
                # If we failed because the input values were incorrect,
                # also remove the instance.
                remove_instance = True
            raise

        # Continue only after successful deployment:
        # Apply values to self._settings
        log.debug9("TRACE: role-specific deployment complete, applying values")
        self.apply_values(values)

        # Set up systemd target files
        log.debug9("TRACE: Creating systemd target files")
        self.create_target(target)

        # Change to ready to start state
        self.change_state(READY_TO_START, write=True)

        # In case this was a nextboot deployment, make sure to remove
        # the deferred role settings and systemd unit
        try:
            # Remove settings
            deferredsettings = "%s/%s/%s.json" % (ETC_ROLEKIT_DEFERREDROLES,
                                                  self.get_type(),
                                                  self.get_name())
            os.unlink(deferredsettings)

            # Remove systemd service unit
            deferredunit = "%s/deferred-role-deployment-%s-%s.service" % (
                SYSTEMD_UNITS, self.get_type(), self.get_name())
            disable_units([deferredunit])
            os.unlink(deferredunit)
        except FileNotFoundError:
            # Files didn't exist; ignore that
            pass
        except PermissionError:
            # SELinux bug?
            # NOTE(review): if os.unlink(deferredsettings) raised,
            # 'deferredunit' is unbound here and this log call itself
            # raises NameError — confirm and guard if so.
            log.fatal("ERROR: permission error attempting to delete %s or %s" % (
                deferredsettings, deferredunit))
            # We'll continue anyway, since the service should be runnable at this point
            # The ConditionPathExists will prevent the service from trying to deploy
            # again

        # Tell systemd to reload the daemon configuration
        log.debug9("Reloading systemd units\n")
        with SystemdJobHandler() as job_handler:
            job_handler.manager.Reload()

        # Start monitoring the role
        self.monitor_unit()

        # Attempt to start the newly-deployed role
        # We do this because many role-installers will conclude by
        # starting anyway and we want to ensure that our role mechanism
        # is in sync with them.
        log.debug9("TRACE: Starting %s" % self.get_name())
        yield async.call_future(self.__start_async(sender))
    except Exception as e:
        # Something failed, set state to error
        self.change_state(ERROR, error=str(e), write=True)
        if remove_instance:
            self.__remove_instance()
        raise
def do_deploy_async(self, values, sender=None):
    """Deploy a Database Server (PostgreSQL) role instance (coroutine).

    For the first databaseserver instance on the system this also
    initializes the database cluster, opens postgresql.conf /
    pg_hba.conf for network access with md5 auth (keeping .rksave
    backups), and restarts postgresql.service.  For every instance it
    creates the database and its owner, setting a password only for a
    newly-created owner.  Yields a RoleDeploymentValues on success.

    :param values: deployment settings (mutated; user-supplied
        passwords are popped so they are never persisted)
    :raises RolekitError: MISSING_ID, INVALID_SETTING or COMMAND_FAILED
    """
    log.debug9("TRACE do_deploy_async(databaseserver)")

    first_instance = True

    # Check whether this is the first instance of the database
    # NOTE(review): 'self.get_state()' here looks like it should be
    # 'value.get_state()' (we are inspecting the *other* instance) —
    # confirm against upstream.
    for value in self._parent.get_instances().values():
        if ('databaseserver' == value.get_type() and
                self.get_name() != value.get_name() and
                self.get_state() in deployed_states):
            first_instance = False
            break

    # If the database name wasn't specified
    if 'database' not in values:
        # Use the instance name if it was manually specified
        if self.get_name()[0].isalpha():
            values['database'] = self.get_name()
        else:
            # Either it was autogenerated or begins with a
            # non-alphabetic character; prefix it with db_
            values['database'] = "db_%s" % self.get_name()

    if 'owner' not in values:
        # We'll default to db_owner
        values['owner'] = "db_owner"

    # We will assume the owner is new until adding them fails
    new_owner = True

    # Determine if a password was passed in, so we know whether to
    # suppress it from the settings list later.
    if 'password' in values:
        password_provided = True
    else:
        password_provided = False

    if 'postgresql_conf' not in values:
        values['postgresql_conf'] = self._settings['postgresql_conf']

    if 'pg_hba_conf' not in values:
        values['pg_hba_conf'] = self._settings['pg_hba_conf']

    # Get the UID and GID of the 'postgres' user
    try:
        self.pg_uid = pwd.getpwnam('postgres').pw_uid
    except KeyError:
        raise RolekitError(MISSING_ID,
                           "Could not retrieve UID for postgres user")

    try:
        self.pg_gid = grp.getgrnam('postgres').gr_gid
    except KeyError:
        raise RolekitError(MISSING_ID,
                           "Could not retrieve GID for postgres group")

    if first_instance:
        # Initialize the database on the filesystem
        initdb_args = ["/usr/bin/postgresql-setup", "--initdb"]

        log.debug2("TRACE: Initializing database")
        result = yield async.subprocess_future(initdb_args)
        if result.status:
            # If this fails, it may be just that the filesystem
            # has already been initialized. We'll log the message
            # and continue.
            log.debug1("INITDB: %s" % result.stdout)

    # Now we have to start the service to set everything else up
    # It's safe to start an already-running service, so we'll
    # just always make this call, particularly in case other instances
    # exist but aren't running.
    log.debug2("TRACE: Starting postgresql.service unit")
    try:
        with SystemdJobHandler() as job_handler:
            job_path = job_handler.manager.StartUnit("postgresql.service",
                                                     "replace")
            job_handler.register_job(job_path)
            log.debug2("TRACE: unit start job registered")

            job_results = yield job_handler.all_jobs_done_future()
            log.debug2("TRACE: unit start job concluded")

            if any([
                    x for x in job_results.values()
                    if x not in ("skipped", "done")
            ]):
                details = ", ".join(
                    ["%s: %s" % item for item in job_results.items()])
                log.error("Starting services failed: {}".format(details))
                raise RolekitError(
                    COMMAND_FAILED,
                    "Starting services failed: %s" % details)
    except Exception as e:
        log.error("Error received starting unit: {}".format(e))
        raise

    # Next we create the owner
    log.debug2("TRACE: Creating owner of new database")
    createuser_args = ["/usr/bin/createuser", values['owner']]

    result = yield async.subprocess_future(createuser_args,
                                           uid=self.pg_uid,
                                           gid=self.pg_gid)
    if result.status:
        # If the subprocess returned non-zero, the user probably already exists
        # (such as when we're using db_owner). If the caller was trying to set
        # a password, they probably didn't realize this, so we need to throw
        # an exception.
        log.info1("User {} already exists in the database".format(
            values['owner']))

        if password_provided:
            raise RolekitError(INVALID_SETTING,
                               "Cannot set password on pre-existing user")

        # If no password was specified, we'll continue
        new_owner = False

    # If no password was requested, generate a random one here
    if not password_provided:
        values['password'] = generate_password()

    log.debug2("TRACE: Creating new database")
    createdb_args = [
        "/usr/bin/createdb", values['database'], "-O", values['owner']
    ]

    result = yield async.subprocess_future(createdb_args,
                                           uid=self.pg_uid,
                                           gid=self.pg_gid)
    if result.status:
        # If the subprocess returned non-zero, raise an exception
        raise RolekitError(COMMAND_FAILED,
                           "Creating database failed: %d" % result.status)

    # Next, set the password on the owner
    # We'll skip this phase if the the user already existed
    if new_owner:
        log.debug2("TRACE: Setting password for database owner")
        pwd_args = [
            ROLEKIT_ROLES + "/databaseserver/tools/rk_db_setpwd.py",
            "--database", values['database'], "--user", values['owner']
        ]
        result = yield async.subprocess_future(pwd_args,
                                               stdin=values['password'],
                                               uid=self.pg_uid,
                                               gid=self.pg_gid)
        if result.status:
            # If the subprocess returned non-zero, raise an exception
            log.error("Setting owner password failed: {}".format(
                result.status))
            raise RolekitError(
                COMMAND_FAILED,
                "Setting owner password failed: %d" % result.status)

        # If this password was provided by the user, don't save it to
        # the settings for later retrieval. That could be a security
        # issue
        if password_provided:
            values.pop("password", None)
    else:  # Not a new owner
        # Never save the password to settings for an existing owner
        log.debug2("TRACE: Owner already exists, not setting password")
        values.pop("password", None)

    if first_instance:
        # Then update the server configuration to accept network
        # connections.
        log.debug2("TRACE: Opening access to external addresses")

        # edit postgresql.conf to add listen_addresses = '*'
        conffile = values['postgresql_conf']
        bakfile = conffile + ".rksave"

        try:
            linkfile(conffile, bakfile)
            with open(conffile) as f:
                conflines = f.readlines()

            tweaking_rules = [{
                'regex': r"^\s*#?\s*listen_addresses\s*=.*",
                'replace': r"listen_addresses = '*'",
                'append_if_missing': True
            }]

            overwrite_safely(
                conffile, "".join(_tweak_lines(conflines, tweaking_rules)))
        except Exception as e:
            log.fatal("Couldn't write {!r}: {}".format(conffile, e))
            # At this point, conffile is unmodified, otherwise
            # overwrite_safely() would have succeeded
            try:
                os.unlink(bakfile)
            except Exception as x:
                if not (isinstance(x, OSError) and x.errno == errno.ENOENT):
                    log.error("Couldn't remove {!r}: {}".format(
                        bakfile, x))

            # NOTE(review): adjacent string literals here concatenate
            # without a space ("...'{}'failed: ...") — likely a typo.
            raise RolekitError(
                COMMAND_FAILED,
                "Opening access to external addresses in '{}'"
                "failed: {}".format(conffile, e))

        # Edit pg_hba.conf to allow 'md5' auth on IPv4 and
        # IPv6 interfaces.
        conffile = values['pg_hba_conf']
        bakfile = conffile + ".rksave"

        try:
            linkfile(conffile, bakfile)
            with open(conffile) as f:
                conflines = f.readlines()

            tweaking_rules = [{
                'regex': r"^\s*host((?:\s.*)$)",
                'replace': r"#host\1"
            }, {
                'regex': r"^\s*local(?:\s.*|)$",
                'append':
                "# Use md5 method for all connections\nhost all all all md5"
            }]

            overwrite_safely(
                conffile, "".join(_tweak_lines(conflines, tweaking_rules)))
        except Exception as e:
            log.fatal("Couldn't write {!r}: {}".format(conffile, e))
            # At this point, conffile is unmodified, otherwise
            # overwrite_safely() would have succeeded
            try:
                os.unlink(bakfile)
            except Exception as x:
                if not (isinstance(x, OSError) and x.errno == errno.ENOENT):
                    log.error("Couldn't remove {!r}: {}".format(
                        bakfile, x))

            # Restore previous postgresql.conf from the backup
            conffile = values['postgresql_conf']
            bakfile = conffile + ".rksave"
            try:
                os.rename(bakfile, conffile)
            except Exception as x:
                log.error(
                    "Couldn't restore {!r} from backup {!r}: {}".format(
                        conffile, bakfile, x))

            # NOTE(review): same missing-space string concatenation as
            # above ("...'{}'failed: ...").
            raise RolekitError(
                COMMAND_FAILED,
                "Changing all connections to use md5 method in '{}'"
                "failed: {}".format(values['pg_hba_conf'], e))

        # Restart the postgresql server to accept the new configuration
        log.debug2("TRACE: Restarting postgresql.service unit")
        with SystemdJobHandler() as job_handler:
            job_path = job_handler.manager.RestartUnit(
                "postgresql.service", "replace")
            job_handler.register_job(job_path)

            job_results = yield job_handler.all_jobs_done_future()

            if any([
                    x for x in job_results.values()
                    if x not in ("skipped", "done")
            ]):
                details = ", ".join(
                    ["%s: %s" % item for item in job_results.items()])
                raise RolekitError(
                    COMMAND_FAILED,
                    "Restarting service failed: %s" % details)

    # Create the systemd target definition
    target = RoleDeploymentValues(self.get_type(), self.get_name(),
                                  "Database Server")
    target.add_required_units(['postgresql.service'])

    log.debug2("TRACE: Database server deployed")

    yield target
def do_deploy_async(self, values, sender=None):
    """Deploy a Memory Cache (memcached-in-docker) role instance
    (coroutine).

    Picks a default cache size (25% of RAM, capped at 1 GiB), enables
    and starts docker.service, pulls the memcached image, writes a
    systemd service unit wrapping the container, and yields a
    RoleDeploymentValues requiring that unit.

    :param values: deployment settings (mutated with computed defaults)
    :raises RolekitError: COMMAND_FAILED when docker.service fails to start
    """
    log.debug9("TRACE: do_deploy_async")

    # Late import: docker is only needed during deployment, so it is
    # not a hard dependency of rolekit itself.
    import docker

    # Get the default cache size
    # Find out how much RAM is available on the system
    if 'cache_size' not in values:
        # Do a late import of psutil. This will only get
        # used during a deployment, so we don't need to
        # have it as a dependency for rolekit itself
        import psutil

        # Get the total number of bytes in local system memory
        total_ram = psutil.virtual_memory().total

        # If 25% of the available memory is less than 1GB, use
        # that for the cache.
        if total_ram / 4 < GiB_SIZE:
            # Set cache_size in MiB
            values['cache_size'] = int(total_ram / 4 / MiB_SIZE)
        else:
            # Cap the default size at 1 GB in MiB
            values['cache_size'] = int(GiB_SIZE / MiB_SIZE)

    # Set defaults
    if "connections" not in values:
        values["connections"] = self._DEFAULTS["connections"]

    if "threads" not in values:
        values["threads"] = self._DEFAULTS["threads"]

    # Create a container for memcached and launch that
    log.debug2("Enabling the Docker container manager")

    # Enable and start the docker service
    enable_units(['docker.service'])

    log.debug2("Starting the Docker container manager")
    with SystemdJobHandler() as job_handler:
        job_path = job_handler.manager.StartUnit("docker.service", "replace")
        job_handler.register_job(job_path)

        job_results = yield job_handler.all_jobs_done_future()

        if any([x for x in job_results.values()
                if x not in ("skipped", "done")]):
            details = ", ".join(
                ["%s: %s" % item for item in job_results.items()])
            raise RolekitError(COMMAND_FAILED,
                               "Starting docker.service failed: %s" % details)

    log.debug2("Pulling %s image from Docker Hub" % MEMCACHED_DOCKER_IMAGE)
    dockerclient = docker.Client(
        base_url=docker.utils.utils.DEFAULT_UNIX_SOCKET, version='auto')

    # First, pull down the latest version of the memcached container
    dockerclient.pull(MEMCACHED_DOCKER_IMAGE, tag="latest")

    log.debug2("Creating systemd service unit")
    # Generate a systemd service unit for this container
    container_unit = SystemdContainerServiceUnit(
        image_name=MEMCACHED_DOCKER_IMAGE,
        container_name="memcached_%s" % self.get_name(),
        desc="memcached docker container - %s" % self.get_name(),
        env={
            "MEMCACHED_CACHE_SIZE": str(values['cache_size']),
            "MEMCACHED_CONNECTIONS": str(values['connections']),
            "MEMCACHED_THREADS": str(values['threads'])
        },
        ports=("{0}:{0}/tcp".format(MEMCACHED_DEFAULT_PORT),
               "{0}:{0}/udp".format(MEMCACHED_DEFAULT_PORT)))
    container_unit.write()

    # Make systemd load this new unit file
    log.debug2("Running systemd daemon-reload")
    with SystemdJobHandler() as job_handler:
        job_handler.manager.Reload()

    # Return the target information
    target = RoleDeploymentValues(self.get_type(), self.get_name(),
                                  "Memory Cache")
    target.add_required_units(['memcached_%s.service' % self.get_name()])

    log.debug9("TRACE: exiting do_deploy_async")
    yield target
def do_deploy_async(self, values, sender=None):
    """Deploy the Domain Controller role instance (coroutine, older
    dict-target variant).

    Requires 'admin_password' in *values*; defaults the host name from
    the system FQDN, the domain from the host, the realm from the
    domain, and generates a dm_password if absent.  Runs
    ipa-server-install and yields a plain dictionary of systemd target
    directives.

    :raises RolekitError: INVALID_VALUE for missing/bad settings,
        COMMAND_FAILED when ipa-server-install exits non-zero
    """
    log.debug9("TRACE: do_deploy_async")

    # Ensure we have all the mandatory arguments
    if 'admin_password' not in values:
        raise RolekitError(INVALID_VALUE, "admin_password unset")

    # If the hostname wasn't specified, get it from the system
    fqdn = socket.getfqdn()
    if 'host_name' not in values:
        values['host_name'] = fqdn

    # Make sure this is a real hostname, not localhost.localdomain
    if values['host_name'].startswith("localhost"):
        raise RolekitError(INVALID_VALUE, "invalid hostname")

    # We have been asked to change the hostname as part of the
    # creation of the domain controller
    if values['host_name'] != fqdn:
        # Change the domain with the hostnamectl API
        yield set_hostname(values['host_name'])

    # Set the domain to the domain part of the host name if omitted
    if 'domain_name' not in values:
        values['domain_name'] = self._get_domain()

    # If left unspecified, default the realm to the
    # upper-case version of the domain name
    if 'realm_name' not in values:
        values['realm_name'] = values['domain_name'].upper()

    # If left unspecified, assign a random password for
    # the directory manager
    if 'dm_password' not in values:
        # Generate a random password
        values['dm_password'] = generate_password()

    # Call ipa-server-install with the requested arguments
    ipa_install_args = [
        'ipa-server-install', '-U',
        '-r', values['realm_name'],
        '-d', values['domain_name'],
        '-p', values['dm_password'],
        '-a', values['admin_password'],
    ]

    # If the user has requested the DNS server, enable it
    if 'serve_dns' not in values:
        values['serve_dns'] = self._settings['serve_dns']

    if values['serve_dns']:
        ipa_install_args.append('--setup-dns')

        # Pass the primary IP address
        if 'primary_ip' in values:
            ipa_install_args.append('--ip-address=%s' % values['primary_ip'])

        # if the user has requested DNS forwarders, add them
        # NOTE(review): these list comprehensions are used purely for
        # their side effect (appending); extend() would express the
        # intent directly.  The trailing 'pass' is dead code.
        if 'dns_forwarders' in values:
            [ipa_install_args.append("--forwarder=%s" % x)
             for x in values['dns_forwarders']['ipv4']]
            [ipa_install_args.append("--forwarder=%s" % x)
             for x in values['dns_forwarders']['ipv6']]
            pass
        else:
            ipa_install_args.append('--no-forwarders')

        # If the user has requested the reverse zone add it
        if 'reverse_zone' in values:
            for zone in values['reverse_zone']:
                ipa_install_args.append('--reverse-zone=%s' % zone)
        else:
            ipa_install_args.append('--no-reverse')

    # If the user has requested a specified ID range,
    # set up the argument to ipa-server-install
    if 'id_start' in values or 'id_max' in values:
        if ('id_start' not in values or 'id_max' not in values
                or not values['id_start'] or not values['id_max']):
            raise RolekitError(INVALID_VALUE,
                               "Must specify id_start and id_max together")

        if (values['id_start'] and values['id_max'] <= values['id_start']):
            raise RolekitError(INVALID_VALUE,
                               "id_max must be greater than id_start")

        ipa_install_args.append('--idstart=%d' % values['id_start'])
        ipa_install_args.append('--idmax=%d' % values['id_max'])

    # TODO: If the user has specified a root CA file,
    # set up the argument to ipa-server-install

    # Remove the admin_password from the values so
    # it won't be saved to the settings
    values.pop('admin_password', None)

    result = yield async.subprocess_future(ipa_install_args)

    if result.status:
        # If the subprocess returned non-zero, raise an exception
        raise RolekitError(COMMAND_FAILED, "%d" % result.status)

    # Create the systemd target definition
    target = {
        'Role': 'domaincontroller',
        'Instance': self.get_name(),
        'Description': "Domain Controller Role - %s" % self.get_name(),
        'Wants': ['ipa.service'],
        'After': ['syslog.target', 'network.target']
    }

    # We're done!
    yield target
def do_deploy_async(self, values, sender=None):
    """Deploy a databaseserver (PostgreSQL) role instance.

    Asynchronous generator: initializes the cluster (first instance
    only), starts postgresql.service, creates the owner and database,
    sets the owner password, opens network access (first instance
    only), and finally yields the systemd target definition.

    :param values: dict of deployment settings; 'database' is
                   mandatory, the rest are defaulted below
    :param sender: optional D-Bus sender (unused here)
    :raises RolekitError: on invalid input, missing system IDs or
                          failed external commands
    """
    log.debug9("TRACE do_deploy_async(databaseserver)")
    # Do the magic
    #
    # In case of error raise an exception

    first_instance = True

    # Check whether this is the first instance of the database
    # NOTE(review): peeks at the private _type/_name attributes of
    # sibling instances rather than using accessors.
    for value in self._parent.get_instances().values():
        if ('databaseserver' == value._type and self._name != value._name
                and self.get_state() in deployed_states):
            first_instance = False
            break

    # First, check for all mandatory arguments
    if 'database' not in values:
        raise RolekitError(INVALID_VALUE, "Database name unset")

    if 'owner' not in values:
        # We'll default to db_owner
        values['owner'] = "db_owner"

    # We will assume the owner is new until adding them fails
    new_owner = True

    # Determine if a password was passed in, so we know whether to
    # suppress it from the settings list later.
    if 'password' in values:
        password_provided = True
    else:
        password_provided = False

    if 'postgresql_conf' not in values:
        values['postgresql_conf'] = self._settings['postgresql_conf']

    if 'pg_hba_conf' not in values:
        values['pg_hba_conf'] = self._settings['pg_hba_conf']

    # Get the UID and GID of the 'postgres' user
    # NOTE(review): "postgress" below is a typo in the error message
    # text (runtime string, left untouched here).
    try:
        self.pg_uid = pwd.getpwnam('postgres').pw_uid
    except KeyError:
        raise RolekitError(MISSING_ID,
                           "Could not retrieve UID for postgress user")

    try:
        self.pg_gid = grp.getgrnam('postgres').gr_gid
    except KeyError:
        raise RolekitError(MISSING_ID,
                           "Could not retrieve GID for postgress group")

    if first_instance:
        # Initialize the database on the filesystem
        initdb_args = ["/usr/bin/postgresql-setup", "--initdb"]
        log.debug2("TRACE: Initializing database")
        result = yield async .subprocess_future(initdb_args)
        if result.status:
            # If this fails, it may be just that the filesystem
            # has already been initialized. We'll log the message
            # and continue.
            log.debug1("INITDB: %s" % result.stdout)

    # Now we have to start the service to set everything else up
    # It's safe to start an already-running service, so we'll
    # just always make this call, particularly in case other instances
    # exist but aren't running.
    log.debug2("TRACE: Starting postgresql.service unit")
    try:
        with SystemdJobHandler() as job_handler:
            job_path = job_handler.manager.StartUnit(
                "postgresql.service", "replace")
            job_handler.register_job(job_path)
            log.debug2("TRACE: unit start job registered")

            job_results = yield job_handler.all_jobs_done_future()
            log.debug2("TRACE: unit start job concluded")

            # Any job result other than "skipped" or "done" is a failure
            if any([
                    x for x in job_results.values()
                    if x not in ("skipped", "done")
            ]):
                details = ", ".join(
                    ["%s: %s" % item for item in job_results.items()])
                log.error("Starting services failed: {}".format(details))
                raise RolekitError(
                    COMMAND_FAILED,
                    "Starting services failed: %s" % details)
    except Exception as e:
        log.error("Error received starting unit: {}".format(e))
        raise

    # Next we create the owner
    log.debug2("TRACE: Creating owner of new database")
    createuser_args = ["/usr/bin/createuser", values['owner']]
    # Run as the postgres user so peer authentication applies
    result = yield async .subprocess_future(createuser_args,
                                            uid=self.pg_uid,
                                            gid=self.pg_gid)
    if result.status:
        # If the subprocess returned non-zero, the user probably already
        # exists (such as when we're using db_owner). If the caller was
        # trying to set a password, they probably didn't realize this,
        # so we need to throw an exception.
        log.info1("User {} already exists in the database".format(
            values['owner']))
        if password_provided:
            raise RolekitError(INVALID_SETTING,
                               "Cannot set password on pre-existing user")
        # If no password was specified, we'll continue
        new_owner = False

    # If no password was requested, generate a random one here
    if not password_provided:
        values['password'] = generate_password()

    log.debug2("TRACE: Creating new database")
    createdb_args = [
        "/usr/bin/createdb", values['database'], "-O", values['owner']
    ]
    result = yield async .subprocess_future(createdb_args,
                                            uid=self.pg_uid,
                                            gid=self.pg_gid)
    if result.status:
        # If the subprocess returned non-zero, raise an exception
        raise RolekitError(COMMAND_FAILED,
                           "Creating database failed: %d" % result.status)

    # Next, set the password on the owner
    # We'll skip this phase if the the user already existed
    if new_owner:
        log.debug2("TRACE: Setting password for database owner")
        # Helper script reads the password from stdin
        pwd_args = [
            ROLEKIT_ROLES + "/databaseserver/tools/rk_db_setpwd.py",
            "--database", values['database'], "--user", values['owner']
        ]
        result = yield async .subprocess_future(pwd_args,
                                                stdin=values['password'],
                                                uid=self.pg_uid,
                                                gid=self.pg_gid)
        if result.status:
            # If the subprocess returned non-zero, raise an exception
            log.error("Setting owner password failed: {}".format(
                result.status))
            raise RolekitError(
                COMMAND_FAILED,
                "Setting owner password failed: %d" % result.status)

        # If this password was provided by the user, don't save it to
        # the settings for later retrieval. That could be a security
        # issue
        if password_provided:
            values.pop("password", None)
    else:  # Not a new owner
        # Never save the password to settings for an existing owner
        log.debug2("TRACE: Owner already exists, not setting password")
        values.pop("password", None)

    if first_instance:
        # Then update the server configuration to accept network
        # connections.
        # edit postgresql.conf to add listen_addresses = '*'
        log.debug2("TRACE: Opening access to external addresses")
        # NOTE(review): non-raw string; '\W' and '\*' are not valid
        # Python escapes so the bytes are passed through unchanged,
        # but a raw string would be safer.
        sed_args = [
            "/bin/sed", "-e",
            "s@^[#]listen_addresses\W*=\W*'.*'@listen_addresses = '\*'@",
            "-i.rksave", values['postgresql_conf']
        ]
        result = yield async .subprocess_future(sed_args)
        if result.status:
            # If the subprocess returned non-zero, raise an exception
            raise RolekitError(
                COMMAND_FAILED,
                "Changing listen_addresses in '%s' failed: %d" %
                (values['postgresql_conf'], result.status))

        # Edit pg_hba.conf to allow 'md5' auth on IPv4 and
        # IPv6 interfaces.
        sed_args = [
            "/bin/sed", "-e", "s@^host@#host@", "-e",
            '/^local/a # Use md5 method for all connections', "-e",
            '/^local/a host all all all md5', "-i.rksave",
            values['pg_hba_conf']
        ]
        result = yield async .subprocess_future(sed_args)
        if result.status:
            # If the subprocess returned non-zero, raise an exception
            raise RolekitError(
                COMMAND_FAILED,
                "Changing all connections to use md5 method in '%s' failed: %d"
                % (values['pg_hba_conf'], result.status))

        # Restart the postgresql server to accept the new configuration
        log.debug2("TRACE: Restarting postgresql.service unit")
        with SystemdJobHandler() as job_handler:
            job_path = job_handler.manager.RestartUnit(
                "postgresql.service", "replace")
            job_handler.register_job(job_path)

            job_results = yield job_handler.all_jobs_done_future()

            if any([
                    x for x in job_results.values()
                    if x not in ("skipped", "done")
            ]):
                details = ", ".join(
                    ["%s: %s" % item for item in job_results.items()])
                raise RolekitError(
                    COMMAND_FAILED,
                    "Restarting service failed: %s" % details)

    # Create the systemd target definition
    #
    # We use all of BindsTo, Requires and RequiredBy so we can ensure that
    # all database instances are started and stopped together, since
    # they're really all a single daemon service.
    #
    # The intention here is that starting or stopping any role instance or
    # the main postgresql server will result in the same action happening
    # to all roles. This way, rolekit maintains an accurate view of what
    # instances are running and can communicate that to anyone registered
    # to listen for notifications.
    target = {
        'Role': 'databaseserver',
        'Instance': self.get_name(),
        'Description': "Database Server Role - %s" % self.get_name(),
        'BindsTo': ['postgresql.service'],
        'Requires': ['postgresql.service'],
        'RequiredBy': ['postgresql.service'],
        'After': ['syslog.target', 'network.target']
    }

    log.debug2("TRACE: Database server deployed")
    yield target
def subprocess_future(args, stdin=None, uid=None, gid=None):
    """Start a subprocess and return a future used to wait for it to finish.

    :param args: A sequence of program arguments (see subprocess.Popen())
    :param stdin: A string containing one or more lines of stdin input
           to pass to the child process.
    :param uid: If specified, this must be a numerical UID that the
           subprocess will run under. If it is used, gid must also be
           specified.
    :param gid: If specified, this must be a numerical GID that the
           subprocess will run under. If it is used, uid must also be
           specified.
    :return: a future for an object with the members status, stdout and
             stderr, representing waitpid()-like status, stdout output
             and stderr output, respectively.
    """
    log.debug9("subprocess: {0}".format(args))

    def demote(user_uid, user_gid):
        """
        Pass the function 'set_ids' to preexec_fn, rather than just calling
        setuid and setgid. This will change the ids for that subprocess only.
        We have to construct a callable that requires no arguments in order
        to pass it to preexec_fn.
        """
        # Look up the username for an initgroups call
        # This is not a perfect solution, as it is
        # possible (though not recommended) that the UID
        # may match more than one username (such as aliases)
        # This approach will use only whichever name the
        # system deems is canonical for this UID.
        username = pwd.getpwuid(user_uid).pw_name

        def set_ids():
            os.setregid(user_gid, user_gid)
            os.initgroups(username, user_gid)
            os.setreuid(user_uid, user_uid)

        return set_ids

    if (uid is None) != (gid is None):
        # If one or the other is specified, but not both,
        # throw an error.
        raise RolekitError(INVALID_SETTING)

    if uid is not None:
        # The UID and GID are both set:
        # impersonate this UID and GID in the subprocess
        preexec_fn = demote(uid, gid)
    else:
        preexec_fn = None

    try:
        process = subprocess.Popen(args,
                                   close_fds=True,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=preexec_fn)
    except OSError as e:
        # BUGFIX: compare errno values with '==', not 'is'. Identity
        # comparison of ints relies on CPython's small-int caching and
        # is unreliable for arbitrary errno values.
        if e.errno == errno.EPERM:
            # Could not change users prior to executing the subprocess
            log.error("Insufficient privileges to impersonate UID/GID %s/%s"
                      % (uid, gid))
        raise

    # Send the input data if needed.
    if stdin:
        process.stdin.write(stdin.encode('utf-8'))
        process.stdin.close()

    # The three partial results.
    stdout_future = _fd_output_future(process.stdout, log.debug1)
    stderr_future = _fd_output_future(process.stderr, log.error)
    waitpid_future = Future()

    def child_exited(unused_pid, status):
        waitpid_future.set_result(status)
        # GLib has retrieved the process status and freed the PID. Ask the
        # subprocess.Popen object to wait for the process as well; we know
        # this will fail, but it prevents the subprocess module from calling
        # waitpid() on that freed PID in some indeterminate time in the
        # future, where it might take over an unrelated process. At this
        # point we are technically calling waitpid() on an unallocated PID,
        # which is generally racy, but we don't have any concurrently
        # running threads creating subprocesses under our hands, so we
        # should be OK.
        process.wait()

    GLib.child_watch_add(GLib.PRIORITY_DEFAULT, process.pid, child_exited)

    # Resolve the returned future when all partial results are resolved.
    future = Future()

    def check_if_done(unused_future):
        # Fires on each partial future's completion; only resolves the
        # aggregate future once all three are done.
        if (waitpid_future.done() and stdout_future.done()
                and stderr_future.done()):
            r = _AsyncSubprocessResult(status=waitpid_future.result(),
                                       stdout=stdout_future.result(),
                                       stderr=stderr_future.result())
            future.set_result(r)

    for f in (waitpid_future, stdout_future, stderr_future):
        f.add_done_callback(check_if_done)

    return future
def deploy_async(self, values, sender=None):
    """deploy role

    Drives the full deployment lifecycle for this role instance:
    validate input, install packages and firewall rules, run the
    role-specific do_deploy_async(), persist settings, create the
    systemd target and start the role. On any failure the instance
    is moved to the ERROR state (and removed entirely if the input
    values were invalid).

    :param values: D-Bus dictionary of deployment settings
    :param sender: optional D-Bus sender, forwarded to role hooks
    """
    remove_instance = False
    values = dbus_to_python(values)

    # Make sure we are in the proper state
    self.assert_state(NASCENT)

    # Log
    log.debug1("%s.deploy(%s)", self._log_prefix, values)

    # Check values
    # NOTE(review): bare 'except:' also catches KeyboardInterrupt /
    # SystemExit; it does re-raise, so only the cleanup runs extra.
    try:
        self.check_values(values)
    except:
        # Check values failed, remove the instance again if verification
        # failed, set state to error, save it (will be visible in the
        # .old backup file).
        self.change_state(ERROR, write=True)
        # cleanup
        self.__remove_instance()
        raise

    try:
        # Change to deploying state
        self.change_state(DEPLOYING)

        # Copy _DEFAULTS to self._settings
        self.copy_defaults()

        # Install package groups and packages
        log.debug9("TRACE: Installing packages")
        yield async.call_future(self.installPackages())

        # Install firewall
        self.installFirewall()

        # Call do_deploy
        log.debug9("TRACE: Performing role-specific deployment")
        try:
            target = yield async.call_future(self.do_deploy_async(values, sender))
        except RolekitError as e:
            if e.code == INVALID_VALUE:
                # If we failed because the input values were incorrect,
                # also remove the instance.
                remove_instance = True
            raise

        # Continue only after successful deployment:
        # Apply values to self._settings
        log.debug9("TRACE: role-specific deployment complete, applying values")
        self.apply_values(values)

        # Set up systemd target files
        log.debug9("TRACE: Creating systemd target files")
        self.create_target(target)

        # Change to ready to start state
        self.change_state(READY_TO_START, write=True)

        # Attempt to start the newly-deployed role
        # We do this because many role-installers will conclude by
        # starting anyway and we want to ensure that our role mechanism
        # is in sync with them.
        log.debug9("TRACE: Starting %s" % self.name)
        yield async.call_future(self.__start_async(sender))
    except:
        # Something failed, set state to error
        self.change_state(ERROR, write=True)
        if remove_instance:
            self.__remove_instance()
        raise
def do_deploy_async(self, values, sender=None):
    """Deploy a databaseserver (PostgreSQL) role instance.

    Asynchronous generator: derives defaults for the deployment
    settings, initializes the cluster (first instance only), starts
    postgresql.service, creates the owner and database, sets the
    owner password, rewrites postgresql.conf/pg_hba.conf in-process
    with backup/rollback (first instance only), and finally yields a
    RoleDeploymentValues target definition.

    :param values: dict of deployment settings; all keys optional here
    :param sender: optional D-Bus sender (unused here)
    :raises RolekitError: on missing system IDs, invalid settings or
                          failed external commands
    """
    log.debug9("TRACE do_deploy_async(databaseserver)")
    # Do the magic
    #
    # In case of error raise an exception

    first_instance = True
    # Check whether this is the first instance of the database
    for value in self._parent.get_instances().values():
        if (
            "databaseserver" == value.get_type()
            and self.get_name() != value.get_name()
            and self.get_state() in deployed_states
        ):
            first_instance = False
            break

    # If the database name wasn't specified
    if "database" not in values:
        # Use the instance name if it was manually specified
        if self.get_name()[0].isalpha():
            values["database"] = self.get_name()
        else:
            # Either it was autogenerated or begins with a
            # non-alphabetic character; prefix it with db_
            values["database"] = "db_%s" % self.get_name()

    if "owner" not in values:
        # We'll default to db_owner
        values["owner"] = "db_owner"

    # We will assume the owner is new until adding them fails
    new_owner = True

    # Determine if a password was passed in, so we know whether to
    # suppress it from the settings list later.
    if "password" in values:
        password_provided = True
    else:
        password_provided = False

    if "postgresql_conf" not in values:
        values["postgresql_conf"] = self._settings["postgresql_conf"]

    if "pg_hba_conf" not in values:
        values["pg_hba_conf"] = self._settings["pg_hba_conf"]

    # Get the UID and GID of the 'postgres' user
    try:
        self.pg_uid = pwd.getpwnam("postgres").pw_uid
    except KeyError:
        raise RolekitError(MISSING_ID, "Could not retrieve UID for postgres user")

    try:
        self.pg_gid = grp.getgrnam("postgres").gr_gid
    except KeyError:
        raise RolekitError(MISSING_ID, "Could not retrieve GID for postgres group")

    if first_instance:
        # Initialize the database on the filesystem
        initdb_args = ["/usr/bin/postgresql-setup", "--initdb"]
        log.debug2("TRACE: Initializing database")
        result = yield async.subprocess_future(initdb_args)
        if result.status:
            # If this fails, it may be just that the filesystem
            # has already been initialized. We'll log the message
            # and continue.
            log.debug1("INITDB: %s" % result.stdout)

    # Now we have to start the service to set everything else up
    # It's safe to start an already-running service, so we'll
    # just always make this call, particularly in case other instances
    # exist but aren't running.
    log.debug2("TRACE: Starting postgresql.service unit")
    try:
        with SystemdJobHandler() as job_handler:
            job_path = job_handler.manager.StartUnit("postgresql.service", "replace")
            job_handler.register_job(job_path)
            log.debug2("TRACE: unit start job registered")

            job_results = yield job_handler.all_jobs_done_future()
            log.debug2("TRACE: unit start job concluded")

            # Any job result other than "skipped" or "done" is a failure
            if any([x for x in job_results.values() if x not in ("skipped", "done")]):
                details = ", ".join(["%s: %s" % item for item in job_results.items()])
                log.error("Starting services failed: {}".format(details))
                raise RolekitError(COMMAND_FAILED, "Starting services failed: %s" % details)
    except Exception as e:
        log.error("Error received starting unit: {}".format(e))
        raise

    # Next we create the owner
    log.debug2("TRACE: Creating owner of new database")
    createuser_args = ["/usr/bin/createuser", values["owner"]]
    # Run as the postgres user so peer authentication applies
    result = yield async.subprocess_future(createuser_args, uid=self.pg_uid, gid=self.pg_gid)
    if result.status:
        # If the subprocess returned non-zero, the user probably already exists
        # (such as when we're using db_owner). If the caller was trying to set
        # a password, they probably didn't realize this, so we need to throw
        # an exception.
        log.info1("User {} already exists in the database".format(values["owner"]))

        if password_provided:
            raise RolekitError(INVALID_SETTING, "Cannot set password on pre-existing user")

        # If no password was specified, we'll continue
        new_owner = False

    # If no password was requested, generate a random one here
    if not password_provided:
        values["password"] = generate_password()

    log.debug2("TRACE: Creating new database")
    createdb_args = ["/usr/bin/createdb", values["database"], "-O", values["owner"]]
    result = yield async.subprocess_future(createdb_args, uid=self.pg_uid, gid=self.pg_gid)
    if result.status:
        # If the subprocess returned non-zero, raise an exception
        raise RolekitError(COMMAND_FAILED, "Creating database failed: %d" % result.status)

    # Next, set the password on the owner
    # We'll skip this phase if the the user already existed
    if new_owner:
        log.debug2("TRACE: Setting password for database owner")
        # Helper script reads the password from stdin
        pwd_args = [
            ROLEKIT_ROLES + "/databaseserver/tools/rk_db_setpwd.py",
            "--database",
            values["database"],
            "--user",
            values["owner"],
        ]
        result = yield async.subprocess_future(pwd_args, stdin=values["password"], uid=self.pg_uid, gid=self.pg_gid)

        if result.status:
            # If the subprocess returned non-zero, raise an exception
            log.error("Setting owner password failed: {}".format(result.status))
            raise RolekitError(COMMAND_FAILED, "Setting owner password failed: %d" % result.status)

        # If this password was provided by the user, don't save it to
        # the settings for later retrieval. That could be a security
        # issue
        if password_provided:
            values.pop("password", None)
    else:  # Not a new owner
        # Never save the password to settings for an existing owner
        log.debug2("TRACE: Owner already exists, not setting password")
        values.pop("password", None)

    if first_instance:
        # Then update the server configuration to accept network
        # connections.
        log.debug2("TRACE: Opening access to external addresses")

        # edit postgresql.conf to add listen_addresses = '*'
        conffile = values["postgresql_conf"]
        bakfile = conffile + ".rksave"

        try:
            # Hard-link a backup so a failed rewrite can be rolled back
            linkfile(conffile, bakfile)

            with open(conffile) as f:
                conflines = f.readlines()

            tweaking_rules = [
                {
                    "regex": r"^\s*#?\s*listen_addresses\s*=.*",
                    "replace": r"listen_addresses = '*'",
                    "append_if_missing": True,
                }
            ]

            # Atomic replace of the config file contents
            overwrite_safely(conffile, "".join(_tweak_lines(conflines, tweaking_rules)))
        except Exception as e:
            log.fatal("Couldn't write {!r}: {}".format(conffile, e))

            # At this point, conffile is unmodified, otherwise
            # overwrite_safely() would have succeeded
            try:
                os.unlink(bakfile)
            except Exception as x:
                # Missing backup is fine; report anything else
                if not (isinstance(x, OSError) and x.errno == errno.ENOENT):
                    log.error("Couldn't remove {!r}: {}".format(bakfile, x))

            # NOTE(review): the adjacent string literals below
            # concatenate without a space ("...'{}'failed: {}").
            raise RolekitError(
                COMMAND_FAILED,
                "Opening access to external addresses in '{}'" "failed: {}".format(conffile, e)
            )

        # Edit pg_hba.conf to allow 'md5' auth on IPv4 and
        # IPv6 interfaces.
        conffile = values["pg_hba_conf"]
        bakfile = conffile + ".rksave"

        try:
            # Hard-link a backup so a failed rewrite can be rolled back
            linkfile(conffile, bakfile)

            with open(conffile) as f:
                conflines = f.readlines()

            tweaking_rules = [
                {"regex": r"^\s*host((?:\s.*)$)", "replace": r"#host\1"},
                {
                    "regex": r"^\s*local(?:\s.*|)$",
                    "append": "# Use md5 method for all connections\nhost all all all md5",
                },
            ]

            overwrite_safely(conffile, "".join(_tweak_lines(conflines, tweaking_rules)))
        except Exception as e:
            log.fatal("Couldn't write {!r}: {}".format(conffile, e))

            # At this point, conffile is unmodified, otherwise
            # overwrite_safely() would have succeeded
            try:
                os.unlink(bakfile)
            except Exception as x:
                if not (isinstance(x, OSError) and x.errno == errno.ENOENT):
                    log.error("Couldn't remove {!r}: {}".format(bakfile, x))

            # Restore previous postgresql.conf from the backup, since
            # the first rewrite already succeeded
            conffile = values["postgresql_conf"]
            bakfile = conffile + ".rksave"

            try:
                os.rename(bakfile, conffile)
            except Exception as x:
                log.error("Couldn't restore {!r} from backup {!r}: {}".format(conffile, bakfile, x))

            # NOTE(review): same missing space between the adjacent
            # string literals as above.
            raise RolekitError(
                COMMAND_FAILED,
                "Changing all connections to use md5 method in '{}'" "failed: {}".format(values["pg_hba_conf"], e),
            )

        # Restart the postgresql server to accept the new configuration
        log.debug2("TRACE: Restarting postgresql.service unit")
        with SystemdJobHandler() as job_handler:
            job_path = job_handler.manager.RestartUnit("postgresql.service", "replace")
            job_handler.register_job(job_path)

            job_results = yield job_handler.all_jobs_done_future()

            if any([x for x in job_results.values() if x not in ("skipped", "done")]):
                details = ", ".join(["%s: %s" % item for item in job_results.items()])
                raise RolekitError(COMMAND_FAILED, "Restarting service failed: %s" % details)

    # Create the systemd target definition
    target = RoleDeploymentValues(self.get_type(), self.get_name(), "Database Server")
    target.add_required_units(["postgresql.service"])

    log.debug2("TRACE: Database server deployed")
    yield target
def do_deploy_async(self, values, sender=None): log.debug9("TRACE do_deploy_async(databaseserver)") # Do the magic # # In case of error raise an exception first_instance = True # Check whether this is the first instance of the database for value in self._parent.get_instances().values(): if ('databaseserver' == value._type and self._name != value._name and self.get_state() in deployed_states): first_instance = False break # First, check for all mandatory arguments if 'database' not in values: raise RolekitError(INVALID_VALUE, "Database name unset") if 'owner' not in values: # We'll default to db_owner values['owner'] = "db_owner" # We will assume the owner is new until adding them fails new_owner = True # Determine if a password was passed in, so we know whether to # suppress it from the settings list later. if 'password' in values: password_provided = True else: password_provided = False if 'postgresql_conf' not in values: values['postgresql_conf'] = self._settings['postgresql_conf'] if 'pg_hba_conf' not in values: values['pg_hba_conf'] = self._settings['pg_hba_conf'] # Get the UID and GID of the 'postgres' user try: self.pg_uid = pwd.getpwnam('postgres').pw_uid except KeyError: raise RolekitError(MISSING_ID, "Could not retrieve UID for postgress user") try: self.pg_gid = grp.getgrnam('postgres').gr_gid except KeyError: raise RolekitError(MISSING_ID, "Could not retrieve GID for postgress group") if first_instance: # Initialize the database on the filesystem initdb_args = ["/usr/bin/postgresql-setup", "--initdb"] log.debug2("TRACE: Initializing database") result = yield async.subprocess_future(initdb_args) if result.status: # If this fails, it may be just that the filesystem # has already been initialized. We'll log the message # and continue. 
log.debug1("INITDB: %s" % result.stdout) # Now we have to start the service to set everything else up # It's safe to start an already-running service, so we'll # just always make this call, particularly in case other instances # exist but aren't running. log.debug2("TRACE: Starting postgresql.service unit") try: with SystemdJobHandler() as job_handler: job_path = job_handler.manager.StartUnit("postgresql.service", "replace") job_handler.register_job(job_path) log.debug2("TRACE: unit start job registered") job_results = yield job_handler.all_jobs_done_future() log.debug2("TRACE: unit start job concluded") if any([x for x in job_results.values() if x not in ("skipped", "done")]): details = ", ".join(["%s: %s" % item for item in job_results.items()]) log.error("Starting services failed: {}".format(details)) raise RolekitError(COMMAND_FAILED, "Starting services failed: %s" % details) except Exception as e: log.error("Error received starting unit: {}".format(e)) raise # Next we create the owner log.debug2("TRACE: Creating owner of new database") createuser_args = ["/usr/bin/createuser", values['owner']] result = yield async.subprocess_future(createuser_args, uid=self.pg_uid, gid=self.pg_gid) if result.status: # If the subprocess returned non-zero, the user probably already exists # (such as when we're using db_owner). If the caller was trying to set # a password, they probably didn't realize this, so we need to throw # an exception. 
log.info1("User {} already exists in the database".format( values['owner'])) if password_provided: raise RolekitError(INVALID_SETTING, "Cannot set password on pre-existing user") # If no password was specified, we'll continue new_owner = False # If no password was requested, generate a random one here if not password_provided: values['password'] = generate_password() log.debug2("TRACE: Creating new database") createdb_args = ["/usr/bin/createdb", values['database'], "-O", values['owner']] result = yield async.subprocess_future(createdb_args, uid=self.pg_uid, gid=self.pg_gid) if result.status: # If the subprocess returned non-zero, raise an exception raise RolekitError(COMMAND_FAILED, "Creating database failed: %d" % result.status) # Next, set the password on the owner # We'll skip this phase if the the user already existed if new_owner: log.debug2("TRACE: Setting password for database owner") pwd_args = [ROLEKIT_ROLES + "/databaseserver/tools/rk_db_setpwd.py", "--database", values['database'], "--user", values['owner']] result = yield async.subprocess_future(pwd_args, stdin=values['password'], uid=self.pg_uid, gid=self.pg_gid) if result.status: # If the subprocess returned non-zero, raise an exception log.error("Setting owner password failed: {}".format(result.status)) raise RolekitError(COMMAND_FAILED, "Setting owner password failed: %d" % result.status) # If this password was provided by the user, don't save it to # the settings for later retrieval. That could be a security # issue if password_provided: values.pop("password", None) else: # Not a new owner # Never save the password to settings for an existing owner log.debug2("TRACE: Owner already exists, not setting password") values.pop("password", None) if first_instance: # Then update the server configuration to accept network # connections. 
# edit postgresql.conf to add listen_addresses = '*' log.debug2("TRACE: Opening access to external addresses") sed_args = [ "/bin/sed", "-e", "s@^[#]listen_addresses\W*=\W*'.*'@listen_addresses = '\*'@", "-i.rksave", values['postgresql_conf'] ] result = yield async.subprocess_future(sed_args) if result.status: # If the subprocess returned non-zero, raise an exception raise RolekitError(COMMAND_FAILED, "Changing listen_addresses in '%s' failed: %d" % (values['postgresql_conf'], result.status)) # Edit pg_hba.conf to allow 'md5' auth on IPv4 and # IPv6 interfaces. sed_args = [ "/bin/sed", "-e", "s@^host@#host@", "-e", '/^local/a # Use md5 method for all connections', "-e", '/^local/a host all all all md5', "-i.rksave", values['pg_hba_conf'] ] result = yield async.subprocess_future(sed_args) if result.status: # If the subprocess returned non-zero, raise an exception raise RolekitError(COMMAND_FAILED, "Changing all connections to use md5 method in '%s' failed: %d" % (values['pg_hba_conf'], result.status)) # Restart the postgresql server to accept the new configuration log.debug2("TRACE: Restarting postgresql.service unit") with SystemdJobHandler() as job_handler: job_path = job_handler.manager.RestartUnit("postgresql.service", "replace") job_handler.register_job(job_path) job_results = yield job_handler.all_jobs_done_future() if any([x for x in job_results.values() if x not in ("skipped", "done")]): details = ", ".join(["%s: %s" % item for item in job_results.items()]) raise RolekitError(COMMAND_FAILED, "Restarting service failed: %s" % details) # Create the systemd target definition # # We use all of BindsTo, Requires and RequiredBy so we can ensure that # all database instances are started and stopped together, since # they're really all a single daemon service. # # The intention here is that starting or stopping any role instance or # the main postgresql server will result in the same action happening # to all roles. 
This way, rolekit maintains an accurate view of what # instances are running and can communicate that to anyone registered # to listen for notifications. target = {'Role': 'databaseserver', 'Instance': self.get_name(), 'Description': "Database Server Role - %s" % self.get_name(), 'BindsTo': ['postgresql.service'], 'Requires': ['postgresql.service'], 'RequiredBy': ['postgresql.service'], 'After': ['syslog.target', 'network.target']} log.debug2("TRACE: Database server deployed") yield target
def do_deploy_async(self, values, sender=None): log.debug9("TRACE: do_deploy_async") # Do the magic # # In case of error raise an exception # Ensure we have all the mandatory arguments if 'admin_password' not in values: raise RolekitError(INVALID_VALUE, "admin_password unset") # If the hostname wasn't specified, get it from the system fqdn = socket.getfqdn() if 'host_name' not in values: values['host_name'] = fqdn # Make sure this is a real hostname, not localhost.localdomain if values['host_name'].startswith("localhost"): raise RolekitError(INVALID_VALUE, "invalid hostname") # We have been asked to change the hostname as part of the # creation of the domain controller if values['host_name'] != fqdn: # Change the domain with the hostnamectl API yield set_hostname(values['host_name']) # Set the domain to the domain part of the if 'domain_name' not in values: values['domain_name'] = self._get_domain() # If left unspecified, default the realm to the # upper-case version of the domain name if 'realm_name' not in values: values['realm_name'] = values['domain_name'].upper() # If left unspecified, assign a random password for # the directory manager if 'dm_password' not in values: # Generate a random password values['dm_password'] = generate_password() # Call ipa-server-install with the requested arguments ipa_install_args = [ 'ipa-server-install', '-U', '-r', values['realm_name'], '-d', values['domain_name'], '-p', values['dm_password'], '-a', values['admin_password'], ] # If the user has requested the DNS server, enable it if 'serve_dns' not in values: values['serve_dns'] = self._settings['serve_dns'] if values['serve_dns']: ipa_install_args.append('--setup-dns') # Pass the primary IP address if 'primary_ip' in values: ipa_install_args.append('--ip-address=%s' % values['primary_ip']) # if the user has requested DNS forwarders, add them if 'dns_forwarders' in values: [ipa_install_args.append("--forwarder=%s" % x) for x in values['dns_forwarders']['ipv4']] 
[ipa_install_args.append("--forwarder=%s" % x) for x in values['dns_forwarders']['ipv6']] pass else: ipa_install_args.append('--no-forwarders') # If the user has requested the reverse zone add it if 'reverse_zone' in values: for zone in values['reverse_zone']: ipa_install_args.append('--reverse-zone=%s' % zone) else: ipa_install_args.append('--no-reverse') # If the user has requested a specified ID range, # set up the argument to ipa-server-install if 'id_start' in values or 'id_max' in values: if ('id_start' not in values or 'id_max' not in values or not values['id_start'] or not values['id_max']): raise RolekitError(INVALID_VALUE, "Must specify id_start and id_max together") if (values['id_start'] and values['id_max'] <= values['id_start']): raise RolekitError(INVALID_VALUE, "id_max must be greater than id_start") ipa_install_args.append('--idstart=%d' % values['id_start']) ipa_install_args.append('--idmax=%d' % values['id_max']) # TODO: If the user has specified a root CA file, # set up the argument to ipa-server-install # Remove the admin_password from the values so # it won't be saved to the settings values.pop('admin_password', None) result = yield async.subprocess_future(ipa_install_args) if result.status: # If the subprocess returned non-zero, raise an exception raise RolekitError(COMMAND_FAILED, "%d" % result.status) # Create the systemd target definition target = {'Role': 'domaincontroller', 'Instance': self.get_name(), 'Description': "Domain Controller Role - %s" % self.get_name(), 'Wants': ['ipa.service'], 'After': ['syslog.target', 'network.target']} # We're done! yield target
def deploy_async(self, values, sender=None):
    """Deploy this role instance.

    Orchestrates the full deployment: validates input, installs
    packages and firewall rules, delegates the role-specific work to
    do_deploy_async(), writes the systemd target, cleans up any
    deferred (nextboot) deployment artifacts, and finally starts the
    role.  On any failure the instance is moved to the ERROR state
    (and removed entirely when the input values were invalid).

    :param values: D-Bus dict of deployment settings (converted to
                   plain Python below).
    :param sender: D-Bus sender of the request, forwarded to helpers.
    """
    remove_instance = False
    values = dbus_to_python(values)

    # Make sure we are in the proper state
    self.assert_state(NASCENT)

    # Log
    log.debug1("%s.deploy(%s)", self._log_prefix, values)

    # Check values
    try:
        self.check_values(values)
    except Exception as e:
        # Check values failed, remove the instance again if verification
        # failed, set state to error, save it (will be visible in the
        # .old backup file).
        self.change_state(ERROR, error=str(e), write=True)
        # cleanup
        self.__remove_instance()
        raise

    try:
        # Change to deploying state
        self.change_state(DEPLOYING)

        # Copy _DEFAULTS to self._settings
        self.copy_defaults()

        # Install package groups and packages
        log.debug9("TRACE: Installing packages")
        yield async.call_future(self.installPackages())

        # Install firewall
        self.installFirewall()

        # Call do_deploy
        log.debug9("TRACE: Performing role-specific deployment")
        try:
            target = yield async.call_future(
                self.do_deploy_async(values, sender))
        except RolekitError as e:
            if e.code == INVALID_VALUE:
                # If we failed because the input values were incorrect,
                # also remove the instance.
                remove_instance = True
            raise

        # Continue only after successful deployment:
        # Apply values to self._settings
        log.debug9(
            "TRACE: role-specific deployment complete, applying values")
        self.apply_values(values)

        # Set up systemd target files
        log.debug9("TRACE: Creating systemd target files")
        self.create_target(target)

        # Change to ready to start state
        self.change_state(READY_TO_START, write=True)

        # In case this was a nextboot deployment, make sure to remove
        # the deferred role settings and systemd unit
        try:
            # Remove settings
            deferredsettings = "%s/%s/%s.json" % (
                ETC_ROLEKIT_DEFERREDROLES, self.get_type(), self.get_name())
            os.unlink(deferredsettings)

            # Remove systemd service unit
            deferredunit = "%s/deferred-role-deployment-%s-%s.service" % (
                SYSTEMD_UNITS, self.get_type(), self.get_name())
            disable_units([deferredunit])
            os.unlink(deferredunit)
        except FileNotFoundError:
            # Files didn't exist; ignore that
            pass
        except PermissionError:
            # SELinux bug?
            # NOTE(review): if the first os.unlink() raises
            # PermissionError, 'deferredunit' is still unbound here and
            # this log call would raise NameError — confirm and fix.
            log.fatal(
                "ERROR: permission error attempting to delete %s or %s" %
                (deferredsettings, deferredunit))
            # We'll continue anyway, since the service should be runnable at this point
            # The ConditionPathExists will prevent the service from trying to deploy
            # again

        # Tell systemd to reload the daemon configuration
        log.debug9("Reloading systemd units\n")
        with SystemdJobHandler() as job_handler:
            job_handler.manager.Reload()

        # Start monitoring the role
        self.monitor_unit()

        # Attempt to start the newly-deployed role
        # We do this because many role-installers will conclude by
        # starting anyway and we want to ensure that our role mechanism
        # is in sync with them.
        log.debug9("TRACE: Starting %s" % self.get_name())
        yield async.call_future(self.__start_async(sender))

    except Exception as e:
        # Something failed, set state to error
        self.change_state(ERROR, error=str(e), write=True)
        if remove_instance:
            self.__remove_instance()
        raise
def subprocess_future(args, stdin=None, uid=None, gid=None):
    """Start a subprocess and return a future used to wait for it to finish.

    :param args: A sequence of program arguments (see subprocess.Popen())
    :param stdin: A string containing one or more lines of stdin input
                  to pass to the child process.
    :param uid: If specified, this must be a numerical UID that the
                subprocess will run under. If it is used, gid must also
                be specified.
    :param gid: If specified, this must be a numerical GID that the
                subprocess will run under. If it is used, uid must also
                be specified.
    :return: a future for an object with the members status, stdout and
             stderr, representing waitpid()-like status, stdout output
             and stderr output, respectively.
    :raises RolekitError: INVALID_SETTING if only one of uid/gid is given.
    """
    log.debug9("subprocess: {0}".format(args))

    def demote(user_uid, user_gid):
        """
        Pass the function 'set_ids' to preexec_fn, rather than just calling
        setuid and setgid. This will change the ids for that subprocess only.
        We have to construct a callable that requires no arguments in order
        to pass it to preexec_fn.
        """
        # Look up the username for an initgroups call
        # This is not a perfect solution, as it is
        # possible (though not recommended) that the UID
        # may match more than one username (such as aliases)
        # This approach will use only whichever name the
        # system deems is canonical for this UID.
        username = pwd.getpwuid(user_uid).pw_name

        def set_ids():
            # Order matters: drop group first, then supplementary
            # groups, then the user id.
            os.setregid(user_gid, user_gid)
            os.initgroups(username, user_gid)
            os.setreuid(user_uid, user_uid)

        return set_ids

    if (uid is None) != (gid is None):
        # If one or the other is specified, but not both,
        # throw an error.
        raise RolekitError(INVALID_SETTING)

    if (uid is not None):
        # The UID and GID are both set
        # Impersonate this UID and GID in the subprocess
        preexec_fn = demote(uid, gid)
    else:
        preexec_fn = None

    try:
        process = subprocess.Popen(args,
                                   close_fds=True,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=preexec_fn)
    except OSError as e:
        # BUGFIX: compare errno values with ==, not 'is' — identity
        # comparison of ints only works by CPython's small-int caching
        # accident and is not a reliable equality test.
        if e.errno == errno.EPERM:
            # Could not change users prior to executing the subprocess
            log.error("Insufficient privileges to impersonate UID/GID %s/%s"
                      % (uid, gid))
        raise

    # Send the input data if needed; close stdin either way so the
    # child sees EOF.
    if stdin:
        process.stdin.write(stdin.encode('utf-8'))
    process.stdin.close()

    # The three partial results.
    stdout_future = _fd_output_future(process.stdout, log.debug1)
    stderr_future = _fd_output_future(process.stderr, log.error)
    waitpid_future = Future()

    def child_exited(unused_pid, status):
        waitpid_future.set_result(status)
        # GLib has retrieved the process status and freed the PID. Ask the
        # subprocess.Popen object to wait for the process as well; we know this
        # will fail, but it prevents the subprocess module from calling
        # waitpid() on that freed PID in some indeterminate time in the future,
        # where it might take over an unrelated process. At this point we are
        # technically calling waitpid() on an unallocated PID, which is
        # generally racy, but we don’t have any concurrently running threads
        # creating subprocesses under our hands, so we should be OK.
        process.wait()

    GLib.child_watch_add(GLib.PRIORITY_DEFAULT, process.pid, child_exited)

    # Resolve the returned future when all partial results are resolved.
    future = Future()

    def check_if_done(unused_future):
        if (waitpid_future.done() and stdout_future.done()
                and stderr_future.done()):
            r = _AsyncSubprocessResult(status=waitpid_future.result(),
                                       stdout=stdout_future.result(),
                                       stderr=stderr_future.result())
            future.set_result(r)

    for f in (waitpid_future, stdout_future, stderr_future):
        f.add_done_callback(check_if_done)
    return future
def do_deploy_async(self, values, sender=None): log.debug9("TRACE: do_deploy_async") # Do the magic # # In case of error raise an exception # Get the domain name from the passed-in settings # or set it to the instance name if ommitted if 'domain_name' not in values: values['domain_name'] = self.get_name() if not self._valid_fqdn(values['domain_name']): raise RolekitError(INVALID_VALUE, "Invalid domain name: %s" % values['domain_name']) if "host_name" not in values: # Let's construct a new host name. host_part = self._get_hostname() if host_part.startswith("localhost"): # We'll assign a random hostname starting with "dc-" random_part = ''.join(random.choice(string.ascii_lowercase) for _ in range(16)) host_part = "dc-%s" % random_part values['host_name'] = "%s.%s" % (host_part, values['domain_name']) if not self._valid_fqdn(values['host_name']): raise RolekitError(INVALID_VALUE, "Invalid host name: %s" % values['host_name']) # Change the hostname with the hostnamectl API yield set_hostname(values['host_name']) # If left unspecified, default the realm to the # upper-case version of the domain name if 'realm_name' not in values: values['realm_name'] = values['domain_name'].upper() # If left unspecified, assign a random password for # the administrative user if 'admin_password' not in values: admin_pw_provided = False values['admin_password'] = generate_password() else: admin_pw_provided = True # If left unspecified, assign a random password for # the directory manager if 'dm_password' not in values: dm_pw_provided = False values['dm_password'] = generate_password() else: dm_pw_provided = True # Call ipa-server-install with the requested arguments ipa_install_args = [ 'ipa-server-install', '-U', '-r', values['realm_name'], '-d', values['domain_name'], '-p', values['dm_password'], '-a', values['admin_password'], ] # If the user has requested the DNS server, enable it if 'serve_dns' not in values: values['serve_dns'] = self._settings['serve_dns'] if values['serve_dns']: 
ipa_install_args.append('--setup-dns') # Pass the primary IP address if 'primary_ip' in values: ipa_install_args.append('--ip-address=%s' % values['primary_ip']) # if the user has requested DNS forwarders, add them if 'dns_forwarders' in values: [ipa_install_args.append("--forwarder=%s" % x) for x in values['dns_forwarders']['ipv4']] [ipa_install_args.append("--forwarder=%s" % x) for x in values['dns_forwarders']['ipv6']] else: ipa_install_args.append('--no-forwarders') # If the user has requested the reverse zone add it if 'reverse_zone' in values: for zone in values['reverse_zone']: ipa_install_args.append('--reverse-zone=%s' % zone) else: ipa_install_args.append('--no-reverse') # If the user has requested a specified ID range, # set up the argument to ipa-server-install if 'id_start' in values or 'id_max' in values: if ('id_start' not in values or 'id_max' not in values or not values['id_start'] or not values['id_max']): raise RolekitError(INVALID_VALUE, "Must specify id_start and id_max together") if (values['id_start'] and values['id_max'] <= values['id_start']): raise RolekitError(INVALID_VALUE, "id_max must be greater than id_start") ipa_install_args.append('--idstart=%d' % values['id_start']) ipa_install_args.append('--idmax=%d' % values['id_max']) # TODO: If the user has specified a root CA file, # set up the argument to ipa-server-install # Remove the passwords from the values so # they won't be saved to the settings if admin_pw_provided: values.pop('admin_password', None) if dm_pw_provided: values.pop('dm_password', None) result = yield async.subprocess_future(ipa_install_args) if result.status: # If the subprocess returned non-zero, raise an exception raise RolekitError(COMMAND_FAILED, "%d" % result.status) # Create the systemd target definition target = RoleDeploymentValues(self.get_type(), self.get_name(), "Domain Controller") target.add_required_units(['ipa.service']) # We're done! yield target
def do_deploy_async(self, values, sender=None):
    """Deploy a memcached instance as a Docker container.

    Computes a default cache size from available RAM, ensures the
    Docker daemon is running, pulls the memcached image, writes a
    systemd service unit wrapping the container, and yields the
    deployment target description.

    :param values: dict of deployment settings; 'cache_size' (MiB),
                   'connections' and 'threads' are defaulted if absent.
    :param sender: D-Bus sender of the request (unused here).
    :return: yields a RoleDeploymentValues object on success (this is
             a generator-based coroutine).
    :raises RolekitError: COMMAND_FAILED if docker.service cannot start.
    """
    log.debug9("TRACE: do_deploy_async")
    # Run whatever series of actions are needed to deploy
    # this role in a meaningful way.
    #
    # Late import: docker is only needed at deployment time.
    import docker

    # Get the default cache size
    # Find out how much RAM is available on the system
    if 'cache_size' not in values:
        # Do a late import of psutil. This will only get
        # used during a deployment, so we don't need to
        # have it as a dependency for rolekit itself
        import psutil

        # Get the total number of bytes in local system memory
        total_ram = psutil.virtual_memory().total

        # If 25% of the available memory is less than 1GB, use
        # that for the cache.
        if total_ram / 4 < GiB_SIZE:
            # Set cache_size in MiB
            values['cache_size'] = int(total_ram / 4 / MiB_SIZE)
        else:
            # Cap the default size at 1 GB in MiB
            values['cache_size'] = int(GiB_SIZE / MiB_SIZE)

    # Set defaults
    if "connections" not in values:
        values["connections"] = self._DEFAULTS["connections"]

    if "threads" not in values:
        values["threads"] = self._DEFAULTS["threads"]

    # Create a container for memcached and launch that
    log.debug2("Enabling the Docker container manager")

    # Enable and start the docker service
    enable_units(['docker.service'])

    log.debug2("Starting the Docker container manager")
    with SystemdJobHandler() as job_handler:
        job_path = job_handler.manager.StartUnit("docker.service", "replace")
        job_handler.register_job(job_path)

        job_results = yield job_handler.all_jobs_done_future()
        # Any result other than "skipped" or "done" means the unit
        # failed to start; surface all unit results in the error.
        if any([x for x in job_results.values()
                if x not in ("skipped", "done")]):
            details = ", ".join(
                ["%s: %s" % item for item in job_results.items()])
            raise RolekitError(
                COMMAND_FAILED,
                "Starting docker.service failed: %s" % details)

    log.debug2("Pulling %s image from Docker Hub" % MEMCACHED_DOCKER_IMAGE)
    # NOTE(review): docker.Client is the legacy docker-py API; newer
    # releases renamed it to docker.APIClient — confirm the pinned
    # docker-py version before upgrading.
    dockerclient = docker.Client(
        base_url=docker.utils.utils.DEFAULT_UNIX_SOCKET,
        version='auto')

    # First, pull down the latest version of the memcached container
    dockerclient.pull(MEMCACHED_DOCKER_IMAGE, tag="latest")

    log.debug2("Creating systemd service unit")
    # Generate a systemd service unit for this container
    container_unit = SystemdContainerServiceUnit(
        image_name=MEMCACHED_DOCKER_IMAGE,
        container_name="memcached_%s" % self.get_name(),
        desc="memcached docker container - %s" % self.get_name(),
        env={
            "MEMCACHED_CACHE_SIZE": str(values['cache_size']),
            "MEMCACHED_CONNECTIONS": str(values['connections']),
            "MEMCACHED_THREADS": str(values['threads'])
        },
        ports=("{0}:{0}/tcp".format(MEMCACHED_DEFAULT_PORT),
               "{0}:{0}/udp".format(MEMCACHED_DEFAULT_PORT)))
    container_unit.write()

    # Make systemd load this new unit file
    log.debug2("Running systemd daemon-reload")
    with SystemdJobHandler() as job_handler:
        job_handler.manager.Reload()

    # Return the target information
    target = RoleDeploymentValues(self.get_type(), self.get_name(),
                                  "Memory Cache")
    target.add_required_units(['memcached_%s.service' % self.get_name()])

    log.debug9("TRACE: exiting do_deploy_async")
    yield target