def apply_values(self, values):
    # Copy key value pairs for the properties that are read-write to
    # self._settings and write the settings out.
    values = dbus_to_python(values)

    changed = []
    for x in values:
        if x in self._DEFAULTS:
            if x in self._READONLY_SETTINGS:
                raise RolekitError(READONLY_SETTING, x)
            # use _check_property method from derived or parent class
            self._check_property(x, values[x])
            # set validated setting
            self._settings[x] = values[x]
            changed.append(x)
        else:
            raise RolekitError(UNKNOWN_SETTING, x)

    if len(changed) > 0:
        dbus_changed = dbus.Dictionary(signature="sv")
        for x in changed:
            dbus_changed[x] = self.get_dbus_property(self, x)
        self.PropertiesChanged(DBUS_INTERFACE_ROLE_INSTANCE,
                               dbus_changed, [])

        # write validated settings
        self._settings.write()
def check_values(self, values):
    # Check key value pairs for the properties
    values = dbus_to_python(values)

    for x in values:
        if x in self._DEFAULTS:
            if x in self._READONLY_SETTINGS:
                raise RolekitError(READONLY_SETTING, x)
            # use _check_property method from derived or parent class
            self._check_property(x, values[x])
        else:
            log.error("Unknown property: %s" % x)
            raise RolekitError(UNKNOWN_SETTING, x)
def do_check_property(self, prop, value):
    # All options passed to the role must be validated.
    # At minimum, this routine should call one of the
    # following routines for all known settings:
    #  * self.check_type_bool(value)
    #  * self.check_type_dict(value)
    #  * self.check_type_int(value)
    #  * self.check_type_list(value)
    #  * self.check_type_string(value)
    #  * self.check_type_string_list(value)
    # Each of these routines will return True if
    # the value is appropriate or raise a
    # RolekitError if it is not.
    #
    # If you wish to add your own checks, this
    # function must return as follows:
    #  * True: The value passes all validation
    #  * False: The setting was unknown to this role
    #  * RolekitError: The value failed to pass validation
    # In the case of RolekitError, it is recommended to
    # provide an explanation of the failure as the msg
    # field of the exception.
    # Example:
    #  raise RolekitError(INVALID_VALUE,
    #                     "{0} must be at least eight characters"
    #                     .format(prop))

    if prop in ["cache_size"]:
        import psutil
        self.check_type_int(value)
        if value > psutil.virtual_memory().total / MiB_SIZE:
            raise RolekitError(INVALID_VALUE,
                               "Cache size exceeds physical memory")
        return True

    elif prop in ["connections"]:
        return self.check_type_int(value)

    elif prop in ["threads"]:
        self.check_type_int(value)
        # Up to four threads should be safe on any platform.
        # More than that should be limited by the available CPUs.
        if value <= 4:
            return True
        elif value > os.cpu_count():
            raise RolekitError(INVALID_VALUE,
                               "Number of threads exceeds available CPUs")
        return True

    # We didn't recognize this argument
    return False
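# Illustrative sketch (not part of the original source): a settings payload
# that the checks above accept. `role` is a hypothetical instance of this
# memcached role class; the numeric values are examples only.
example_memcached_values = {
    "cache_size": 256,    # MiB; must not exceed physical memory
    "connections": 1024,  # any int passes check_type_int()
    "threads": 4,         # <= 4 always allowed; above 4 limited by CPU count
}
# role.check_values(example_memcached_values)  # raises RolekitError on bad input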
def __deploy_async(self, name, values):
    values = dbus_to_python(values)
    name = dbus_to_python(name)
    log.debug1("%s.deploy('%s', %s)", self._log_prefix, name, values)

    # limit role instances to max instances per role
    if len(self._instances) >= self._role._MAX_INSTANCES:
        raise RolekitError(TOO_MANY_INSTANCES,
                           "> %d" % self._role._MAX_INSTANCES)

    # TODO: lock

    # Create the settings object. If no name has been passed in,
    # this function will create one from the next available value.
    # Note: this isn't protected by a lock, so name-generation
    # might be racy.
    settings = RoleSettings(self.get_name(), name)

    # create escaped name and check if it is already in use
    instance_escaped_name = dbus_label_escape(settings.get_name())
    if instance_escaped_name in self._instances:
        raise RolekitError(NAME_CONFLICT, instance_escaped_name)

    try:
        settings.read()
    except ValueError:
        raise RolekitError(NAME_CONFLICT, settings.filename)
    except IOError:
        pass
    else:
        raise RolekitError(NAME_CONFLICT, settings.filename)

    # create role
    role = self._role(self, settings.get_name(), self.get_name(),
                      self._directory, settings, self.busname,
                      "%s/%s/%s" % (DBUS_PATH_ROLES, self._escaped_name,
                                    instance_escaped_name),
                      persistent=self.persistent)
    self._instances[instance_escaped_name] = role
    self.InstanceAdded(instance_escaped_name)

    # TODO: unlock

    # deploy role, lock in role now
    result = yield async.call_future(role.deploy_async(values))
    yield result
def installPackages(self):
    """install packages"""
    log.debug1("%s.installPackages()", self._log_prefix)

    # are there any groups or packages to install?
    if len(self._settings["packages"]) < 1:
        log.debug1("No groups or packages to install")
        yield None
        return

    # There is a bug in DNF where it will return exit code 1 if
    # all of the packages requested are @group and their contents
    # are already installed. There's a hacky workaround by adding a
    # non-@group package to the command line, so we'll add rolekit.
    dnf_install = ["dnf", "-y", "install", "rolekit"] + \
                  self._settings["packages"]

    result = yield async.subprocess_future(dnf_install)

    if result.status:
        # If the subprocess returned non-zero, raise an exception
        raise RolekitError(COMMAND_FAILED, "%d" % result.status)

    # Completed successfully
    yield None
def get_property(self, prop):
    if prop == "name":
        return self.get_name()
    elif prop == "DEFAULTS":
        return self._role._DEFAULTS

    raise RolekitError(UNKNOWN_SETTING, prop)
def Set(self, interface_name, property_name, new_value, sender=None):
    interface_name = dbus_to_python(interface_name)
    property_name = dbus_to_python(property_name)
    new_value = dbus_to_python(new_value)
    log.debug1("%s.Set('%s', '%s', '%s')", self._log_prefix,
               interface_name, property_name, new_value)

    if interface_name != DBUS_INTERFACE_ROLE:
        raise dbus.exceptions.DBusException(
            "org.freedesktop.DBus.Error.UnknownInterface: "
            "RolekitD does not implement %s" % interface_name)

    if property_name in self._exported_rw_properties:
        if not hasattr(self, "__check_%s" % property_name):
            raise RolekitError(MISSING_CHECK, property_name)
        x = getattr(self, "__check_%s" % property_name)
        x(new_value)
        self._settings.set(property_name, new_value)
        self._settings.write()
        self.PropertiesChanged(interface_name,
                               {property_name: new_value}, [])
    elif property_name in self._exported_ro_properties:
        raise dbus.exceptions.DBusException(
            "org.freedesktop.DBus.Error.PropertyReadOnly: "
            "Property '%s' is read-only" % property_name)
    else:
        raise dbus.exceptions.DBusException(
            "org.freedesktop.DBus.Error.AccessDenied: "
            "Property '%s' does not exist" % property_name)
def do_get_dbus_property(x, prop): # This method tells rolekit what D-BUS type to use for each # of this role's custom settings. # # Examples: if prop in ["string1", "string2"]: return dbus.String(x.get_property(x, prop)) elif prop in ["array1", "array2"]: # This assumes array1 and array2 are arrays of # strings. return dbus.Array(x.get_property(x, prop), "s") elif prop in ["bool1", "bool2"]: return dbus.Boolean(x.get_property(x, prop)) elif prop in ["int1", "int2"]: return dbus.Int32(x.get_property(x, prop)) elif prop in ["dict1"]: # This example dictionary is a string key with # an array of strings as the value return dbus.Dictionary(x.get_property(x, prop), "sas") # If you have any arguments that should be "write-only" # (such as passwords used only for the initial deployment), # include them here and raise a RolekitError: # if prop in [ "password" ]: # raise RolekitError(UNKNOWN_SETTING, prop) # Lastly, always fall through to INVALID_PROPERTY if # the setting is unknown. raise RolekitError(INVALID_PROPERTY, prop)
def getNamedInstance(self, name, sender=None):
    """ return the role instance with the given name, otherwise raise an error """
    name = dbus_to_python(name)
    log.debug1("%s.getNamedInstance('%s')", self._log_prefix, name)

    instance_escaped_name = dbus_label_escape(name)

    if instance_escaped_name in self._instances:
        return self._instances[instance_escaped_name]

    raise RolekitError(INVALID_INSTANCE, name)
def getNamedRole(self, name, sender=None):
    """ return the role with the given name, otherwise raise an error """
    name = dbus_to_python(name)
    log.debug1("getNamedRole('%s')", name)

    for obj in self._roles:
        if obj.get_name() == name:
            return obj

    raise RolekitError(INVALID_ROLE, name)
def do_get_dbus_property(x, prop): # Cover additional settings and return a proper dbus type. if prop in [ "database", "owner", "password", "postgresql_conf", "pg_hba_conf" ]: return dbus.String(x.get_property(x, prop)) raise RolekitError(INVALID_PROPERTY, prop)
def do_decommission_async(self, force=False, sender=None):
    # We need to run the FreeIPA uninstallation
    result = yield async.subprocess_future(
        ['ipa-server-install', '-U', '--uninstall'])
    if result.status:
        # Something went wrong with the uninstall
        raise RolekitError(COMMAND_FAILED, "%d" % result.status)

    yield None
def check_type_string(self, value):
    """
    Checks if value is of type string.
    Raises an INVALID_VALUE error if the value does not match,
    returns True otherwise.
    """
    if type(value) is not str:
        raise RolekitError(INVALID_VALUE, "%s is not a string" % value)
    return True

def check_type_bool(self, value):
    """
    Checks if value is of type bool.
    Raises an INVALID_VALUE error if the value does not match,
    returns True otherwise.
    """
    if type(value) is not bool:
        raise RolekitError(INVALID_VALUE, "%s is not bool." % value)
    return True

def check_type_list(self, value):
    """
    Checks if value is of type list.
    Raises an INVALID_VALUE error if the value does not match,
    returns True otherwise.
    """
    if type(value) is not list:
        raise RolekitError(INVALID_VALUE, "%s is not a list" % value)
    return True

def validate_name(name):
    """
    Check instance name for valid chars.
    See INSTANCE_NAME_REGEXP in rolekit.config.
    Raises an INVALID_NAME error if the name is not valid,
    returns True otherwise.
    """
    if not re.match(VALIDATE_NAME_REGEXP, name):
        raise RolekitError(INVALID_NAME, "'%s' is not a valid name." % name)
    return True
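# Illustrative usage (not part of the original source), assuming the
# configured regexp accepts simple alphanumeric names:
# validate_name("db01")       # -> True
# validate_name("bad name!")  # -> raises RolekitError(INVALID_NAME, ...)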
def do_get_dbus_property(x, prop): # This method tells rolekit what D-BUS type to use for each # of this role's custom settings. if prop in ["connections", "threads"]: return dbus.Int32(x.get_property(x, prop)) elif prop in ["cache_size"]: return dbus.Int64(x.get_property(x, prop)) # Lastly, always fall through to INVALID_PROPERTY if # the setting is unknown. raise RolekitError(INVALID_PROPERTY, prop)
def _check_property(self, prop, value):
    if prop in ["name", "type", "state", "lasterror"]:
        return self.check_type_string(value)
    elif prop in ["packages", "services", "firewall_zones"]:  # "backup_paths"
        return self.check_type_string_list(value)
    elif prop in ["version"]:
        return self.check_type_int(value)
    elif prop in ["firewall"]:
        self.check_type_dict(value)
        for x in value.keys():
            if x not in ["ports", "services"]:
                raise RolekitError(INVALID_VALUE, "wrong key '%s'" % x)
            self.check_type_string_list(value[x])
        if "ports" in value:
            for x in value["ports"]:
                try:
                    port, proto = x.split("/")
                except:
                    raise RolekitError(INVALID_VALUE,
                                       "Port %s is invalid" % x)
                p_range = getPortRange(port)
                if p_range == -2:
                    raise RolekitError(INVALID_VALUE,
                                       "Port '%s' is too big" % port)
                elif p_range == -1:
                    raise RolekitError(INVALID_VALUE,
                                       "Port range '%s' is invalid" % port)
                elif p_range is None:
                    raise RolekitError(INVALID_VALUE,
                                       "Port range '%s' is ambiguous" % port)
                elif len(p_range) == 2 and p_range[0] >= p_range[1]:
                    raise RolekitError(INVALID_VALUE,
                                       "Port range '%s' is invalid" % port)
                if proto not in ["tcp", "udp"]:
                    raise RolekitError(
                        INVALID_VALUE,
                        "Protocol '%s' not from {'tcp'|'udp'}" % proto)
        # all firewall keys and ports validated
        return True
    elif prop in ["custom_firewall"]:
        return self.check_type_bool(value)

    elif hasattr(self, "do_check_property"):
        if self.do_check_property(prop, value):
            return True

    raise RolekitError(MISSING_CHECK, prop)
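# Illustrative sketch (not part of the original source): a "firewall" value
# that passes the validation above. Port entries must be "<port>/<proto>" or
# "<low>-<high>/<proto>" with proto in {tcp, udp}; the service names are examples.
example_firewall = {
    "ports": ["8080/tcp", "5000-5010/udp"],
    "services": ["http", "https"],
}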
def __init__(self, image_name=None, container_name=None, desc=None,
             env=None, ports=None):
    if not image_name:
        raise RolekitError("Missing container image name")
    if not desc:
        raise RolekitError("Missing description")
    if not ports:
        raise RolekitError("No ports specified")

    self.image_name = image_name
    self.container_name = container_name
    self.desc = desc
    self.ports = ports

    if env:
        self.env = env
    else:
        self.env = {}
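# Illustrative usage (not part of the original source): constructing the unit,
# mirroring the call in the memcached role's do_deploy_async below. The image
# name and port are examples; MEMCACHED_DOCKER_IMAGE and MEMCACHED_DEFAULT_PORT
# are the constants actually used there.
# unit = SystemdContainerServiceUnit(
#     image_name="fedora/memcached",            # hypothetical image name
#     container_name="memcached_myinstance",
#     desc="memcached docker container - myinstance",
#     env={"MEMCACHED_CACHE_SIZE": "256"},
#     ports=("11211:11211/tcp", "11211:11211/udp"))
# unit.write()  # writes out the generated .service file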
def do_check_property(self, prop, value): if prop in ["database", "owner"]: return self.check_type_string(value) elif prop in ["password"]: self.check_type_string(value) if len(value) < 8: raise RolekitError( INVALID_VALUE, "{0} must be at least eight characters".format(prop)) return True elif prop in ["postgresql_conf", "pg_hba_conf"]: self.check_type_string(value) if not os.path.isfile(value): raise RolekitError( INVALID_VALUE, "{0} is not a valid configuration file".format(value)) return True return False
def get_property(x, prop):
    if hasattr(x, "_settings") and prop in x._settings:
        return x._settings[prop]
    if prop == "name":
        return x.get_name()
    elif prop == "type":
        return x.get_type()
    elif prop == "state":
        return ""
    elif prop == "lasterror":
        return ""
    elif prop in x._DEFAULTS:
        return x._DEFAULTS[prop]

    raise RolekitError(UNKNOWN_SETTING, prop)
def do_get_dbus_property(x, prop): # Cover additional settings and return a proper dbus type. if prop in [ "domain_name", "realm_name", "host_name", "admin_password", "dm_password", "root_ca_file", "primary_ip" ]: return dbus.String(x.get_property(x, prop)) elif prop in ["reverse_zone"]: return dbus.Array(x.get_property(x, prop), "s") elif prop in ["serve_dns"]: return dbus.Boolean(x.get_property(x, prop)) elif prop in ["id_start", "id_max"]: return dbus.Int32(x.get_property(x, prop)) elif prop in ["dns_forwarders"]: return dbus.Dictionary(x.get_property(x, prop), "sas") raise RolekitError(INVALID_PROPERTY, prop)
def change_state(self, state, error="", write=False): # change the state of the instance to state if it is valid and not in # this state already if state not in PERSISTENT_STATES and \ state not in TRANSITIONAL_STATES: raise RolekitError(INVALID_STATE, state) if "lasterror" not in self._settings or \ self._settings["lasterror"] != error: # emit PropertiesChanged only if lasterror really changed self._settings["lasterror"] = error # force write write = True if state != self._settings["state"]: self._settings["state"] = state self.StateChanged(state) if write: self._settings.write()
def stop_services_async(self):
    """stop_services_async"""
    log.debug1("%s.stop_services_async()", self._log_prefix)

    with SystemdJobHandler() as job_handler:
        target_unit = get_target_unit_name(self.get_type(), self.get_name())

        log.debug9("Disabling %s" % target_unit)
        disable_units([target_unit])

        log.debug9("Stopping %s" % target_unit)
        job_path = job_handler.manager.StopUnit(target_unit, "replace")
        job_handler.register_job(job_path)

        job_results = yield job_handler.all_jobs_done_future()
        if any([x for x in job_results.values()
                if x not in ("skipped", "done")]):
            details = ", ".join(["%s: %s" % item
                                 for item in job_results.items()])
            raise RolekitError(COMMAND_FAILED,
                               "Stopping services failed: %s" % details)
def do_deploy_async(self, values, sender=None): log.debug9("TRACE do_deploy_async(databaseserver)") # Do the magic # # In case of error raise an exception first_instance = True # Check whether this is the first instance of the database for value in self._parent.get_instances().values(): if ('databaseserver' == value.get_type() and self.get_name() != value.get_name() and self.get_state() in deployed_states): first_instance = False break # If the database name wasn't specified if 'database' not in values: # Use the instance name if it was manually specified if self.get_name()[0].isalpha(): values['database'] = self.get_name() else: # Either it was autogenerated or begins with a # non-alphabetic character; prefix it with db_ values['database'] = "db_%s" % self.get_name() if 'owner' not in values: # We'll default to db_owner values['owner'] = "db_owner" # We will assume the owner is new until adding them fails new_owner = True # Determine if a password was passed in, so we know whether to # suppress it from the settings list later. if 'password' in values: password_provided = True else: password_provided = False if 'postgresql_conf' not in values: values['postgresql_conf'] = self._settings['postgresql_conf'] if 'pg_hba_conf' not in values: values['pg_hba_conf'] = self._settings['pg_hba_conf'] # Get the UID and GID of the 'postgres' user try: self.pg_uid = pwd.getpwnam('postgres').pw_uid except KeyError: raise RolekitError(MISSING_ID, "Could not retrieve UID for postgres user") try: self.pg_gid = grp.getgrnam('postgres').gr_gid except KeyError: raise RolekitError(MISSING_ID, "Could not retrieve GID for postgres group") if first_instance: # Initialize the database on the filesystem initdb_args = ["/usr/bin/postgresql-setup", "--initdb"] log.debug2("TRACE: Initializing database") result = yield async .subprocess_future(initdb_args) if result.status: # If this fails, it may be just that the filesystem # has already been initialized. We'll log the message # and continue. log.debug1("INITDB: %s" % result.stdout) # Now we have to start the service to set everything else up # It's safe to start an already-running service, so we'll # just always make this call, particularly in case other instances # exist but aren't running. log.debug2("TRACE: Starting postgresql.service unit") try: with SystemdJobHandler() as job_handler: job_path = job_handler.manager.StartUnit( "postgresql.service", "replace") job_handler.register_job(job_path) log.debug2("TRACE: unit start job registered") job_results = yield job_handler.all_jobs_done_future() log.debug2("TRACE: unit start job concluded") if any([ x for x in job_results.values() if x not in ("skipped", "done") ]): details = ", ".join( ["%s: %s" % item for item in job_results.items()]) log.error("Starting services failed: {}".format(details)) raise RolekitError( COMMAND_FAILED, "Starting services failed: %s" % details) except Exception as e: log.error("Error received starting unit: {}".format(e)) raise # Next we create the owner log.debug2("TRACE: Creating owner of new database") createuser_args = ["/usr/bin/createuser", values['owner']] result = yield async .subprocess_future(createuser_args, uid=self.pg_uid, gid=self.pg_gid) if result.status: # If the subprocess returned non-zero, the user probably already exists # (such as when we're using db_owner). If the caller was trying to set # a password, they probably didn't realize this, so we need to throw # an exception. 
log.info1("User {} already exists in the database".format( values['owner'])) if password_provided: raise RolekitError(INVALID_SETTING, "Cannot set password on pre-existing user") # If no password was specified, we'll continue new_owner = False # If no password was requested, generate a random one here if not password_provided: values['password'] = generate_password() log.debug2("TRACE: Creating new database") createdb_args = [ "/usr/bin/createdb", values['database'], "-O", values['owner'] ] result = yield async .subprocess_future(createdb_args, uid=self.pg_uid, gid=self.pg_gid) if result.status: # If the subprocess returned non-zero, raise an exception raise RolekitError(COMMAND_FAILED, "Creating database failed: %d" % result.status) # Next, set the password on the owner # We'll skip this phase if the the user already existed if new_owner: log.debug2("TRACE: Setting password for database owner") pwd_args = [ ROLEKIT_ROLES + "/databaseserver/tools/rk_db_setpwd.py", "--database", values['database'], "--user", values['owner'] ] result = yield async .subprocess_future(pwd_args, stdin=values['password'], uid=self.pg_uid, gid=self.pg_gid) if result.status: # If the subprocess returned non-zero, raise an exception log.error("Setting owner password failed: {}".format( result.status)) raise RolekitError( COMMAND_FAILED, "Setting owner password failed: %d" % result.status) # If this password was provided by the user, don't save it to # the settings for later retrieval. That could be a security # issue if password_provided: values.pop("password", None) else: # Not a new owner # Never save the password to settings for an existing owner log.debug2("TRACE: Owner already exists, not setting password") values.pop("password", None) if first_instance: # Then update the server configuration to accept network # connections. log.debug2("TRACE: Opening access to external addresses") # edit postgresql.conf to add listen_addresses = '*' conffile = values['postgresql_conf'] bakfile = conffile + ".rksave" try: linkfile(conffile, bakfile) with open(conffile) as f: conflines = f.readlines() tweaking_rules = [{ 'regex': r"^\s*#?\s*listen_addresses\s*=.*", 'replace': r"listen_addresses = '*'", 'append_if_missing': True }] overwrite_safely( conffile, "".join(_tweak_lines(conflines, tweaking_rules))) except Exception as e: log.fatal("Couldn't write {!r}: {}".format(conffile, e)) # At this point, conffile is unmodified, otherwise # overwrite_safely() would have succeeded try: os.unlink(bakfile) except Exception as x: if not (isinstance(x, OSError) and x.errno == errno.ENOENT): log.error("Couldn't remove {!r}: {}".format( bakfile, x)) raise RolekitError( COMMAND_FAILED, "Opening access to external addresses in '{}'" "failed: {}".format(conffile, e)) # Edit pg_hba.conf to allow 'md5' auth on IPv4 and # IPv6 interfaces. 
conffile = values['pg_hba_conf'] bakfile = conffile + ".rksave" try: linkfile(conffile, bakfile) with open(conffile) as f: conflines = f.readlines() tweaking_rules = [{ 'regex': r"^\s*host((?:\s.*)$)", 'replace': r"#host\1" }, { 'regex': r"^\s*local(?:\s.*|)$", 'append': "# Use md5 method for all connections\nhost all all all md5" }] overwrite_safely( conffile, "".join(_tweak_lines(conflines, tweaking_rules))) except Exception as e: log.fatal("Couldn't write {!r}: {}".format(conffile, e)) # At this point, conffile is unmodified, otherwise # overwrite_safely() would have succeeded try: os.unlink(bakfile) except Exception as x: if not (isinstance(x, OSError) and x.errno == errno.ENOENT): log.error("Couldn't remove {!r}: {}".format( bakfile, x)) # Restore previous postgresql.conf from the backup conffile = values['postgresql_conf'] bakfile = conffile + ".rksave" try: os.rename(bakfile, conffile) except Exception as x: log.error( "Couldn't restore {!r} from backup {!r}: {}".format( conffile, bakfile, x)) raise RolekitError( COMMAND_FAILED, "Changing all connections to use md5 method in '{}'" "failed: {}".format(values['pg_hba_conf'], e)) # Restart the postgresql server to accept the new configuration log.debug2("TRACE: Restarting postgresql.service unit") with SystemdJobHandler() as job_handler: job_path = job_handler.manager.RestartUnit( "postgresql.service", "replace") job_handler.register_job(job_path) job_results = yield job_handler.all_jobs_done_future() if any([ x for x in job_results.values() if x not in ("skipped", "done") ]): details = ", ".join( ["%s: %s" % item for item in job_results.items()]) raise RolekitError( COMMAND_FAILED, "Restarting service failed: %s" % details) # Create the systemd target definition target = RoleDeploymentValues(self.get_type(), self.get_name(), "Database Server") target.add_required_units(['postgresql.service']) log.debug2("TRACE: Database server deployed") yield target
def do_deploy_async(self, values, sender=None): log.debug9("TRACE: do_deploy_async") # Run whatever series of actions are needed to deploy # this role in a meaningful way. # import docker # Get the default cache size # Find out how much RAM is available on the system if 'cache_size' not in values: # Do a late import of psutil. This will only get # used during a deployment, so we don't need to # have it as a dependency for rolekit itself import psutil # Get the total number of bytes in local system memory total_ram = psutil.virtual_memory().total # If 25% of the available memory is less than 1GB, use # that for the cache. if total_ram / 4 < GiB_SIZE: # Set cache_size in MiB values['cache_size'] = int(total_ram / 4 / MiB_SIZE) else: # Cap the default size at 1 GB in MiB values['cache_size'] = int(GiB_SIZE / MiB_SIZE) # Set defaults if "connections" not in values: values["connections"] = self._DEFAULTS["connections"] if "threads" not in values: values["threads"] = self._DEFAULTS["threads"] # Create a container for memcached and launch that log.debug2("Enabling the Docker container manager") # Enable and start the docker service enable_units(['docker.service']) log.debug2("Starting the Docker container manager") with SystemdJobHandler() as job_handler: job_path = job_handler.manager.StartUnit("docker.service", "replace") job_handler.register_job(job_path) job_results = yield job_handler.all_jobs_done_future() if any([ x for x in job_results.values() if x not in ("skipped", "done") ]): details = ", ".join( ["%s: %s" % item for item in job_results.items()]) raise RolekitError( COMMAND_FAILED, "Starting docker.service failed: %s" % details) log.debug2("Pulling %s image from Docker Hub" % MEMCACHED_DOCKER_IMAGE) dockerclient = docker.Client( base_url=docker.utils.utils.DEFAULT_UNIX_SOCKET, version='auto') # First, pull down the latest version of the memcached container dockerclient.pull(MEMCACHED_DOCKER_IMAGE, tag="latest") log.debug2("Creating systemd service unit") # Generate a systemd service unit for this container container_unit = SystemdContainerServiceUnit( image_name=MEMCACHED_DOCKER_IMAGE, container_name="memcached_%s" % self.get_name(), desc="memcached docker container - %s" % self.get_name(), env={ "MEMCACHED_CACHE_SIZE": str(values['cache_size']), "MEMCACHED_CONNECTIONS": str(values['connections']), "MEMCACHED_THREADS": str(values['threads']) }, ports=("{0}:{0}/tcp".format(MEMCACHED_DEFAULT_PORT), "{0}:{0}/udp".format(MEMCACHED_DEFAULT_PORT))) container_unit.write() # Make systemd load this new unit file log.debug2("Running systemd daemon-reload") with SystemdJobHandler() as job_handler: job_handler.manager.Reload() # Return the target information target = RoleDeploymentValues(self.get_type(), self.get_name(), "Memory Cache") target.add_required_units(['memcached_%s.service' % self.get_name()]) log.debug9("TRACE: exiting do_deploy_async") yield target
def do_check_property(self, prop, value): if prop in ["realm_name"]: return self.check_type_string(value) elif prop in ["admin_password", "dm_password"]: self.check_type_string(value) if len(value) < 8: raise RolekitError( INVALID_VALUE, "{0} must be at least eight characters".format(prop)) return True elif prop in ["host_name"]: self.check_type_string(value) if not self._valid_fqdn(value): raise RolekitError(INVALID_VALUE, "Invalid hostname: %s" % value) return True elif prop in ["domain_name"]: self.check_type_string(value) if not self._valid_fqdn(value): raise RolekitError(INVALID_VALUE, "Invalid domain name: %s" % value) return True elif prop in ["root_ca_file"]: self.check_type_string(value) if not os.path.isfile(value): raise RolekitError(INVALID_VALUE, "{0} is not a valid CA file".format(value)) return True if prop in ["reverse_zone"]: # TODO: properly parse reverse zones here # Getting this right is very complex and # FreeIPA already does it internally. return self.check_type_string_list(value) elif prop in ["serve_dns"]: return self.check_type_bool(value) elif prop in ["id_start", "id_max"]: return self.check_type_int(value) elif prop in ["dns_forwarders"]: self.check_type_dict(value) for family in value.keys(): self.check_type_string(family) if family not in ["ipv4", "ipv6"]: raise RolekitError( INVALID_VALUE, "{0} is not a supported IP family".format(family)) self.check_type_string_list(value[family]) for address in value[family]: try: IP(address) except ValueError as ve: raise RolekitError( INVALID_VALUE, "{0} is not a valid IP address".format(address)) return True elif prop in ["primary_ip"]: try: IP(value) except ValueError as ve: raise RolekitError( INVALID_VALUE, "{} is not a valid IP address: {}".format(value, ve)) return True # We didn't recognize this argument return False
def do_get_dbus_property(x, prop): # Cover additional settings and return a proper dbus type. if prop == "myownsetting": return dbus.String(x.get_property(x, prop)) raise RolekitError(INVALID_PROPERTY, prop)
def do_deploy_async(self, values, sender=None): log.debug9("TRACE: do_deploy_async") # Do the magic # # In case of error raise an exception # Get the domain name from the passed-in settings # or set it to the instance name if ommitted if 'domain_name' not in values: values['domain_name'] = self.get_name() if not self._valid_fqdn(values['domain_name']): raise RolekitError( INVALID_VALUE, "Invalid domain name: %s" % values['domain_name']) if "host_name" not in values: # Let's construct a new host name. host_part = self._get_hostname() if host_part.startswith("localhost"): # We'll assign a random hostname starting with "dc-" random_part = ''.join( random.choice(string.ascii_lowercase) for _ in range(16)) host_part = "dc-%s" % random_part values['host_name'] = "%s.%s" % (host_part, values['domain_name']) if not self._valid_fqdn(values['host_name']): raise RolekitError(INVALID_VALUE, "Invalid host name: %s" % values['host_name']) # Change the hostname with the hostnamectl API yield set_hostname(values['host_name']) # If left unspecified, default the realm to the # upper-case version of the domain name if 'realm_name' not in values: values['realm_name'] = values['domain_name'].upper() # If left unspecified, assign a random password for # the administrative user if 'admin_password' not in values: admin_pw_provided = False values['admin_password'] = generate_password() else: admin_pw_provided = True # If left unspecified, assign a random password for # the directory manager if 'dm_password' not in values: dm_pw_provided = False values['dm_password'] = generate_password() else: dm_pw_provided = True # Call ipa-server-install with the requested arguments ipa_install_args = [ 'ipa-server-install', '-U', '-r', values['realm_name'], '-d', values['domain_name'], '-p', values['dm_password'], '-a', values['admin_password'], ] # If the user has requested the DNS server, enable it if 'serve_dns' not in values: values['serve_dns'] = self._settings['serve_dns'] if values['serve_dns']: ipa_install_args.append('--setup-dns') # Pass the primary IP address if 'primary_ip' in values: ipa_install_args.append('--ip-address=%s' % values['primary_ip']) # if the user has requested DNS forwarders, add them if 'dns_forwarders' in values: [ ipa_install_args.append("--forwarder=%s" % x) for x in values['dns_forwarders']['ipv4'] ] [ ipa_install_args.append("--forwarder=%s" % x) for x in values['dns_forwarders']['ipv6'] ] else: ipa_install_args.append('--no-forwarders') # If the user has requested the reverse zone add it if 'reverse_zone' in values: for zone in values['reverse_zone']: ipa_install_args.append('--reverse-zone=%s' % zone) else: ipa_install_args.append('--no-reverse') # If the user has requested a specified ID range, # set up the argument to ipa-server-install if 'id_start' in values or 'id_max' in values: if ('id_start' not in values or 'id_max' not in values or not values['id_start'] or not values['id_max']): raise RolekitError( INVALID_VALUE, "Must specify id_start and id_max together") if (values['id_start'] and values['id_max'] <= values['id_start']): raise RolekitError(INVALID_VALUE, "id_max must be greater than id_start") ipa_install_args.append('--idstart=%d' % values['id_start']) ipa_install_args.append('--idmax=%d' % values['id_max']) # TODO: If the user has specified a root CA file, # set up the argument to ipa-server-install # Remove the passwords from the values so # they won't be saved to the settings if admin_pw_provided: values.pop('admin_password', None) if dm_pw_provided: 
values.pop('dm_password', None) result = yield async .subprocess_future(ipa_install_args) if result.status: # If the subprocess returned non-zero, raise an exception raise RolekitError(COMMAND_FAILED, "%d" % result.status) # Create the systemd target definition target = RoleDeploymentValues(self.get_type(), self.get_name(), "Domain Controller") target.add_required_units(['ipa.service']) # We're done! yield target
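# Worked example (not part of the original source): for hypothetical inputs
#   {'domain_name': 'example.test', 'serve_dns': True,
#    'dns_forwarders': {'ipv4': ['192.0.2.1'], 'ipv6': []}}
# the argument list built above would resemble:
#   ['ipa-server-install', '-U',
#    '-r', 'EXAMPLE.TEST', '-d', 'example.test',
#    '-p', '<generated dm_password>', '-a', '<generated admin_password>',
#    '--setup-dns', '--forwarder=192.0.2.1', '--no-reverse']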
def do_decommission_async(self, force=False, sender=None):
    # Do the magic
    #
    # In case of error raise an exception

    # Get the UID and GID of the 'postgres' user
    try:
        self.pg_uid = pwd.getpwnam('postgres').pw_uid
    except KeyError:
        raise RolekitError(MISSING_ID,
                           "Could not retrieve UID for postgres user")

    try:
        self.pg_gid = grp.getgrnam('postgres').gr_gid
    except KeyError:
        raise RolekitError(MISSING_ID,
                           "Could not retrieve GID for postgres group")

    # Check whether this is the last instance of the database
    last_instance = True
    for value in self._parent.get_instances().values():
        # Check if there are any other instances of databaseserver.
        # We have to exclude our own instance name since it hasn't
        # been removed yet.
        if 'databaseserver' == value.get_type() and \
           self.get_name() != value.get_name():
            last_instance = False
            break

    # The postgresql service must be running to remove
    # the database and owner
    with SystemdJobHandler() as job_handler:
        job_path = job_handler.manager.StartUnit("postgresql.service",
                                                 "replace")
        job_handler.register_job(job_path)

        job_results = yield job_handler.all_jobs_done_future()

        if any([x for x in job_results.values()
                if x not in ("skipped", "done")]):
            details = ", ".join(["%s: %s" % item
                                 for item in job_results.items()])
            raise RolekitError(COMMAND_FAILED,
                               "Starting services failed: %s" % details)

    # Drop the database
    dropdb_args = ["/usr/bin/dropdb", "-w", "--if-exists",
                   self._settings['database']]
    result = yield async.subprocess_future(dropdb_args,
                                           uid=self.pg_uid,
                                           gid=self.pg_gid)
    if result.status:
        # If the subprocess returned non-zero, raise an exception
        raise RolekitError(COMMAND_FAILED,
                           "Dropping database failed: %d" % result.status)

    # Drop the owner
    dropuser_args = ["/usr/bin/dropuser", "-w", "--if-exists",
                     self._settings['owner']]
    result = yield async.subprocess_future(dropuser_args,
                                           uid=self.pg_uid,
                                           gid=self.pg_gid)
    if result.status:
        # If the subprocess returned non-zero, the user may
        # still be there. This is probably due to the owner
        # having privileges on other instances. This is non-fatal.
        log.error("Dropping owner failed: %d" % result.status)

    # If this is the last instance, restore the configuration
    if last_instance:
        try:
            os.rename("%s.rksave" % self._settings['pg_hba_conf'],
                      self._settings['pg_hba_conf'])
            os.rename("%s.rksave" % self._settings['postgresql_conf'],
                      self._settings['postgresql_conf'])
        except:
            log.error("Could not restore pg_hba.conf and/or postgresql.conf. "
                      "Manual intervention required")
            # Not worth stopping here.

        # Since this is the last instance, turn off the postgresql service
        with SystemdJobHandler() as job_handler:
            job_path = job_handler.manager.StopUnit("postgresql.service",
                                                    "replace")
            job_handler.register_job(job_path)

            job_results = yield job_handler.all_jobs_done_future()

            if any([x for x in job_results.values()
                    if x not in ("skipped", "done")]):
                details = ", ".join(["%s: %s" % item
                                     for item in job_results.items()])
                raise RolekitError(COMMAND_FAILED,
                                   "Stopping services failed: %s" % details)

    # Decommissioning complete
    yield None