def deleteAgreements(self, suffix=None):
    '''
    Delete all the replication agreements configured under the replica
    of the provided suffix.

    @param suffix - dn of the suffix whose agreements must be deleted
    @return None
    @raise InvalidArgumentError - if suffix is missing
           ldap.LDAPError - if the search or a delete operation fails
    '''
    # check the validity of the suffix
    if not suffix:
        self.log.fatal("disableReplication: suffix is missing")
        raise InvalidArgumentError("suffix missing")
    else:
        nsuffix = normalizeDN(suffix)

    # Build the replica config DN (only used for error reporting below)
    mtents = self.conn.mappingtree.list(suffix=nsuffix)
    mtent = mtents[0]
    dn_replica = ','.join((RDN_REPLICA, mtent.dn))

    # Delete the agreements
    try:
        agmts = self.conn.agreement.list(suffix=suffix)
        for agmt in agmts:
            try:
                self.conn.delete_s(agmt.dn)
            except ldap.LDAPError as e:
                # BUG FIX: was 'admt.dn' (NameError - the loop variable is
                # 'agmt') and 'e.message('desc')' (message is a dict, not a
                # callable).  Re-raise the caught exception instead of the
                # bare class so the caller sees the real error.
                self.log.fatal(
                    'Failed to delete replica agreement (%s), error: %s'
                    % (agmt.dn, e.message['desc']))
                raise
    except ldap.LDAPError as e:
        self.log.fatal(
            'Failed to search for replication agreements under (%s), error: %s'
            % (dn_replica, e.message['desc']))
        raise
def disableReplication(self, suffix=None):
    '''
    Delete a replica related to the provided suffix.
    If this replica role was REPLICAROLE_HUB or REPLICAROLE_MASTER, it
    also deletes the changelog associated to that replica.  If it exists
    some replication agreement below that replica, they are deleted.

    @param suffix - dn of suffix
    @return None
    @raise InvalidArgumentError - if suffix is missing
           ldap.LDAPError - for all other update failures
    '''
    # check the validity of the suffix
    if not suffix:
        self.log.fatal("disableReplication: suffix is missing")
        raise InvalidArgumentError("suffix missing")
    else:
        nsuffix = normalizeDN(suffix)

    # Build the replica config DN
    # NOTE(review): dn_replica is not used in the visible code below;
    # the mappingtree lookup is kept because it validates that a mapping
    # tree exists for the suffix (IndexError otherwise).
    mtents = self.conn.mappingtree.list(suffix=nsuffix)
    mtent = mtents[0]
    dn_replica = ','.join((RDN_REPLICA, mtent.dn))

    # Delete the agreements
    try:
        self.deleteAgreements(nsuffix)
    except ldap.LDAPError as e:
        # BUG FIX: was 'raise ldap.LDAPError' which discarded the caught
        # exception (and its diagnostic info); re-raise the original.
        self.log.fatal('Failed to delete replica agreements!')
        raise
def deleteAgreements(self, suffix=None):
    '''
    Delete all the replication agreements configured under the replica
    of the provided suffix.

    @param suffix - dn of the suffix whose agreements must be deleted
    @return None
    @raise InvalidArgumentError - if suffix is missing
           ldap.LDAPError - if the search or a delete operation fails
    '''
    # check the validity of the suffix
    if not suffix:
        self.log.fatal("disableReplication: suffix is missing")
        raise InvalidArgumentError("suffix missing")
    else:
        nsuffix = normalizeDN(suffix)

    # Build the replica config DN (only used for error reporting below)
    mtents = self.conn.mappingtree.list(suffix=nsuffix)
    mtent = mtents[0]
    dn_replica = ','.join((RDN_REPLICA, mtent.dn))

    # Delete the agreements
    try:
        agmts = self.conn.agreement.list(suffix=suffix)
        for agmt in agmts:
            try:
                self.conn.delete_s(agmt.dn)
            except ldap.LDAPError as e:
                # BUG FIX: was 'admt.dn' (NameError - the loop variable is
                # 'agmt') and 'e.message('desc')' (message is a dict, not a
                # callable).  Re-raise the caught exception instead of the
                # bare class so the caller sees the real error.
                self.log.fatal(
                    'Failed to delete replica agreement (%s), error: %s'
                    % (agmt.dn, e.message['desc']))
                raise
    except ldap.LDAPError as e:
        self.log.fatal(
            'Failed to search for replication agreements under (%s), error: %s'
            % (dn_replica, e.message['desc']))
        raise
def ruv(self, suffix, tryrepl=False):
    """Return a replica update vector for the given suffix.

    @param suffix - eg. 'o=netscapeRoot'
    @param tryrepl - when True and the tombstone RUV entry is absent,
                     fall back to the cn=replica entry under the
                     mapping tree
    @raises NoSuchEntryError if missing
    """
    # The RUV lives on a well-known tombstone entry under the suffix.
    filt = "(&(nsUniqueID=%s)(objectclass=%s))" % (REPLICA_RUV_UUID,
                                                   REPLICA_OC_TOMBSTONE)
    attrs = ['nsds50ruv', 'nsruvReplicaLastModified']
    ents = self.conn.search_s(suffix, ldap.SCOPE_SUBTREE, filt, attrs)

    ent = None
    if ents:
        ent = ents[0]
    elif tryrepl:
        # Fall back to the replica configuration entry.
        self.log.warn("Could not get RUV from %r entry - trying cn=replica" % suffix)
        ensuffix = escapeDNValue(normalizeDN(suffix))
        dn = ','.join(("cn=replica", "cn=%s" % ensuffix, DN_MAPPING_TREE))
        ents = self.conn.search_s(dn, ldap.SCOPE_BASE, "objectclass=*", attrs)
        if ents:
            ent = ents[0]

    if ent is not None:
        self.log.debug("RUV entry is %r" % ent)
        return RUV(ent)

    raise NoSuchEntryError("RUV not found: suffix: %r" % suffix)
def ruv(self, suffix, tryrepl=False):
    """Return a replica update vector for the given suffix.

    @param suffix - eg. 'o=netscapeRoot'
    @param tryrepl - retry against the cn=replica config entry when the
                     tombstone RUV entry cannot be found
    @raises NoSuchEntryError if missing
    """
    wanted_attrs = ['nsds50ruv', 'nsruvReplicaLastModified']
    tombstone_filter = "(&(nsUniqueID=%s)(objectclass=%s))" % (
        REPLICA_RUV_UUID, REPLICA_OC_TOMBSTONE)

    results = self.conn.search_s(suffix, ldap.SCOPE_SUBTREE,
                                 tombstone_filter, wanted_attrs)
    if not results and tryrepl:
        # Tombstone missing: retry against the replica config entry.
        self.log.warn(
            "Could not get RUV from %r entry - trying cn=replica" % suffix)
        ensuffix = escapeDNValue(normalizeDN(suffix))
        replica_dn = ','.join(("cn=replica", "cn=%s" % ensuffix,
                               DN_MAPPING_TREE))
        results = self.conn.search_s(replica_dn, ldap.SCOPE_BASE,
                                     "objectclass=*", wanted_attrs)

    if results:
        found = results[0]
        self.log.debug("RUV entry is %r" % found)
        return RUV(found)

    raise NoSuchEntryError("RUV not found: suffix: %r" % suffix)
def handle(self, dn, entry):
    """
    Append a single record both to the dictionary of all records
    (keyed by normalized DN) and to the ordered list of records.
    """
    # A missing/None DN is stored under the empty string.
    dn = dn or ''
    record = Entry((dn, entry))
    self.dndict[normalizeDN(dn)] = record
    self.dnlist.append(record)
def init(self, suffix=None, consumer_host=None, consumer_port=None):
    """Trigger a total update of the consumer replica
    - self is the supplier,
    - consumer is a DirSrv object (consumer can be a master)
    - cn_format - use this string to format the agreement name

    @param suffix - suffix targeted by the total update [mandatory]
    @param consumer_host - hostname of the consumer [mandatory]
    @param consumer_port - port of the consumer [mandatory]

    @raise InvalidArgument: if missing mandatory argument
                            (suffix/host/port)
    """
    # Guard clauses: all three arguments are mandatory.
    if not suffix:
        self.log.fatal("initAgreement: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory argument')
    nsuffix = normalizeDN(suffix)
    if not consumer_host:
        self.log.fatal("initAgreement: host is missing")
        raise InvalidArgumentError('host is mandatory argument')
    if not consumer_port:
        self.log.fatal("initAgreement: port is missing")
        raise InvalidArgumentError('port is mandatory argument')

    # The agreement entry lives one level below the replica entry.
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError("Error: no replica set up for suffix "
                               + suffix)
    replica_entry = replica_entries[0]
    self.log.debug(
        "initAgreement: looking for replica agreements under %s"
        % replica_entry.dn)

    agmt_filter = ("(&(objectclass=nsds5replicationagreement)"
                   "(nsds5replicahost=%s)(nsds5replicaport=%d)"
                   "(nsds5replicaroot=%s))"
                   % (consumer_host, consumer_port, nsuffix))
    try:
        entry = self.conn.getEntry(replica_entry.dn, ldap.SCOPE_ONELEVEL,
                                   agmt_filter)
    except ldap.NO_SUCH_OBJECT:
        self.log.fatal(
            "initAgreement: No replica agreement to %s:%d for suffix %s"
            % (consumer_host, consumer_port, nsuffix))
        raise

    # Writing 'start' into nsds5BeginReplicaRefresh kicks off the total
    # init on the server side.
    self.log.info("Starting total init %s" % entry.dn)
    mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', 'start')]
    self.conn.modify_s(entry.dn, mod)
def list(self, suffix=None, backend_dn=None, bename=None):
    """
    Returns a search result of the backend(s) entries with all their
    attributes

    If 'suffix'/'backend_dn'/'benamebase' are specified. It uses
    'backend_dn' first, then 'suffix', then 'benamebase'.

    If neither 'suffix', 'backend_dn' and 'benamebase' are specified,
    it returns all the backend entries.

    @param suffix - suffix of the backend
    @param backend_dn - DN of the backend entry
    @param bename - 'commonname'/'cn' of the backend (e.g. 'userRoot')

    @return backend entries

    @raise None
    """
    search_filter = "(objectclass=%s)" % BACKEND_OBJECTCLASS_VALUE

    if backend_dn:
        self.log.info("List backend %s", backend_dn)
        search_base = backend_dn
        search_scope = ldap.SCOPE_BASE
    elif suffix:
        self.log.info("List backend with suffix=%s", suffix)
        search_base = DN_PLUGIN
        search_scope = ldap.SCOPE_SUBTREE
        # Match the suffix both as provided and in normalized form.
        suffix_attr = BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX]
        search_filter = ("(&%s(|(%s=%s)(%s=%s)))"
                         % (search_filter,
                            suffix_attr, suffix,
                            suffix_attr, normalizeDN(suffix)))
    elif bename:
        self.log.info("List backend 'cn=%s'", bename)
        search_base = "%s=%s,%s" % (
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME], bename, DN_LDBM)
        search_scope = ldap.SCOPE_BASE
    else:
        self.log.info("List all the backends")
        search_base = DN_PLUGIN
        search_scope = ldap.SCOPE_SUBTREE

    try:
        return self.conn.search_s(search_base, search_scope, search_filter)
    except ldap.NO_SUCH_OBJECT:
        # Finding no backend at all is not an error for the caller.
        return None
def list(self, suffix=None, backend_dn=None, bename=None):
    """
    Returns a search result of the backend(s) entries with all their
    attributes

    If 'suffix'/'backend_dn'/'benamebase' are specified. It uses
    'backend_dn' first, then 'suffix', then 'benamebase'.

    If neither 'suffix', 'backend_dn' and 'benamebase' are specified,
    it returns all the backend entries.

    @param suffix - suffix of the backend
    @param backend_dn - DN of the backend entry
    @param bename - 'commonname'/'cn' of the backend (e.g. 'userRoot')

    @return backend entries

    @raise None
    """
    filt = "(objectclass=%s)" % BACKEND_OBJECTCLASS_VALUE

    if backend_dn:
        # Lazy logger args (was eager '%' formatting): the message is
        # only rendered when the level is enabled, and this matches the
        # convention of the sibling list() implementation.
        self.log.info("List backend %s", backend_dn)
        base = backend_dn
        scope = ldap.SCOPE_BASE
    elif suffix:
        self.log.info("List backend with suffix=%s", suffix)
        base = DN_PLUGIN
        scope = ldap.SCOPE_SUBTREE
        # Match the suffix both as provided and in normalized form.
        filt = ("(&%s(|(%s=%s)(%s=%s)))"
                % (filt,
                   BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX], suffix,
                   BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX],
                   normalizeDN(suffix)))
    elif bename:
        self.log.info("List backend 'cn=%s'", bename)
        base = "%s=%s,%s" % (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME],
                             bename, DN_LDBM)
        scope = ldap.SCOPE_BASE
    else:
        self.log.info("List all the backends")
        base = DN_PLUGIN
        scope = ldap.SCOPE_SUBTREE

    try:
        ents = self.conn.search_s(base, scope, filt)
    except ldap.NO_SUCH_OBJECT:
        # Finding no backend at all is not an error for the caller.
        return None

    return ents
def init(self, suffix=None, consumer_host=None, consumer_port=None):
    """Trigger a total update of the consumer replica
    - self is the supplier,
    - consumer is a DirSrv object (consumer can be a master)
    - cn_format - use this string to format the agreement name

    @param suffix - suffix targeted by the total update [mandatory]
    @param consumer_host - hostname of the consumer [mandatory]
    @param consumer_port - port of the consumer [mandatory]

    @raise InvalidArgument: if missing mandatory argument
                            (suffix/host/port)
    """
    #
    # check the required parameters are set
    #
    if not suffix:
        self.log.fatal("initAgreement: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory argument')

    nsuffix = normalizeDN(suffix)

    if not consumer_host:
        self.log.fatal("initAgreement: host is missing")
        raise InvalidArgumentError('host is mandatory argument')

    if not consumer_port:
        self.log.fatal("initAgreement: port is missing")
        raise InvalidArgumentError('port is mandatory argument')

    #
    # locate the replica agreement below the suffix's replica entry
    #
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError(
            "Error: no replica set up for suffix " + suffix)
    replica_entry = replica_entries[0]
    self.log.debug(
        "initAgreement: looking for replica agreements under %s"
        % replica_entry.dn)

    search_filter = ("(&(objectclass=nsds5replicationagreement)"
                     "(nsds5replicahost=%s)"
                     "(nsds5replicaport=%d)"
                     "(nsds5replicaroot=%s))"
                     % (consumer_host, consumer_port, nsuffix))
    try:
        entry = self.conn.getEntry(replica_entry.dn,
                                   ldap.SCOPE_ONELEVEL,
                                   search_filter)
    except ldap.NO_SUCH_OBJECT:
        self.log.fatal(
            "initAgreement: No replica agreement to %s:%d for suffix %s"
            % (consumer_host, consumer_port, nsuffix))
        raise

    #
    # trigger the total init by adding nsds5BeginReplicaRefresh=start
    #
    self.log.info("Starting total init %s" % entry.dn)
    refresh_mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', 'start')]
    self.conn.modify_s(entry.dn, refresh_mod)
def delete(self, suffix=None, backend_dn=None, bename=None):
    """
    Deletes the backend entry with the following steps:
    Delete the indexes entries under this backend
    Delete the encrypted attributes entries under this backend
    Delete the encrypted attributes keys entries under this backend

    If a mapping tree entry uses this backend (nsslapd-backend),
    it raise UnwillingToPerformError

    If 'suffix'/'backend_dn'/'benamebase' are specified.
    It uses 'backend_dn' first, then 'suffix', then 'benamebase'.

    If neither 'suffix', 'backend_dn' and 'benamebase' are specified,
    it raise InvalidArgumentError

    @param suffix - suffix of the backend
    @param backend_dn - DN of the backend entry
    @param bename - 'commonname'/'cn' of the backend (e.g. 'userRoot')

    @return None

    @raise InvalidArgumentError - if missing arguments or invalid
           UnwillingToPerformError - if several backends match the
                 argument provided suffix does not match backend suffix
                 It exists a mapping tree that use that backend
    """
    # First check the backend exists and retrieved its suffix
    be_ents = self.conn.backend.list(suffix=suffix,
                                     backend_dn=backend_dn,
                                     bename=bename)
    if len(be_ents) == 0:
        # no backend matched the given identifiers at all
        raise InvalidArgumentError(
            "Unable to retrieve the backend (%r, %r, %r)"
            % (suffix, backend_dn, bename))
    elif len(be_ents) > 1:
        # ambiguous: log every candidate before refusing
        for ent in be_ents:
            self.log.fatal("Multiple backend match the definition: %s"
                           % ent.dn)
        if (not suffix) and (not backend_dn) and (not bename):
            raise InvalidArgumentError(
                "suffix and backend DN and backend name are missing")
        raise UnwillingToPerformError(
            "Not able to identify the backend to delete")
    else:
        be_ent = be_ents[0]
        be_suffix = be_ent.getValue(
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX])

    # Verify the provided suffix is the one stored in the found backend
    if suffix:
        if normalizeDN(suffix) != normalizeDN(be_suffix):
            raise UnwillingToPerformError(
                "provided suffix (%s) differs from backend suffix (%s)"
                % (suffix, be_suffix))

    # now check there is no mapping tree for that suffix: deleting the
    # backend while a mapping tree still points at it would break it
    mt_ents = self.conn.mappingtree.list(suffix=be_suffix)
    if len(mt_ents) > 0:
        raise UnwillingToPerformError(
            "It still exists a mapping tree (%s) for that backend (%s)"
            % (mt_ents[0].dn, be_ent.dn))

    # Now delete the indexes
    found_bename = be_ent.getValue(
        BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME])
    if not bename:
        bename = found_bename
    elif bename != found_bename:
        # caller-provided name must agree with what was actually found
        raise UnwillingToPerformError(
            "Backend name specified (%s) differs from the retrieved one (%s)"
            % (bename, found_bename))
    self.conn.index.delete_all(bename)

    # finally delete the backend children and the backend itself
    ents = self.conn.search_s(be_ent.dn, ldap.SCOPE_ONELEVEL)
    for ent in ents:
        self.log.debug("Delete entry children %s" % (ent.dn))
        self.conn.delete_s(ent.dn)

    self.log.debug("Delete backend entry %s" % (be_ent.dn))
    self.conn.delete_s(be_ent.dn)

    return
def enableReplication(self, suffix=None, role=None,
                      replicaId=CONSUMER_REPLICAID, binddn=None):
    """
    Enable replication for the given suffix: create the changelog when
    the role requires one (master/hub), ensure the default replication
    manager entry exists, then create the replica entry.

    @param suffix - dn of the suffix to replicate
    @param role - REPLICAROLE_MASTER, REPLICAROLE_HUB or
                  REPLICAROLE_CONSUMER
    @param replicaId - replica identifier; must be in
                       [1..CONSUMER_REPLICAID[ for a master, and exactly
                       CONSUMER_REPLICAID for hub/consumer
    @param binddn - DN of the replication manager (defaults to
                    defaultProperties[REPLICATION_BIND_DN])
    @return the value returned by replica.create()
    @raise ValueError - missing/invalid suffix, role or replicaId
    """
    if not suffix:
        self.log.fatal("enableReplication: suffix not specified")
        raise ValueError("suffix missing")

    if not role:
        self.log.fatal(
            "enableReplication: replica role not specify (REPLICAROLE_*)")
        raise ValueError("role missing")

    #
    # Check the validity of the parameters
    #

    # First role and replicaID
    if role != REPLICAROLE_MASTER and role != REPLICAROLE_HUB and \
       role != REPLICAROLE_CONSUMER:
        self.log.fatal("enableReplication: replica role invalid (%s) "
                       % role)
        raise ValueError("invalid role: %s" % role)
    # (the replica read-write/read-only type is derived from the role by
    # replica.create(); no need to compute it here)

    if role == REPLICAROLE_MASTER:
        # check the replicaId [1..CONSUMER_REPLICAID[
        if not decimal.Decimal(replicaId) or (replicaId <= 0) or (
                replicaId >= CONSUMER_REPLICAID):
            self.log.fatal(
                "enableReplication: invalid replicaId (%s) for a RW replica"
                % replicaId)
            raise ValueError(
                "invalid replicaId %d (expected [1..CONSUMER_REPLICAID["
                % replicaId)
    elif replicaId != CONSUMER_REPLICAID:
        # check the replicaId is CONSUMER_REPLICAID
        self.log.fatal(
            "enableReplication: invalid replicaId (%s) for a Read replica (expected %d)"
            % (replicaId, CONSUMER_REPLICAID))
        raise ValueError(
            "invalid replicaId: %d for HUB/CONSUMER replicaId is CONSUMER_REPLICAID"
            % replicaId)

    # Now check we have a suffix
    entries_backend = self.conn.backend.list(suffix=suffix)
    if not entries_backend:
        self.log.fatal(
            "enableReplication: enable to retrieve the backend for %s"
            % suffix)
        raise ValueError("no backend for suffix %s" % suffix)

    ent = entries_backend[0]
    if normalizeDN(suffix) != normalizeDN(ent.getValue('nsslapd-suffix')):
        # BUG FIX: the warning previously evaluated
        # 'entries_backend[0].nsslapd - suffix', which Python parses as a
        # subtraction (runtime error), not an attribute access.  Use
        # getValue() exactly as the comparison above does.
        self.log.warning(
            "enableReplication: suffix (%s) and backend suffix (%s) differs"
            % (suffix, ent.getValue('nsslapd-suffix')))
        pass

    # Now prepare the bindDN property
    if not binddn:
        binddn = defaultProperties.get(REPLICATION_BIND_DN, None)
        if not binddn:
            # weird, internal error we do not retrieve the default
            # replication bind DN this replica will not be updatable
            # through replication until the binddn property will be set
            self.log.warning(
                "enableReplication: binddn not provided and default value unavailable")
            pass

    # Now do the effectif job
    # First add the changelog if master/hub
    if (role == REPLICAROLE_MASTER) or (role == REPLICAROLE_HUB):
        self.conn.changelog.create()

    # Second create the default replica manager entry if it does not exist
    # it should not be called from here but for the moment I am unsure
    # when to create it elsewhere
    self.conn.replica.create_repl_manager()

    # then enable replication
    properties = {REPLICA_BINDDN: [binddn]}
    ret = self.conn.replica.create(suffix=suffix, role=role,
                                   rid=replicaId, args=properties)

    return ret
def create(self, suffix=None, host=None, port=None, properties=None):
    """Create (and return) a replication agreement from self to consumer.
    - self is the supplier,

    @param consumer: one of the following (consumer can be a master)
            * a DirSrv object if chaining
            * an object with attributes: host, port, sslport, __str__
    @param suffix - eg. 'dc=babel,dc=it'
    @param properties - further properties dict.
    Support properties
            RA_NAME
            RA_SUFFIX
            RA_BINDDN
            RA_BINDPW
            RA_METHOD
            RA_DESCRIPTION
            RA_SCHEDULE
            RA_TRANSPORT_PROT
            RA_FRAC_EXCLUDE
            RA_FRAC_EXCLUDE_TOTAL_UPDATE
            RA_FRAC_STRIP
            RA_CONSUMER_PORT
            RA_CONSUMER_HOST
            RA_CONSUMER_TOTAL_INIT
            RA_TIMEOUT
            RA_CHANGES

    @return dn_agreement - DN of the created agreement

    @raise InvalidArgumentError - If the suffix is missing
    @raise NosuchEntryError - if a replica doesn't exist for that suffix
    @raise UNWILLING_TO_PERFORM if the database was previously
           in read-only state. To create new agreements you
           need to *restart* the directory server
    """
    import string

    # Check we have a suffix [ mandatory ]
    if not suffix:
        self.log.warning("create: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory')

    # Resolve each agreement property, falling back to the defaults
    # when a property is absent (or the whole dict is missing).
    if properties:
        binddn = properties.get(
            RA_BINDDN) or defaultProperties[REPLICATION_BIND_DN]
        bindpw = properties.get(
            RA_BINDPW) or defaultProperties[REPLICATION_BIND_PW]
        bindmethod = properties.get(
            RA_METHOD) or defaultProperties[REPLICATION_BIND_METHOD]
        format = properties.get(RA_NAME) or r'meTo_$host:$port'
        description = properties.get(RA_DESCRIPTION) or format
        transport = properties.get(
            RA_TRANSPORT_PROT) or defaultProperties[REPLICATION_TRANSPORT]
        timeout = properties.get(
            RA_TIMEOUT) or defaultProperties[REPLICATION_TIMEOUT]
    else:
        binddn = defaultProperties[REPLICATION_BIND_DN]
        bindpw = defaultProperties[REPLICATION_BIND_PW]
        bindmethod = defaultProperties[REPLICATION_BIND_METHOD]
        format = r'meTo_$host:$port'
        description = format
        transport = defaultProperties[REPLICATION_TRANSPORT]
        timeout = defaultProperties[REPLICATION_TIMEOUT]

    # Compute the normalized suffix to be set in RA entry
    nsuffix = normalizeDN(suffix)

    # adding agreement under the replica entry
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError(
            "Error: no replica set up for suffix " + suffix)
    replica = replica_entries[0]

    # define agreement entry; the cn is the RA_NAME template expanded
    # with the consumer host/port
    cn = string.Template(format).substitute({'host': host, 'port': port})
    dn_agreement = ','.join(["cn=%s" % cn, replica.dn])

    # This is probably unnecessary because
    # we can just raise ALREADY_EXISTS
    try:
        entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE)
        self.log.warn("Agreement already exists: %r" % dn_agreement)
        return dn_agreement
    except ldap.NO_SUCH_OBJECT:
        entry = None

    # In a separate function in this scope?
    entry = Entry(dn_agreement)
    entry.update({
        'objectclass': ["top", RA_OBJECTCLASS_VALUE],
        RA_PROPNAME_TO_ATTRNAME[RA_NAME]: cn,
        RA_PROPNAME_TO_ATTRNAME[RA_SUFFIX]: nsuffix,
        RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_HOST]: host,
        RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_PORT]: str(port),
        RA_PROPNAME_TO_ATTRNAME[RA_TRANSPORT_PROT]: transport,
        RA_PROPNAME_TO_ATTRNAME[RA_TIMEOUT]: str(timeout),
        RA_PROPNAME_TO_ATTRNAME[RA_BINDDN]: binddn,
        RA_PROPNAME_TO_ATTRNAME[RA_BINDPW]: bindpw,
        RA_PROPNAME_TO_ATTRNAME[RA_METHOD]: bindmethod,
        RA_PROPNAME_TO_ATTRNAME[RA_DESCRIPTION]:
            string.Template(description).substitute({'host': host,
                                                     'port': port})
    })

    # we make a copy here because we cannot change
    # the passed in properties dict
    propertiescopy = {}
    if properties:
        import copy
        propertiescopy = copy.deepcopy(properties)

    # further arguments
    if 'winsync' in propertiescopy:  # state it clearly!
        self.conn.setupWinSyncAgmt(propertiescopy, entry)

    try:
        self.log.debug("Adding replica agreement: [%r]" % entry)
        self.conn.add_s(entry)
    except:
        # FIXME check please!
        raise

    entry = self.conn.waitForEntry(dn_agreement)
    if entry:
        # More verbose but shows what's going on
        if 'chain' in propertiescopy:
            raise NotImplementedError
            # NOTE(review): everything below the raise is unreachable
            # dead code kept from the original; 'consumer' is not defined
            # in this scope and would NameError if ever revived - confirm
            # before re-enabling.
            chain_args = {
                'suffix': suffix,
                'binddn': binddn,
                'bindpw': bindpw
            }
            # Work on `self` aka producer
            if replica.nsds5replicatype == MASTER_TYPE:
                self.conn.setupChainingFarm(**chain_args)
            # Work on `consumer`
            # TODO - is it really required?
            if replica.nsds5replicatype == LEAF_TYPE:
                chain_args.update({
                    'isIntermediate': 0,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
            elif replica.nsds5replicatype == HUB_TYPE:
                chain_args.update({
                    'isIntermediate': 1,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
    return dn_agreement
def create(self, suffix=None, role=None, rid=None, args=None): """ Create a replica entry on an existing suffix. @param suffix - dn of suffix @param role - REPLICAROLE_MASTER, REPLICAROLE_HUB or REPLICAROLE_CONSUMER @param rid - number that identify the supplier replica (role=REPLICAROLE_MASTER) in the topology. For hub/consumer (role=REPLICAROLE_HUB or REPLICAROLE_CONSUMER), rid value is not used. This parameter is mandatory for supplier. @param args - dictionnary of initial replica's properties Supported properties are: REPLICA_SUFFIX REPLICA_ID REPLICA_TYPE REPLICA_LEGACY_CONS ['off'] REPLICA_BINDDN [defaultProperties[REPLICATION_BIND_DN]] REPLICA_PURGE_INTERVAL REPLICA_PURGE_DELAY REPLICA_PRECISE_PURGING REPLICA_REFERRAL REPLICA_FLAGS @return replica DN @raise InvalidArgumentError - if missing mandatory arguments ValueError - argument with invalid value """ # Check validity of role if not role: self.log.fatal( "Replica.create: replica role not specify (REPLICAROLE_*)") raise InvalidArgumentError("role missing") if not Replica._valid_role(role): self.log.fatal("enableReplication: replica role invalid (%s) " % role) raise ValueError("invalid role: %s" % role) # check the validity of 'rid' if not Replica._valid_rid(role, rid=rid): self.log.fatal( "Replica.create: replica role is master but 'rid' is missing or invalid value" ) raise InvalidArgumentError("rid missing or invalid value") # check the validity of the suffix if not suffix: self.log.fatal("Replica.create: suffix is missing") raise InvalidArgumentError("suffix missing") else: nsuffix = normalizeDN(suffix) # role is fine, set the replica type if role == REPLICAROLE_MASTER: rtype = REPLICA_RDWR_TYPE else: rtype = REPLICA_RDONLY_TYPE # Set the properties provided as mandatory parameter # The attribute name is not prefixed '+'/'-' => ldap.MOD_REPLACE properties = { REPLICA_SUFFIX: nsuffix, REPLICA_ID: str(rid), REPLICA_TYPE: str(rtype) } # If the properties in args are valid # add them to the 'properties' 
dictionary # The attribute name may be prefixed '+'/'-' => keep MOD type as provided in args if args: for prop in args: if not inProperties(prop, REPLICA_PROPNAME_TO_ATTRNAME): raise ValueError("unknown property: %s" % prop) properties[prop] = args[prop] # Now set default values of unset properties Replica._set_or_default(REPLICA_LEGACY_CONS, properties, 'off') Replica._set_or_default(REPLICA_BINDDN, properties, [defaultProperties[REPLICATION_BIND_DN]]) if role != REPLICAROLE_CONSUMER: properties[REPLICA_FLAGS] = "1" # create replica entry in mapping-tree mtents = self.conn.mappingtree.list(suffix=nsuffix) mtent = mtents[0] dn_replica = ','.join((RDN_REPLICA, mtent.dn)) try: entry = self.conn.getEntry(dn_replica, ldap.SCOPE_BASE) self.log.warn("Already setup replica for suffix %r" % nsuffix) self.conn.suffixes.setdefault(nsuffix, {}) self.conn.replica.setProperties(replica_dn=dn_replica, properties=properties) return dn_replica except ldap.NO_SUCH_OBJECT: entry = None # # Now create the replica entry # entry = Entry(dn_replica) entry.setValues("objectclass", "top", REPLICA_OBJECTCLASS_VALUE, "extensibleobject") self.conn.replica.setProperties(replica_entry=entry, properties=properties) self.conn.add_s(entry) # check if the entry exists TODO better to raise! self.conn._test_entry(dn_replica, ldap.SCOPE_BASE) self.conn.suffixes[nsuffix] = {'dn': dn_replica, 'type': rtype} return dn_replica
def create(self, suffix=None, host=None, port=None, properties=None):
    """Create (and return) a replication agreement from self to consumer.
    - self is the supplier,

    @param consumer: one of the following (consumer can be a master)
            * a DirSrv object if chaining
            * an object with attributes: host, port, sslport, __str__
    @param suffix - eg. 'dc=babel,dc=it'
    @param properties - further properties dict.
    Support properties
            RA_NAME
            RA_SUFFIX
            RA_BINDDN
            RA_BINDPW
            RA_METHOD
            RA_DESCRIPTION
            RA_SCHEDULE
            RA_TRANSPORT_PROT
            RA_FRAC_EXCLUDE
            RA_FRAC_EXCLUDE_TOTAL_UPDATE
            RA_FRAC_STRIP
            RA_CONSUMER_PORT
            RA_CONSUMER_HOST
            RA_CONSUMER_TOTAL_INIT
            RA_TIMEOUT
            RA_CHANGES

    @return dn_agreement - DN of the created agreement

    @raise InvalidArgumentError - If the suffix is missing
    @raise NosuchEntryError - if a replica doesn't exist for that suffix
    @raise UNWILLING_TO_PERFORM if the database was previously
           in read-only state. To create new agreements you
           need to *restart* the directory server
    """
    import string

    # Check we have a suffix [ mandatory ]
    if not suffix:
        self.log.warning("create: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory')

    # Resolve each agreement property, falling back to the defaults
    # when a property is absent (or the whole dict is missing).
    if properties:
        binddn = properties.get(
            RA_BINDDN) or defaultProperties[REPLICATION_BIND_DN]
        bindpw = properties.get(
            RA_BINDPW) or defaultProperties[REPLICATION_BIND_PW]
        bindmethod = properties.get(
            RA_METHOD) or defaultProperties[REPLICATION_BIND_METHOD]
        format = properties.get(RA_NAME) or r'meTo_$host:$port'
        description = properties.get(RA_DESCRIPTION) or format
        transport = properties.get(
            RA_TRANSPORT_PROT) or defaultProperties[REPLICATION_TRANSPORT]
        timeout = properties.get(
            RA_TIMEOUT) or defaultProperties[REPLICATION_TIMEOUT]
    else:
        binddn = defaultProperties[REPLICATION_BIND_DN]
        bindpw = defaultProperties[REPLICATION_BIND_PW]
        bindmethod = defaultProperties[REPLICATION_BIND_METHOD]
        format = r'meTo_$host:$port'
        description = format
        transport = defaultProperties[REPLICATION_TRANSPORT]
        timeout = defaultProperties[REPLICATION_TIMEOUT]

    # Compute the normalized suffix to be set in RA entry
    nsuffix = normalizeDN(suffix)

    # adding agreement under the replica entry
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError("Error: no replica set up for suffix "
                               + suffix)
    replica = replica_entries[0]

    # define agreement entry; the cn is the RA_NAME template expanded
    # with the consumer host/port
    cn = string.Template(format).substitute({'host': host, 'port': port})
    dn_agreement = ','.join(["cn=%s" % cn, replica.dn])

    # This is probably unnecessary because
    # we can just raise ALREADY_EXISTS
    try:
        entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE)
        self.log.warn("Agreement already exists: %r" % dn_agreement)
        return dn_agreement
    except ldap.NO_SUCH_OBJECT:
        entry = None

    # In a separate function in this scope?
    entry = Entry(dn_agreement)
    entry.update({
        'objectclass': ["top", RA_OBJECTCLASS_VALUE],
        RA_PROPNAME_TO_ATTRNAME[RA_NAME]: cn,
        RA_PROPNAME_TO_ATTRNAME[RA_SUFFIX]: nsuffix,
        RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_HOST]: host,
        RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_PORT]: str(port),
        RA_PROPNAME_TO_ATTRNAME[RA_TRANSPORT_PROT]: transport,
        RA_PROPNAME_TO_ATTRNAME[RA_TIMEOUT]: str(timeout),
        RA_PROPNAME_TO_ATTRNAME[RA_BINDDN]: binddn,
        RA_PROPNAME_TO_ATTRNAME[RA_BINDPW]: bindpw,
        RA_PROPNAME_TO_ATTRNAME[RA_METHOD]: bindmethod,
        RA_PROPNAME_TO_ATTRNAME[RA_DESCRIPTION]:
            string.Template(description).substitute({
                'host': host, 'port': port
            })
    })

    # we make a copy here because we cannot change
    # the passed in properties dict
    propertiescopy = {}
    if properties:
        import copy
        propertiescopy = copy.deepcopy(properties)

    # further arguments
    if 'winsync' in propertiescopy:  # state it clearly!
        self.conn.setupWinSyncAgmt(propertiescopy, entry)

    try:
        self.log.debug("Adding replica agreement: [%r]" % entry)
        self.conn.add_s(entry)
    except:
        # FIXME check please!
        raise

    entry = self.conn.waitForEntry(dn_agreement)
    if entry:
        # More verbose but shows what's going on
        if 'chain' in propertiescopy:
            raise NotImplementedError
            # NOTE(review): everything below the raise is unreachable
            # dead code kept from the original; 'consumer' is not defined
            # in this scope and would NameError if ever revived - confirm
            # before re-enabling.
            chain_args = {
                'suffix': suffix,
                'binddn': binddn,
                'bindpw': bindpw
            }
            # Work on `self` aka producer
            if replica.nsds5replicatype == MASTER_TYPE:
                self.conn.setupChainingFarm(**chain_args)
            # Work on `consumer`
            # TODO - is it really required?
            if replica.nsds5replicatype == LEAF_TYPE:
                chain_args.update({
                    'isIntermediate': 0,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
            elif replica.nsds5replicatype == HUB_TYPE:
                chain_args.update({
                    'isIntermediate': 1,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
    return dn_agreement
def create(self, suffix=None, properties=None):
    """
    Creates backend entry and returns its dn.

    If the properties 'chain-bind-pwd' and 'chain-bind-dn' and
    'chain-urls' are specified the backend is a chained backend.  A
    chaining backend is created under
    'cn=chaining database,cn=plugins,cn=config'.

    A local backend is created under
    'cn=ldbm database,cn=plugins,cn=config'

    @param suffix - suffix stored in the backend
    @param properties - dictionary with properties values
    supported properties are
        BACKEND_NAME          = 'name'
        BACKEND_READONLY      = 'read-only'
        BACKEND_REQ_INDEX     = 'require-index'
        BACKEND_CACHE_ENTRIES = 'entry-cache-number'
        BACKEND_CACHE_SIZE    = 'entry-cache-size'
        BACKEND_DNCACHE_SIZE  = 'dn-cache-size'
        BACKEND_DIRECTORY     = 'directory'
        BACKEND_DB_DEADLOCK   = 'db-deadlock'
        BACKEND_CHAIN_BIND_DN = 'chain-bind-dn'
        BACKEND_CHAIN_BIND_PW = 'chain-bind-pw'
        BACKEND_CHAIN_URLS    = 'chain-urls'
        BACKEND_SUFFIX        = 'suffix'
        BACKEND_SAMPLE_ENTRIES = 'sample_entries'

    @return backend DN of the created backend

    @raise LDAPError
    """
    def _getBackendName(parent):
        '''
        Use to build a backend name that is not already used
        '''
        # Probe 'local1db', 'local2db', ... until the entry is absent.
        index = 1
        while True:
            bename = "local%ddb" % index
            base = ("%s=%s,%s"
                    % (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME],
                       bename, parent))
            filt = "(objectclass=%s)" % BACKEND_OBJECTCLASS_VALUE
            self.log.debug("_getBackendName: baser=%s : fileter=%s",
                           base, filt)
            try:
                self.conn.getEntry(base, ldap.SCOPE_BASE, filt)
            except (NoSuchEntryError, ldap.NO_SUCH_OBJECT):
                self.log.info("backend name will be %s", bename)
                return bename
            index += 1

    # suffix is mandatory. If may be in the properties
    if isinstance(properties, dict) and properties.get(
            BACKEND_SUFFIX, None) is not None:
        suffix = properties.get(BACKEND_SUFFIX)
    if not suffix:
        raise ldap.UNWILLING_TO_PERFORM('Missing Suffix')
    else:
        nsuffix = normalizeDN(suffix)

    # Check it does not already exist a backend for that suffix
    if self.conn.verbose:
        self.log.info("Checking suffix %s for existence", suffix)
    ents = self.conn.backend.list(suffix=suffix)
    if len(ents) != 0:
        raise ldap.ALREADY_EXISTS

    # Check if we are creating a local/chained backend: a chained
    # backend requires all three chaining properties to be present
    chained_suffix = (properties and
                      (BACKEND_CHAIN_BIND_DN in properties) and
                      (BACKEND_CHAIN_BIND_PW in properties) and
                      (BACKEND_CHAIN_URLS in properties))
    if chained_suffix:
        self.log.info("Creating a chaining backend")
        dnbase = DN_CHAIN
    else:
        self.log.info("Creating a local backend")
        dnbase = DN_LDBM

    # Get the future backend name
    if properties and BACKEND_NAME in properties:
        cn = properties[BACKEND_NAME]
    else:
        cn = _getBackendName(dnbase)

    # Check the future backend name does not already exists
    # we can imagine having no backends for 'suffix' but having a backend
    # with the same name
    dn = "%s=%s,%s" % (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME],
                       cn, dnbase)
    ents = self.conn.backend.list(backend_dn=dn)
    if ents:
        raise ldap.ALREADY_EXISTS(
            "Backend already exists with that DN: %s" % ents[0].dn)

    # All checks are done, Time to create the backend
    try:
        entry = Entry(dn)
        entry.update({
            'objectclass': ['top', 'extensibleObject',
                            BACKEND_OBJECTCLASS_VALUE],
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME]: cn,
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX]: nsuffix,
        })

        if chained_suffix:
            entry.update({
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_URLS]:
                    properties[BACKEND_CHAIN_URLS],
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_BIND_DN]:
                    properties[BACKEND_CHAIN_BIND_DN],
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_BIND_PW]:
                    properties[BACKEND_CHAIN_BIND_PW]
            })

        self.log.debug("adding entry: %r", entry)
        self.conn.add_s(entry)
    except ldap.ALREADY_EXISTS as e:
        self.log.error("Entry already exists: %r", dn)
        raise ldap.ALREADY_EXISTS("%s : %r" % (e, dn))
    except ldap.LDAPError as e:
        self.log.error("Could not add backend entry: %r", dn)
        raise e

    # read the entry back to confirm creation and return it
    backend_entry = self.conn._test_entry(dn, ldap.SCOPE_BASE)

    return backend_entry
def init(self, suffix=None, consumer_host=None, consumer_port=None):
    """Kick off a total (full) re-initialization of a consumer replica.

    ``self`` is the supplier side; the consumer is identified by its
    host and port. The replication agreement matching that consumer is
    located under the suffix's replica entry, and its
    ``nsds5BeginReplicaRefresh`` attribute is set to ``start`` to
    trigger the online total update.

    :param suffix: The suffix targeted by the total update [mandatory]
    :type suffix: str
    :param consumer_host: Hostname of the consumer [mandatory]
    :type consumer_host: str
    :param consumer_port: Port of the consumer [mandatory]
    :type consumer_port: int

    :returns: None
    :raises: InvalidArgument - if missing mandatory argument
    """
    # Guard clauses for the mandatory arguments; order is preserved so
    # the first missing argument is the one reported.
    if not suffix:
        self.log.fatal("initAgreement: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory argument')
    nsuffix = normalizeDN(suffix)

    if not consumer_host:
        self.log.fatal("initAgreement: host is missing")
        raise InvalidArgumentError('host is mandatory argument')

    if not consumer_port:
        self.log.fatal("initAgreement: port is missing")
        raise InvalidArgumentError('port is mandatory argument')

    # A replica must already be configured for this suffix.
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError("Error: no replica set up for suffix " + suffix)
    replica_root = replica_entries[0]
    self.log.debug("initAgreement: looking for replica agreements " +
                   "under %s" % replica_root.dn)

    # Locate the agreement that points at this exact consumer.
    agmt_filter = ("(&(objectclass=nsds5replicationagreement)"
                   "(nsds5replicahost=%s)"
                   "(nsds5replicaport=%d)"
                   "(nsds5replicaroot=%s))"
                   % (consumer_host, consumer_port, nsuffix))
    try:
        agmt_entry = self.conn.getEntry(replica_root.dn,
                                        ldap.SCOPE_ONELEVEL,
                                        agmt_filter)
    except ldap.NO_SUCH_OBJECT:
        msg = ('initAgreement: No replica agreement to ' +
               '{host}:{port} for suffix {suffix}'.format(
                   host=consumer_host, port=consumer_port, suffix=nsuffix))
        self.log.fatal(msg)
        raise

    # Writing 'start' into nsds5BeginReplicaRefresh is what actually
    # triggers the total init on the server side.
    self.log.info("Starting total init %s" % agmt_entry.dn)
    refresh_mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh',
                    ensure_bytes('start'))]
    self.conn.modify_s(agmt_entry.dn, refresh_mod)
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: deployment-wide options; reads at least the keys
        'full_machine_name', 'systemd', 'selinux', 'defaults', 'start'
    :param slapd: per-instance options: filesystem paths, port numbers,
        user/group ids, passwords, instance name, etc.
    :param backends: list of backend property dicts; each must carry an
        'nsslapd-suffix' key
    """
    ######################## WARNING #############################
    # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
    # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
    # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
    #
    # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

    ### This first section is about creating the *minimal* required paths and config to get
    # directory server to start: After this, we then perform all configuration as online
    # changes from after this point.

    # Create dse.ldif with a temporary root password.
    # This is done first, because instances are found for removal and listing by detecting
    # the presence of their dse.ldif!!!!
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    self.log.debug("ACTION: Creating dse.ldif")
    try:
        # For parent dirs that get created -> sets 770 for perms
        os.umask(0o007)
        os.makedirs(slapd['config_dir'], mode=0o770)
    except OSError:
        # Directory may already exist; that is fine.
        pass

    # Get suffix for some plugin defaults (if possible)
    # annoyingly for legacy compat backend takes TWO key types
    # and we have to now deal with that ....
    #
    # Create ds_suffix here else it won't be in scope ....
    ds_suffix = ''
    if len(backends) > 0:
        ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

    # Turn the template's %KEY% placeholders into {KEY} so str.format()
    # can substitute them below.
    dse = ""
    with open(
            os.path.join(slapd['data_dir'], 'dirsrv', 'data',
                         'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    # Check if we are in a container, if so don't use /dev/shm for the db home dir
    # as containers typically don't allocate enough space for dev/shm and we don't
    # want to unexpectedly break the server after an upgrade
    #
    # If we know we are in a container, we don't need to re-detect on systemd.
    # It actually turns out if you add systemd-detect-virt, that pulls in systemd
    # which subsequently breaks containers starting as instance.start then believes
    # it COULD check the ds status. The times we need to check for systemd are mainly
    # in other environments that use systemd natively in their containers.
    container_result = 1
    if not self.containerised:
        # returncode == 0 means systemd-detect-virt detected a container
        container_result = subprocess.run(["systemd-detect-virt", "-c"],
                                          stdout=subprocess.PIPE)
    if self.containerised or container_result.returncode == 0:
        # In a container, set the db_home_dir to the db path
        self.log.debug(
            "Container detected setting db home directory to db directory."
        )
        slapd['db_home_dir'] = slapd['db_dir']

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'),
              'w') as file_dse:
        dse_fmt = dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir=slapd['inst_dir'],
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            instance_name=slapd['instance_name'],
            # We set our own password here, so we can connect and mod.
            # This is because we never know the users input root password as
            # they can validly give us a *hashed* input.
            ds_passwd=self._secure_password,
            ds_suffix=ds_suffix,
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
            db_home_dir=slapd['db_home_dir'],
            db_lib=slapd['db_lib'],
            ldapi_enabled="on",
            ldapi=slapd['ldapi'],
            ldapi_autobind="on",
        )
        file_dse.write(dse_fmt)

    self.log.info("Create file system structures ...")
    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir,
    # ldif_dir, lock_dir, log_dir, run_dir?
    for path in ('backup_dir', 'cert_dir', 'db_dir', 'db_home_dir',
                 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
        self.log.debug("ACTION: creating %s", slapd[path])
        try:
            # For parent dirs that get created -> sets 770 for perms
            os.umask(0o007)
            os.makedirs(slapd[path], mode=0o770)
        except OSError:
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    # /var/lock/dirsrv needs special attention...
    parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
    os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

    ### Warning! We need to own the directory under db too for .restore to work.
    # During a restore, the db dir is deleted and recreated, which is why we need
    # to own it for a restore.
    #
    # However, in a container, we can't always guarantee this due to how the volumes
    # work and are mounted. Specifically, if we have an anonymous volume we will
    # NEVER be able to own it, but in a true deployment it is reasonable to expect
    # we DO own it. Thus why we skip it in this specific context
    if not self.containerised:
        db_parent = os.path.join(slapd['db_dir'], '..')
        os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.

    # Copy in the schema
    # This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(
        os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'),
        slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
    os.chmod(slapd['schema_dir'], 0o770)

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'],
                           'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # Copy in the certmap configuration
    srcfile = os.path.join(slapd['sysconf_dir'],
                           'dirsrv/config/certmap.conf')
    dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # If we are on the correct platform settings, systemd
    if general['systemd']:
        # Should create the symlink we need, but without starting it.
        result = subprocess.run(
            ["systemctl", "enable", "dirsrv@%s" % slapd['instance_name']],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        args = ' '.join(ensure_list_str(result.args))
        stdout = ensure_str(result.stdout)
        stderr = ensure_str(result.stderr)
        # Systemd encodes some odd characters into its symlink output on newer versions which
        # can trip up the logger.
        self.log.debug(
            f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}".encode(
                "utf-8"))

        # Setup tmpfiles_d so the run/lock directories are recreated at boot
        tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd[
            'instance_name'] + ".conf"
        with open(tmpfile_d, "w") as TMPFILE_D:
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['run_dir'], slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'].replace(
                    "slapd-" + slapd['instance_name'], ""),
                slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'], slapd['user'], slapd['group']))
    # Else we need to detect other init scripts?
    # WB: No, we just install and assume that docker will start us ...

    # Bind sockets to our type?

    # Create certdb in sysconfidir
    self.log.debug("ACTION: Creating certificate database is %s",
                   slapd['cert_dir'])

    # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
    # if at all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose, containerised=self.containerised)
    if self.containerised:
        ds_instance.systemd_override = general['systemd']

    # By default SUSE does something extremely silly - it creates a hostname
    # that CANT be resolved by DNS. As a result this causes all installs to
    # fail. We need to guarantee that we only connect to localhost here, as
    # it's the only stable and guaranteed way to connect to the instance
    # at this point.
    #
    # Use ldapi which would prevent the need
    # to configure a temp root pw in the setup phase.
    args = {
        SER_HOST: "localhost",
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix'],
        SER_LDAPI_ENABLED: 'on',
        SER_LDAPI_SOCKET: slapd['ldapi'],
        SER_LDAPI_AUTOBIND: 'on'
    }
    ds_instance.allocate(args)
    # Does this work?
    assert_c(ds_instance.exists(),
             "Instance failed to install, does not exist when expected")

    # Create a certificate database.
    tlsdb = NssSsl(dirsrv=ds_instance, dbpath=slapd['cert_dir'])
    if not tlsdb._db_exists():
        tlsdb.reinit()

    if slapd['self_sign_cert']:
        self.log.info("Create self-signed certificate database ...")
        etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
        ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
        ssca = NssSsl(dbpath=ssca_path)
        # If it doesn't exist, create a CA DB
        if not ssca._db_exists():
            ssca.reinit()
            ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
        # If CA is expired or will expire soon,
        # Reissue it and resign the existing certs that were signed by the cert previously
        elif ssca.rsa_ca_needs_renew():
            ca = ssca.renew_rsa_ca(
                months=slapd['self_sign_cert_valid_months'])
            # Import CA to the existing instances except the one we install now (we import it later)
            for dir in os.listdir(etc_dirsrv_path):
                if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                    tlsdb_inst = NssSsl(
                        dbpath=os.path.join(etc_dirsrv_path, dir))
                    tlsdb_inst.import_rsa_crt(ca)

        csr = tlsdb.create_rsa_key_and_csr(
            alt_names=[general['full_machine_name']])
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)
        if general['selinux']:
            # Set selinux port label
            selinux_label_port(slapd['secure_port'])

    # Do selinux fixups
    if general['selinux']:
        self.log.info("Perform SELinux labeling ...")
        selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                         'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
                         'run_dir', 'schema_dir', 'tmp_dir')
        for path in selinux_paths:
            selinux_restorecon(slapd[path])

        selinux_label_port(slapd['port'])

    # Start the server
    # Make changes using the temp root
    self.log.debug(f"asan_enabled={ds_instance.has_asan()}")
    self.log.debug(
        f"libfaketime installed ={'libfaketime' in sys.modules}")
    assert_c(
        not ds_instance.has_asan() or 'libfaketime' not in sys.modules,
        "libfaketime python module is incompatible with ASAN build.")
    ds_instance.start(timeout=60)
    ds_instance.open()

    # In some cases we may want to change log settings
    # ds_instance.config.enable_log('audit')

    # Create the configs related to this version.
    base_config = get_config(general['defaults'])
    base_config_inst = base_config(ds_instance)
    base_config_inst.apply_config(install=True)

    # Setup TLS with the instance.
    # We *ALWAYS* set secure port, even if security is off, because it breaks
    # tests with standalone.enable_tls if we do not. It's only when security is on
    # that we actually start listening on it.
    if not slapd['secure_port']:
        slapd['secure_port'] = "636"
    ds_instance.config.set('nsslapd-secureport',
                           '%s' % slapd['secure_port'])
    if slapd['self_sign_cert']:
        ds_instance.config.set('nsslapd-security', 'on')

    # Before we create any backends, create any extra default indexes that may be
    # dynamically provisioned, rather than from template-dse.ldif. Looking at you
    # entryUUID (requires rust enabled).
    #
    # Indexes defaults to default_index_dn
    indexes = Indexes(ds_instance)
    if ds_instance.ds_paths.rust_enabled:
        indexes.create(
            properties={
                'cn': 'entryUUID',
                'nsSystemIndex': 'false',
                'nsIndexType': ['eq', 'pres'],
            })

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        self.log.info(
            f"Create database backend: {backend['nsslapd-suffix']} ...")
        is_sample_entries_in_props = "sample_entries" in backend
        create_suffix_entry_in_props = backend.pop('create_suffix_entry',
                                                   False)
        ds_instance.backends.create(properties=backend)
        if not is_sample_entries_in_props and create_suffix_entry_in_props:
            # Set basic ACIs: anonymous read access on the suffix entry only
            c_aci = '(targetattr="c || description || objectClass")(targetfilter="(objectClass=country)")(version 3.0; acl "Enable anyone c read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            o_aci = '(targetattr="o || description || objectClass")(targetfilter="(objectClass=organization)")(version 3.0; acl "Enable anyone o read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            dc_aci = '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            ou_aci = '(targetattr="ou || description || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            cn_aci = '(targetattr="cn || description || objectClass")(targetfilter="(objectClass=nscontainer)")(version 3.0; acl "Enable anyone cn read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            # Create a suffix entry whose objectclass matches the RDN
            # attribute type of the suffix.
            suffix_rdn_attr = backend['nsslapd-suffix'].split(
                '=')[0].lower()
            if suffix_rdn_attr == 'dc':
                domain = create_base_domain(ds_instance,
                                            backend['nsslapd-suffix'])
                domain.add('aci', dc_aci)
            elif suffix_rdn_attr == 'o':
                org = create_base_org(ds_instance,
                                      backend['nsslapd-suffix'])
                org.add('aci', o_aci)
            elif suffix_rdn_attr == 'ou':
                orgunit = create_base_orgunit(ds_instance,
                                              backend['nsslapd-suffix'])
                orgunit.add('aci', ou_aci)
            elif suffix_rdn_attr == 'cn':
                cn = create_base_cn(ds_instance,
                                    backend['nsslapd-suffix'])
                cn.add('aci', cn_aci)
            elif suffix_rdn_attr == 'c':
                c = create_base_c(ds_instance,
                                  backend['nsslapd-suffix'])
                c.add('aci', c_aci)
            else:
                # Unsupported rdn
                raise ValueError(
                    "Suffix RDN '{}' in '{}' is not supported. Supported RDN's are: 'c', 'cn', 'dc', 'o', and 'ou'"
                    .format(suffix_rdn_attr, backend['nsslapd-suffix']))

    # Create all required sasl maps: if we have a single backend ...
    # our default maps are really really bad, and we should feel bad.
    # they basically only work with a single backend, and they'll break
    # GSSAPI in some cases too :(
    if len(backends) > 0:
        # NOTE(review): 'backend' here still holds the LAST loop value, while
        # the maps below are built from backends[0] — confirm this is intended.
        self.log.debug("Adding sasl maps for suffix %s" %
                       backend['nsslapd-suffix'])
        backend = backends[0]
        saslmappings = SaslMappings(ds_instance)
        saslmappings.create(
            properties={
                'cn': 'rfc 2829 u syntax',
                'nsSaslMapRegexString': '^u:\\(.*\\)',
                'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                'nsSaslMapFilterTemplate': '(uid=\\1)'
            })
        # I think this is for LDAPI
        saslmappings.create(
            properties={
                'cn': 'uid mapping',
                'nsSaslMapRegexString': '^[^:@]+$',
                'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                'nsSaslMapFilterTemplate': '(uid=&)'
            })
    else:
        self.log.debug("Skipping default SASL maps - no backend found!")

    self.log.info("Perform post-installation tasks ...")
    # Change the root password finally
    ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

    # We need to log the password when containerised
    if self.containerised:
        self.log.debug("Root DN password: {}".format(
            slapd['root_password']))

    # Complete.
    if general['start']:
        # Restart for changes to take effect - this could be removed later
        ds_instance.restart(post_open=False)
    else:
        # Just stop the instance now.
        ds_instance.stop()

    self.log.debug(" 🎉 Instance setup complete")
def create(self, suffix=None, role=None, rid=None, args=None): """ Create a replica entry on an existing suffix. @param suffix - dn of suffix @param role - REPLICAROLE_MASTER, REPLICAROLE_HUB or REPLICAROLE_CONSUMER @param rid - number that identify the supplier replica (role=REPLICAROLE_MASTER) in the topology. For hub/consumer (role=REPLICAROLE_HUB or REPLICAROLE_CONSUMER), rid value is not used. This parameter is mandatory for supplier. @param args - dictionnary of initial replica's properties Supported properties are: REPLICA_SUFFIX REPLICA_ID REPLICA_TYPE REPLICA_LEGACY_CONS ['off'] REPLICA_BINDDN [defaultProperties[REPLICATION_BIND_DN]] REPLICA_PURGE_INTERVAL REPLICA_PURGE_DELAY REPLICA_PRECISE_PURGING REPLICA_REFERRAL REPLICA_FLAGS @return replica DN @raise InvalidArgumentError - if missing mandatory arguments ValueError - argument with invalid value """ # Check validity of role if not role: self.log.fatal("Replica.create: replica role not specify (REPLICAROLE_*)") raise InvalidArgumentError("role missing") if not Replica._valid_role(role): self.log.fatal("enableReplication: replica role invalid (%s) " % role) raise ValueError("invalid role: %s" % role) # check the validity of 'rid' if not Replica._valid_rid(role, rid=rid): self.log.fatal("Replica.create: replica role is master but 'rid' is missing or invalid value") raise InvalidArgumentError("rid missing or invalid value") # check the validity of the suffix if not suffix: self.log.fatal("Replica.create: suffix is missing") raise InvalidArgumentError("suffix missing") else: nsuffix = normalizeDN(suffix) # role is fine, set the replica type if role == REPLICAROLE_MASTER: rtype = REPLICA_RDWR_TYPE else: rtype = REPLICA_RDONLY_TYPE # Set the properties provided as mandatory parameter # The attribute name is not prefixed '+'/'-' => ldap.MOD_REPLACE properties = {REPLICA_SUFFIX: nsuffix, REPLICA_ID: str(rid), REPLICA_TYPE: str(rtype)} # If the properties in args are valid # add them to the 'properties' 
dictionary # The attribute name may be prefixed '+'/'-' => keep MOD type as provided in args if args: for prop in args: if not inProperties(prop, REPLICA_PROPNAME_TO_ATTRNAME): raise ValueError("unknown property: %s" % prop) properties[prop] = args[prop] # Now set default values of unset properties Replica._set_or_default(REPLICA_LEGACY_CONS, properties, 'off') Replica._set_or_default(REPLICA_BINDDN, properties, [defaultProperties[REPLICATION_BIND_DN]]) if role != REPLICAROLE_CONSUMER: properties[REPLICA_FLAGS] = "1" # create replica entry in mapping-tree mtents = self.conn.mappingtree.list(suffix=nsuffix) mtent = mtents[0] dn_replica = ','.join((RDN_REPLICA, mtent.dn)) try: entry = self.conn.getEntry(dn_replica, ldap.SCOPE_BASE) self.log.warn("Already setup replica for suffix %r" % nsuffix) self.conn.suffixes.setdefault(nsuffix, {}) self.conn.replica.setProperties(replica_dn=dn_replica, properties=properties) return dn_replica except ldap.NO_SUCH_OBJECT: entry = None # # Now create the replica entry # entry = Entry(dn_replica) entry.setValues("objectclass", "top", REPLICA_OBJECTCLASS_VALUE, "extensibleobject") self.conn.replica.setProperties(replica_entry=entry, properties=properties) self.conn.add_s(entry) # check if the entry exists TODO better to raise! self.conn._test_entry(dn_replica, ldap.SCOPE_BASE) self.conn.suffixes[nsuffix] = {'dn': dn_replica, 'type': rtype} return dn_replica
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: deployment-wide options; reads at least the keys
        'full_machine_name', 'systemd', 'selinux', 'defaults', 'start'
    :param slapd: per-instance options: filesystem paths, port numbers,
        user/group ids, passwords, instance name, etc.
    :param backends: list of backend property dicts; each must carry an
        'nsslapd-suffix' key
    """
    ######################## WARNING #############################
    # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
    # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
    # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
    #
    # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

    ### This first section is about creating the *minimal* required paths and config to get
    # directory server to start: After this, we then perform all configuration as online
    # changes from after this point.

    # Create dse.ldif with a temporary root password.
    # This is done first, because instances are found for removal and listing by detecting
    # the presence of their dse.ldif!!!!
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    self.log.debug("ACTION: Creating dse.ldif")
    try:
        # For parent dirs that get created -> sets 770 for perms
        os.umask(0o007)
        os.makedirs(slapd['config_dir'], mode=0o770)
    except OSError:
        # Directory may already exist; that is fine.
        pass

    # Get suffix for some plugin defaults (if possible)
    # annoyingly for legacy compat backend takes TWO key types
    # and we have to now deal with that ....
    #
    # Create ds_suffix here else it won't be in scope ....
    ds_suffix = ''
    if len(backends) > 0:
        ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

    # Turn the template's %KEY% placeholders into {KEY} for str.format().
    dse = ""
    with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data',
                           'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'),
              'w') as file_dse:
        file_dse.write(dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir=slapd['inst_dir'],
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            # We set our own password here, so we can connect and mod.
            # This is because we never know the users input root password as
            # they can validly give us a *hashed* input.
            ds_passwd=self._secure_password,
            ds_suffix=ds_suffix,
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
        ))

    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir,
    # ldif_dir, lock_dir, log_dir, run_dir?
    for path in ('backup_dir', 'cert_dir', 'db_dir', 'ldif_dir',
                 'lock_dir', 'log_dir', 'run_dir'):
        self.log.debug("ACTION: creating %s", slapd[path])
        try:
            # For parent dirs that get created -> sets 770 for perms
            os.umask(0o007)
            os.makedirs(slapd[path], mode=0o770)
        except OSError:
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    # /var/lock/dirsrv needs special attention...
    parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
    os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

    ### Warning! We need to own the directory under db too for .restore to work.
    # See dblayer.c for more!
    db_parent = os.path.join(slapd['db_dir'], '..')
    os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.

    # Copy in the schema
    # This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(
        os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'),
        slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
    os.chmod(slapd['schema_dir'], 0o770)

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'],
                           'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy2(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # Copy in the certmap configuration
    srcfile = os.path.join(slapd['sysconf_dir'],
                           'dirsrv/config/certmap.conf')
    dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
    shutil.copy2(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # If we are on the correct platform settings, systemd
    if general['systemd']:
        # Should create the symlink we need, but without starting it.
        subprocess.check_call(["systemctl", "enable",
                               "dirsrv@%s" % slapd['instance_name']])

        # Setup tmpfiles_d so the run/lock directories are recreated at boot
        tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + \
            slapd['instance_name'] + ".conf"
        with open(tmpfile_d, "w") as TMPFILE_D:
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['run_dir'], slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'].replace(
                    "slapd-" + slapd['instance_name'], ""),
                slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'], slapd['user'], slapd['group']))
    # Else we need to detect other init scripts?
    # WB: No, we just install and assume that docker will start us ...

    # Bind sockets to our type?

    # Create certdb in sysconfidir
    self.log.debug("ACTION: Creating certificate database is %s",
                   slapd['cert_dir'])

    # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
    # if at all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose)
    if self.containerised:
        ds_instance.systemd = general['systemd']
    args = {
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix']
    }
    ds_instance.allocate(args)
    # Does this work?
    assert_c(ds_instance.exists(),
             "Instance failed to install, does not exist when expected")

    # Create a certificate database.
    tlsdb = NssSsl(dbpath=slapd['cert_dir'])
    if not tlsdb._db_exists():
        tlsdb.reinit()

    if slapd['self_sign_cert']:
        etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
        ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
        ssca = NssSsl(dbpath=ssca_path)
        # If it doesn't exist, create a CA DB
        if not ssca._db_exists():
            ssca.reinit()
            ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
        # If CA is expired or will expire soon,
        # Reissue it and resign the existing certs that were signed by the cert previously
        elif ssca.rsa_ca_needs_renew():
            ca = ssca.renew_rsa_ca(
                months=slapd['self_sign_cert_valid_months'])
            # Import CA to the existing instances except the one we install now (we import it later)
            for dir in os.listdir(etc_dirsrv_path):
                if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                    tlsdb_inst = NssSsl(
                        dbpath=os.path.join(etc_dirsrv_path, dir))
                    tlsdb_inst.import_rsa_crt(ca)

        csr = tlsdb.create_rsa_key_and_csr()
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)
        if general['selinux']:
            # Set selinux port label
            selinux_label_port(slapd['secure_port'])

    # Do selinux fixups
    if general['selinux']:
        selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                         'ldif_dir', 'lock_dir', 'log_dir', 'run_dir',
                         'schema_dir', 'tmp_dir')
        for path in selinux_paths:
            selinux_restorecon(slapd[path])
        selinux_label_port(slapd['port'])

    # Start the server
    # Make changes using the temp root
    ds_instance.start(timeout=60)
    ds_instance.open()

    # In some cases we may want to change log settings
    # ds_instance.config.enable_log('audit')

    # Create the configs related to this version.
    base_config = get_config(general['defaults'])
    base_config_inst = base_config(ds_instance)
    base_config_inst.apply_config(install=True)

    # Setup TLS with the instance.
    # We *ALWAYS* set secure port, even if security is off, because it breaks
    # tests with standalone.enable_tls if we do not. It's only when security is on
    # that we actually start listening on it.
    if not slapd['secure_port']:
        slapd['secure_port'] = "636"
    ds_instance.config.set('nsslapd-secureport',
                           '%s' % slapd['secure_port'])
    if slapd['self_sign_cert']:
        ds_instance.config.set('nsslapd-security', 'on')

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        is_sample_entries_in_props = "sample_entries" in backend
        create_suffix_entry_in_props = backend.pop('create_suffix_entry',
                                                   False)
        ds_instance.backends.create(properties=backend)
        if not is_sample_entries_in_props and create_suffix_entry_in_props:
            domain = create_base_domain(ds_instance,
                                        backend['nsslapd-suffix'])
            # Set basic ACI
            domain.add('aci', [
                # Allow reading the base domain object
                '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)',
                # Allow reading the ou
                '(targetattr="ou || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            ])

    # Initialise ldapi socket information. IPA expects this ....
    ldapi_path = os.path.join(
        slapd['local_state_dir'],
        "run/slapd-%s.socket" % slapd['instance_name'])
    ds_instance.config.set('nsslapd-ldapifilepath', ldapi_path)
    ds_instance.config.set('nsslapd-ldapilisten', 'on')
    ds_instance.config.set('nsslapd-ldapiautobind', 'on')
    ds_instance.config.set('nsslapd-ldapimaprootdn', slapd['root_dn'])

    # Create all required sasl maps: if we have a single backend ...
    # our default maps are really really bad, and we should feel bad.
    # they basically only work with a single backend, and they'll break
    # GSSAPI in some cases too :(
    if len(backends) > 0:
        # NOTE(review): 'backend' here still holds the LAST loop value, while
        # the maps below are built from backends[0] — confirm this is intended.
        self.log.debug("Adding sasl maps for suffix %s" %
                       backend['nsslapd-suffix'])
        backend = backends[0]
        saslmappings = SaslMappings(ds_instance)
        saslmappings.create(properties={
            'cn': 'rfc 2829 u syntax',
            'nsSaslMapRegexString': '^u:\\(.*\\)',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=\\1)'
        })
        # I think this is for LDAPI
        saslmappings.create(properties={
            'cn': 'uid mapping',
            'nsSaslMapRegexString': '^[^:@]+$',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=&)'
        })
    else:
        self.log.debug("Skipping default SASL maps - no backend found!")

    # Change the root password finally
    ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

    # We need to log the password when containerised
    if self.containerised:
        self.log.debug("Root DN password: {}".format(
            slapd['root_password']))

    # Complete.
    if general['start']:
        # Restart for changes to take effect - this could be removed later
        ds_instance.restart(post_open=False)
    else:
        # Just stop the instance now.
        ds_instance.stop()
def enableReplication(self, suffix=None, role=None, replicaId=CONSUMER_REPLICAID, binddn=None):
    '''
    Enable replication for 'suffix', giving this replica the provided
    'role' (REPLICAROLE_MASTER, REPLICAROLE_HUB or REPLICAROLE_CONSUMER).
    For a master/hub the changelog is created as well.

    @param suffix - dn of the suffix to replicate
    @param role - REPLICAROLE_MASTER, REPLICAROLE_HUB or
                  REPLICAROLE_CONSUMER
    @param replicaId - replica identifier; must be in [1..CONSUMER_REPLICAID[
                       for a master, and exactly CONSUMER_REPLICAID otherwise
    @param binddn - replication manager bind DN; defaults to
                    defaultProperties[REPLICATION_BIND_DN] when not given

    @return the result of self.conn.replica.create()

    @raise ValueError - for a missing suffix/role, an invalid role or
                        replicaId, or when no backend exists for the suffix
    '''
    if not suffix:
        self.log.fatal("enableReplication: suffix not specified")
        raise ValueError("suffix missing")

    if not role:
        self.log.fatal("enableReplication: replica role not specify (REPLICAROLE_*)")
        raise ValueError("role missing")

    #
    # Check the validity of the parameters
    #

    # First role and replicaID
    if role != REPLICAROLE_MASTER and role != REPLICAROLE_HUB and role != REPLICAROLE_CONSUMER:
        self.log.fatal("enableReplication: replica role invalid (%s) " % role)
        raise ValueError("invalid role: %s" % role)

    # BUGFIX: the replica type selection used to hang off the validity
    # check above, so a valid MASTER fell into the read-only branch and
    # the REPLICA_RDWR_TYPE assignment was unreachable after the raise.
    # Derive the type from the role explicitly instead.
    if role == REPLICAROLE_MASTER:
        # master
        replica_type = REPLICA_RDWR_TYPE
    else:
        # hub or consumer
        replica_type = REPLICA_RDONLY_TYPE
    # NOTE(review): replica_type is computed but not passed on to
    # replica.create() below — confirm whether it is consumed elsewhere.

    if role == REPLICAROLE_MASTER:
        # check the replicaId [1..CONSUMER_REPLICAID[
        if not decimal.Decimal(replicaId) or (replicaId <= 0) or (replicaId >= CONSUMER_REPLICAID):
            self.log.fatal("enableReplication: invalid replicaId (%s) for a RW replica" % replicaId)
            raise ValueError("invalid replicaId %d (expected [1..CONSUMER_REPLICAID[" % replicaId)
    elif replicaId != CONSUMER_REPLICAID:
        # check the replicaId is CONSUMER_REPLICAID
        self.log.fatal("enableReplication: invalid replicaId (%s) for a Read replica (expected %d)" %
                       (replicaId, CONSUMER_REPLICAID))
        raise ValueError("invalid replicaId: %d for HUB/CONSUMER replicaId is CONSUMER_REPLICAID" % replicaId)

    # Now check we have a suffix
    entries_backend = self.conn.backend.list(suffix=suffix)
    if not entries_backend:
        self.log.fatal("enableReplication: enable to retrieve the backend for %s" % suffix)
        raise ValueError("no backend for suffix %s" % suffix)

    ent = entries_backend[0]
    if normalizeDN(suffix) != normalizeDN(ent.getValue('nsslapd-suffix')):
        # BUGFIX: this used to read 'entries_backend[0].nsslapd - suffix',
        # an arithmetic subtraction on a nonexistent attribute that raised
        # AttributeError whenever the suffixes differed.
        self.log.warning("enableReplication: suffix (%s) and backend suffix (%s) differs" %
                         (suffix, ent.getValue('nsslapd-suffix')))
        pass

    # Now prepare the bindDN property
    if not binddn:
        binddn = defaultProperties.get(REPLICATION_BIND_DN, None)
        if not binddn:
            # weird, internal error we do not retrieve the default
            # replication bind DN this replica will not be updatable
            # through replication until the binddn property will be set
            self.log.warning("enableReplication: binddn not provided and default value unavailable")
            pass

    # Now do the effectif job
    # First add the changelog if master/hub
    if (role == REPLICAROLE_MASTER) or (role == REPLICAROLE_HUB):
        self.conn.changelog.create()

    # Second create the default replica manager entry if it does not exist
    # it should not be called from here but for the moment I am unsure
    # when to create it elsewhere
    self.conn.replica.create_repl_manager()

    # then enable replication
    properties = {REPLICA_BINDDN: [binddn]}
    ret = self.conn.replica.create(suffix=suffix, role=role, rid=replicaId,
                                   args=properties)

    return ret
def create(self, suffix=None, bename=None, parent=None):
    '''
    Create a mapping tree entry (under "cn=mapping tree,cn=config") for
    'suffix', pointing at the 'bename' backend.  The 'bename' backend
    must exist before creating the mapping tree entry.

    Providing a 'parent' means we are creating a sub-suffix mapping tree.

    @param suffix - suffix mapped by this mapping tree entry; it becomes
                    the common name ('cn') of the entry
    @param benamebase - backend common name (e.g. 'userRoot')
    @param parent - if provided, a parent suffix of 'suffix'

    @return DN of the mapping tree entry

    @raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping
                                 tree does not exist
           ValueError - if missing a parameter,
    '''
    # Suffix is mandatory; normalize it for the entry attributes.
    if not suffix:
        raise ValueError("suffix is mandatory")
    normalized_suffix = normalizeDN(suffix)

    # The backend name is mandatory too.
    if not bename:
        raise ValueError("backend name is mandatory")

    # A sub-suffix requires its parent to already own a mapping tree.
    normalized_parent = ""
    if parent:
        normalized_parent = normalizeDN(parent)
        try:
            self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
                               suffixfilt(parent))
        except NoSuchEntryError:
            raise ValueError("parent suffix has no mapping tree")

    # If a mapping tree already exists for this suffix, return it as-is.
    try:
        existing = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
                                      suffixfilt(suffix))
        return existing
    except ldap.NO_SUCH_OBJECT:
        pass

    #
    # Now start the real work
    #

    # fix me when we can actually used escaped DNs
    dn = ','.join(('cn="%s"' % normalized_suffix, DN_MAPPING_TREE))
    mt_entry = Entry(dn)
    mt_entry.update({
        'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],
        'nsslapd-state': 'backend',
        # the value in the dn has to be DN escaped
        # internal code will add the quoted value - unquoted value is
        # useful for searching.
        MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: normalized_suffix,
        MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename
    })

    # Record the parent suffix on sub-suffix mapping trees.
    if parent:
        mt_entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX],
                           normalized_parent)

    try:
        self.log.debug("Creating entry: %s", mt_entry.dn)
        self.log.info("Entry %r", mt_entry)
        self.conn.add_s(mt_entry)
    except ldap.LDAPError as e:
        raise ldap.LDAPError("Error adding suffix entry " + dn, e)

    return self.conn._test_entry(dn, ldap.SCOPE_BASE)
def create(self, suffix=None, properties=None):
    """
    Creates backend entry and returns its dn.

    If the properties 'chain-bind-pwd' and 'chain-bind-dn' and
    'chain-urls' are specified the backend is a chained backend.  A
    chaining backend is created under
    'cn=chaining database,cn=plugins,cn=config'.

    A local backend is created under
    'cn=ldbm database,cn=plugins,cn=config'

    @param suffix - suffix stored in the backend
    @param properties - dictionary with properties values
    supported properties are
        BACKEND_NAME          = 'name'
        BACKEND_READONLY      = 'read-only'
        BACKEND_REQ_INDEX     = 'require-index'
        BACKEND_CACHE_ENTRIES = 'entry-cache-number'
        BACKEND_CACHE_SIZE    = 'entry-cache-size'
        BACKEND_DNCACHE_SIZE  = 'dn-cache-size'
        BACKEND_DIRECTORY     = 'directory'
        BACKEND_DB_DEADLOCK   = 'db-deadlock'
        BACKEND_CHAIN_BIND_DN = 'chain-bind-dn'
        BACKEND_CHAIN_BIND_PW = 'chain-bind-pw'
        BACKEND_CHAIN_URLS    = 'chain-urls'

    @return backend DN of the created backend

    @raise ValueError - If missing suffix
           InvalidArgumentError - If it already exists a backend for that
               suffix or a backend with the same DN
    """

    def _getBackendName(parent):
        '''
        Use to build a backend name ("local<N>db") that is not already
        used under the given parent entry.
        '''
        index = 1
        while True:
            bename = "local%ddb" % index
            base = ("%s=%s,%s" %
                    (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME],
                     bename, parent))
            filt = "(objectclass=%s)" % BACKEND_OBJECTCLASS_VALUE
            # BUGFIX: message used to read "baser=... : fileter=..."
            self.log.debug("_getBackendName: base=%s : filter=%s" %
                           (base, filt))
            try:
                self.conn.getEntry(base, ldap.SCOPE_BASE, filt)
            except (NoSuchEntryError, ldap.NO_SUCH_OBJECT):
                # Name is free: use it.
                self.log.info("backend name will be %s" % bename)
                return bename
            index += 1

    # suffix is mandatory
    if not suffix:
        raise ValueError("suffix is mandatory")
    nsuffix = normalizeDN(suffix)

    # Check it does not already exist a backend for that suffix
    ents = self.conn.backend.list(suffix=suffix)
    if len(ents) != 0:
        raise InvalidArgumentError("It already exists backend(s) for %s: %s" %
                                   (suffix, ents[0].dn))

    # Check if we are creating a local/chained backend: a chained backend
    # requires bind DN, bind password and URLs to all be present.
    chained_suffix = (properties and
                      (BACKEND_CHAIN_BIND_DN in properties) and
                      (BACKEND_CHAIN_BIND_PW in properties) and
                      (BACKEND_CHAIN_URLS in properties))
    if chained_suffix:
        self.log.info("Creating a chaining backend")
        dnbase = DN_CHAIN
    else:
        self.log.info("Creating a local backend")
        dnbase = DN_LDBM

    # Get the future backend name
    if properties and BACKEND_NAME in properties:
        cn = properties[BACKEND_NAME]
    else:
        cn = _getBackendName(dnbase)

    # Check the future backend name does not already exists
    # we can imagine having no backends for 'suffix' but having a backend
    # with the same name
    dn = "%s=%s,%s" % (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME], cn, dnbase)
    ents = self.conn.backend.list(backend_dn=dn)
    if ents:
        raise InvalidArgumentError("It already exists a backend with that DN: %s" %
                                   ents[0].dn)

    # All checks are done, Time to create the backend
    try:
        entry = Entry(dn)
        entry.update({
            'objectclass': ['top', 'extensibleObject',
                            BACKEND_OBJECTCLASS_VALUE],
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME]: cn,
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX]: nsuffix
        })
        if chained_suffix:
            entry.update({
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_URLS]:
                    properties[BACKEND_CHAIN_URLS],
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_BIND_DN]:
                    properties[BACKEND_CHAIN_BIND_DN],
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_BIND_PW]:
                    properties[BACKEND_CHAIN_BIND_PW]
            })

        self.log.debug("adding entry: %r" % entry)
        self.conn.add_s(entry)
    except ldap.ALREADY_EXISTS as e:
        # Modernized from the Python 2 'except X, e' syntax for
        # consistency with the rest of the file.
        self.log.error("Entry already exists: %r" % dn)
        raise ldap.ALREADY_EXISTS("%s : %r" % (e, dn))

    # BUGFIX: the docstring promises the backend DN, but the function
    # previously fell off the end and returned None.
    return dn
def get(self, dn):
    # Look up the cached entry by its normalized DN; unknown DNs yield
    # an empty placeholder Entry rather than raising.
    key = normalizeDN(dn)
    try:
        return self.dndict[key]
    except KeyError:
        return Entry(None)
def create(self, suffix=None, host=None, port=None, properties=None,
           winsync=False):
    """Create (and return) a replication agreement from self to consumer.
    - self is the supplier,

    @param suffix - Replication Root
    @param host - Consumer host
    @param port - Consumer port
    @param winsync - Identifies the agree as a WinSync agreement
    @param properties - Agreement properties

    @return dn_agreement - DN of the created agreement

    @raise InvalidArgumentError - If the suffix is missing
    @raise NoSuchEntryError - if a replica doesn't exist for that suffix
    @raise ldap.LDAPError - ldap error
    """
    # Check we have a suffix [ mandatory ]
    if not suffix:
        self.log.warning("create: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory')

    # BUGFIX: 'properties' defaults to None but was written to
    # unconditionally below, raising TypeError for callers relying on
    # the default.  Normalize it first (the sibling implementation of
    # this method already does this).
    if not properties:
        properties = {}

    # Compute the normalized suffix to be set in RA entry
    properties[RA_SUFFIX] = normalizeDN(suffix)

    # Adding agreement under the replica entry
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError(
            "Error: no replica set up for suffix: %s" % suffix)
    replica = replica_entries[0]

    # Define agreement entry
    if RA_NAME not in properties:
        properties[RA_NAME] = 'meTo_%s:%s' % (host, port)
    dn_agreement = ','.join(["cn=%s" % properties[RA_NAME], replica.dn])

    # Set the required properties(if not already set)
    if RA_BINDDN not in properties:
        properties[RA_BINDDN] = defaultProperties[REPLICATION_BIND_DN]
    if RA_BINDPW not in properties:
        properties[RA_BINDPW] = defaultProperties[REPLICATION_BIND_PW]
    if RA_METHOD not in properties:
        properties[RA_METHOD] = defaultProperties[REPLICATION_BIND_METHOD]
    if RA_TRANSPORT_PROT not in properties:
        properties[RA_TRANSPORT_PROT] = \
            defaultProperties[REPLICATION_TRANSPORT]
    if RA_TIMEOUT not in properties:
        properties[RA_TIMEOUT] = defaultProperties[REPLICATION_TIMEOUT]
    if RA_DESCRIPTION not in properties:
        properties[RA_DESCRIPTION] = properties[RA_NAME]
    if RA_CONSUMER_HOST not in properties:
        properties[RA_CONSUMER_HOST] = host
    if RA_CONSUMER_PORT not in properties:
        properties[RA_CONSUMER_PORT] = str(port)

    # This is probably unnecessary because
    # we can just raise ALREADY_EXISTS
    try:
        entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE)
        self.log.warn("Agreement already exists: %r" % dn_agreement)
        return dn_agreement
    except ldap.NO_SUCH_OBJECT:
        entry = None

    # Iterate over the properties, adding them to the entry
    entry = Entry(dn_agreement)
    entry.update({'objectclass': ["top", RA_OBJECTCLASS_VALUE]})
    for prop in properties:
        entry.update({RA_PROPNAME_TO_ATTRNAME[prop]: properties[prop]})

    # we make a copy here because we cannot change
    # the passed in properties dict
    propertiescopy = {}
    if properties:
        import copy
        propertiescopy = copy.deepcopy(properties)

    # Check if this a Winsync Agreement
    if winsync:
        self.conn.setupWinSyncAgmt(propertiescopy, entry)

    try:
        self.log.debug("Adding replica agreement: [%r]" % entry)
        self.conn.add_s(entry)
    except ldap.LDAPError as e:
        self.log.fatal('Failed to add replication agreement: %s' % str(e))
        raise e

    entry = self.conn.waitForEntry(dn_agreement)
    if entry:
        # More verbose but shows what's going on
        if 'chain' in propertiescopy:
            raise NotImplementedError
            # NOTE(review): everything below the raise is unreachable
            # dead code; it also references names (binddn, bindpw,
            # consumer) that are not defined in this scope.
            chain_args = {
                'suffix': suffix,
                'binddn': binddn,
                'bindpw': bindpw
            }
            # Work on `self` aka producer
            if replica.nsds5replicatype == MASTER_TYPE:
                self.conn.setupChainingFarm(**chain_args)
            # Work on `consumer`
            # TODO - is it really required?
            if replica.nsds5replicatype == LEAF_TYPE:
                chain_args.update({
                    'isIntermediate': 0,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
            elif replica.nsds5replicatype == HUB_TYPE:
                chain_args.update({
                    'isIntermediate': 1,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
    return dn_agreement
def create(self, suffix=None, host=None, port=None, properties=None,
           winsync=False):
    """Create (and return) a replication agreement from self to consumer.
    Self is the supplier.

    :param suffix: Replication Root
    :type suffix: str
    :param host: Consumer host
    :type host: str
    :param port: Consumer port
    :type port: int
    :param winsync: Identifies the agree as a WinSync agreement
    :type winsync: bool
    :param properties: Agreement properties
    :type properties: dict

    :returns: DN of the created agreement
    :raises: - InvalidArgumentError - If the suffix is missing
             - NoSuchEntryError - if a replica doesn't exist for that suffix
             - ldap.LDAPError - ldap error
    """
    # Check we have a suffix [ mandatory ]
    if not suffix:
        self.log.warning("create: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory')

    # Normalize to an empty dict so the defaults below can be filled in.
    if not properties:
        properties = {}

    # Compute the normalized suffix to be set in RA entry
    properties[RA_SUFFIX] = normalizeDN(suffix)

    # The agreement entry hangs under the replica entry for this suffix.
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError("Error: no replica set up for suffix: %s" %
                               suffix)
    replica = replica_entries[0]

    # Define agreement entry: default name encodes the consumer host:port.
    if RA_NAME not in properties:
        properties[RA_NAME] = 'meTo_%s:%s' % (host, port)
    dn_agreement = ','.join(["cn=%s" % properties[RA_NAME], replica.dn])

    # Set the required properties(if not already set)
    if RA_BINDDN not in properties:
        properties[RA_BINDDN] = defaultProperties[REPLICATION_BIND_DN]
    if RA_BINDPW not in properties:
        properties[RA_BINDPW] = defaultProperties[REPLICATION_BIND_PW]
    if RA_METHOD not in properties:
        properties[RA_METHOD] = defaultProperties[REPLICATION_BIND_METHOD]
    if RA_TRANSPORT_PROT not in properties:
        properties[RA_TRANSPORT_PROT] = \
            defaultProperties[REPLICATION_TRANSPORT]
    if RA_TIMEOUT not in properties:
        properties[RA_TIMEOUT] = defaultProperties[REPLICATION_TIMEOUT]
    if RA_DESCRIPTION not in properties:
        properties[RA_DESCRIPTION] = properties[RA_NAME]
    if RA_CONSUMER_HOST not in properties:
        properties[RA_CONSUMER_HOST] = host
    if RA_CONSUMER_PORT not in properties:
        properties[RA_CONSUMER_PORT] = str(port)

    # This is probably unnecessary because
    # we can just raise ALREADY_EXISTS
    try:
        entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE)
        self.log.warn("Agreement already exists: %r" % dn_agreement)
        return dn_agreement
    except ldap.NO_SUCH_OBJECT:
        entry = None

    # Iterate over the properties, adding them to the entry
    entry = Entry(dn_agreement)
    entry.update({'objectclass': ["top", RA_OBJECTCLASS_VALUE]})
    for prop in properties:
        entry.update({RA_PROPNAME_TO_ATTRNAME[prop]: properties[prop]})

    # we make a copy here because we cannot change
    # the passed in properties dict
    propertiescopy = {}
    if properties:
        import copy
        propertiescopy = copy.deepcopy(properties)

    # Check if this a Winsync Agreement
    if winsync:
        self.conn.setupWinSyncAgmt(propertiescopy, entry)

    try:
        self.log.debug("Adding replica agreement: [%r]" % entry)
        self.conn.add_s(entry)
    except ldap.LDAPError as e:
        self.log.fatal('Failed to add replication agreement: %s' % str(e))
        raise e

    # Wait until the server exposes the new agreement entry.
    entry = self.conn.waitForEntry(dn_agreement)
    if entry:
        # More verbose but shows what's going on
        if 'chain' in propertiescopy:
            raise NotImplementedError
            # NOTE(review): everything below the raise is unreachable
            # dead code; it also references names (binddn, bindpw,
            # consumer) that are not defined in this scope — confirm
            # before ever resurrecting it.
            chain_args = {
                'suffix': suffix,
                'binddn': binddn,
                'bindpw': bindpw
            }
            # Work on `self` aka producer
            if replica.nsds5replicatype == MASTER_TYPE:
                self.conn.setupChainingFarm(**chain_args)
            # Work on `consumer`
            # TODO - is it really required?
            if replica.nsds5replicatype == LEAF_TYPE:
                chain_args.update({
                    'isIntermediate': 0,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
            elif replica.nsds5replicatype == HUB_TYPE:
                chain_args.update({
                    'isIntermediate': 1,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
    return dn_agreement
def init(self, suffix=None, consumer_host=None, consumer_port=None):
    """Trigger a total update of the consumer replica
    - self is the supplier,
    - consumer is a DirSrv object (consumer can be a master)
    - cn_format - use this string to format the agreement name

    @param - suffix is the suffix targeted by the total update [mandatory]
    @param - consumer_host hostname of the consumer [mandatory]
    @param - consumer_port port of the consumer [mandatory]

    @raise InvalidArgument: if missing mandatory argurment
                            (suffix/host/port)
    """
    # All three arguments are mandatory; reject early.
    if not suffix:
        self.log.fatal("initAgreement: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory argument')
    nsuffix = normalizeDN(suffix)

    if not consumer_host:
        self.log.fatal("initAgreement: host is missing")
        raise InvalidArgumentError('host is mandatory argument')

    if not consumer_port:
        self.log.fatal("initAgreement: port is missing")
        raise InvalidArgumentError('port is mandatory argument')

    # The agreement lives directly under the replica entry for the
    # suffix, so locate that replica first.
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError(
            "Error: no replica set up for suffix " + suffix)
    replica_entry = replica_entries[0]
    self.log.debug("initAgreement: looking for replica agreements " +
                   "under %s" % replica_entry.dn)

    # Find the single agreement matching consumer host, port and root.
    search_filter = ("(&(objectclass=nsds5replicationagreement)"
                     "(nsds5replicahost=%s)"
                     "(nsds5replicaport=%d)"
                     "(nsds5replicaroot=%s))" %
                     (consumer_host, consumer_port, nsuffix))
    try:
        entry = self.conn.getEntry(replica_entry.dn, ldap.SCOPE_ONELEVEL,
                                   search_filter)
    except ldap.NO_SUCH_OBJECT:
        msg = ('initAgreement: No replica agreement to ' +
               '{host}:{port} for suffix {suffix}'.format(
                   host=consumer_host,
                   port=consumer_port,
                   suffix=nsuffix))
        self.log.fatal(msg)
        raise

    # Writing 'start' into nsds5BeginReplicaRefresh kicks off the
    # total init on the server side.
    self.log.info("Starting total init %s" % entry.dn)
    mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', 'start')]
    self.conn.modify_s(entry.dn, mod)
def create(self, suffix=None, bename=None, parent=None):
    '''
    Create a mapping tree entry (under "cn=mapping tree,cn=config"),
    for the 'suffix' and that is stored in 'bename' backend.
    'bename' backend must exists before creating the mapping tree entry.

    If a 'parent' is provided that means that we are creating a
    sub-suffix mapping tree.

    @param suffix - suffix mapped by this mapping tree entry. It will
                    be the common name ('cn') of the entry
    @param benamebase - backend common name (e.g. 'userRoot')
    @param parent - if provided is a parent suffix of 'suffix'

    @return DN of the mapping tree entry

    @raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping
                                 tree does not exist
           ValueError - if missing a parameter
    '''
    # Check suffix is provided
    if not suffix:
        raise ValueError("suffix is mandatory")
    else:
        nsuffix = normalizeDN(suffix)

    # Check backend name is provided
    if not bename:
        raise ValueError("backend name is mandatory")

    # Check that if the parent suffix is provided then
    # it exists a mapping tree for it
    if parent:
        nparent = normalizeDN(parent)
        filt = suffixfilt(parent)
        try:
            entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
                                       filt)
        except NoSuchEntryError:
            raise ValueError("parent suffix has no mapping tree")
    else:
        nparent = ""

    # Check if suffix exists, return
    filt = suffixfilt(suffix)
    try:
        entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE, filt)
        return entry
    except NoSuchEntryError:
        entry = None

    #
    # Now start the real work
    #

    # fix me when we can actually used escaped DNs
    dn = ','.join(('cn="%s"' % nsuffix, DN_MAPPING_TREE))
    entry = Entry(dn)
    entry.update({
        'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],
        'nsslapd-state': 'backend',
        # the value in the dn has to be DN escaped
        # internal code will add the quoted value - unquoted value is
        # useful for searching
        MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: nsuffix,
        MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename
    })

    # possibly add the parent
    if parent:
        entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX], nparent)

    try:
        self.log.debug("Creating entry: %s" % entry.dn)
        self.log.info("Entry %r" % entry)
        self.conn.add_s(entry)
    except ldap.LDAPError as e:
        # Modernized from the Python 2 'except X, e' syntax, consistent
        # with the other implementation of this method in the file.
        raise ldap.LDAPError("Error adding suffix entry " + dn, e)

    # BUGFIX: the docstring promises the mapping tree entry, but the
    # function previously returned None after a successful add; return
    # the re-read entry as the sibling implementation does.
    return self.conn._test_entry(dn, ldap.SCOPE_BASE)
def delete(self, suffix=None, backend_dn=None, bename=None):
    """
    Delete a backend entry and everything below it:
      - the index entries of the backend
      - the encrypted attribute / key entries of the backend
      - the backend entry itself

    If a mapping tree entry still references this backend
    (nsslapd-backend), the deletion is refused with
    ldap.UNWILLING_TO_PERFORM.

    Of 'suffix' / 'backend_dn' / 'bename', the lookup uses 'backend_dn'
    first, then 'suffix', then 'bename'.

    @param suffix - suffix of the backend
    @param backend_dn - DN of the backend entry
    @param bename - 'commonname'/'cn' of the backend (e.g. 'userRoot')

    @return None

    @raise ldap.UNWILLING_TO_PERFORM - if several backends match the
           arguments, the provided suffix does not match the backend
           suffix, or a mapping tree still uses that backend
    """
    # Resolve the backend from whichever identifier(s) were supplied.
    be_ents = self.conn.backend.list(suffix=suffix,
                                     backend_dn=backend_dn,
                                     bename=bename)
    if not be_ents:
        raise ldap.UNWILLING_TO_PERFORM(
            "Unable to retrieve the backend (%r, %r, %r)" %
            (suffix, backend_dn, bename))
    if len(be_ents) > 1:
        # Ambiguous selection: report every match, then refuse.
        for ent in be_ents:
            self.log.fatal("Multiple backend match the definition: %s",
                           ent.dn)
        if (not suffix) and (not backend_dn) and (not bename):
            raise ldap.UNWILLING_TO_PERFORM(
                "suffix and backend DN and backend name are missing")
        raise ldap.UNWILLING_TO_PERFORM(
            "Not able to identify the backend to delete")

    be_ent = be_ents[0]
    be_suffix = be_ent.getValue(
        BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX])

    # If a suffix was given it must agree with the backend's own suffix.
    if suffix and (normalizeDN(suffix) != normalizeDN(be_suffix)):
        raise ldap.UNWILLING_TO_PERFORM(
            "provided suffix (%s) differs from backend suffix (%s)"
            % (suffix, be_suffix))

    # Refuse to delete while a mapping tree still points at the backend.
    mt_ents = self.conn.mappingtree.list(suffix=be_suffix)
    if len(mt_ents) > 0:
        raise ldap.UNWILLING_TO_PERFORM(
            "It still exists a mapping tree (%s) for that backend (%s)" %
            (mt_ents[0].dn, be_ent.dn))

    # The backend name, when given, must match the entry's cn
    # (case-insensitively).
    found_bename = ensure_str(
        be_ent.getValue(BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME]))
    if not bename:
        bename = found_bename
    elif bename.lower() != found_bename.lower():
        raise ldap.UNWILLING_TO_PERFORM(
            "Backend name specified (%s) differs from the retrieved one (%s)" %
            (bename, found_bename))

    # Drop the indexes first, then every child entry, then the backend
    # entry itself.
    self.conn.index.delete_all(bename)

    for child in self.conn.search_s(be_ent.dn, ldap.SCOPE_ONELEVEL):
        self.log.debug("Delete entry children %s", child.dn)
        self.conn.delete_s(child.dn)

    self.log.debug("Delete backend entry %s", be_ent.dn)
    self.conn.delete_s(be_ent.dn)

    return