def test_update_complex(self):
    """An Entry populated attribute-by-attribute via setValues() must
    stringify identically to one built from a dict and completed with
    Entry.update()."""
    # compare two entries created with different methods
    nsuffix, replid, replicatype = ("dc=example,dc=com", 5,
                                    lib389.REPLICA_RDWR_TYPE)
    binddnlist, legacy = ['uid=pippo, cn=config'], 'off'
    dn = "dc=example,dc=com"
    entry = Entry(dn)
    entry.setValues(
        'objectclass', "top", "nsds5replica", "extensibleobject")
    entry.setValues('cn', "replica")
    entry.setValues('nsds5replicaroot', nsuffix)
    entry.setValues('nsds5replicaid', str(replid))
    entry.setValues('nsds5replicatype', str(replicatype))
    entry.setValues('nsds5flags', "1")
    entry.setValues('nsds5replicabinddn', binddnlist)
    entry.setValues('nsds5replicalegacyconsumer', legacy)

    uentry = Entry((
        dn, {'objectclass': ["top", "nsds5replica", "extensibleobject"],
             'cn': ["replica"]})
    )
    # BUG FIX: the original passed `uentry` as an extra positional argument
    # with no matching %-placeholder in the format string, which makes the
    # logging module raise an internal formatting error. Use lazy %-style
    # formatting instead.
    log.debug("Entry created with dict: %r", uentry)
    # Entry.update *replaces*, so be careful with multi-valued attrs
    uentry.update({
        'nsds5replicaroot': nsuffix,
        'nsds5replicaid': str(replid),
        'nsds5replicatype': str(replicatype),
        'nsds5flags': '1',
        'nsds5replicabinddn': binddnlist,
        'nsds5replicalegacyconsumer': legacy
    })
    uentry_s, entry_s = list(map(str, (uentry, entry)))
    assert uentry_s == entry_s, "Mismatching entries [%r] vs [%r]" % (
        uentry, entry)
def test_update_complex(self):
    """An Entry populated attribute-by-attribute via setValues() must
    stringify identically to one built from a dict and completed with
    Entry.update()."""
    # compare two entries created with different methods
    nsuffix, replid, replicatype = ("dc=example,dc=com", 5,
                                    lib389.REPLICA_RDWR_TYPE)
    binddnlist, legacy = ['uid=pippo, cn=config'], 'off'
    dn = "dc=example,dc=com"
    entry = Entry(dn)
    entry.setValues('objectclass', "top", "nsds5replica", "extensibleobject")
    entry.setValues('cn', "replica")
    entry.setValues('nsds5replicaroot', nsuffix)
    entry.setValues('nsds5replicaid', str(replid))
    entry.setValues('nsds5replicatype', str(replicatype))
    entry.setValues('nsds5flags', "1")
    entry.setValues('nsds5replicabinddn', binddnlist)
    entry.setValues('nsds5replicalegacyconsumer', legacy)

    uentry = Entry((dn, {
        'objectclass': ["top", "nsds5replica", "extensibleobject"],
        'cn': ["replica"]
    }))
    # BUG FIX: the original passed `uentry` as an extra positional argument
    # with no matching %-placeholder in the format string, which makes the
    # logging module raise an internal formatting error. Use lazy %-style
    # formatting instead.
    log.debug("Entry created with dict: %r", uentry)
    # Entry.update *replaces*, so be careful with multi-valued attrs
    uentry.update({
        'nsds5replicaroot': nsuffix,
        'nsds5replicaid': str(replid),
        'nsds5replicatype': str(replicatype),
        'nsds5flags': '1',
        'nsds5replicabinddn': binddnlist,
        'nsds5replicalegacyconsumer': legacy
    })
    uentry_s, entry_s = list(map(str, (uentry, entry)))
    assert uentry_s == entry_s, "Mismatching entries [%r] vs [%r]" % (
        uentry, entry)
def bak2db(self, bename=None, backup_dir=None, args=None): ''' Restore a backup by creating a bak2db task @param bename - 'commonname'/'cn' of the backend (e.g. 'userRoot') @param backup_dir - backup directory @param args - is a dictionary that contains modifier of the task wait: True/[False] - If True, waits for the completion of the task before to return @return exit code @raise ValueError: if bename name does not exist ''' # Checking the parameters if not backup_dir: raise ValueError("You must specify a backup directory") if not os.path.exists(backup_dir): raise ValueError("Backup file (%s) does not exist" % backup_dir) # If a backend name was provided then verify it if bename: ents = self.conn.mappingtree.list(bename=bename) if len(ents) != 1: raise ValueError("invalid backend name: %s" % bename) # build the task entry cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK) entry = Entry(dn) entry.update({ 'objectclass': ['top', 'extensibleObject'], 'cn': cn, 'nsArchiveDir': backup_dir, 'nsDatabaseType': 'ldbm database' }) if bename: entry.update({'nsInstance': bename}) # start the task and possibly wait for task completion try: self.conn.add_s(entry) except ldap.ALREADY_EXISTS: self.log.error("Fail to add the backup task (%s)" % dn) return -1 exitCode = 0 if args and args.get(TASK_WAIT, False): (done, exitCode) = self.conn.tasks.checkTask(entry, True) if exitCode: self.log.error("Error: restore task %s exited with %d" % (cn, exitCode)) else: self.log.info("Restore task %s completed successfully" % (cn)) return exitCode
def test_update(self):
    """Entry.update() must overwrite the 'cn' attribute with the
    supplied value."""
    expected = 'pluto minnie'
    payload = {'cn': expected}
    template = ('o=pippo', {
        'o': ['pippo'],
        'objectclass': ['organization', 'top']
    })
    ent = Entry(template)
    ent.update(payload)
    assert ent.cn == expected, "Bad cn: %s, expected: %s" % (ent.cn, expected)
def enable_ssl(self, secport=636, secargs=None):
    """Configure SSL support into cn=encryption,cn=config.

    secargs is a dict like {
        'nsSSLPersonalitySSL': 'Server-Cert'
    }

    Any key absent from secargs falls back to the defaults hard-coded
    below. Returns the resulting cn=config entry restricted to the
    security-related attributes.
    """
    self._log.debug("config.enable_ssl is deprecated! Use RSA, Encryption instead!")
    self._log.debug("configuring SSL with secargs:%r" % secargs)
    secargs = secargs or {}

    dn_enc = 'cn=encryption,cn=config'
    # legacy default cipher list (SSLv3-era names, kept verbatim)
    ciphers = ('-rsa_null_md5,+rsa_rc4_128_md5,+rsa_rc4_40_md5,'
               '+rsa_rc2_40_md5,+rsa_des_sha,+rsa_fips_des_sha,'
               '+rsa_3des_sha,+rsa_fips_3des_sha,+tls_rsa_export1024'
               '_with_rc4_56_sha,+tls_rsa_export1024_with_des_cbc_sha')
    # step 1: enable SSL3/client-auth/ciphers on the encryption entry
    mod = [(ldap.MOD_REPLACE, 'nsSSL3', secargs.get('nsSSL3', 'on')),
           (ldap.MOD_REPLACE, 'nsSSLClientAuth',
            secargs.get('nsSSLClientAuth', 'allowed')),
           (ldap.MOD_REPLACE, 'nsSSL3Ciphers',
            secargs.get('nsSSL3Ciphers', ciphers))]
    self._instance.modify_s(dn_enc, mod)

    # step 2: create the RSA module entry (ignored if it already exists)
    dn_rsa = 'cn=RSA,cn=encryption,cn=config'
    e_rsa = Entry(dn_rsa)
    e_rsa.update({
        'objectclass': ['top', 'nsEncryptionModule'],
        'nsSSLPersonalitySSL':
            secargs.get('nsSSLPersonalitySSL', 'Server-Cert'),
        'nsSSLToken': secargs.get('nsSSLToken', 'internal (software)'),
        'nsSSLActivation': secargs.get('nsSSLActivation', 'on')
    })
    try:
        self._instance.add_s(e_rsa)
    except ldap.ALREADY_EXISTS:
        # idempotent: an existing RSA entry is left untouched
        pass

    # step 3: flip the global security switches and the secure port
    mod = [
        (ldap.MOD_REPLACE,
         'nsslapd-security',
         secargs.get('nsslapd-security', 'on')),
        (ldap.MOD_REPLACE,
         'nsslapd-ssl-check-hostname',
         secargs.get('nsslapd-ssl-check-hostname', 'off')),
        (ldap.MOD_REPLACE,
         'nsslapd-secureport',
         str(secport))
    ]
    self._log.debug("trying to modify %r with %r" % (DN_CONFIG, mod))
    self._instance.modify_s(DN_CONFIG, mod)

    fields = 'nsslapd-security nsslapd-ssl-check-hostname'.split()
    return self._instance.getEntry(DN_CONFIG, attrlist=fields)
def db2bak(self, backup_dir=None, args=None): ''' Perform a backup by creating a db2bak task @param backup_dir - backup directory @param args - is a dictionary that contains modifier of the task wait: True/[False] - If True, waits for the completion of the task before to return @return exit code @raise ValueError: if bename name does not exist ''' # Checking the parameters if not backup_dir: raise ValueError("You must specify a backup directory.") # build the task entry cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime()) dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK) entry = Entry(dn) entry.update({ 'objectclass': ['top', 'extensibleObject'], 'cn': cn, 'nsArchiveDir': backup_dir, 'nsDatabaseType': 'ldbm database' }) # start the task and possibly wait for task completion try: self.conn.add_s(entry) except ldap.ALREADY_EXISTS: self.log.error("Fail to add the backup task (%s)" % dn) return -1 exitCode = 0 if args and args.get(TASK_WAIT, False): (done, exitCode) = self.conn.tasks.checkTask(entry, True) if exitCode: self.log.error("Error: backup task %s exited with %d" % (cn, exitCode)) else: self.log.info("Backup task %s completed successfully" % (cn)) self.dn = dn self.entry = entry return exitCode
def create(self, dbname=DEFAULT_CHANGELOG_DB):
    """Add and return the replication changelog entry.

    If dbname starts with "/" then it's considered a full path,
    otherwise it's relative to self.dbdir
    """
    changelog_dn = DN_CHANGELOG
    # pull attribute/value out of the RDN (e.g. "cn=changelog5")
    rdn = changelog_dn.split(",")[0]
    attribute, changelog_name = rdn.split("=", 1)
    dirpath = os.path.join(self.conn.dbdir, dbname)

    changelog_entry = Entry(changelog_dn)
    changelog_entry.update({
        'objectclass': ("top", "extensibleobject"),
        CHANGELOG_PROPNAME_TO_ATTRNAME[CHANGELOG_NAME]: changelog_name,
        CHANGELOG_PROPNAME_TO_ATTRNAME[CHANGELOG_DIR]: dirpath
    })
    self.log.debug("adding changelog entry: %r" % changelog_entry)
    self.changelogdir = dirpath
    try:
        self.conn.add_s(changelog_entry)
    except ldap.ALREADY_EXISTS:
        # a pre-existing changelog is not an error
        self.log.warn("entry %s already exists" % changelog_dn)
    return changelog_dn
def create(self, dbname=DEFAULT_CHANGELOG_DB):
    """Add and return the replication changelog entry.

    If dbname starts with "/" then it's considered a full path,
    otherwise it's relative to self.dbdir
    """
    dn = DN_CHANGELOG
    # split the RDN (e.g. "cn=changelog5") into attribute and value
    attribute, changelog_name = dn.split(",")[0].split("=", 1)
    dirpath = os.path.join(self.conn.dbdir, dbname)
    entry = Entry(dn)
    entry.update({
        'objectclass': ("top", "extensibleobject"),
        CHANGELOG_PROPNAME_TO_ATTRNAME[CHANGELOG_NAME]: changelog_name,
        CHANGELOG_PROPNAME_TO_ATTRNAME[CHANGELOG_DIR]: dirpath
    })
    self.log.debug("adding changelog entry: %r" % entry)
    # remember where the changelog lives for later use
    self.changelogdir = dirpath
    try:
        self.conn.add_s(entry)
    except ldap.ALREADY_EXISTS:
        # a pre-existing changelog is not an error
        self.log.warn("entry %s already exists" % dn)
    return(dn)
def create(self, dbname=DEFAULT_CHANGELOG_DB):
    """Add and return the replication changelog entry.

    :param dbname: Database name, it will be used for creating
                   a changelog dir path
    :type dbname: str
    """
    dn = DN_CHANGELOG
    # split the RDN (e.g. "cn=changelog5") into attribute and value
    attribute, changelog_name = dn.split(",")[0].split("=", 1)
    # NOTE(review): unlike the older variants, the changelog directory is
    # created as a *sibling* of self.conn.dbdir (via dirname), not inside
    # it — confirm this is the intended layout.
    dirpath = os.path.join(os.path.dirname(self.conn.dbdir), dbname)
    entry = Entry(dn)
    entry.update({
        'objectclass': ("top", "extensibleobject"),
        CHANGELOG_PROPNAME_TO_ATTRNAME[CHANGELOG_NAME]: changelog_name,
        CHANGELOG_PROPNAME_TO_ATTRNAME[CHANGELOG_DIR]: dirpath
    })
    self.log.debug("adding changelog entry: %r" % entry)
    # stored on the connection object here (self.conn), not on self
    self.conn.changelogdir = dirpath
    try:
        self.conn.add_s(entry)
    except ldap.ALREADY_EXISTS:
        # a pre-existing changelog is not an error
        self.log.warn("entry %s already exists" % dn)
    return dn
def exportLDIF(self, suffix=None, benamebase=None, output_file=None,
               args=None):
    '''
    Export in a LDIF format a given 'suffix' (or 'benamebase' that stores
    that suffix). It uses an internal task to acheive this request.

    If 'suffix' and 'benamebase' are specified, it uses 'benamebase' first
    else 'suffix'.
    If both 'suffix' and 'benamebase' are missing it raises ValueError

    'output_file' is the output file of the export

    @param suffix - suffix of the backend
    @param benamebase - 'commonname'/'cn' of the backend (e.g. 'userRoot')
    @param output_file - file that will contain the exported suffix in
                         LDIF format
    @param args - is a dictionary that contains modifier of the export
                  task
            wait: True/[False] - If True, 'export' waits for the
                                 completion of the task before to return
            repl-info: True/[False] - If True, it adds the replication
                                      meta data (state information,
                                      tombstones and RUV) in the exported
                                      file

    @return exit code of the export task (0 on success)

    @raise ValueError
    '''
    # Checking the parameters
    if not benamebase and not suffix:
        raise ValueError("Specify either bename or suffix")
    if not output_file:
        raise ValueError("output_file is mandatory")

    # Prepare the task entry; cn embeds a timestamp so each run is unique
    cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
    dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
    entry = Entry(dn)
    entry.update({
        'objectclass': ['top', 'extensibleObject'],
        'cn': cn,
        'nsFilename': output_file
    })
    # benamebase wins over suffix when both are given
    if benamebase:
        entry.setValues('nsInstance', benamebase)
    else:
        entry.setValues('nsIncludeSuffix', suffix)

    # BUG FIX: args is optional (defaults to None) and every other access
    # below guards with "args and ..."; the original called args.get()
    # unconditionally here and raised AttributeError when args was omitted.
    if args and args.get(EXPORT_REPL_INFO, False):
        entry.setValues('nsExportReplica', 'true')

    # start the task and possibly wait for task completion
    self.conn.add_s(entry)

    exitCode = 0
    if args and args.get(TASK_WAIT, False):
        # checkTask(entry, True) blocks until the task finishes
        (done, exitCode) = self.conn.tasks.checkTask(entry, True)

    if exitCode:
        self.log.error("Error: export task %s for file %s exited with %d" % (
            cn, output_file, exitCode))
    else:
        self.log.info("Export task %s for file %s completed successfully" % (
            cn, output_file))
    return exitCode
def create(self, suffix=None, bename=None, parent=None):
    '''
    Create a mapping tree entry (under "cn=mapping tree,cn=config"),
    for the 'suffix' and that is stored in 'bename' backend.
    'bename' backend must exists before creating the mapping tree entry.

    If a 'parent' is provided that means that we are creating a
    sub-suffix mapping tree.

    @param suffix - suffix mapped by this mapping tree entry. It will
                    be the common name ('cn') of the entry
    @param benamebase - backend common name (e.g. 'userRoot')
    @param parent - if provided is a parent suffix of 'suffix'

    @return DN of the mapping tree entry (or the existing entry if a
            mapping tree for 'suffix' already exists)

    @raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping
                                 tree does not exist
           ValueError - if a mandatory parameter is missing
    '''
    # Check suffix is provided
    if not suffix:
        raise ValueError("suffix is mandatory")
    else:
        nsuffix = normalizeDN(suffix)

    # Check backend name is provided
    if not bename:
        raise ValueError("backend name is mandatory")

    # Check that if the parent suffix is provided then
    # it exists a mapping tree for it
    if parent:
        nparent = normalizeDN(parent)
        filt = suffixfilt(parent)
        try:
            entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
                                       filt)
        except NoSuchEntryError:
            raise ValueError("parent suffix has no mapping tree")
    else:
        nparent = ""

    # Check if suffix exists; if so return the existing entry unchanged
    filt = suffixfilt(suffix)
    try:
        entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE, filt)
        return entry
    except NoSuchEntryError:
        entry = None

    #
    # Now start the real work
    #

    # fix me when we can actually used escaped DNs
    dn = ','.join(('cn="%s"' % nsuffix, DN_MAPPING_TREE))
    entry = Entry(dn)
    entry.update({
        'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],
        'nsslapd-state': 'backend',
        # the value in the dn has to be DN escaped
        # internal code will add the quoted value - unquoted value is
        # useful for searching
        MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: nsuffix,
        MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename
    })

    # possibly add the parent
    if parent:
        entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX], nparent)

    try:
        self.log.debug("Creating entry: %s" % entry.dn)
        self.log.info("Entry %r" % entry)
        self.conn.add_s(entry)
    except ldap.LDAPError as e:
        # BUG FIX: "except ldap.LDAPError, e" is Python-2-only syntax and
        # a SyntaxError on Python 3; the sibling implementation of this
        # method already uses the portable "as e" form.
        raise ldap.LDAPError("Error adding suffix entry " + dn, e)
def create(self, suffix=None, host=None, port=None, properties=None,
           winsync=False):
    """Create (and return) a replication agreement from self to consumer.
    Self is the supplier.

    :param suffix: Replication Root
    :type suffix: str
    :param host: Consumer host
    :type host: str
    :param port: Consumer port
    :type port: int
    :param winsync: Identifies the agree as a WinSync agreement
    :type winsync: bool
    :param properties: Agreement properties
    :type properties: dict

    :returns: DN of the created agreement
    :raises: - InvalidArgumentError - If the suffix is missing
             - NoSuchEntryError - if a replica doesn't exist for that
               suffix
             - ldap.LDAPError - ldap error
    """
    # Check we have a suffix [ mandatory ]
    if not suffix:
        self.log.warning("create: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory')

    # default to an empty dict so the RA_* lookups below are safe
    if not properties:
        properties = {}

    # Compute the normalized suffix to be set in RA entry
    properties[RA_SUFFIX] = normalizeDN(suffix)

    # Adding agreement under the replica entry
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError("Error: no replica set up for suffix: %s" %
                               suffix)
    replica = replica_entries[0]

    # Define agreement entry
    if RA_NAME not in properties:
        properties[RA_NAME] = 'meTo_%s:%s' % (host, port)
    dn_agreement = ','.join(["cn=%s" % properties[RA_NAME], replica.dn])

    # Set the required properties(if not already set)
    if RA_BINDDN not in properties:
        properties[RA_BINDDN] = defaultProperties[REPLICATION_BIND_DN]
    if RA_BINDPW not in properties:
        properties[RA_BINDPW] = defaultProperties[REPLICATION_BIND_PW]
    if RA_METHOD not in properties:
        properties[RA_METHOD] = defaultProperties[REPLICATION_BIND_METHOD]
    if RA_TRANSPORT_PROT not in properties:
        properties[RA_TRANSPORT_PROT] = \
            defaultProperties[REPLICATION_TRANSPORT]
    if RA_TIMEOUT not in properties:
        properties[RA_TIMEOUT] = defaultProperties[REPLICATION_TIMEOUT]
    if RA_DESCRIPTION not in properties:
        properties[RA_DESCRIPTION] = properties[RA_NAME]
    if RA_CONSUMER_HOST not in properties:
        properties[RA_CONSUMER_HOST] = host
    if RA_CONSUMER_PORT not in properties:
        properties[RA_CONSUMER_PORT] = str(port)

    # This is probably unnecessary because
    # we can just raise ALREADY_EXISTS
    try:
        entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE)
        self.log.warn("Agreement already exists: %r" % dn_agreement)
        return dn_agreement
    except ldap.NO_SUCH_OBJECT:
        entry = None

    # Iterate over the properties, adding them to the entry
    entry = Entry(dn_agreement)
    entry.update({'objectclass': ["top", RA_OBJECTCLASS_VALUE]})
    for prop in properties:
        entry.update({RA_PROPNAME_TO_ATTRNAME[prop]: properties[prop]})

    # we make a copy here because we cannot change
    # the passed in properties dict
    propertiescopy = {}
    if properties:
        import copy
        propertiescopy = copy.deepcopy(properties)

    # Check if this a Winsync Agreement
    if winsync:
        self.conn.setupWinSyncAgmt(propertiescopy, entry)

    try:
        self.log.debug("Adding replica agreement: [%r]" % entry)
        self.conn.add_s(entry)
    except ldap.LDAPError as e:
        self.log.fatal('Failed to add replication agreement: %s' % str(e))
        raise e

    entry = self.conn.waitForEntry(dn_agreement)
    if entry:
        # More verbose but shows what's going on
        if 'chain' in propertiescopy:
            # NOTE(review): chaining is unimplemented — everything below
            # the raise is dead code and references names (binddn, bindpw,
            # consumer) that are not defined in this scope; confirm before
            # ever enabling it.
            raise NotImplementedError
            chain_args = {
                'suffix': suffix,
                'binddn': binddn,
                'bindpw': bindpw
            }
            # Work on `self` aka producer
            if replica.nsds5replicatype == MASTER_TYPE:
                self.conn.setupChainingFarm(**chain_args)
            # Work on `consumer`
            # TODO - is it really required?
            if replica.nsds5replicatype == LEAF_TYPE:
                chain_args.update({
                    'isIntermediate': 0,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
            elif replica.nsds5replicatype == HUB_TYPE:
                chain_args.update({
                    'isIntermediate': 1,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)

    return dn_agreement
def create(self, suffix=None, host=None, port=None, properties=None):
    """Create (and return) a replication agreement from self to consumer.
        - self is the supplier,

        @param consumer: one of the following (consumer can be a master)
                * a DirSrv object if chaining
                * an object with attributes: host, port, sslport, __str__
        @param suffix    - eg. 'dc=babel,dc=it'
        @param properties      - further properties dict.
        Support properties
            RA_NAME
            RA_SUFFIX
            RA_BINDDN
            RA_BINDPW
            RA_METHOD
            RA_DESCRIPTION
            RA_SCHEDULE
            RA_TRANSPORT_PROT
            RA_FRAC_EXCLUDE
            RA_FRAC_EXCLUDE_TOTAL_UPDATE
            RA_FRAC_STRIP
            RA_CONSUMER_PORT
            RA_CONSUMER_HOST
            RA_CONSUMER_TOTAL_INIT
            RA_TIMEOUT
            RA_CHANGES

        @return dn_agreement - DN of the created agreement

        @raise InvalidArgumentError - If the suffix is missing
        @raise NosuchEntryError     - if a replica doesn't exist for that
                                      suffix
        @raise UNWILLING_TO_PERFORM if the database was previously
               in read-only state. To create new agreements you
               need to *restart* the directory server
    """
    import string

    # Check we have a suffix [ mandatory ]
    if not suffix:
        self.log.warning("create: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory')

    # resolve each setting from properties, falling back to the defaults
    if properties:
        binddn = properties.get(
            RA_BINDDN) or defaultProperties[REPLICATION_BIND_DN]
        bindpw = properties.get(
            RA_BINDPW) or defaultProperties[REPLICATION_BIND_PW]
        bindmethod = properties.get(
            RA_METHOD) or defaultProperties[REPLICATION_BIND_METHOD]
        format = properties.get(RA_NAME) or r'meTo_$host:$port'
        description = properties.get(RA_DESCRIPTION) or format
        transport = properties.get(
            RA_TRANSPORT_PROT) or defaultProperties[REPLICATION_TRANSPORT]
        timeout = properties.get(
            RA_TIMEOUT) or defaultProperties[REPLICATION_TIMEOUT]
    else:
        binddn = defaultProperties[REPLICATION_BIND_DN]
        bindpw = defaultProperties[REPLICATION_BIND_PW]
        bindmethod = defaultProperties[REPLICATION_BIND_METHOD]
        format = r'meTo_$host:$port'
        description = format
        transport = defaultProperties[REPLICATION_TRANSPORT]
        timeout = defaultProperties[REPLICATION_TIMEOUT]

    # Compute the normalized suffix to be set in RA entry
    nsuffix = normalizeDN(suffix)

    # adding agreement under the replica entry
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError(
            "Error: no replica set up for suffix " + suffix)
    replica = replica_entries[0]

    # define agreement entry; the cn is the name template with host/port
    # substituted (e.g. "meTo_localhost:389")
    cn = string.Template(format).substitute({'host': host, 'port': port})
    dn_agreement = ','.join(["cn=%s" % cn, replica.dn])

    # This is probably unnecessary because
    # we can just raise ALREADY_EXISTS
    try:
        entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE)
        self.log.warn("Agreement already exists: %r" % dn_agreement)
        return dn_agreement
    except ldap.NO_SUCH_OBJECT:
        entry = None

    # In a separate function in this scope?
    entry = Entry(dn_agreement)
    entry.update({
        'objectclass': ["top", RA_OBJECTCLASS_VALUE],
        RA_PROPNAME_TO_ATTRNAME[RA_NAME]: cn,
        RA_PROPNAME_TO_ATTRNAME[RA_SUFFIX]: nsuffix,
        RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_HOST]: host,
        RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_PORT]: str(port),
        RA_PROPNAME_TO_ATTRNAME[RA_TRANSPORT_PROT]: transport,
        RA_PROPNAME_TO_ATTRNAME[RA_TIMEOUT]: str(timeout),
        RA_PROPNAME_TO_ATTRNAME[RA_BINDDN]: binddn,
        RA_PROPNAME_TO_ATTRNAME[RA_BINDPW]: bindpw,
        RA_PROPNAME_TO_ATTRNAME[RA_METHOD]: bindmethod,
        RA_PROPNAME_TO_ATTRNAME[RA_DESCRIPTION]:
            string.Template(description).substitute({'host': host,
                                                     'port': port})
    })

    # we make a copy here because we cannot change
    # the passed in properties dict
    propertiescopy = {}
    if properties:
        import copy
        propertiescopy = copy.deepcopy(properties)

    # further arguments
    if 'winsync' in propertiescopy:  # state it clearly!
        self.conn.setupWinSyncAgmt(propertiescopy, entry)

    try:
        self.log.debug("Adding replica agreement: [%r]" % entry)
        self.conn.add_s(entry)
    except:
        #  FIXME check please!
        raise

    entry = self.conn.waitForEntry(dn_agreement)
    if entry:
        # More verbose but shows what's going on
        if 'chain' in propertiescopy:
            # NOTE(review): chaining is unimplemented — everything below
            # the raise is dead code and references a `consumer` name that
            # is not defined in this scope; confirm before enabling.
            raise NotImplementedError
            chain_args = {
                'suffix': suffix,
                'binddn': binddn,
                'bindpw': bindpw
            }
            # Work on `self` aka producer
            if replica.nsds5replicatype == MASTER_TYPE:
                self.conn.setupChainingFarm(**chain_args)
            # Work on `consumer`
            # TODO - is it really required?
            if replica.nsds5replicatype == LEAF_TYPE:
                chain_args.update({
                    'isIntermediate': 0,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
            elif replica.nsds5replicatype == HUB_TYPE:
                chain_args.update({
                    'isIntermediate': 1,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)

    return dn_agreement
def create(self, suffix=None, properties=None):
    """
    Creates backend entry and returns its dn.

    If the properties 'chain-bind-pwd' and 'chain-bind-dn' and
    'chain-urls' are specified the backend is a chained backend.
    A chaining backend is created under
    'cn=chaining database,cn=plugins,cn=config'.

    A local backend is created under
    'cn=ldbm database,cn=plugins,cn=config'

    @param suffix - suffix stored in the backend
    @param properties - dictionary with properties values
    supported properties are
        BACKEND_NAME          = 'name'
        BACKEND_READONLY      = 'read-only'
        BACKEND_REQ_INDEX     = 'require-index'
        BACKEND_CACHE_ENTRIES = 'entry-cache-number'
        BACKEND_CACHE_SIZE    = 'entry-cache-size'
        BACKEND_DNCACHE_SIZE  = 'dn-cache-size'
        BACKEND_DIRECTORY     = 'directory'
        BACKEND_DB_DEADLOCK   = 'db-deadlock'
        BACKEND_CHAIN_BIND_DN = 'chain-bind-dn'
        BACKEND_CHAIN_BIND_PW = 'chain-bind-pw'
        BACKEND_CHAIN_URLS    = 'chain-urls'

    @return backend DN of the created backend

    @raise ValueError - If missing suffix
           InvalidArgumentError - If it already exists a backend for
           that suffix or a backend with the same DN
    """
    def _getBackendName(parent):
        '''
        Use to build a backend name that is not already used
        '''
        index = 1
        while True:
            bename = "local%ddb" % index
            base = ("%s=%s,%s" %
                    (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME],
                     bename, parent))
            scope = ldap.SCOPE_BASE
            filt = "(objectclass=%s)" % BACKEND_OBJECTCLASS_VALUE
            # BUG FIX: corrected typos in the debug message
            # ("baser" -> "base", "fileter" -> "filter")
            self.log.debug("_getBackendName: base=%s : filter=%s" %
                           (base, filt))
            try:
                ents = self.conn.getEntry(base, ldap.SCOPE_BASE, filt)
            except (NoSuchEntryError, ldap.NO_SUCH_OBJECT) as e:
                # name is free — use it
                self.log.info("backend name will be %s" % bename)
                return bename
            index += 1

    # suffix is mandatory
    if not suffix:
        raise ValueError("suffix is mandatory")
    else:
        nsuffix = normalizeDN(suffix)

    # Check it does not already exist a backend for that suffix
    ents = self.conn.backend.list(suffix=suffix)
    if len(ents) != 0:
        raise InvalidArgumentError("It already exists backend(s) for %s: %s" %
                                   (suffix, ents[0].dn))

    # Check if we are creating a local/chained backend: a chained backend
    # requires all three chain-* properties
    chained_suffix = (properties and
                      (BACKEND_CHAIN_BIND_DN in properties) and
                      (BACKEND_CHAIN_BIND_PW in properties) and
                      (BACKEND_CHAIN_URLS in properties))
    if chained_suffix:
        self.log.info("Creating a chaining backend")
        dnbase = DN_CHAIN
    else:
        self.log.info("Creating a local backend")
        dnbase = DN_LDBM

    # Get the future backend name
    if properties and BACKEND_NAME in properties:
        cn = properties[BACKEND_NAME]
    else:
        cn = _getBackendName(dnbase)

    # Check the future backend name does not already exists
    # we can imagine having no backends for 'suffix' but having a backend
    # with the same name
    dn = "%s=%s,%s" % (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME], cn,
                       dnbase)
    ents = self.conn.backend.list(backend_dn=dn)
    if ents:
        raise InvalidArgumentError(
            "It already exists a backend with that DN: %s" % ents[0].dn)

    # All checks are done, Time to create the backend
    try:
        entry = Entry(dn)
        entry.update({
            'objectclass': ['top', 'extensibleObject',
                            BACKEND_OBJECTCLASS_VALUE],
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME]: cn,
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX]: nsuffix
        })

        if chained_suffix:
            entry.update({
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_URLS]:
                    properties[BACKEND_CHAIN_URLS],
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_BIND_DN]:
                    properties[BACKEND_CHAIN_BIND_DN],
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_BIND_PW]:
                    properties[BACKEND_CHAIN_BIND_PW]
            })

        self.log.debug("adding entry: %r" % entry)
        self.conn.add_s(entry)
    except ldap.ALREADY_EXISTS as e:
        # BUG FIX: "except ldap.ALREADY_EXISTS, e" is Python-2-only syntax
        # (SyntaxError on Python 3); the nested helper above already uses
        # the portable "as e" form.
        self.log.error("Entry already exists: %r" % dn)
        raise ldap.ALREADY_EXISTS("%s : %r" % (e, dn))
def exportLDIF(self, suffix=None, benamebase=None, output_file=None,
               args=None):
    '''
    Export in a LDIF format a given 'suffix' (or 'benamebase' that stores
    that suffix). It uses an internal task to acheive this request.

    If 'suffix' and 'benamebase' are specified, it uses 'benamebase' first
    else 'suffix'.
    If both 'suffix' and 'benamebase' are missing it raises ValueError

    'output_file' is the output file of the export

    @param suffix - suffix of the backend
    @param benamebase - 'commonname'/'cn' of the backend (e.g. 'userRoot')
    @param output_file - file that will contain the exported suffix in
                         LDIF format
    @param args - is a dictionary that contains modifier of the export
                  task
            wait: True/[False] - If True, 'export' waits for the
                                 completion of the task before to return
            repl-info: True/[False] - If True, it adds the replication
                                      meta data (state information,
                                      tombstones and RUV) in the exported
                                      file

    @return exit code of the export task (0 on success)

    @raise ValueError
    '''
    # Checking the parameters
    if not benamebase and not suffix:
        raise ValueError("Specify either bename or suffix")
    if not output_file:
        raise ValueError("output_file is mandatory")

    # Prepare the task entry; cn embeds a timestamp so each run is unique
    cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
    dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
    entry = Entry(dn)
    entry.update({
        'objectclass': ['top', 'extensibleObject'],
        'cn': cn,
        'nsFilename': output_file
    })
    # benamebase wins over suffix when both are given
    if benamebase:
        entry.setValues('nsInstance', benamebase)
    else:
        entry.setValues('nsIncludeSuffix', suffix)

    # BUG FIX: args is optional (defaults to None) and every other access
    # below guards with "args and ..."; the original called args.get()
    # unconditionally here and raised AttributeError when args was omitted.
    if args and args.get(EXPORT_REPL_INFO, False):
        entry.setValues('nsExportReplica', 'true')

    # start the task and possibly wait for task completion
    self.conn.add_s(entry)

    exitCode = 0
    if args and args.get(TASK_WAIT, False):
        # checkTask(entry, True) blocks until the task finishes
        (done, exitCode) = self.conn.tasks.checkTask(entry, True)

    if exitCode:
        self.log.error("Error: export task %s for file %s exited with %d" %
                       (cn, output_file, exitCode))
    else:
        self.log.info("Export task %s for file %s completed successfully" %
                      (cn, output_file))
    return exitCode
def create(self, suffix=None, bename=None, parent=None):
    '''
    Create a mapping tree entry (under "cn=mapping tree,cn=config"),
    for the 'suffix' and that is stored in 'bename' backend.
    'bename' backend must exist before creating the mapping tree entry.

    If a 'parent' is provided that means that we are creating a
    sub-suffix mapping tree.

    @param suffix - suffix mapped by this mapping tree entry. It will
                    be the common name ('cn') of the entry
    @param benamebase - backend common name (e.g. 'userRoot')
    @param parent - if provided is a parent suffix of 'suffix'

    @return DN of the mapping tree entry (the existing entry when a
            mapping tree for 'suffix' is already present)

    @raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping
                                 tree does not exist
           ValueError - if missing a parameter,
    '''
    # Check suffix is provided
    if not suffix:
        raise ValueError("suffix is mandatory")
    else:
        nsuffix = normalizeDN(suffix)

    # Check backend name is provided
    if not bename:
        raise ValueError("backend name is mandatory")

    # Check that if the parent suffix is provided then
    # it exists a mapping tree for it
    if parent:
        nparent = normalizeDN(parent)
        filt = suffixfilt(parent)
        try:
            entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
                                       filt)
            pass
        except NoSuchEntryError:
            raise ValueError("parent suffix has no mapping tree")
    else:
        nparent = ""

    # Check if suffix exists; if so return the existing entry unchanged
    filt = suffixfilt(suffix)
    try:
        entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE, filt)
        return entry
    except ldap.NO_SUCH_OBJECT:
        entry = None

    #
    # Now start the real work
    #

    # fix me when we can actually used escaped DNs
    dn = ','.join(('cn="%s"' % nsuffix, DN_MAPPING_TREE))
    entry = Entry(dn)
    entry.update({
        'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],
        'nsslapd-state': 'backend',
        # the value in the dn has to be DN escaped
        # internal code will add the quoted value - unquoted value is
        # useful for searching.
        MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: nsuffix,
        MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename
    })

    # possibly add the parent
    if parent:
        entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX], nparent)

    try:
        self.log.debug("Creating entry: %s", entry.dn)
        self.log.info("Entry %r", entry)
        self.conn.add_s(entry)
    except ldap.LDAPError as e:
        raise ldap.LDAPError("Error adding suffix entry " + dn, e)

    # read the entry back to confirm (and return) what was created
    ret = self.conn._test_entry(dn, ldap.SCOPE_BASE)
    return ret
def create(self, suffix=None, host=None, port=None, properties=None):
    """Create (and return) a replication agreement from self to consumer.
        - self is the supplier,

        @param consumer: one of the following (consumer can be a master)
                * a DirSrv object if chaining
                * an object with attributes: host, port, sslport, __str__
        @param suffix    - eg. 'dc=babel,dc=it'
        @param properties      - further properties dict.
        Support properties
            RA_NAME
            RA_SUFFIX
            RA_BINDDN
            RA_BINDPW
            RA_METHOD
            RA_DESCRIPTION
            RA_SCHEDULE
            RA_TRANSPORT_PROT
            RA_FRAC_EXCLUDE
            RA_FRAC_EXCLUDE_TOTAL_UPDATE
            RA_FRAC_STRIP
            RA_CONSUMER_PORT
            RA_CONSUMER_HOST
            RA_CONSUMER_TOTAL_INIT
            RA_TIMEOUT
            RA_CHANGES

        @return dn_agreement - DN of the created agreement

        @raise InvalidArgumentError - If the suffix is missing
        @raise NosuchEntryError     - if a replica doesn't exist for that
                                      suffix
        @raise UNWILLING_TO_PERFORM if the database was previously
               in read-only state. To create new agreements you
               need to *restart* the directory server
    """
    import string

    # Check we have a suffix [ mandatory ]
    if not suffix:
        self.log.warning("create: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory')

    # resolve each setting from properties, falling back to the defaults
    if properties:
        binddn = properties.get(
            RA_BINDDN) or defaultProperties[REPLICATION_BIND_DN]
        bindpw = properties.get(
            RA_BINDPW) or defaultProperties[REPLICATION_BIND_PW]
        bindmethod = properties.get(
            RA_METHOD) or defaultProperties[REPLICATION_BIND_METHOD]
        format = properties.get(RA_NAME) or r'meTo_$host:$port'
        description = properties.get(RA_DESCRIPTION) or format
        transport = properties.get(
            RA_TRANSPORT_PROT) or defaultProperties[REPLICATION_TRANSPORT]
        timeout = properties.get(
            RA_TIMEOUT) or defaultProperties[REPLICATION_TIMEOUT]
    else:
        binddn = defaultProperties[REPLICATION_BIND_DN]
        bindpw = defaultProperties[REPLICATION_BIND_PW]
        bindmethod = defaultProperties[REPLICATION_BIND_METHOD]
        format = r'meTo_$host:$port'
        description = format
        transport = defaultProperties[REPLICATION_TRANSPORT]
        timeout = defaultProperties[REPLICATION_TIMEOUT]

    # Compute the normalized suffix to be set in RA entry
    nsuffix = normalizeDN(suffix)

    # adding agreement under the replica entry
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError("Error: no replica set up for suffix " +
                               suffix)
    replica = replica_entries[0]

    # define agreement entry; the cn is the name template with host/port
    # substituted (e.g. "meTo_localhost:389")
    cn = string.Template(format).substitute({'host': host, 'port': port})
    dn_agreement = ','.join(["cn=%s" % cn, replica.dn])

    # This is probably unnecessary because
    # we can just raise ALREADY_EXISTS
    try:
        entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE)
        self.log.warn("Agreement already exists: %r" % dn_agreement)
        return dn_agreement
    except ldap.NO_SUCH_OBJECT:
        entry = None

    # In a separate function in this scope?
    entry = Entry(dn_agreement)
    entry.update({
        'objectclass': ["top", RA_OBJECTCLASS_VALUE],
        RA_PROPNAME_TO_ATTRNAME[RA_NAME]: cn,
        RA_PROPNAME_TO_ATTRNAME[RA_SUFFIX]: nsuffix,
        RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_HOST]: host,
        RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_PORT]: str(port),
        RA_PROPNAME_TO_ATTRNAME[RA_TRANSPORT_PROT]: transport,
        RA_PROPNAME_TO_ATTRNAME[RA_TIMEOUT]: str(timeout),
        RA_PROPNAME_TO_ATTRNAME[RA_BINDDN]: binddn,
        RA_PROPNAME_TO_ATTRNAME[RA_BINDPW]: bindpw,
        RA_PROPNAME_TO_ATTRNAME[RA_METHOD]: bindmethod,
        RA_PROPNAME_TO_ATTRNAME[RA_DESCRIPTION]:
            string.Template(description).substitute({
                'host': host,
                'port': port
            })
    })

    # we make a copy here because we cannot change
    # the passed in properties dict
    propertiescopy = {}
    if properties:
        import copy
        propertiescopy = copy.deepcopy(properties)

    # further arguments
    if 'winsync' in propertiescopy:  # state it clearly!
        self.conn.setupWinSyncAgmt(propertiescopy, entry)

    try:
        self.log.debug("Adding replica agreement: [%r]" % entry)
        self.conn.add_s(entry)
    except:
        #  FIXME check please!
        raise

    entry = self.conn.waitForEntry(dn_agreement)
    if entry:
        # More verbose but shows what's going on
        if 'chain' in propertiescopy:
            # NOTE(review): chaining is unimplemented — everything below
            # the raise is dead code and references a `consumer` name that
            # is not defined in this scope; confirm before enabling.
            raise NotImplementedError
            chain_args = {
                'suffix': suffix,
                'binddn': binddn,
                'bindpw': bindpw
            }
            # Work on `self` aka producer
            if replica.nsds5replicatype == MASTER_TYPE:
                self.conn.setupChainingFarm(**chain_args)
            # Work on `consumer`
            # TODO - is it really required?
            if replica.nsds5replicatype == LEAF_TYPE:
                chain_args.update({
                    'isIntermediate': 0,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)
            elif replica.nsds5replicatype == HUB_TYPE:
                chain_args.update({
                    'isIntermediate': 1,
                    'urls': self.conn.toLDAPURL(),
                    'args': propertiescopy['chainargs']
                })
                consumer.setupConsumerChainOnUpdate(**chain_args)

    return dn_agreement
def create(self, suffix=None, host=None, port=None, properties=None,
           winsync=False):
    """Create (and return) a replication agreement from self to consumer.

    - self is the supplier

    @param suffix - Replication Root
    @param host - Consumer host
    @param port - Consumer port
    @param winsync - Identifies the agreement as a WinSync agreement
    @param properties - Agreement properties; never mutated — a deep
                        copy is taken before defaults are filled in
    @return dn_agreement - DN of the created agreement
    @raise InvalidArgumentError - If the suffix is missing
    @raise NoSuchEntryError - if a replica doesn't exist for that suffix
    @raise ldap.LDAPError - ldap error
    """
    import copy

    # Check we have a suffix [ mandatory ]
    if not suffix:
        self.log.warning("create: suffix is missing")
        raise InvalidArgumentError('suffix is mandatory')

    # Work on a deep copy so the caller's dict is never changed.
    # The previous code wrote defaults straight into the caller's
    # `properties` before copying it (contradicting its own comment),
    # and raised TypeError when `properties` was left at its default
    # of None.
    propertiescopy = copy.deepcopy(properties) if properties else {}

    # Compute the normalized suffix to be set in RA entry
    propertiescopy[RA_SUFFIX] = normalizeDN(suffix)

    # Adding agreement under the replica entry
    replica_entries = self.conn.replica.list(suffix)
    if not replica_entries:
        raise NoSuchEntryError(
            "Error: no replica set up for suffix: %s" % suffix)
    replica = replica_entries[0]

    # Define agreement entry
    if RA_NAME not in propertiescopy:
        propertiescopy[RA_NAME] = 'meTo_%s:%s' % (host, port)
    dn_agreement = ','.join(["cn=%s" % propertiescopy[RA_NAME], replica.dn])

    # Set the required properties (if not already set)
    if RA_BINDDN not in propertiescopy:
        propertiescopy[RA_BINDDN] = defaultProperties[REPLICATION_BIND_DN]
    if RA_BINDPW not in propertiescopy:
        propertiescopy[RA_BINDPW] = defaultProperties[REPLICATION_BIND_PW]
    if RA_METHOD not in propertiescopy:
        propertiescopy[RA_METHOD] = \
            defaultProperties[REPLICATION_BIND_METHOD]
    if RA_TRANSPORT_PROT not in propertiescopy:
        propertiescopy[RA_TRANSPORT_PROT] = \
            defaultProperties[REPLICATION_TRANSPORT]
    if RA_TIMEOUT not in propertiescopy:
        propertiescopy[RA_TIMEOUT] = defaultProperties[REPLICATION_TIMEOUT]
    if RA_DESCRIPTION not in propertiescopy:
        propertiescopy[RA_DESCRIPTION] = propertiescopy[RA_NAME]
    if RA_CONSUMER_HOST not in propertiescopy:
        propertiescopy[RA_CONSUMER_HOST] = host
    if RA_CONSUMER_PORT not in propertiescopy:
        propertiescopy[RA_CONSUMER_PORT] = str(port)

    # This is probably unnecessary because
    # we can just raise ALREADY_EXISTS
    try:
        entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE)
        self.log.warn("Agreement already exists: %r" % dn_agreement)
        return dn_agreement
    except ldap.NO_SUCH_OBJECT:
        entry = None

    # Iterate over the properties, adding them to the entry
    entry = Entry(dn_agreement)
    entry.update({'objectclass': ["top", RA_OBJECTCLASS_VALUE]})
    for prop in propertiescopy:
        entry.update({RA_PROPNAME_TO_ATTRNAME[prop]: propertiescopy[prop]})

    # Check if this a Winsync Agreement
    if winsync:
        self.conn.setupWinSyncAgmt(propertiescopy, entry)

    try:
        self.log.debug("Adding replica agreement: [%r]" % entry)
        self.conn.add_s(entry)
    except ldap.LDAPError as e:
        self.log.fatal('Failed to add replication agreement: %s' % str(e))
        raise e

    entry = self.conn.waitForEntry(dn_agreement)
    if entry and 'chain' in propertiescopy:
        # Chaining setup on agreements is not supported.  The code
        # that used to follow this raise was unreachable and
        # referenced undefined names (binddn, bindpw, consumer), so
        # it has been removed.
        raise NotImplementedError

    return dn_agreement
def reindex(self, suffix=None, benamebase=None, attrname=None, args=None):
    '''Reindex `attrname` in the backend that stores `suffix` (or that
    is named `benamebase`) by adding an internal cn=index task entry.

    When both 'suffix' and 'benamebase' are given, 'benamebase' wins.
    When both are missing a ValueError is raised.

    @param suffix - suffix of the backend
    @param benamebase - 'commonname'/'cn' of the backend (e.g. 'userRoot')
    @param attrname - attribute name
    @param args - dictionary of task modifiers;
                  wait: True/[False] - if True, wait for the task to
                  complete before returning
    @return task exit code (0 when not waiting), or -1 if the task
            entry could not be added
    @raise ValueError - missing both benamebase and suffix, or an
                        invalid benamebase
    '''
    if not benamebase and not suffix:
        raise ValueError("Specify either bename or suffix")

    # A backend name takes precedence: resolve it to its suffix first.
    if benamebase:
        mt_entries = self.conn.mappingtree.list(bename=benamebase)
        if len(mt_entries) != 1:
            raise ValueError("invalid backend name: %s" % benamebase)
        suffix_attr = MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]
        if not mt_entries[0].hasAttr(suffix_attr):
            raise ValueError(
                "invalid backend name: %s, or entry without %s" %
                (benamebase, suffix_attr))
        suffix = mt_entries[0].getValue(suffix_attr)

    backend_entries = self.conn.backend.list(suffix=suffix)

    # Build the task entry; the cn embeds a timestamp so it is unique.
    task_cn = "index_%s_%s" % (attrname,
                               time.strftime("%m%d%Y_%H%M%S",
                                             time.localtime()))
    task_entry = Entry("cn=%s,%s" % (task_cn, DN_INDEX_TASK))
    task_entry.update({
        'objectclass': ['top', 'extensibleObject'],
        'cn': task_cn,
        'nsIndexAttribute': attrname,
        # assume a single local backend
        'nsInstance': backend_entries[0].cn,
    })

    # Start the task and possibly wait for its completion
    try:
        self.conn.add_s(task_entry)
    except ldap.ALREADY_EXISTS:
        self.log.error("Fail to add the index task for %s" % attrname)
        return -1

    rc = 0
    if args and args.get(TASK_WAIT, False):
        rc = self.conn.tasks.checkTask(task_entry, True)[1]

    if rc:
        self.log.error("Error: index task %s exited with %d" % (task_cn,
                                                                rc))
    else:
        self.log.info("Index task %s completed successfully" % (task_cn))
    return rc
def reindex(self, suffix=None, benamebase=None, attrname=None, args=None):
    '''Reindex one attribute of a backend through an internal task.

    The backend is selected by 'benamebase' when given, otherwise by
    'suffix'; at least one of the two is required.

    @param suffix - suffix of the backend
    @param benamebase - 'commonname'/'cn' of the backend (e.g. 'userRoot')
    @param attrname - attribute name
    @param args - dictionary of task modifiers;
                  wait: True/[False] - if True, block until the task
                  has completed
    @return the task exit code (0 unless waiting), or -1 if the task
            entry could not be added
    @raise ValueError - when both benamebase and suffix are missing,
                        or benamebase does not name a valid backend
    '''
    if not benamebase and not suffix:
        raise ValueError("Specify either bename or suffix")

    # Map a backend name onto the suffix it serves.
    if benamebase:
        mt_ents = self.conn.mappingtree.list(bename=benamebase)
        if len(mt_ents) != 1:
            raise ValueError("invalid backend name: %s" % benamebase)
        attr_suffix = MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]
        if not mt_ents[0].hasAttr(attr_suffix):
            raise ValueError(
                "invalid backend name: %s, or entry without %s" %
                (benamebase, attr_suffix))
        suffix = mt_ents[0].getValue(attr_suffix)

    backends = self.conn.backend.list(suffix=suffix)

    stamp = time.strftime("%m%d%Y_%H%M%S", time.localtime())
    cn = "index_%s_%s" % (attrname, stamp)
    dn = "cn=%s,%s" % (cn, DN_INDEX_TASK)

    # One task entry drives the reindex; assume 1 local backend.
    task = Entry(dn)
    task.update({
        'objectclass': ['top', 'extensibleObject'],
        'cn': cn,
        'nsIndexAttribute': attrname,
        'nsInstance': backends[0].cn,
    })

    # Start the task and possibly wait for task completion
    try:
        self.conn.add_s(task)
    except ldap.ALREADY_EXISTS:
        self.log.error("Fail to add the index task for %s" % attrname)
        return -1

    exitCode = 0
    if args and args.get(TASK_WAIT, False):
        _done, exitCode = self.conn.tasks.checkTask(task, True)

    if exitCode:
        self.log.error("Error: index task %s exited with %d" % (
            cn, exitCode))
    else:
        self.log.info("Index task %s completed successfully" % (
            cn))
    return exitCode
def create(self, suffix=None, properties=None):
    """
    Creates backend entry and returns its dn.

    If the properties 'chain-bind-pwd' and 'chain-bind-dn' and
    'chain-urls' are specified the backend is a chained backend.  A
    chaining backend is created under
    'cn=chaining database,cn=plugins,cn=config'.

    A local backend is created under
    'cn=ldbm database,cn=plugins,cn=config'

    @param suffix - suffix stored in the backend; may also be supplied
                    through properties[BACKEND_SUFFIX]
    @param properties - dictionary with properties values
    supported properties are
        BACKEND_NAME          = 'name'
        BACKEND_READONLY      = 'read-only'
        BACKEND_REQ_INDEX     = 'require-index'
        BACKEND_CACHE_ENTRIES = 'entry-cache-number'
        BACKEND_CACHE_SIZE    = 'entry-cache-size'
        BACKEND_DNCACHE_SIZE  = 'dn-cache-size'
        BACKEND_DIRECTORY     = 'directory'
        BACKEND_DB_DEADLOCK   = 'db-deadlock'
        BACKEND_CHAIN_BIND_DN = 'chain-bind-dn'
        BACKEND_CHAIN_BIND_PW = 'chain-bind-pw'
        BACKEND_CHAIN_URLS    = 'chain-urls'
        BACKEND_SUFFIX        = 'suffix'
        BACKEND_SAMPLE_ENTRIES = 'sample_entries'

    @return backend DN of the created backend

    @raise ldap.UNWILLING_TO_PERFORM - no suffix was supplied
    @raise ldap.ALREADY_EXISTS - a backend for that suffix, or with
                                 that DN, already exists
    @raise LDAPError
    """

    def _getBackendName(parent):
        '''Return the first name of the form "local<N>db" that is not
        already used by a backend under `parent`.'''
        index = 1
        while True:
            bename = "local%ddb" % index
            base = ("%s=%s,%s" %
                    (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME],
                     bename, parent))
            filt = "(objectclass=%s)" % BACKEND_OBJECTCLASS_VALUE
            # fixed typos in the debug message ("baser"/"fileter")
            self.log.debug("_getBackendName: base=%s : filter=%s",
                           base, filt)
            try:
                self.conn.getEntry(base, ldap.SCOPE_BASE, filt)
            except (NoSuchEntryError, ldap.NO_SUCH_OBJECT):
                self.log.info("backend name will be %s", bename)
                return bename
            index += 1

    # suffix is mandatory.  It may be passed in the properties instead
    # of as an argument; the properties value wins.
    if isinstance(properties, dict) and properties.get(
            BACKEND_SUFFIX, None) is not None:
        suffix = properties.get(BACKEND_SUFFIX)
    if not suffix:
        raise ldap.UNWILLING_TO_PERFORM('Missing Suffix')
    nsuffix = normalizeDN(suffix)

    # Check it does not already exist a backend for that suffix
    if self.conn.verbose:
        self.log.info("Checking suffix %s for existence", suffix)
    ents = self.conn.backend.list(suffix=suffix)
    if len(ents) != 0:
        raise ldap.ALREADY_EXISTS

    # Check if we are creating a local/chained backend: a chained
    # backend needs bind dn, bind password and urls all present.
    chained_suffix = (properties and
                      (BACKEND_CHAIN_BIND_DN in properties) and
                      (BACKEND_CHAIN_BIND_PW in properties) and
                      (BACKEND_CHAIN_URLS in properties))
    if chained_suffix:
        self.log.info("Creating a chaining backend")
        dnbase = DN_CHAIN
    else:
        self.log.info("Creating a local backend")
        dnbase = DN_LDBM

    # Get the future backend name
    if properties and BACKEND_NAME in properties:
        cn = properties[BACKEND_NAME]
    else:
        cn = _getBackendName(dnbase)

    # Check the future backend name does not already exists
    # we can imagine having no backends for 'suffix' but having a
    # backend with the same name
    dn = "%s=%s,%s" % (BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME],
                       cn, dnbase)
    ents = self.conn.backend.list(backend_dn=dn)
    if ents:
        raise ldap.ALREADY_EXISTS(
            "Backend already exists with that DN: %s" % ents[0].dn)

    # All checks are done, Time to create the backend
    try:
        entry = Entry(dn)
        entry.update({
            'objectclass': ['top', 'extensibleObject',
                            BACKEND_OBJECTCLASS_VALUE],
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_NAME]: cn,
            BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_SUFFIX]: nsuffix,
        })

        if chained_suffix:
            entry.update({
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_URLS]:
                    properties[BACKEND_CHAIN_URLS],
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_BIND_DN]:
                    properties[BACKEND_CHAIN_BIND_DN],
                BACKEND_PROPNAME_TO_ATTRNAME[BACKEND_CHAIN_BIND_PW]:
                    properties[BACKEND_CHAIN_BIND_PW],
            })

        self.log.debug("adding entry: %r", entry)
        self.conn.add_s(entry)
    except ldap.ALREADY_EXISTS as e:
        self.log.error("Entry already exists: %r", dn)
        raise ldap.ALREADY_EXISTS("%s : %r" % (e, dn))
    except ldap.LDAPError as e:
        self.log.error("Could not add backend entry: %r", dn)
        raise e

    backend_entry = self.conn._test_entry(dn, ldap.SCOPE_BASE)
    return backend_entry