def db2bak(self, instance, online=True):
    '''
    Create a BAK backup of the data and changelog in this instance.

    If executed online create a task and wait for it to complete.

    :param instance: name of the 389-ds instance to back up
    :param online: if True, run as an LDAP cn=tasks task
    :raises admintool.ScriptError: if the backup task cannot be added
    '''
    self.log.info('Backing up %s' % instance)
    cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config'))

    # 389-ds writes the BAK into its own backup directory
    bakdir = os.path.join(
        paths.SLAPD_INSTANCE_BACKUP_DIR_TEMPLATE % (instance, instance))

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsInstance': ['userRoot'],
                'nsArchiveDir': [bakdir],
                'nsDatabaseType': ['ldbm database'],
            })
        try:
            conn.add_entry(ent)
        except Exception as e:  # fixed Python 2 "except Exception, e" syntax
            # also fixed duplicated word in the original message ("to to")
            raise admintool.ScriptError(
                'Unable to add backup task: %s' % e)
        self.log.info("Waiting for BAK to finish")
        wait_for_task(conn, dn)
def db2bak(self, instance, online=True):
    '''
    Create a BAK backup of the data and changelog in this instance.

    If executed online create a task and wait for it to complete.

    :param instance: name of the 389-ds instance to back up
    :param online: if True, run as an LDAP cn=tasks task
    :raises admintool.ScriptError: if the backup task cannot be added
    '''
    self.log.info('Backing up %s' % instance)
    cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config'))

    # 389-ds writes the BAK into its own backup directory
    bakdir = os.path.join(
        paths.SLAPD_INSTANCE_BACKUP_DIR_TEMPLATE % (instance, instance))

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsInstance': ['userRoot'],
                'nsArchiveDir': [bakdir],
                'nsDatabaseType': ['ldbm database'],
            }
        )
        try:
            conn.add_entry(ent)
        except Exception as e:  # fixed Python 2 "except Exception, e" syntax
            # also fixed duplicated word in the original message ("to to")
            raise admintool.ScriptError('Unable to add backup task: %s' % e)
        self.log.info("Waiting for BAK to finish")
        wait_for_task(conn, dn)
def db2ldif(self, instance, backend, online=True):
    '''
    Create a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    For SELinux reasons this writes out to the 389-ds backup location
    and we move it.

    :param instance: 389-ds instance name
    :param backend: backend (database) to export
    :param online: if True run as an LDAP task, otherwise exec db2ldif
    :raises admintool.ScriptError: if the export task cannot be added
    '''
    self.log.info('Backing up %s in %s to LDIF' % (backend, instance))
    # removed unused "now = time.localtime()" local
    cn = time.strftime('export_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'export'), ('cn', 'tasks'), ('cn', 'config'))

    ldifname = '%s-%s.ldif' % (instance, backend)
    ldiffile = os.path.join(
        paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance,
        ldifname)

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsInstance': [backend],
                'nsFilename': [ldiffile],
                'nsUseOneFile': ['true'],
                'nsExportReplica': ['true'],
            }
        )
        try:
            conn.add_entry(ent)
        except Exception as e:
            raise admintool.ScriptError('Unable to add LDIF task: %s' % e)

        self.log.info("Waiting for LDIF to finish")
        wait_for_task(conn, dn)
    else:
        args = [paths.DB2LDIF,
                '-Z', instance,
                '-r',
                '-n', backend,
                '-a', ldiffile]
        result = run(args, raiseonerr=False)
        if result.returncode != 0:
            self.log.critical('db2ldif failed: %s', result.error_log)

    # Move the LDIF backup to our location
    shutil.move(ldiffile, os.path.join(self.dir, ldifname))
def bak2db(self, instance, backend, online=True):
    '''
    Restore a BAK backup of the data and changelog in this instance.

    When backend is None every backend in the instance is restored.

    If executed online create a task and wait for it to complete.

    instance here is a loaded term. It can mean either a separate
    389-ds install instance or a separate 389-ds backend. We only need
    to treat PKI-IPA and ipaca specially.
    '''
    if backend is None:
        self.log.info('Restoring %s' % instance)
    else:
        self.log.info('Restoring %s in %s' % (backend, instance))

    cn = time.strftime('restore_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'restore'), ('cn', 'tasks'), ('cn', 'config'))
    archive_dir = os.path.join(self.dir, instance)

    if online:
        # Online: add a cn=tasks entry and poll until it finishes.
        conn = self.get_connection()
        task_attrs = {
            'objectClass': ['top', 'extensibleObject'],
            'cn': [cn],
            'nsArchiveDir': [archive_dir],
            'nsDatabaseType': ['ldbm database'],
        }
        ent = conn.make_entry(dn, task_attrs)
        if backend is not None:
            ent['nsInstance'] = [backend]
        try:
            conn.add_entry(ent)
        except Exception as e:
            raise admintool.ScriptError('Unable to bind to LDAP server: %s' % e)
        self.log.info("Waiting for restore to finish")
        wait_for_task(conn, dn)
    else:
        # Offline: invoke the bak2db command-line tool directly.
        args = [paths.BAK2DB, '-Z', instance, archive_dir]
        if backend is not None:
            args.extend(['-n', backend])
        (_stdout, stderr, rc) = run(args, raiseonerr=False)
        if rc != 0:
            self.log.critical("bak2db failed: %s" % stderr)
def bak2db(self, instance, backend, online=True):
    '''
    Restore a BAK backup of the data and changelog in this instance.

    When backend is None every backend in the instance is restored.

    If executed online create a task and wait for it to complete.

    instance here is a loaded term. It can mean either a separate
    389-ds install instance or a separate 389-ds backend. We only need
    to treat ipaca specially.
    '''
    if backend is None:
        logger.info('Restoring %s', instance)
    else:
        logger.info('Restoring %s in %s', backend, instance)

    cn = time.strftime('restore_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'restore'), ('cn', 'tasks'), ('cn', 'config'))
    archive_dir = os.path.join(self.dir, instance)

    if not online:
        # Offline: invoke the bak2db command-line tool directly.
        args = [paths.BAK2DB, '-Z', instance, archive_dir]
        if backend is not None:
            args.extend(['-n', backend])
        result = run(args, raiseonerr=False)
        if result.returncode != 0:
            logger.critical("bak2db failed: %s", result.error_log)
        return

    # Online: add a cn=tasks entry and poll until it finishes.
    conn = self.get_connection()
    task_attrs = {
        'objectClass': ['top', 'extensibleObject'],
        'cn': [cn],
        'nsArchiveDir': [archive_dir],
        'nsDatabaseType': ['ldbm database'],
    }
    ent = conn.make_entry(dn, task_attrs)
    if backend is not None:
        ent['nsInstance'] = [backend]
    try:
        conn.add_entry(ent)
    except Exception as e:
        raise admintool.ScriptError('Unable to bind to LDAP server: %s' % e)
    logger.info("Waiting for restore to finish")
    wait_for_task(conn, dn)
def db2ldif(self, instance, backend, online=True):
    '''
    Create a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    For SELinux reasons this writes out to the 389-ds backup location
    and we move it.
    '''
    logger.info('Backing up %s in %s to LDIF', backend, instance)

    cn = time.strftime('export_%Y_%m_%d_%H_%M_%S')
    task_dn = DN(
        ('cn', cn), ('cn', 'export'), ('cn', 'tasks'), ('cn', 'config'))

    ldifname = '%s-%s.ldif' % (instance, backend)
    # Export under the 389-ds LDIF directory first; relocated below.
    ldiffile = os.path.join(
        paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance, ldifname)

    if online:
        conn = self.get_connection()
        task_attrs = {
            'objectClass': ['top', 'extensibleObject'],
            'cn': [cn],
            'nsInstance': [backend],
            'nsFilename': [ldiffile],
            'nsUseOneFile': ['true'],
            'nsExportReplica': ['true'],
        }
        try:
            conn.add_entry(conn.make_entry(task_dn, task_attrs))
        except Exception as e:
            raise admintool.ScriptError('Unable to add LDIF task: %s' % e)

        logger.info("Waiting for LDIF to finish")
        wait_for_task(conn, task_dn)
    else:
        cmd = [
            paths.DB2LDIF,
            '-Z', instance,
            '-r',
            '-n', backend,
            '-a', ldiffile,
        ]
        result = run(cmd, raiseonerr=False)
        if result.returncode != 0:
            logger.critical('db2ldif failed: %s', result.error_log)

    # Move the LDIF backup to our location
    shutil.move(ldiffile, os.path.join(self.dir, ldifname))
def ldif2db(self, instance, backend, online=True):
    '''
    Restore a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    :param instance: 389-ds instance name
    :param backend: backend (database) to import into
    :param online: if True run as an LDAP cn=tasks task
    '''
    self.log.info('Restoring from %s in %s' % (backend, instance))
    # removed unused "now = time.localtime()" local
    cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))

    ldifdir = paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance
    ldifname = '%s-%s.ldif' % (instance, backend)
    ldiffile = os.path.join(ldifdir, ldifname)
    srcldiffile = os.path.join(self.dir, ldifname)

    if not os.path.exists(ldifdir):
        pent = pwd.getpwnam(DS_USER)
        # 0o770 replaces the legacy Python 2 octal literal 0770
        os.mkdir(ldifdir, 0o770)
        os.chown(ldifdir, pent.pw_uid, pent.pw_gid)

    ipautil.backup_file(ldiffile)
    # Strip the replication RUV entry from the LDIF before importing
    with open(ldiffile, 'wb') as out_file:
        ldif_writer = ldif.LDIFWriter(out_file)
        with open(srcldiffile, 'rb') as in_file:
            ldif_parser = RemoveRUVParser(in_file, ldif_writer, self.log)
            ldif_parser.parse()

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsFilename': [ldiffile],
                'nsUseOneFile': ['true'],
            }
        )
        ent['nsInstance'] = [backend]
        try:
            conn.add_entry(ent)
        except Exception as e:  # fixed Python 2 "except Exception, e" syntax
            self.log.error("Unable to bind to LDAP server: %s" % e)
            return
        self.log.info("Waiting for LDIF to finish")
        wait_for_task(conn, dn)
def ldif2db(self, instance, backend, online=True):
    '''
    Restore a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    :param instance: 389-ds instance name
    :param backend: backend (database) to import into
    :param online: if True run as an LDAP cn=tasks task
    '''
    self.log.info('Restoring from %s in %s' % (backend, instance))
    # removed unused "now = time.localtime()" local
    cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))

    ldifdir = paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance
    ldifname = '%s-%s.ldif' % (instance, backend)
    ldiffile = os.path.join(ldifdir, ldifname)
    srcldiffile = os.path.join(self.dir, ldifname)

    if not os.path.exists(ldifdir):
        pent = pwd.getpwnam(DS_USER)
        # 0o770 replaces the legacy Python 2 octal literal 0770
        os.mkdir(ldifdir, 0o770)
        os.chown(ldifdir, pent.pw_uid, pent.pw_gid)

    ipautil.backup_file(ldiffile)
    # Strip the replication RUV entry from the LDIF before importing
    with open(ldiffile, 'wb') as out_file:
        ldif_writer = ldif.LDIFWriter(out_file)
        with open(srcldiffile, 'rb') as in_file:
            ldif_parser = RemoveRUVParser(in_file, ldif_writer, self.log)
            ldif_parser.parse()

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsFilename': [ldiffile],
                'nsUseOneFile': ['true'],
            })
        ent['nsInstance'] = [backend]
        try:
            conn.add_entry(ent)
        except Exception as e:  # fixed Python 2 "except Exception, e" syntax
            self.log.error("Unable to bind to LDAP server: %s" % e)
            return
        self.log.info("Waiting for LDIF to finish")
        wait_for_task(conn, dn)
def run_ldapi_reload_task(conn):
    """Create and wait for reload ldapi mappings task

    :param conn: ldap2 connection
    :return: exitcode
    """
    cn = "reload_{}".format(int(time.time()))
    dn = DN(
        ('cn', cn),
        ('cn', 'reload ldapi mappings'),
        ('cn', 'tasks'),
        ('cn', 'config'),
    )
    task = conn.make_entry(
        dn,
        objectClass=['top', 'extensibleObject'],
        cn=[cn],
        ttl=[10],
    )
    logger.debug('Creating reload task %s', dn)
    conn.add_entry(task)
    # task usually finishes in a few ms, avoid 1 sec delay in wait_for_task
    time.sleep(0.1)
    exitcode = replication.wait_for_task(api.Backend.ldap2, dn)
    logger.debug('Task %s has finished with exit code %i', dn, exitcode)
    return exitcode
def db2bak(self, instance, online=True):
    '''
    Create a BAK backup of the data and changelog in this instance.

    If executed online create a task and wait for it to complete.
    '''
    logger.info('Backing up %s', instance)
    cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config'))
    # 389-ds writes the BAK into its own backup directory
    bakdir = os.path.join(
        paths.SLAPD_INSTANCE_BACKUP_DIR_TEMPLATE % (instance, instance))

    if online:
        conn = self.get_connection()
        task_attrs = {
            'objectClass': ['top', 'extensibleObject'],
            'cn': [cn],
            'nsInstance': ['userRoot'],
            'nsArchiveDir': [bakdir],
            'nsDatabaseType': ['ldbm database'],
        }
        try:
            conn.add_entry(conn.make_entry(dn, task_attrs))
        except Exception as e:
            raise admintool.ScriptError(
                'Unable to to add backup task: %s' % e
            )

        logger.info("Waiting for BAK to finish")
        rc = wait_for_task(conn, dn)
        if rc != 0:
            raise admintool.ScriptError(
                'BAK online task failed. Check file systems\' free space.'
            )
    else:
        cmd = [paths.DB2BAK, bakdir, '-Z', instance]
        result = run(cmd, raiseonerr=False)
        if result.returncode != 0:
            raise admintool.ScriptError(
                'db2bak failed: %s '
                'Check if destination directory %s has enough space.'
                % (result.error_log, bakdir)
            )

    # Relocate the finished BAK into our backup directory.
    try:
        shutil.move(bakdir, self.dir)
    except (IOError, OSError) as e:
        raise admintool.ScriptError(
            'Unable to move BAK: %s '
            'Check if destination directory %s has enough space.'
            % (e, bakdir)
        )
    except Exception as e:
        raise admintool.ScriptError(
            'Unexpected error: %s' % e
        )
def reindex_task(self, force=False):
    """Reindex ipaca entries

    pkispawn sometimes does not run its indextasks. This leads to slow
    unindexed filters on attributes such as description, which is used
    to log in with a certificate. Explicitly reindex attribute that
    should have been reindexed by CA's indextasks.ldif.

    See https://pagure.io/dogtagpki/issue/3083
    """
    state_name = 'reindex_task'
    if not force and sysupgrade.get_upgrade_state('dogtag', state_name):
        return

    # from pki/base/ca/shared/conf/indextasks.ldif
    index_attributes = [
        'archivedBy', 'certstatus', 'clientId', 'dataType',
        'dateOfCreate', 'description', 'duration', 'extension',
        'issuedby', 'issuername', 'metaInfo', 'notafter',
        'notbefore', 'ownername', 'publicKeyData', 'requestid',
        'requestowner', 'requestsourceid', 'requeststate',
        'requesttype', 'revInfo', 'revokedOn', 'revokedby',
        'serialno', 'status', 'subjectname',
    ]
    cn = "indextask_ipaca_{}".format(int(time.time()))
    task_dn = DN(
        ('cn', cn), ('cn', 'index'), ('cn', 'tasks'), ('cn', 'config'))
    task_entry = api.Backend.ldap2.make_entry(
        task_dn,
        objectClass=['top', 'extensibleObject'],
        cn=[cn],
        nsInstance=['ipaca'],  # Dogtag PKI database
        nsIndexAttribute=index_attributes,
        ttl=[10],
    )
    logger.debug('Creating ipaca reindex task %s', task_dn)
    api.Backend.ldap2.add_entry(task_entry)
    logger.debug('Waiting for task...')
    exitcode = replication.wait_for_task(api.Backend.ldap2, task_dn)
    logger.debug('Task %s has finished with exit code %i',
                 task_dn, exitcode)
    sysupgrade.set_upgrade_state('dogtag', state_name, True)
def init_memberof(self):
    # Kick off the memberof fixup task and block until it completes.
    if not self.run_init_memberof:
        return

    self._ldap_mod("memberof-task.ldif", self.sub_dict)
    # Note, keep dn in sync with dn in install/share/memberof-task.ldif
    dn = DN(('cn', 'IPA install %s' % self.sub_dict["TIME"]),
            ('cn', 'memberof task'),
            ('cn', 'tasks'),
            ('cn', 'config'))
    root_logger.debug("Waiting for memberof task to complete.")
    admin_conn = ipaldap.IPAdmin(self.fqdn)
    if self.dm_password:
        # Bind as Directory Manager when we know the password ...
        admin_conn.do_simple_bind(
            DN(('cn', 'directory manager')), self.dm_password)
    else:
        # ... otherwise fall back to a GSSAPI (Kerberos) bind.
        admin_conn.do_sasl_gssapi_bind()
    replication.wait_for_task(admin_conn, dn)
    admin_conn.unbind()
def init_memberof(self):
    # Kick off the memberof fixup task and block until it completes.
    if not self.run_init_memberof:
        return

    self._ldap_mod("memberof-task.ldif", self.sub_dict)
    # Note, keep dn in sync with dn in install/share/memberof-task.ldif
    dn = DN(('cn', 'IPA install %s' % self.sub_dict["TIME"]),
            ('cn', 'memberof task'),
            ('cn', 'tasks'),
            ('cn', 'config'))
    logger.debug("Waiting for memberof task to complete.")
    conn = ipaldap.LDAPClient(ipaldap.get_ldap_uri(self.fqdn))
    if self.dm_password:
        # Bind as Directory Manager when we know the password ...
        conn.simple_bind(bind_dn=ipaldap.DIRMAN_DN,
                         bind_password=self.dm_password)
    else:
        # ... otherwise fall back to a GSSAPI (Kerberos) bind.
        conn.gssapi_bind()
    replication.wait_for_task(conn, dn)
    conn.unbind()
def init_memberof(self):
    # Run the memberof fixup task defined by memberof-task.ldif and
    # wait for it to finish before returning.
    if not self.run_init_memberof:
        return

    self._ldap_mod("memberof-task.ldif", self.sub_dict)
    # Note, keep dn in sync with dn in install/share/memberof-task.ldif
    dn = DN(('cn', 'IPA install %s' % self.sub_dict["TIME"]),
            ('cn', 'memberof task'),
            ('cn', 'tasks'),
            ('cn', 'config'))
    root_logger.debug("Waiting for memberof task to complete.")
    ldap_uri = ipaldap.get_ldap_uri(self.fqdn)
    client = ipaldap.LDAPClient(ldap_uri)
    if not self.dm_password:
        # No Directory Manager password: use GSSAPI (Kerberos).
        client.gssapi_bind()
    else:
        client.simple_bind(bind_dn=ipaldap.DIRMAN_DN,
                           bind_password=self.dm_password)
    replication.wait_for_task(client, dn)
    client.unbind()
def bak2db(self, instance, backend, online=True):
    '''
    Restore a BAK backup of the data and changelog in this instance.

    If backend is None then all backends are restored.

    If executed online create a task and wait for it to complete.

    instance here is a loaded term. It can mean either a separate
    389-ds install instance or a separate 389-ds backend. We only need
    to treat PKI-IPA and ipaca specially.

    :raises admintool.ScriptError: if the restore task cannot be added
    '''
    if backend is not None:
        self.log.info('Restoring %s in %s' % (backend, instance))
    else:
        self.log.info('Restoring %s' % instance)

    cn = time.strftime('restore_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'restore'), ('cn', 'tasks'), ('cn', 'config'))

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsArchiveDir': [os.path.join(self.dir, instance)],
                'nsDatabaseType': ['ldbm database'],
            })
        if backend is not None:
            ent['nsInstance'] = [backend]
        try:
            conn.add_entry(ent)
        except Exception as e:  # fixed Python 2 "except Exception, e" syntax
            raise admintool.ScriptError(
                'Unable to bind to LDAP server: %s' % e)
        self.log.info("Waiting for restore to finish")
        wait_for_task(conn, dn)
def __add_sids(self):
    """
    Add SIDs for existing users and groups. Make sure the task is
    finished before continuing.
    """
    try:
        # Start the sidgen task
        self._ldap_mod("ipa-sidgen-task-run.ldif", self.sub_dict)

        # Notify the user about the possible delay
        self.print_msg("This step may take considerable amount of time, please wait..")

        # Wait for the task to complete
        task_dn = DN('cn=sidgen,cn=ipa-sidgen-task,cn=tasks,cn=config')
        wait_for_task(api.Backend.ldap2, task_dn)

    except Exception as e:
        # Best-effort: a SID generation failure is logged but must not
        # abort the caller.  (Fixed "occured" -> "occurred" typo.)
        logger.warning("Exception occurred during SID generation: %s",
                       str(e))
def db2bak(self, instance, online=True):
    '''
    Create a BAK backup of the data and changelog in this instance.

    If executed online create a task and wait for it to complete.
    '''
    logger.info('Backing up %s', instance)
    cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config'))
    # 389-ds writes the BAK into its own backup directory
    bakdir = os.path.join(
        paths.SLAPD_INSTANCE_BACKUP_DIR_TEMPLATE % (instance, instance))

    if online:
        conn = self.get_connection()
        task_attrs = {
            'objectClass': ['top', 'extensibleObject'],
            'cn': [cn],
            'nsInstance': ['userRoot'],
            'nsArchiveDir': [bakdir],
            'nsDatabaseType': ['ldbm database'],
        }
        try:
            conn.add_entry(conn.make_entry(dn, task_attrs))
        except Exception as e:
            raise admintool.ScriptError(
                'Unable to to add backup task: %s' % e)

        logger.info("Waiting for BAK to finish")
        rc = wait_for_task(conn, dn)
        if rc != 0:
            raise admintool.ScriptError(
                'BAK online task failed. Check file systems\' free space.')
    else:
        cmd = [paths.DSCTL, instance, 'db2bak', bakdir]
        result = run(cmd, raiseonerr=False)
        if result.returncode != 0:
            raise admintool.ScriptError(
                'db2bak failed: %s '
                'Check if destination directory %s has enough space.'
                % (result.error_log, bakdir))

    # Relocate the finished BAK into our backup directory.
    try:
        shutil.move(bakdir, self.dir)
    except (IOError, OSError) as e:
        raise admintool.ScriptError(
            'Unable to move BAK: %s '
            'Check if destination directory %s has enough space.'
            % (e, bakdir))
    except Exception as e:
        raise admintool.ScriptError('Unexpected error: %s' % e)
def db2bak(self, instance, online=True):
    '''
    Create a BAK backup of the data and changelog in this instance.

    If executed online create a task and wait for it to complete.
    '''
    logger.info('Backing up %s', instance)
    cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config'))
    # 389-ds writes the BAK into its own backup directory
    bakdir = os.path.join(
        paths.SLAPD_INSTANCE_BACKUP_DIR_TEMPLATE % (instance, instance))

    if not online:
        # Offline: run db2bak directly against the stopped instance.
        result = run([paths.DB2BAK, bakdir, '-Z', instance],
                     raiseonerr=False)
        if result.returncode != 0:
            logger.critical('db2bak failed: %s', result.error_log)
    else:
        conn = self.get_connection()
        task_attrs = {
            'objectClass': ['top', 'extensibleObject'],
            'cn': [cn],
            'nsInstance': ['userRoot'],
            'nsArchiveDir': [bakdir],
            'nsDatabaseType': ['ldbm database'],
        }
        try:
            conn.add_entry(conn.make_entry(dn, task_attrs))
        except Exception as e:
            raise admintool.ScriptError(
                'Unable to to add backup task: %s' % e)
        logger.info("Waiting for BAK to finish")
        wait_for_task(conn, dn)

    # Relocate the finished BAK into our backup directory.
    shutil.move(bakdir, self.dir)
def db2ldif(self, instance, backend, online=True):
    '''
    Create a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    For SELinux reasons this writes out to the 389-ds backup location
    and we move it.

    :raises admintool.ScriptError: if the export task cannot be added
    '''
    self.log.info('Backing up %s in %s to LDIF' % (backend, instance))
    # removed unused "now = time.localtime()" local
    cn = time.strftime('export_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'export'), ('cn', 'tasks'), ('cn', 'config'))

    ldifname = '%s-%s.ldif' % (instance, backend)
    ldiffile = os.path.join(
        paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance,
        ldifname)

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsInstance': [backend],
                'nsFilename': [ldiffile],
                'nsUseOneFile': ['true'],
                'nsExportReplica': ['true'],
            })
        try:
            conn.add_entry(ent)
        except Exception as e:  # fixed Python 2 "except Exception, e" syntax
            raise admintool.ScriptError('Unable to add LDIF task: %s' % e)
        self.log.info("Waiting for LDIF to finish")
        wait_for_task(conn, dn)
def ldif2db(self, instance, backend, online=True):
    '''
    Restore a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    :raises admintool.ScriptError: if the import task cannot be added
    '''
    self.log.info('Restoring from %s in %s' % (backend, instance))
    # removed unused "now = time.localtime()" local
    cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))

    ldifname = '%s-%s.ldif' % (instance, backend)
    ldiffile = os.path.join(self.dir, ldifname)

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsFilename': [ldiffile],
                'nsUseOneFile': ['true'],
            }
        )
        ent['nsInstance'] = [backend]
        try:
            conn.add_entry(ent)
        except Exception as e:  # fixed Python 2 "except Exception, e" syntax
            raise admintool.ScriptError(
                'Unable to bind to LDAP server: %s' % e)
        self.log.info("Waiting for LDIF to finish")
        wait_for_task(conn, dn)
def reindex_task(self, force=False):
    """Reindex ipaca entries

    pkispawn sometimes does not run its indextasks. This leads to slow
    unindexed filters on attributes such as description, which is used
    to log in with a certificate. Explicitly reindex attribute that
    should have been reindexed by CA's indextasks.ldif.

    See https://pagure.io/dogtagpki/issue/3083
    """
    state_name = 'reindex_task'
    if not force and sysupgrade.get_upgrade_state('dogtag', state_name):
        return

    # from pki/base/ca/shared/conf/indextasks.ldif
    attrs_to_index = [
        'archivedBy', 'certstatus', 'clientId', 'dataType',
        'dateOfCreate', 'description', 'duration', 'extension',
        'issuedby', 'issuername', 'metaInfo', 'notafter',
        'notbefore', 'ownername', 'publicKeyData', 'requestid',
        'requestowner', 'requestsourceid', 'requeststate',
        'requesttype', 'revInfo', 'revokedOn', 'revokedby',
        'serialno', 'status', 'subjectname',
    ]
    cn = "indextask_ipaca_{}".format(int(time.time()))
    dn = DN(
        ('cn', cn), ('cn', 'index'), ('cn', 'tasks'), ('cn', 'config')
    )
    entry = api.Backend.ldap2.make_entry(
        dn,
        objectClass=['top', 'extensibleObject'],
        cn=[cn],
        nsInstance=['ipaca'],  # Dogtag PKI database
        nsIndexAttribute=attrs_to_index,
        ttl=[10],
    )
    logger.debug('Creating ipaca reindex task %s', dn)
    api.Backend.ldap2.add_entry(entry)
    logger.debug('Waiting for task...')
    exitcode = replication.wait_for_task(api.Backend.ldap2, dn)
    logger.debug(
        'Task %s has finished with exit code %i', dn, exitcode
    )
    sysupgrade.set_upgrade_state('dogtag', state_name, True)
def ldif2db(self, instance, backend, online=True):
    '''
    Restore a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    :param instance: 389-ds instance name
    :param backend: backend (database) to import into
    :param online: if True run as an LDAP task, otherwise exec ldif2db
    '''
    logger.info('Restoring from %s in %s', backend, instance)
    cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))

    ldifdir = paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance
    ldifname = '%s-%s.ldif' % (instance, backend)
    ldiffile = os.path.join(ldifdir, ldifname)
    srcldiffile = os.path.join(self.dir, ldifname)

    if not os.path.exists(ldifdir):
        pent = pwd.getpwnam(constants.DS_USER)
        os.mkdir(ldifdir)
        os.chmod(ldifdir, 0o770)
        os.chown(ldifdir, pent.pw_uid, pent.pw_gid)

    ipautil.backup_file(ldiffile)
    # Strip the replication RUV entry from the LDIF before importing
    with open(ldiffile, 'w') as out_file:
        ldif_writer = ldif.LDIFWriter(out_file)
        with open(srcldiffile, 'rb') as in_file:
            ldif_parser = RemoveRUVParser(in_file, ldif_writer)
            ldif_parser.parse()

    # Make sure the modified ldiffile is owned by DS_USER
    pent = pwd.getpwnam(constants.DS_USER)
    os.chown(ldiffile, pent.pw_uid, pent.pw_gid)

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsFilename': [ldiffile],
                'nsUseOneFile': ['true'],
            }
        )
        ent['nsInstance'] = [backend]
        try:
            conn.add_entry(ent)
        except Exception as e:
            logger.error("Unable to bind to LDAP server: %s", e)
            return
        logger.info("Waiting for LDIF to finish")
        wait_for_task(conn, dn)
    else:
        try:
            os.makedirs(paths.VAR_LOG_DIRSRV_INSTANCE_TEMPLATE % instance)
        except OSError:
            # directory may already exist; dropped unused "as e" binding
            pass
        args = [paths.LDIF2DB,
                '-Z', instance,
                '-i', ldiffile,
                '-n', backend]
        result = run(args, raiseonerr=False)
        if result.returncode != 0:
            logger.critical("ldif2db failed: %s", result.error_log)
def ldif2db(self, instance, backend, online=True):
    '''
    Restore a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    :param instance: 389-ds instance name
    :param backend: backend (database) to import into
    :param online: if True run as an LDAP task, otherwise exec ldif2db
    '''
    self.log.info('Restoring from %s in %s' % (backend, instance))
    cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))

    ldifdir = paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance
    ldifname = '%s-%s.ldif' % (instance, backend)
    ldiffile = os.path.join(ldifdir, ldifname)
    srcldiffile = os.path.join(self.dir, ldifname)

    if not os.path.exists(ldifdir):
        pent = pwd.getpwnam(constants.DS_USER)
        os.mkdir(ldifdir)
        os.chmod(ldifdir, 0o770)
        os.chown(ldifdir, pent.pw_uid, pent.pw_gid)

    ipautil.backup_file(ldiffile)
    # Strip the replication RUV entry from the LDIF before importing.
    # NOTE(review): output is opened in binary mode here while a sibling
    # variant uses text mode — ldif.LDIFWriter writes str on Python 3;
    # confirm the intended Python/python-ldap version for this code path.
    with open(ldiffile, 'wb') as out_file:
        ldif_writer = ldif.LDIFWriter(out_file)
        with open(srcldiffile, 'rb') as in_file:
            ldif_parser = RemoveRUVParser(in_file, ldif_writer, self.log)
            ldif_parser.parse()

    # Make sure the modified ldiffile is owned by DS_USER
    pent = pwd.getpwnam(constants.DS_USER)
    os.chown(ldiffile, pent.pw_uid, pent.pw_gid)

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsFilename': [ldiffile],
                'nsUseOneFile': ['true'],
            }
        )
        ent['nsInstance'] = [backend]
        try:
            conn.add_entry(ent)
        except Exception as e:
            self.log.error("Unable to bind to LDAP server: %s" % e)
            return
        self.log.info("Waiting for LDIF to finish")
        wait_for_task(conn, dn)
    else:
        try:
            os.makedirs(paths.VAR_LOG_DIRSRV_INSTANCE_TEMPLATE % instance)
        except OSError:
            # directory may already exist; dropped unused "as e" binding
            pass
        args = [paths.LDIF2DB,
                '-Z', instance,
                '-i', ldiffile,
                '-n', backend]
        result = run(args, raiseonerr=False)
        if result.returncode != 0:
            self.log.critical("ldif2db failed: %s" % result.error_log)