def backup_smb_dbs(self, private_dir, samdb, lp, logger):
    """Backup sam.ldb and the partition databases under sam.ldb.d.

    Partition files are copied with a backend-appropriate copy function
    (mdb vs tdb); non-.ldb files in sam.ldb.d are plain-copied. For tdb
    backends, a transaction is held on sam.ldb for the duration of the
    copy so the database set is captured in a consistent state.

    :param private_dir: directory containing sam.ldb
    :param samdb: open SamDB used to probe the @PARTITION record
    :param lp: loadparm context
    :param logger: logger for progress messages
    """
    # First, determine if DB backend is MDB. Assume not unless there is a
    # 'backendStore' attribute on @PARTITION containing the text 'mdb'
    store_label = "backendStore"
    res = samdb.search(base="@PARTITION", scope=ldb.SCOPE_BASE,
                       attrs=[store_label])
    # ldb attribute values are bytes under Python 3, so compare via str();
    # a direct == 'mdb' comparison would always be False and the mdb
    # backend would never be detected.
    mdb_backend = store_label in res[0] and \
        str(res[0][store_label][0]) == 'mdb'

    sam_ldb_path = os.path.join(private_dir, 'sam.ldb')
    copy_function = None
    if mdb_backend:
        logger.info('MDB backend detected. Using mdb backup function.')
        copy_function = self.offline_mdb_copy
    else:
        logger.info('Starting transaction on ' + sam_ldb_path)
        copy_function = self.offline_tdb_copy
        sam_obj = Ldb(sam_ldb_path, lp=lp)
        sam_obj.transaction_start()

    # sam.ldb itself is always a tdb file, even with an mdb backend;
    # only the partition files under sam.ldb.d use the mdb copier.
    logger.info(' backing up ' + sam_ldb_path)
    self.offline_tdb_copy(sam_ldb_path)

    sam_ldb_d = sam_ldb_path + '.d'
    for sam_file in os.listdir(sam_ldb_d):
        sam_file = os.path.join(sam_ldb_d, sam_file)
        if sam_file.endswith('.ldb'):
            logger.info(' backing up locked/related file ' + sam_file)
            copy_function(sam_file)
        else:
            logger.info(' copying locked/related file ' + sam_file)
            shutil.copyfile(sam_file, sam_file + self.backup_ext)

    # Cancel (not commit): the transaction was only held for its lock.
    if not mdb_backend:
        sam_obj.transaction_cancel()
def backup_secrets(self, private_dir, lp, logger):
    """Back up secrets.ldb and secrets.tdb.

    A transaction is held on secrets.ldb while both files are copied so
    they are captured in a consistent state; it is cancelled afterwards
    since it exists purely for the lock.
    """
    base_path = os.path.join(private_dir, 'secrets')
    lock_db = Ldb(base_path + '.ldb', lp=lp)
    logger.info('Starting transaction on ' + base_path)
    lock_db.transaction_start()
    for suffix in ('.ldb', '.tdb'):
        self.offline_tdb_copy(base_path + suffix)
    lock_db.transaction_cancel()
def ldif_to_samdb(dburl, lp, ldif_file, forced_local_dsa=None):
    """Routine to import all objects and attributes that are relevant to
    the KCC algorithms from a previously exported LDIF file.

    The point of this function is to allow a programmer/debugger to
    import an LDIF file with non-security relevant information that
    was previously extracted from a DC database. The LDIF file is used
    to create a temporary abbreviated database. The KCC algorithm can
    then run against this abbreviated database for debug or test
    verification that the topology generated is computationally the
    same between different OSes and algorithms.

    :param dburl: path to the temporary abbreviated db to create
    :param lp: loadparm context
    :param ldif_file: path to the ldif file to import
    :param forced_local_dsa: if set, DN spliced into @ROOTDSE
        dsServiceName so this DSA is treated as the local one
    :return: SamDB opened on the newly created abbreviated database
    :raises LdifError: if dburl already exists or the import fails
    """
    if os.path.exists(dburl):
        raise LdifError("Specify a database (%s) that doesn't already exist." %
                        dburl)

    # Use ["modules:"] as we are attempting to build a sam
    # database as opposed to start it here.
    tmpdb = Ldb(url=dburl, session_info=system_session(),
                lp=lp, options=["modules:"])
    tmpdb.transaction_start()
    try:
        data = read_and_sub_file(ldif_file, None)
        tmpdb.add_ldif(data, None)
        if forced_local_dsa:
            tmpdb.modify_ldif("""dn: @ROOTDSE
changetype: modify
replace: dsServiceName
dsServiceName: CN=NTDS Settings,%s
""" % forced_local_dsa)

        tmpdb.add_ldif("""dn: @MODULES
@LIST: rootdse,extended_dn_in,extended_dn_out_ldb,objectguid
-
""")

    except Exception as estr:
        tmpdb.transaction_cancel()
        # Chain the original exception so the root cause is preserved
        # in the traceback instead of being flattened into a string.
        raise LdifError("Failed to import %s: %s" %
                        (ldif_file, estr)) from estr

    tmpdb.transaction_commit()

    # We have an abbreviated list of options here because we have built
    # an abbreviated database. We use the rootdse and extended-dn
    # modules only during this re-open
    samdb = SamDB(url=dburl, session_info=system_session(), lp=lp)
    return samdb
def run(self, sambaopts=None, targetdir=None):
    """Run an offline backup of an AD DC into a tar.bz2 in targetdir.

    Walks the private, state and smb.conf directories, copies every
    database (under transactions / tdbbackup / mdb copy as appropriate),
    marks the backed-up samdb with a backup date and a restorable SID,
    adds an offline sysvol ntacl backup plus a log file, then assembles
    everything into samba-backup-<timestamp>.tar.bz2.

    :param sambaopts: samba options (provides the loadparm context)
    :param targetdir: directory the final tar file is written to
    :raises CommandError: if no sam database is found (not an AD DC)
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))

    # Get the absolute paths of all the directories we're going to backup
    lp = sambaopts.get_loadparm()
    paths = samba.provision.provision_paths_from_lp(lp, lp.get('realm'))
    if not (paths.samdb and os.path.exists(paths.samdb)):
        raise CommandError('No sam.db found. This backup ' +
                           'tool is only for AD DCs')

    check_targetdir(logger, targetdir)

    samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp)
    sid = get_sid_for_restore(samdb)

    backup_dirs = [paths.private_dir, paths.state_dir,
                   os.path.dirname(paths.smbconf)]  # etc dir
    logger.info('running backup on dirs: {}'.format(backup_dirs))

    # Recursively get all file paths in the backup directories
    all_files = []
    for backup_dir in backup_dirs:
        for (working_dir, _, filenames) in os.walk(backup_dir):
            if working_dir.startswith(paths.sysvol):
                continue
            for filename in filenames:
                full_path = os.path.join(working_dir, filename)

                # Dedupe on the full path: the backup dirs can overlap,
                # and comparing the bare filename against a list of full
                # paths (as before) never matched, allowing the same
                # file to be added to the tar twice.
                if full_path in all_files:
                    continue

                # Assume existing backup files are from a previous backup.
                # Delete and ignore.
                if filename.endswith(self.backup_ext):
                    os.remove(full_path)
                    continue
                all_files.append(full_path)

    # Backup secrets, sam.ldb and their downstream files
    self.backup_secrets(paths.private_dir, lp, logger)
    self.backup_smb_dbs(paths.private_dir, samdb, lp, logger)

    # Open the new backed up samdb, flag it as backed up, and write
    # the next SID so the restore tool can add objects.
    # WARNING: Don't change this code unless you know what you're doing.
    #          Writing to a .bak file only works because the DN being
    #          written to happens to be top level.
    samdb = SamDB(url=paths.samdb + self.backup_ext,
                  session_info=system_session(), lp=lp)
    time_str = get_timestamp()
    add_backup_marker(samdb, "backupDate", time_str)
    add_backup_marker(samdb, "sidForRestore", sid)

    # Now handle all the LDB and TDB files that are not linked to
    # anything else. Use transactions for LDBs.
    for path in all_files:
        if not os.path.exists(path + self.backup_ext):
            if path.endswith('.ldb'):
                logger.info('Starting transaction on solo db: ' + path)
                ldb_obj = Ldb(path, lp=lp)
                ldb_obj.transaction_start()
                logger.info(' running tdbbackup on the same file')
                self.offline_tdb_copy(path)
                ldb_obj.transaction_cancel()
            elif path.endswith('.tdb'):
                logger.info('running tdbbackup on lone tdb file ' + path)
                self.offline_tdb_copy(path)

    # Now make the backup tar file and add all
    # backed up files and any other files to it.
    temp_tar_dir = tempfile.mkdtemp(dir=targetdir,
                                    prefix='INCOMPLETEsambabackupfile')
    temp_tar_name = os.path.join(temp_tar_dir, "samba-backup.tar.bz2")
    tar = tarfile.open(temp_tar_name, 'w:bz2')

    logger.info('running offline ntacl backup of sysvol')
    sysvol_tar_fn = 'sysvol.tar.gz'
    sysvol_tar = os.path.join(temp_tar_dir, sysvol_tar_fn)
    backup_offline(paths.sysvol, sysvol_tar, samdb, paths.smbconf)
    tar.add(sysvol_tar, sysvol_tar_fn)
    os.remove(sysvol_tar)

    create_log_file(temp_tar_dir, lp, "offline", "localhost", True)
    backup_fn = os.path.join(temp_tar_dir, "backup.txt")
    tar.add(backup_fn, os.path.basename(backup_fn))
    os.remove(backup_fn)

    logger.info('building backup tar')
    for path in all_files:
        arc_path = self.get_arc_path(path, paths)

        if os.path.exists(path + self.backup_ext):
            logger.info(' adding backup ' + arc_path + self.backup_ext +
                        ' to tar and deleting file')
            tar.add(path + self.backup_ext, arcname=arc_path)
            os.remove(path + self.backup_ext)
        elif path.endswith('.ldb') or path.endswith('.tdb'):
            logger.info(' skipping ' + arc_path)
        else:
            logger.info(' adding misc file ' + arc_path)
            tar.add(path, arcname=arc_path)

    tar.close()
    # Rename from the INCOMPLETE temp name only after the tar is fully
    # written, so an interrupted run never leaves a plausible-looking
    # backup behind.
    os.rename(temp_tar_name,
              os.path.join(targetdir,
                           'samba-backup-{}.tar.bz2'.format(time_str)))
    os.rmdir(temp_tar_dir)
    logger.info('Backup succeeded.')