def force_overwrite(config, unpack_path, p_len):
    '''
    Restore Files Without Checksums
    '''
    fname = 'custom_dirs.force_overwrite()'

    # Allow Exit Since This Is a Bad Idea
    paf.prWarning('OVERWRITING FILES WITHOUT CHECKSUMS CAN BE EXTREMELY DANGEROUS!')
    if paf.yn_frame('Do You Still Want to Continue and Restore ALL The Files You Stored?') is False:
        return

    # Overwrite Files
    paf.write_to_log(fname, 'Starting Force Overwrite Process...', config['log'])
    print('Starting Full File Restore! Please Be Patient As All Files Are Overwritten...')
    fs_stored = paf.find_files(unpack_path)
    try:
        fs_stored.remove(unpack_path + '/folder_permissions.pickle')
    except Exception:
        pass
    make_missing_dirs(config, unpack_path, p_len)
    for f in track(fs_stored, description='Overwriting Files'):
        shutil.move(f, f[p_len:])

    paf.prSuccess('Done Overwriting Files!')
    paf.write_to_log(fname, 'Finished Force Overwrite Of Files', config['log'])
def search_cache(pkg_list, fs_list, config):
    '''
    Searches the cache for matching pkg versions and returns the results.
    Because of the way files are named and the output given by pacman -Q,
    regex is needed to find the version in the cached package path.
    No performance is gained with more than 4 threads on this function.
    '''
    fname = 'utils.search_cache(' + str(len(pkg_list)) + ')'
    thread_cap = 4

    # Combining Package Names Into One Term Provides Much Faster Results
    paf.write_to_log(fname, 'Started Search for Matching Versions...', config['log'])
    bulk_search = '|'.join(re.escape(pkg) for pkg in pkg_list)

    # Chunks the File List Into Pieces For a Multi-Threaded Search
    chunk_size = int(round(len(fs_list) / paf.max_threads(thread_cap), 0)) + 1
    fs_list = list(fs_list)
    chunks = [fs_list[i:i + chunk_size] for i in range(0, len(fs_list), chunk_size)]

    # Creates a Pool of Threads to Run Regex Searches
    with mp.Pool(processes=paf.max_threads(thread_cap)) as pool:
        found_pkgs = pool.starmap(search_pkg_chunk,
                                  zip(itertools.repeat(bulk_search), chunks))
        found_pkgs = set(itertools.chain(*found_pkgs))

    paf.write_to_log(fname, 'Found ' + str(len(found_pkgs)) + ' OUT OF '
                     + str(len(pkg_list)) + ' Packages', config['log'])
    return found_pkgs
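# For reference: `search_pkg_chunk` is defined elsewhere in this module. A
# minimal sketch of the same idea — one worker filtering its chunk of paths
# against the combined alternation pattern — might look like this
# (hypothetical stand-in, not the actual implementation):
def _search_pkg_chunk_sketch(pattern, paths):
    # Keep only the paths that match the bulk regex.
    return {p for p in paths if re.search(pattern, p)}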
def too_many_pkgs_found(config, parms, found_pkgs, pkg_results):
    """
    This auto-resolves some very bizarre edge cases I have run into.
    """
    fname = 'error.too_many_pkgs_found(' + parms['type'] + parms['id'] + ')'
    paf.write_to_log(fname, 'Starting Debug Process...', config['log'])
    found_files = utils.trim_pkg_list(paf.basenames(found_pkgs))
    search_files = paf.basenames(pkg_results['search'])
    bad_files = found_files - search_files
    paf.write_to_log(fname, 'Debug Process Found ' + str(len(bad_files))
                     + ' Files That Do Not Belong!', config['log'])

    if len(found_files) - len(search_files) == len(bad_files):
        paf.write_to_log(fname, 'Cleaning Found Files...', config['log'])
        bad_files_full = set()
        for b in bad_files:
            for f in found_pkgs:
                # Escape the basename so regex metachars (e.g. '+') match literally
                if re.search(re.escape(b), f):
                    bad_files_full.add(f)
        for f in bad_files_full:
            found_pkgs.remove(f)
        paf.write_to_log(fname, 'Debug Process Was Able to Fix All Issues!', config['log'])
        return (True, found_pkgs)
    else:
        paf.write_to_log(fname, 'Debug Process Was NOT Able to Fix All Issues!', config['log'])
        return (False, found_pkgs)
def make_missing_dirs(config, unpack_path, p_len):
    '''
    This is an add-on function that restores permissions to folders.
    This was a known bug for all of alpha but is finally patched.
    Folder permissions aren't stored in a tar, so a separate system
    was created to handle this using pickle and paf functions.
    '''
    fname = 'custom_dirs.make_missing_dirs()'

    # Find All Subdirs
    dirs = paf.find_subdirs(unpack_path)
    # Sort Subdirs by Depth So Parent Dirs Are Created First
    dirs.sort(key=lambda x: x.count('/'))

    for d in dirs:
        if not os.path.exists(d[p_len:]):
            os.makedirs(d[p_len:])

    if os.path.exists(unpack_path + '/folder_permissions.pickle'):
        # Load Folder Permissions Pickle
        folder_perms = pickle.load(open(unpack_path + '/folder_permissions.pickle', 'rb'))

        for x in folder_perms:
            os.system('chmod ' + paf.perm_to_num(x[1]) + ' ' + paf.escape_bash_input(x[0]))
            os.system('chown ' + x[2] + ':' + x[3] + ' ' + paf.escape_bash_input(x[0]))
    else:
        paf.write_to_log(fname, 'Folder Permissions Pickle is Missing!', config['log'])
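# Judging by the x[0]..x[3] indexing above, each pickled entry appears to be a
# (path, permissions, owner, group) tuple, with permissions stored in the
# symbolic form that paf.perm_to_num() converts (e.g. 'rwxr-xr-x' -> '755').
# A pure-Python restore of one entry, sketched under that assumption with the
# mode already converted to an octal string, could avoid the shell-outs:
def _apply_folder_perm_sketch(path, mode_octal, owner, group):
    os.chmod(path, int(mode_octal, 8))            # e.g. int('755', 8)
    shutil.chown(path, user=owner, group=group)   # resolves names to uid/gid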
def unlock(config):
    '''
    Removes the session lock defined by config['slock'].
    This releases the lock that was created by session.lock()
    '''
    fname = 'session.unlock()'
    paf.write_to_log(fname, 'Ended Active Session', config['log'])
    paf.end_log(fname, config['log'], config['log_length'])
def hlock_kill(config):
    '''
    Removes the hook lock file without any checks.
    This currently isn't used anywhere; it's just future-proofing.
    '''
    fname = 'session.hlock_kill()'
    paf.rm_file(config['hlock'], sudo=False)
    paf.write_to_log(fname, 'Force Ended Hook Lock!', config['log'])
def abort_fail(func, output, message, config):
    '''
    This is a surrogate function for other functions to safely abort
    runtime during a failure. It reports the func sending the kill
    request as the origin, rather than session.abort_fail() itself.
    '''
    paf.write_to_log(func, 'FAILURE: ' + output, config['log'])
    unlock(config)
    paf.prError(message)
    sys.exit()
def hlock_start(config):
    '''
    This starts a hook lock, overwriting the previous lock.
    This should be triggered at the end of a successful `--hook` run.
    '''
    fname = 'session.hlock_start(' + str(config['hook_cooldown']) + ')'
    stime = 'Created: ' + dt.datetime.now().strftime("%Y:%m:%d:%H:%M:%S")
    paf.export_iterable(config['hlock'], [stime])
    paf.write_to_log(fname, 'Created Hook Lock With ' + str(config['hook_cooldown'])
                     + ' Second Cooldown', config['log'])
def abort(func, output, message, config):
    '''
    This is a surrogate function for other functions to safely abort
    runtime WITHOUT reporting as an internal error. This is useful for
    non-critical issues that still require a runtime exit. It reports
    the func sending the kill signal as the origin, rather than
    session.abort() itself.
    '''
    paf.write_to_log(func, 'ABORT: ' + output, config['log'])
    unlock(config)
    paf.prBold(message)
    sys.exit(0)
def remove_id(config, info):
    '''Removes a selected id based on its type.'''
    fname = 'utils.remove_id(' + info['type'] + info['id'] + ')'
    paf.rm_file(info['meta'], sudo=False)
    paf.rm_file(info['meta_md5'], sudo=False)
    if info['type'] == 'rp':
        paf.rm_dir(info['path'], sudo=False)
    paf.write_to_log(fname, 'Removal Complete', config['log'])
def pacman_hook(install, config):
    '''
    Installs or removes a standard alpm hook in /usr/share/libalpm/hooks/
    which runs as a PreTransaction hook during every pacman transaction.
    `install = True` Installs the Pacman Hook
    `install = False` Removes the Pacman Hook
    '''
    if install is True:
        fname = 'utils.pacman_hook(install)'
        paf.write_to_log(fname, 'Starting Hook Installation...', config['log'])
        hook = [
            '[Trigger]',
            'Operation = Install',
            'Operation = Remove',
            'Operation = Upgrade',
            'Type = Package',
            'Target = *',
            '',
            '[Action]',
            'Description = Pre-Upgrade Pacback Hook',
            'Depends = pacman',
            'When = PreTransaction',
            'Exec = /usr/bin/pacback --hook'
        ]
        paf.export_iterable('/usr/share/libalpm/hooks/pacback.hook', hook)
        paf.prSuccess('Pacback Hook is Now Installed!')
        paf.write_to_log(fname, 'Installed Pacback PreTransaction Hook', config['log'])

    elif install is False:
        fname = 'utils.pacman_hook(remove)'
        paf.write_to_log(fname, 'Starting Hook Removal...', config['log'])
        paf.rm_file('/usr/share/libalpm/hooks/pacback.hook', sudo=False)
        paf.write_to_log(fname, 'Removed Pacback PreTransaction Hook', config['log'])
        paf.prSuccess('Pacback Hook Was Removed!')
def validate(config, info):
    '''
    Checks if a meta file has become corrupted or is missing.
    '''
    fname = 'meta.validate(' + info['type'] + info['id'] + ')'

    if os.path.exists(info['meta']) and os.path.exists(info['meta_md5']):
        paf.write_to_log(fname, 'Meta File and Meta Checksum Are Present', config['log'])
        csum = str(open(info['meta_md5']).read()).strip()
        msum = str(paf.checksum_file(info['meta'])[1]).strip()

        if csum == msum:
            paf.write_to_log(fname, 'Meta Passed Checksum', config['log'])
            return
        else:
            paf.write_to_log(fname, 'Meta Checksum FAILED!', config['log'])
            paf.prError(info['TYPE'] + ' ' + info['id'] + ' Has Failed Its Checksum Check!')
            paf.prError('This ' + info['TYPE'] + ' Has Likely Become Corrupt!')

            if paf.yn_frame('Do You Want to Remove This ' + info['TYPE'] + ' Now?') is True:
                utils.remove_id(config, info)
                session.abort(fname, 'User Deleted Corrupted ' + info['TYPE'],
                              info['TYPE'] + ' Was Removed. Exiting Now!', config)
            else:
                session.abort(fname, 'User Chose NOT to Remove Corrupted ' + info['TYPE'],
                              'Okay, Leaving the ' + info['TYPE'] + ' Alone. Exiting Now!', config)

    elif os.path.exists(info['meta']) and not os.path.exists(info['meta_md5']):
        paf.write_to_log(fname, 'Meta File is Missing Its Checksum File!', config['log'])
        paf.prError(info['TYPE'] + ' ' + info['id'] + ' is Missing a Checksum!')
        if paf.yn_frame('Do You Still Want To Continue?') is False:
            session.abort(fname, 'User Exited Due to Missing Checksum File',
                          'Okay, Aborting Due to Missing Checksum', config)
        else:
            paf.write_to_log(fname, 'User Chose To Continue Even Though The Checksum is Missing',
                             config['log'])
            return
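# paf.checksum_file() is a paf helper; the [1] indexing above suggests it
# returns a (path, digest) style tuple. A minimal hashlib-based stand-in
# (an assumption, not paf's actual implementation) for reference:
def _md5_of_file_sketch(path, block_size=65536):
    import hashlib
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(block_size), b''):
            h.update(chunk)
    return (path, h.hexdigest())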
def repack(config, info, unpack_path):
    '''
    Cleans up after comparing an already created custom tar.
    '''
    fname = 'custom_dirs.repack()'
    paf.rm_dir(unpack_path, sudo=False)
    paf.write_to_log(fname, 'Cleaned Up Unpacked Files', config['log'])

    if os.path.exists(info['tar']):
        # Re-Compress Custom Tar
        print('Re-Compressing Tar...')
        if any(re.findall('pigz', line.lower()) for line in utils.pacman_Q()):
            os.system('/usr/bin/pigz ' + info['tar'] + ' -f')
        else:
            paf.gz_c(info['tar'], rm=True)
        paf.write_to_log(fname, 'Compressed ' + info['tar'], config['log'])
def restore_point(config, id_num):
    '''
    This preps the system for a restoration, then hands off to restore.main()
    '''
    id_num = str(id_num).zfill(2)
    fname = 'restore.restore_point(' + id_num + ')'
    paf.write_to_log(fname, 'Started Restoring Restore Point ID:' + id_num, config['log'])
    info = {
        'id': id_num,
        'type': 'rp',
        'TYPE': 'Restore Point',
        'meta': config['rp_paths'] + '/rp' + id_num + '.meta',
        'meta_md5': config['rp_paths'] + '/.rp' + id_num + '.md5',
        'path': config['rp_paths'] + '/rp' + id_num,
        'pkgcache': config['rp_paths'] + '/rp' + id_num + '/pkg-cache',
        'tar': config['rp_paths'] + '/rp' + id_num + '/rp' + id_num + '_dirs.tar',
        'tar.gz': config['rp_paths'] + '/rp' + id_num + '/rp' + id_num + '_dirs.tar.gz'
    }

    # Read Meta File, Check Version, Compare Results
    meta.validate(config, info)
    rp_dict = meta.read(config, info['meta'])
    version.compare(config, rp_dict['version'])
    main(config, info, meta.compare_now(config, rp_dict))

    # Unpack and Compare Directories Stored By User
    if rp_dict['dir_list']:
        custom_dirs.restore(config, info, rp_dict['dir_list'], rp_dict['tar_csum'])

    # Finish Last Checks and Exit
    utils.reboot_check(config)
    paf.write_to_log(fname, 'Finished Restoring Restore Point ID:' + id_num, config['log'])
def hlock_check(config):
    '''
    If config['hlock'] exists, it checks whether the lock was created
    less than config['hook_cooldown'] seconds ago.
    '''
    fname = 'session.hlock_check(' + str(config['hook_cooldown']) + ')'

    if os.path.exists(config['hlock']):
        # Strip the "['Created: " prefix and trailing wrappers from the repr,
        # leaving the raw YYYY:MM:DD:HH:MM:SS timestamp
        f = str(open(config['hlock'], 'r').readlines())[11:-4]
        f = f.split(':')
        hc = dt.datetime(int(f[0]), int(f[1]), int(f[2]), int(f[3]), int(f[4]), int(f[5]))
        sec_dif = (dt.datetime.now() - hc).total_seconds()

        if sec_dif > config['hook_cooldown']:
            paf.write_to_log(fname, 'Passed Cooldown Check', config['log'])
        else:
            abort(fname, 'A Hook Lock Was Created ' + str(sec_dif) + ' Seconds Ago!',
                  'Aborting: A Snapshot Was Created Less Than '
                  + str(config['hook_cooldown']) + ' Seconds Ago!', config)
    else:
        paf.write_to_log(fname, 'Passed Check, No Previous Lock Found', config['log'])
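# The slicing above works on the repr() of readlines(); a more direct parse of
# the single 'Created: ...' line written by hlock_start() would be strptime
# with the same format string (a sketch, assuming a one-line lock file):
def _read_hlock_time_sketch(path):
    with open(path, 'r') as f:
        stamp = f.readline().strip()          # 'Created: 2021:01:30:14:05:09'
    return dt.datetime.strptime(stamp[len('Created: '):], '%Y:%m:%d:%H:%M:%S')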
def reboot_check(config):
    '''
    Checks running and installed kernel versions to determine
    if a reboot is needed.
    '''
    fname = 'utils.reboot_check()'

    cmd = "file -bL /boot/vmlinuz* | grep -o 'version [^ ]*' | cut -d ' ' -f 2 && uname -r"
    raw = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    # Parse the repr() of the output tuple, splitting on the literal '\n' escapes
    out = str(raw.communicate())[3:]
    out = out.split('\n')
    out = out[0].split('\\n')[:-1]

    if out[0].strip() != out[1].strip():
        paf.write_to_log(fname, 'The Installed Kernel Has Changed From ' + out[1].strip()
                         + ' To ' + out[0].strip(), config['log'])
        paf.prWarning('Your Installed Kernel Has Changed From ' + out[1].strip() + ' To '
                      + out[0].strip() + ' and a Reboot Is Needed!')

        if config['reboot'] is True:
            if paf.yn_frame('Do You Want To Schedule A Reboot In '
                            + str(config['reboot_offset']) + ' Minutes?') is True:
                os.system("shutdown -r $(date --date='" + str(config['reboot_offset'])
                          + " minute' +%H:%M)")
                paf.write_to_log(fname, 'User Scheduled A Reboot In '
                                 + str(config['reboot_offset']) + ' Minutes', config['log'])
            else:
                paf.write_to_log(fname, 'User Declined System Reboot', config['log'])
        else:
            paf.write_to_log(fname, 'A Reboot Is Needed For The Whole Downgrade To Take Effect!',
                             config['log'])
    else:
        paf.write_to_log(fname, 'The Kernel Hasn\'t Been Changed, A Reboot is Unnecessary',
                         config['log'])
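# The shell pipeline above extracts the installed kernel version from `file`
# and compares it to `uname -r`. The same check with structured subprocess
# calls (a sketch; the `file -bL` output format is assumed from the command
# above, and the regex may find no match on unusual setups):
def _kernel_versions_sketch():
    import glob
    import platform
    out = subprocess.run(['file', '-bL'] + glob.glob('/boot/vmlinuz*'),
                         capture_output=True, text=True).stdout
    match = re.search(r'version (\S+)', out)
    installed = match.group(1) if match else None
    running = platform.release()              # equivalent to `uname -r`
    return installed, running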
def remove_rp(config, num, nc):
    fname = 'user.remove_rp(' + str(num) + ')'
    rm_info = {
        'id': str(num).zfill(2),
        'type': 'rp',
        'TYPE': 'Restore Point',
        'meta': config['rp_paths'] + '/rp' + str(num).zfill(2) + '.meta',
        'meta_md5': config['rp_paths'] + '/.rp' + str(num).zfill(2) + '.md5',
        'path': config['rp_paths'] + '/rp' + str(num).zfill(2)
    }

    if nc is False:
        # Treat anything other than an explicit True (e.g. None) as a decline
        if paf.yn_frame('Are You Sure You Want to Remove This Restore Point?') is not True:
            return

    utils.remove_id(config, rm_info)
    paf.prSuccess('Restore Point Removed!')
    paf.write_to_log(fname, 'Removed Restore Point ' + str(num), config['log'])
def store(config, info):
    '''
    Packs up user defined directories.
    '''
    fname = 'custom_dirs.store()'
    paf.write_to_log(fname, str(len(info['dir_list'])) + ' Folders Selected For Storage',
                     config['log'])
    tmpfile = tempfile.gettempdir() + '/folder_permissions.pickle'

    # Fetch Folder Permissions and Pickle
    folder_perms = set()
    for d in info['dir_list']:
        folder_perms.update(paf.get_permissions(d, 'folders'))
    with open(tmpfile, 'wb') as pfile:
        pickle.dump(folder_perms, pfile)

    # Scan For Files
    files = paf.find_files(info['dir_list'])

    # Pack Custom Files Into Tar
    with tarfile.open(info['tar'], 'w') as tar:
        tar.add(tmpfile, arcname='folder_permissions.pickle')
        for f in track(files, description='Adding Files to Tar'):
            tar.add(f)
    paf.rm_file(tmpfile, sudo=False)
    paf.write_to_log(fname, 'Created ' + info['tar'], config['log'])

    # Create Checksum for Tar
    print('Creating Checksum...')
    pack_csum = paf.checksum_file(info['tar'])[1]
    paf.write_to_log(fname, 'Checksummed Tar', config['log'])

    # Compress Custom Tar
    print('Compressing Custom Tar...')
    if any(re.findall('pigz', line.lower()) for line in utils.pacman_Q()):
        os.system('/usr/bin/pigz ' + info['tar'] + ' -f')
    else:
        paf.gz_c(info['tar'], rm=True)
    paf.write_to_log(fname, 'Compressed ' + info['tar'], config['log'])

    pack_results = {
        'file_count': len(files),
        'raw_size': paf.convert_size(paf.size_of_files(files)),
        'compressed_size': paf.convert_size(os.path.getsize(info['tar'] + '.gz')),
        'csum': pack_csum
    }
    return pack_results
def cache_size(config):
    '''
    Calculates the size of cached packages: both the size reported by
    tools like du, and the real size with hardlinks counted only once.
    '''
    fname = 'utils.cache_size()'
    paf.write_to_log(fname, 'Started Calculating Cache Size...', config['log'])
    caches = find_cache_paths(config)
    pacman_cache = find_pkgs_in_dir(caches[0])
    user_cache = find_pkgs_in_dir(caches[1:-1])
    pacback_cache = find_pkgs_in_dir(caches[-1:])

    # Filter Out Hardlinked Duplicates by Tracking Inodes
    inodes = {os.lstat(x)[stat.ST_INO] for x in {*pacman_cache, *user_cache}}
    pacback_filter = set()
    for x in pacback_cache:
        i = os.lstat(x)[stat.ST_INO]
        if i not in inodes:
            pacback_filter.add(x)
            inodes.add(i)

    all_cache = {*pacman_cache, *user_cache, *pacback_cache}
    pkg_total = len(pacman_cache) + len(user_cache) + len(pacback_filter)

    # Calculate Size On Disk
    pacman_size = paf.convert_size(paf.size_of_files(pacman_cache))
    user_size = paf.convert_size(paf.size_of_files(user_cache))
    pacback_size = paf.convert_size(paf.size_of_files(pacback_filter))
    reported_size = paf.convert_size(paf.size_of_files(all_cache))

    paf.write_to_log(fname, 'Returning Cache Size', config['log'])
    return (str(pkg_total), pacman_size, user_size, pacback_size, reported_size)
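# The pacback_filter loop above is hardlink deduplication: two cache entries
# that hardlink the same file share an inode and should only be counted once.
# The idea in isolation (illustrative helper, not used elsewhere):
def _dedup_by_inode_sketch(paths):
    seen = set()
    unique = set()
    for p in paths:
        ino = os.lstat(p)[stat.ST_INO]
        if ino not in seen:
            seen.add(ino)
            unique.add(p)
    return unique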
def compare(config, target_version):
    '''
    Parses the versions and forks if an upgrade is needed.
    '''
    fname = 'version.compare()'

    # Current Version
    cv_M = int(config['version'].split('.')[0])
    cv_m = int(config['version'].split('.')[1])
    cv_p = int(config['version'].split('.')[2])

    # Target Version
    tv_M = int(target_version.split('.')[0])
    tv_m = int(target_version.split('.')[1])
    tv_p = int(target_version.split('.')[2])

    versions = ((cv_M, cv_m, cv_p), (tv_M, tv_m, tv_p))

    if config['version'] != target_version:
        paf.write_to_log(fname, 'Current Version ' + config['version']
                         + ' Mismatched With ' + target_version, config['log'])

        # Check for Versions Before V1.5
        if tv_M == 1 and tv_m < 5:
            paf.prError('Restore Points Generated Before V1.5.0 Are Not Backwards Compatible'
                        ' With Newer Versions of Pacback!')
            paf.write_to_log(fname, 'Detected a Restore Point Generated Before V1.5',
                             config['log'])
            session.abort_fail(fname, 'Can\'t Upgrade or Restore Versions Created Before V1.5',
                               'Aborting!', config)

        # Check for V1.5 to V1.7
        elif tv_M == 1 and tv_m >= 5:
            paf.write_to_log(fname, 'Detected Alpha Restore Point!', config['log'])

    else:
        paf.write_to_log(fname, 'Both Versions Match ' + config['version'], config['log'])

    return versions
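# The (major, minor, patch) tuples returned above compare lexicographically in
# Python, so callers can order versions directly; illustrative helper:
def _target_is_older_sketch(versions):
    current, target = versions
    # e.g. ((1, 7, 0), (1, 5, 2)) -> True, since (1, 5, 2) < (1, 7, 0)
    return target < current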
def restore_point(config, num, full_rp, dir_list, no_confirm, label):
    '''
    Assembles all the info for main() and stages the file system
    for the creation of a restore point. It is assumed that user
    input has been cleansed by this point.
    '''
    num = str(num).zfill(2)
    fname = 'create.restore_point(' + num + ')'
    paf.write_to_log(fname, 'Started Restore Point Creation...', config['log'])

    info = {
        'id': num,
        'type': 'rp',
        'TYPE': 'Restore Point',
        'stype': 'f' if full_rp is True else 'l',
        'STYPE': 'Full' if full_rp is True else 'Light',
        'nc': no_confirm,
        'label': str(label),
        'meta': config['rp_paths'] + '/rp' + num + '.meta',
        'meta_md5': config['rp_paths'] + '/.rp' + num + '.md5',
        'dir_list': dir_list,
        'path': config['rp_paths'] + '/rp' + num,
        'pkgcache': config['rp_paths'] + '/rp' + num + '/pkg-cache',
        'tar': config['rp_paths'] + '/rp' + num + '/rp' + num + '_dirs.tar'
    }

    # Check for Pre-Existing Restore Point
    if os.path.exists(info['meta']) or os.path.exists(info['path']):
        paf.prWarning('Restore Point #' + info['id'] + ' Already Exists!')
        if info['nc'] is False:
            # Anything Other Than an Explicit Yes Aborts the Overwrite
            if paf.yn_frame('Do You Want to Overwrite It?') is not True:
                session.abort(fname, 'User Aborted Overwrite of RP #' + info['id'],
                              'Aborting Creation!', config)
        utils.remove_id(config, info)

    # Create Restore Point After Checks
    paf.write_to_log(fname, 'All Checks Passed! Handing Off to create.main()', config['log'])
    paf.prBold('Building ' + info['STYPE'] + ' ' + info['TYPE'] + ' ' + info['id'] + '...')
    main(config, info)

    # Finish After Successful Creation
    paf.write_to_log(fname, 'Restore Point Creation Complete!', config['log'])
    paf.prBold('Restore Point Creation Complete!')
def lock(config):
    '''
    This checks if pacback is being run by root or sudo, then checks
    if an active session is already in progress.
    '''
    fname = 'session.lock()'
    if paf.am_i_root() is False:
        sys.exit('Critical Error: This Command Must Be Run As Root!')

    try:
        lock = os.open(config['slock'], os.O_CREAT)
        fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
        paf.start_log(fname, config['log'])
        paf.write_to_log(fname, 'Passed Root Check', config['log'])
        paf.write_to_log(fname, 'Started Active Session', config['log'])
        if os.path.exists('/etc/pacback.conf') is False:
            paf.write_to_log(fname, 'User Config File Is Missing!', config['log'])
    except (IOError, OSError):
        sys.exit('Critical Error! Pacback Already Has An Active Session Running.')
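# The session lock works because fcntl.flock() with LOCK_EX | LOCK_NB raises
# OSError immediately if another process holds the lock, instead of blocking.
# The mechanism in isolation (hypothetical lock path):
def _try_flock_sketch(path='/tmp/example.lock'):
    fd = os.open(path, os.O_CREAT)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return fd    # caller must keep fd open for the lock's lifetime
    except OSError:
        os.close(fd)
        return None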
def compare_files(config, dir_list, unpack_path, p_len):
    '''
    Compares unpacked custom user files against the current system.
    Returns a dict of added, removed and changed files on the system.
    '''
    fname = 'custom_dirs.compare_files()'

    # Core Compare Results
    diff_added = set()
    diff_removed = set()
    diff_large = set()
    diff_noread = set()
    diff_changed = set()

    # Compare Checksums For Files That Exist
    paf.write_to_log(fname, 'Started Sorting and Comparing Files...', config['log'])

    # Search Directories
    unpack_files = paf.find_files(unpack_path)
    current_files = paf.find_files(dir_list)

    # Find Added Files and Remove From Csum Queue
    diff_added.update(current_files - {f[p_len:] for f in unpack_files})
    current_files.difference_update(diff_added)

    # Find Removed Files and Trim From Csum Queue
    diff_removed.update(unpack_files - {unpack_path + f for f in current_files})
    unpack_files.difference_update(diff_removed)
    try:
        diff_removed.remove(unpack_path + '/folder_permissions.pickle')
    except KeyError:
        paf.write_to_log(fname, 'Error: Couldn\'t Find Permission Pickle.', config['log'])

    # Only Checksum Files That Exist in Both Current AND Unpack
    paf.write_to_log(fname, 'Started Checksumming Custom Files...', config['log'])
    unpack_csum = paf.checksum_files(unpack_files, output='Checksumming Stored Files')
    current_csum = paf.checksum_files(current_files, output='Checksumming Current Files')
    paf.write_to_log(fname, 'Finished Checksumming Custom Files', config['log'])

    # Find Exceptions and Trim (iterate over snapshots so removal is safe)
    for csum in list(unpack_csum):
        if csum[1] == 'TOO LARGE!':
            diff_large.add(csum)
            unpack_csum.remove(csum)
            paf.write_to_log(fname, csum[0] + ' Was Too Large To Checksum!', config['log'])
        elif csum[1] == 'UNREADABLE!':
            diff_noread.add(csum)
            unpack_csum.remove(csum)
            paf.write_to_log(fname, csum[0] + ' Was Unreadable!', config['log'])

    for csum in list(current_csum):
        if csum[1] == 'TOO LARGE!':
            diff_large.add(csum)
            current_csum.remove(csum)
            paf.write_to_log(fname, csum[0] + ' Was Too Large To Checksum!', config['log'])
        elif csum[1] == 'UNREADABLE!':
            diff_noread.add(csum)
            current_csum.remove(csum)
            paf.write_to_log(fname, csum[0] + ' Was Unreadable!', config['log'])

    # Find Changed Files
    diff_changed.update(current_csum - {(tpl[0][p_len:], tpl[1]) for tpl in unpack_csum})
    paf.write_to_log(fname, 'Finished Comparing and Sorting Files', config['log'])

    compare_results = {
        'added': diff_added,
        'removed': diff_removed,
        'changed': diff_changed,
        'large': diff_large,
        'noread': diff_noread
    }
    return compare_results
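# The added/removed logic above is plain set arithmetic once the unpack-path
# prefix is stripped. With hypothetical paths:
def _diff_example_sketch():
    unpack = '/tmp/unpack'
    plen = len(unpack)
    stored = {'/tmp/unpack/etc/a.conf', '/tmp/unpack/etc/b.conf'}
    current = {'/etc/a.conf', '/etc/c.conf'}
    added = current - {f[plen:] for f in stored}       # {'/etc/c.conf'}
    removed = stored - {unpack + f for f in current}   # {'/tmp/unpack/etc/b.conf'}
    return added, removed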
def archive_date(config, date):
    '''
    This function simply automates the date rollback instructions
    found on the Arch Wiki.
    https://wiki.archlinux.org/index.php/Arch_Linux_Archive#How_to_restore_all_packages_to_a_specific_date
    '''
    # Startup
    fname = 'restore.archive_date(' + str(date) + ')'
    mirror = '/etc/pacman.d/mirrorlist'

    # Done as a Fail Safe
    if len(paf.read_file(mirror)) > 2:
        os.system('mv ' + mirror + ' ' + mirror + '.pacback')
        paf.write_to_log(fname, 'Backed Up Existing Mirrorlist', config['log'])
    else:
        paf.write_to_log(fname, 'Skipped Mirrorlist Backup. File Seems Malformed!', config['log'])

    paf.export_iterable(mirror, ['## Set By Pacback',
                                 'Server=https://archive.archlinux.org/repos/'
                                 + date + '/$repo/os/$arch'])
    paf.write_to_log(fname, 'Added ' + date + ' Archive URL To Mirrorlist', config['log'])

    # Run Pacman Update to Run Downgrade
    os.system('/usr/bin/pacman -Syyuu')
    paf.write_to_log(fname, 'Sent -Syyuu to Pacman', config['log'])

    # Restore the Non-Archive URL Mirrorlist
    if os.path.exists(mirror + '.pacback') is False:
        paf.write_to_log(fname, 'Backup Mirrorlist Is Missing', config['log'])
        if paf.yn_frame('Missing Mirrorlist! Do You Want to Fetch a New HTTPS Mirrorlist?') is True:
            if utils.fetch_new_mirrorlist() is True:
                paf.write_to_log(fname, 'A New Mirrorlist Was Successfully Downloaded',
                                 config['log'])
            else:
                session.abort_fail(fname, 'User Declined Country Selection!',
                                   'Please Manually Replace Your Mirrorlist!', config)
        else:
            session.abort_fail(fname, 'Backup Mirrorlist Is Missing and User Declined Download!',
                               'Please Manually Replace Your Mirrorlist!', config)
    else:
        os.system('mv ' + mirror + '.pacback ' + mirror)
        paf.write_to_log(fname, 'Backup Mirrorlist Was Restored Successfully', config['log'])
        print('Refreshing Pacman Database...')
        os.system('/usr/bin/pacman -Sy > /dev/null')
        paf.write_to_log(fname, 'Updated Pacman Database After Restoring Mirrorlist',
                         config['log'])
def packages(config, pkgs):
    '''
    Allows the user to rollback packages by name.
    Packages are not sent to pacman until the user has selected
    all the packages they want to restore/change.
    '''
    # Startup
    fname = 'restore.packages(' + str(len(pkgs)) + ')'
    pkg_paths = list()
    cache = utils.scan_caches(config)

    # Search For Each Package Name And Let User Select Version
    paf.write_to_log(fname, 'Started Search for ' + ', '.join(pkgs), config['log'])
    for pkg in pkgs:
        found_pkgs = utils.user_pkg_search(pkg, cache)
        sort_pkgs = sorted(found_pkgs, reverse=True)

        if found_pkgs:
            paf.write_to_log(fname, 'Found ' + str(len(found_pkgs)) + ' Cached Versions for `'
                             + pkg + '`', config['log'])
            paf.prSuccess('Pacback Found the Following Versions for `' + pkg + '`:')
            answer = paf.multi_choice_frame(sort_pkgs)

            # Lets User Abort Package Selection
            if answer is False or answer is None:
                paf.write_to_log(fname, 'User Selected NOTHING For ' + pkg, config['log'])
            else:
                for x in cache:
                    if re.findall(re.escape(answer), x):
                        pkg_paths.append(x)
                        break
        else:
            paf.prError('No Packages Found Under the Name: ' + pkg)
            paf.write_to_log(fname, 'Search for ' + pkg.upper() + ' Returned ZERO Results!',
                             config['log'])

    if pkg_paths:
        paf.pacman(' '.join(pkg_paths), '-U')
        paf.write_to_log(fname, 'Sent Pacman Selected Packages For Installation', config['log'])
    else:
        paf.write_to_log(fname, 'User Selected No Packages or No Packages Were Found',
                         config['log'])
def smart_overwrite(config, csum_results, unpack_path, p_len):
    '''
    Main File Restoration Logic
    '''
    fname = 'custom_dirs.smart_overwrite()'

    if csum_results['changed']:
        paf.write_to_log(fname, 'Found ' + str(len(csum_results['changed'])) + ' Changed Files',
                         config['log'])
        print('')
        print('#################################')
        paf.prWarning('The Following Files Have Changed:')
        print('#################################')
        print('')
        for f in list(csum_results['changed']):
            paf.prChanged(f[0])
        print('')

        if paf.yn_frame('Do You Want to Restore ' + str(len(csum_results['changed']))
                        + ' Files That Have Been CHANGED?') is True:
            for f in track(csum_results['changed'], description='Restoring Changed Files'):
                shutil.move(unpack_path + f[0], f[0])
            paf.write_to_log(fname, 'Restored Changed Files', config['log'])
        else:
            paf.write_to_log(fname, 'User Declined Restoring Changed Files', config['log'])

    if csum_results['removed']:
        paf.write_to_log(fname, 'Found ' + str(len(csum_results['removed'])) + ' Removed Files',
                         config['log'])
        print('')
        print('######################################')
        paf.prWarning('The Following Files Have Been Removed:')
        print('######################################')
        print('')
        for f in list(csum_results['removed']):
            paf.prRemoved(f[p_len:])
        print('')

        if paf.yn_frame('Do You Want to Restore ' + str(len(csum_results['removed']))
                        + ' Files That Have Been REMOVED?') is True:
            make_missing_dirs(config, unpack_path, p_len)
            for f in track(csum_results['removed'], description='Restoring Removed Files'):
                shutil.move(f, f[p_len:])
            paf.write_to_log(fname, 'Restored Removed Files', config['log'])
        else:
            paf.write_to_log(fname, 'User Declined Restoring Removed Files', config['log'])

    if csum_results['added']:
        paf.write_to_log(fname, 'Found ' + str(len(csum_results['added'])) + ' New Files',
                         config['log'])
        print('')
        print('####################################')
        paf.prWarning('The Following Files Have Been Added:')
        print('####################################')
        print('')
        for f in list(csum_results['added']):
            paf.prAdded(f)
        print('')

        if paf.yn_frame('Do You Want to Remove ' + str(len(csum_results['added']))
                        + ' Files That Have Been ADDED?') is True:
            for f in track(csum_results['added'], description='Removing New Files'):
                os.remove(f)
            paf.write_to_log(fname, 'Removed New Files', config['log'])
        else:
            paf.write_to_log(fname, 'User Declined Removing New Files', config['log'])

    paf.prSuccess('Done Restoring Files!')
    paf.write_to_log(fname, 'Done Restoring Files', config['log'])
def main(config, parms, pkg_results):
    '''
    This is the main restore logic for pacback. It should NOT be called
    directly, but instead called through a higher level 'API' like call.
    This logic does the actual work of downgrading, removing, and
    installing packages.
    '''
    fname = 'restore.main(' + parms['type'] + parms['id'] + ')'

    # Branch if Packages Have Been Changed or Removed
    if pkg_results['search']:
        cache = utils.scan_caches(config)
        found_pkgs = utils.search_cache(pkg_results['search'], cache, config)

        # This is Very Bad
        if len(found_pkgs) > len(pkg_results['search']):
            paf.prError('Error: Somehow More Packages Were Found Than Were Searched For!')
            paf.write_to_log(fname,
                             'Error: Somehow More Packages Were Found Than Were Searched For!',
                             config['log'])
            print('Starting Error Resolving Process...')

            error_handler_results = error.too_many_pkgs_found(config, parms,
                                                              found_pkgs, pkg_results)
            if error_handler_results[0] is True:
                paf.prSuccess('Pacback Was Able To Automatically Resolve This Error!')
                found_pkgs = error_handler_results[1]
            else:
                paf.prError('Pacback Was NOT Able To Automatically Resolve This Error!')
                error.create_error_report()

        # Branch if Packages are Missing
        elif len(found_pkgs) < len(pkg_results['search']):
            missing_pkg = set(pkg_results['search'] - utils.trim_pkg_list(found_pkgs))
            paf.write_to_log(fname, str(len(found_pkgs)) + ' Out of '
                             + str(len(pkg_results['search'])) + ' Packages Found',
                             config['log'])

            paf.prWarning('Couldn\'t Find The Following Package Versions:')
            for pkg in missing_pkg:
                paf.prError(pkg)
            if paf.yn_frame('Do You Want To Continue Anyway?') is False:
                session.abort_fail(fname, 'User Aborted Rollback Because of Missing Packages',
                                   'Aborting Rollback!', config)

        # This is the Best Case
        else:
            paf.prSuccess('All Packages Found In Your Local File System!')
            paf.write_to_log(fname, 'Found All Changed and Removed Packages', config['log'])

        paf.pacman(' '.join(found_pkgs), '-U')
        paf.write_to_log(fname, 'Sent Pacman Selected Packages', config['log'])

    else:
        paf.prSuccess('No Packages Have Been Changed or Removed!')
        paf.write_to_log(fname, 'No Packages Have Been Changed or Removed', config['log'])

    # Branch if Packages Have Been Added
    if pkg_results['a_pkgs']:
        print('')
        paf.write_to_log(fname, str(len(pkg_results['a_pkgs']))
                         + ' Have Been Added Since Creation', config['log'])
        paf.prWarning(str(len(pkg_results['a_pkgs']))
                      + ' Packages Have Been Added Since Creation')

        for pkg in pkg_results['a_pkgs']:
            paf.prAdded(pkg)
        print('')
        if paf.yn_frame('Do You Want to Remove These Packages From Your System?') is True:
            print('')
            paf.pacman(' '.join(pkg_results['a_pkgs']), '-R')
            paf.write_to_log(fname, 'Sent Added Packages To `pacman -R`', config['log'])
    else:
        paf.prSuccess('No Packages Have Been Added!')
        paf.write_to_log(fname, 'No Packages Have Been Added', config['log'])
def snapshot(config, id_num):
    '''
    This handles the process of restoring snapshots. This is pretty much
    the same as a standard restore point but requires post-processing
    after the restoration to maintain the order of changes made to
    the system.
    '''
    id_num = str(id_num).zfill(2)
    fname = 'restore.snapshot(' + id_num + ')'
    paf.write_to_log(fname, 'Started Restoring Snapshot ID:' + id_num, config['log'])
    info = {
        'id': id_num,
        'type': 'ss',
        'TYPE': 'Snapshot',
        'meta': config['ss_paths'] + '/ss' + id_num + '.meta',
        'meta_md5': config['ss_paths'] + '/.ss' + id_num + '.md5',
        'path': config['ss_paths'] + '/ss' + id_num,
        'pkgcache': config['ss_paths'] + '/ss' + id_num + '/pkg-cache'
    }

    # Read Meta Data File, Check Version, Compare Results, Restore
    meta.validate(config, info)
    ss_dict = meta.read(config, info['meta'])
    version.compare(config, ss_dict['version'])
    main(config, info, meta.compare_now(config, ss_dict))

    # Resets Order So The Restored Version is Zero
    paf.write_to_log(fname, 'Started Rewinding Snapshots Back to Zero', config['log'])

    # Removes Snapshots From Zero to Restored Snapshot ID
    for n in range(0, int(info['id'])):
        rm_info = {
            'id': str(n).zfill(2),
            'type': 'ss',
            'TYPE': 'Snapshot',
            'meta': config['ss_paths'] + '/ss' + str(n).zfill(2) + '.meta',
            'meta_md5': config['ss_paths'] + '/.ss' + str(n).zfill(2) + '.md5'
        }
        utils.remove_id(config, rm_info)

    # Shifts Snapshots Back, So the Restored Snapshot Is the New Zero
    id_counter = 0
    for n in range(int(info['id']), (config['max_ss'] + 1)):
        meta_path_old = config['ss_paths'] + '/ss' + str(n).zfill(2) + '.meta'
        meta_path_new = config['ss_paths'] + '/ss' + str(id_counter).zfill(2) + '.meta'
        hash_path_old = config['ss_paths'] + '/.ss' + str(n).zfill(2) + '.md5'
        hash_path_new = config['ss_paths'] + '/.ss' + str(id_counter).zfill(2) + '.md5'
        meta_found = os.path.exists(meta_path_old)
        csum_found = os.path.exists(hash_path_old)

        if meta_found and csum_found:
            os.rename(meta_path_old, meta_path_new)
            os.rename(hash_path_old, hash_path_new)
            id_counter += 1
        elif meta_found and not csum_found:
            paf.write_to_log(fname, 'Snapshot ' + str(n).zfill(2)
                             + ' is Missing Its Checksum File!', config['log'])
            paf.rm_file(meta_path_old, sudo=False)
            paf.write_to_log(fname, 'Removed Snapshot ID:' + str(n).zfill(2), config['log'])
        elif not meta_found and csum_found:
            paf.write_to_log(fname, hash_path_old + ' is an Orphaned Checksum', config['log'])
            paf.rm_file(hash_path_old, sudo=False)
            paf.write_to_log(fname, 'Removed Orphaned Checksum', config['log'])

    paf.write_to_log(fname, 'Finished Rewinding Snapshots Back to Zero', config['log'])

    # Finish Last Checks and Exit
    utils.reboot_check(config)
    paf.write_to_log(fname, 'Finished Restoring Snapshot ID:' + id_num, config['log'])
def restore(config, info, dir_list, checksum):
    '''
    This is the main 'api' entrance point for file restoration.
    This function orchestrates the process, handing off work to other funcs.
    '''
    fname = 'custom_dirs.restore()'
    unpack_path = info['tar'][:-4]
    p_len = len(unpack_path)
    paf.write_to_log(fname, 'Started Custom File Restoration...', config['log'])

    # Decompress Tar
    if os.path.exists(info['tar.gz']):
        paf.prWarning('Decompressing Custom Tar....')
        if any(re.findall('pigz', line.lower()) for line in utils.pacman_Q()):
            os.system('/usr/bin/pigz -d ' + info['tar.gz'] + ' -f')
            paf.write_to_log(fname, 'Decompressed Tar With Pigz', config['log'])
        else:
            paf.gz_d(info['tar.gz'])
            paf.write_to_log(fname, 'Decompressed Tar With Python', config['log'])

    # Check Tar Csum And Unpack
    if os.path.exists(info['tar']):
        # Checksum Tar
        print('Checking Integrity of Tar...')
        tar_csum = paf.checksum_file(info['tar'])[1]
        paf.write_to_log(fname, 'Checksummed Tar', config['log'])

        if tar_csum == checksum:
            paf.write_to_log(fname, 'Tar Passed Checksum Integrity Check', config['log'])
            paf.prSuccess('Tar Passed Integrity Check')
        else:
            paf.write_to_log(fname, 'Custom Tar Failed Integrity Check!', config['log'])
            paf.prError('Custom Tar Failed Integrity Check!')
            paf.prBold('Skipping Custom File Restoration!')
            return

        # Clean Then Unpack Tar
        paf.prWarning('Unpacking Files from Tar....')
        paf.rm_dir(unpack_path, sudo=True)
        paf.untar_dir(info['tar'])
        paf.write_to_log(fname, 'Unpacked Custom Files From Tar', config['log'])
    else:
        # Skip If Tar is Missing
        paf.write_to_log(fname, 'Meta Data File Specifies a Tar That is Now Missing!',
                         config['log'])
        paf.prError('This Restore Point is Missing Its Custom Tar!')
        return

    if paf.yn_frame('Do You Want to Compare Restore Point Files Against'
                    ' Your Current File System?') is True:
        results = compare_files(config, dir_list, unpack_path, p_len)
        # Exit If No Changes Made to Files
        if len(results['added']) + len(results['removed']) + len(results['changed']) == 0:
            paf.write_to_log(fname, 'Checksum Returned 0 Changed, Removed or Added Files',
                             config['log'])
            paf.prSuccess('No Changes Have Been Made to Your File System!')
        else:
            smart_overwrite(config, results, unpack_path, p_len)
    else:
        force_overwrite(config, unpack_path, p_len)

    # Cleanup After Runtime
    repack(config, info, unpack_path)