# Example #1
def pacman_hook(install, config):
    '''
    Installs or removes a standard alpm hook in /usr/share/libalpm/hooks/
    which runs as a PreTransaction hook during every pacman transaction.

    Args:
        install: True installs the hook, False removes it.
        config: pacback config dict; only 'log' is read here.
    '''
    # PEP 8: test the bool directly instead of `is True` / `is False`;
    # this also removes the silent no-op path for unexpected values.
    if install:
        fname = 'utils.pacman_hook(install)'
        paf.write_to_log(fname, 'Starting Hook Installation...', config['log'])

        # Contents of pacback.hook, one list entry per line.
        hook = [
            '[Trigger]', 'Operation = Install', 'Operation = Remove',
            'Operation = Upgrade', 'Type = Package', 'Target = *', '',
            '[Action]', 'Description = Pre-Upgrade Pacback Hook',
            'Depends = pacman', 'When = PreTransaction',
            'Exec = /usr/bin/pacback --hook'
        ]

        paf.export_iterable('/usr/share/libalpm/hooks/pacback.hook', hook)
        paf.prSuccess('Pacback Hook is Now Installed!')
        paf.write_to_log(fname, 'Installed Pacback PreTransaction Hook',
                         config['log'])

    else:
        fname = 'utils.pacman_hook(remove)'
        paf.write_to_log(fname, 'Starting Hook Removal...', config['log'])

        paf.rm_file('/usr/share/libalpm/hooks/pacback.hook', sudo=False)
        paf.write_to_log(fname, 'Removed Pacback PreTransaction Hook',
                         config['log'])
        paf.prSuccess('Pacback Hook Was Removed!')
# Example #2
def hlock_kill(config):
    '''
    Force-removes the hook lock file without performing any checks.
    Currently unused anywhere; kept as future-proofing.
    '''
    paf.rm_file(config['hlock'], sudo=False)
    paf.write_to_log('session.hlock_kill()', 'Force Ended Hook Lock!',
                     config['log'])
# Example #3
def remove_id(config, info):
    '''Delete the metadata file, its checksum, and (for restore points
    only) the backing data directory of a single id.'''
    fname = f"utils.remove_id({info['type']}{info['id']})"

    # Metadata and checksum go for every id type.
    for target in (info['meta'], info['meta_md5']):
        paf.rm_file(target, sudo=False)
    # Only restore points ('rp') own an on-disk directory.
    if info['type'] == 'rp':
        paf.rm_dir(info['path'], sudo=False)

    paf.write_to_log(fname, 'Removal Complete', config['log'])
# Example #4
def store(config, info):
    '''
    Packs up user defined directories into a tar archive.

    Folder permissions are pickled and shipped inside the tar as
    'folder_permissions.pickle', the tar is checksummed, then compressed
    with pigz when installed (parallel gzip), otherwise with paf.gz_c.

    Args:
        config: pacback config dict; only 'log' is read here.
        info: dict with 'dir_list' (folders to pack) and 'tar' (output path).

    Returns:
        dict with 'file_count', 'raw_size', 'compressed_size' and 'csum'.
    '''
    fname = 'custom_dirs.pack()'
    paf.write_to_log(
        fname,
        str(len(info['dir_list'])) + ' Folders Selected For Storage',
        config['log'])
    tmpfile = tempfile.gettempdir() + '/folder_permissions.pickle'

    # Fetch Folder Permissions and Pickle
    folder_perms = set()
    for d in info['dir_list']:
        folder_perms.update(paf.get_permissions(d, 'folders'))
    # Context manager ensures the pickle is flushed and closed before the
    # tar reads it (the previous bare open() leaked the file handle).
    with open(tmpfile, 'wb') as perms_file:
        pickle.dump(folder_perms, perms_file)

    # Scan For Files
    files = paf.find_files(info['dir_list'])

    # Pack Custom Files Into Tar
    with tarfile.open(info['tar'], 'w') as tar:
        tar.add(tmpfile, arcname='folder_permissions.pickle')
        for f in track(files, description='Adding Files to Tar'):
            tar.add(f)
    paf.rm_file(tmpfile, sudo=False)

    paf.write_to_log(fname, 'Created ' + info['tar'], config['log'])

    # Create Checksum for Tar
    print('Creating Checksum...')
    pack_csum = paf.checksum_file(info['tar'])[1]
    paf.write_to_log(fname, 'Checksummed Tar ', config['log'])

    # Compresses Custom Tar
    print('Compressing Custom Tar...')
    # Prefer pigz (parallel gzip) when the package shows up in pacman -Q.
    if any('pigz' in l.lower() for l in utils.pacman_Q()):
        os.system('/usr/bin/pigz ' + info['tar'] + ' -f')
    else:
        paf.gz_c(info['tar'], rm=True)
    paf.write_to_log(fname, 'Compressed ' + info['tar'], config['log'])

    pack_results = {
        'file_count': len(files),
        'raw_size': paf.convert_size(paf.size_of_files(files)),
        'compressed_size':
        paf.convert_size(os.path.getsize(info['tar'] + '.gz')),
        'csum': pack_csum
    }

    return pack_results
# Example #5
def snapshot(config, id_num):
    '''
    This handles the process of restoring snapshots. This is pretty much the same as a
    standard restore point but requires post-processing after the restoration to maintain
    the order of changes made to the system.

    Args:
        config: pacback config dict; reads 'ss_paths', 'max_ss' and 'log'.
        id_num: snapshot id to restore; coerced to a 2-digit string.
    '''
    id_num = str(id_num).zfill(2)
    fname = 'restore.snapshot(' + id_num + ')'
    paf.write_to_log(fname, 'Started Restoring Snapshot ID:' + id_num,
                     config['log'])

    # Paths for this snapshot: visible .meta file, hidden .md5 checksum,
    # the snapshot directory and its package cache.
    info = {
        'id': id_num,
        'type': 'ss',
        'TYPE': 'Snapshot',
        'meta': config['ss_paths'] + '/ss' + id_num + '.meta',
        'meta_md5': config['ss_paths'] + '/.ss' + id_num + '.md5',
        'path': config['ss_paths'] + '/ss' + id_num,
        'pkgcache': config['ss_paths'] + '/ss' + id_num + '/pkg-cache'
    }

    # Read Meta Data File, Check Version, Compare Results, Restore
    meta.validate(config, info)
    ss_dict = meta.read(config, info['meta'])
    version.compare(config, ss_dict['version'])
    main(config, info, meta.compare_now(config, ss_dict))

    # Resets Order So The Restored Version is Zero
    paf.write_to_log(fname, 'Started Rewinding Snapshots Back to Zero',
                     config['log'])

    # Removes Snapshots From Zero to Restored Snapshot ID
    # (everything newer than the restored snapshot is now invalid).
    for n in range(0, int(info['id'])):
        rm_info = {
            'id': str(n).zfill(2),
            'type': 'ss',
            'TYPE': 'Snapshot',
            'meta': config['ss_paths'] + '/ss' + str(n).zfill(2) + '.meta',
            'meta_md5': config['ss_paths'] + '/.ss' + str(n).zfill(2) + '.md5'
        }
        utils.remove_id(config, rm_info)

    # Shifts Snapshots Back, So Now Retored Snapshot Is New Zero
    # id_counter tracks the next free slot; gaps (missing/partial
    # snapshots) are compacted away rather than preserved.
    id_counter = 0
    for n in range(int(info['id']), (config['max_ss'] + 1)):
        meta_path_old = config['ss_paths'] + '/ss' + str(n).zfill(2) + '.meta'
        meta_path_new = config['ss_paths'] + '/ss' + str(id_counter).zfill(
            2) + '.meta'
        hash_path_old = config['ss_paths'] + '/.ss' + str(n).zfill(2) + '.md5'
        hash_path_new = config['ss_paths'] + '/.ss' + str(id_counter).zfill(
            2) + '.md5'
        meta_found = os.path.exists(meta_path_old)
        csum_found = os.path.exists(hash_path_old)

        if meta_found and csum_found:
            # Complete snapshot: slide both files into the next free slot.
            os.rename(meta_path_old, meta_path_new)
            os.rename(hash_path_old, hash_path_new)
            id_counter += 1
        elif meta_found and not csum_found:
            # Meta without checksum cannot be trusted: drop it.
            paf.write_to_log(
                fname, 'Snapshot ' + str(n).zfill(2) +
                ' is Missing it\'s Checksum File!', config['log'])
            paf.rm_file(meta_path_old, sudo=False)
            paf.write_to_log(fname, 'Removed Snapshot ID:' + str(n).zfill(2),
                             config['log'])
        elif not meta_found and csum_found:
            # Checksum without meta is an orphan: clean it up.
            paf.write_to_log(fname, hash_path_old + ' is an Orphaned Checksum',
                             config['log'])
            paf.rm_file(hash_path_old, sudo=False)
            paf.write_to_log(fname, 'Removed Orphaned Checksum', config['log'])
        else:
            # Neither file exists for this id: nothing to do.
            pass

    paf.write_to_log(fname, 'Finished Rewinding Snapshots Back to Zero',
                     config['log'])

    # Finish Last Checks and Exit
    utils.reboot_check(config)
    paf.write_to_log(fname, 'Finished Restoring Snapshot ID:' + id_num,
                     config['log'])
# Example #6
def snapshot(config, label):
    '''
    Assembles all the info for main() and stages the file system for the creation
    of a new snapshot with id='00'. This is only called by `--hook`.

    Args:
        config: pacback config dict; reads 'ss_paths', 'max_ss' and 'log'.
        label: user-supplied label stored with the snapshot.
    '''
    num = '00'
    fname = 'create.snapshot(' + num + ')'
    paf.write_to_log(fname, 'Started Snapshot Creation...', config['log'])
    session.hlock_check(config)

    # Paths for the new snapshot: visible .meta, hidden .md5 checksum,
    # snapshot dir, package cache and custom-dir tar.
    info = {
        'id': num,
        'type': 'ss',
        'TYPE': 'Snapshot',
        'stype': 'l',
        'STYPE': 'Light',
        'nc': True,
        'label': str(label),
        'meta': config['ss_paths'] + '/ss' + num + '.meta',
        'meta_md5': config['ss_paths'] + '/.ss' + num + '.md5',
        'dir_list': [],
        'path': config['ss_paths'] + '/ss' + num,
        'pkgcache': config['ss_paths'] + '/ss' + num + '/pkg-cache',
        'tar': config['ss_paths'] + '/ss' + num + '/ss' + num + '_dirs.tar'
    }

    # Shift Snapshots Forward So This Becomes Zero
    if os.path.exists(config['ss_paths'] + '/ss00.meta'):
        paf.write_to_log(fname, 'Shifting All Snapshots Forward +1...',
                         config['log'])

        # Remove the Last Snapshot
        paf.rm_file(config['ss_paths'] + '/ss' +
                    str(config['max_ss']).zfill(2) + '.meta',
                    sudo=False)
        # BUGFIX: checksum files are hidden ('/.ssNN.md5', see info and
        # the rename loop below); the old '/ssNN.md5' path never matched,
        # so the oldest snapshot's checksum was never removed.
        paf.rm_file(config['ss_paths'] + '/.ss' +
                    str(config['max_ss']).zfill(2) + '.md5',
                    sudo=False)

        # Moves Each Snapshot Forward +1 and Cleans on Exceptions
        for n in range((config['max_ss'] - 1), -1, -1):
            meta_path_old = config['ss_paths'] + '/ss' + str(n).zfill(
                2) + '.meta'
            meta_path_new = config['ss_paths'] + '/ss' + str(n + 1).zfill(
                2) + '.meta'
            hash_path_old = config['ss_paths'] + '/.ss' + str(n).zfill(
                2) + '.md5'
            hash_path_new = config['ss_paths'] + '/.ss' + str(n + 1).zfill(
                2) + '.md5'
            meta_found = os.path.exists(meta_path_old)
            csum_found = os.path.exists(hash_path_old)

            if meta_found and csum_found:
                # Complete snapshot: shift both files forward one slot.
                os.rename(meta_path_old, meta_path_new)
                os.rename(hash_path_old, hash_path_new)

            elif meta_found and not csum_found:
                # Meta without checksum cannot be trusted: drop it.
                paf.write_to_log(
                    fname, 'Snapshot ' + str(n).zfill(2) +
                    ' is Missing it\'s Checksum File!', config['log'])
                paf.rm_file(meta_path_old, sudo=False)
                paf.write_to_log(fname,
                                 'Removed Snapshot ID:' + str(n).zfill(2),
                                 config['log'])

            elif not meta_found and csum_found:
                # Checksum without meta is an orphan: clean it up.
                paf.write_to_log(fname,
                                 hash_path_old + ' is an Orphaned Checksum',
                                 config['log'])
                paf.rm_file(hash_path_old, sudo=False)
                paf.write_to_log(fname, 'Removed Orphaned Checksum',
                                 config['log'])

            else:
                # Neither file present for this id: nothing to shift.
                pass

        paf.write_to_log(fname, 'Finished Shifting Snapshots Forward',
                         config['log'])

    else:
        paf.write_to_log(
            fname,
            'Snapshot ID:00 Was Not Found, Shift Forward is Unnecessary.',
            config['log'])

    # Creates Snapshot After Pre-Transaction Work and Checks
    paf.write_to_log(fname, 'All Checks Passed! Ready For Snapshot Creation',
                     config['log'])
    paf.prBold('Creating Snapshot...')
    main(config, info)

    # Prevents Back-to-Back Snapshots(Especially During AUR Upgrades)
    session.hlock_start(config)
    paf.write_to_log(fname, 'Snapshot Creation Complete!', config['log'])
    paf.prBold('Snapshot Creation Complete!')