Example #1
0
def generate(backup_type, storages, path_to_file):
    """ The function generate a configuration file job.

    """

    backup_type = backup_type[0]
    path_to_file = path_to_file[0]

    template_path = f'{TEMPLATES_DIR}/backup_type/{backup_type}.conf'

    if path_to_file.startswith('/'):
        general_function.create_dirs(
            job_name=backup_type,
            dirs_pairs={os.path.dirname(path_to_file): ''})

    general_function.copy_ofs(template_path, path_to_file)

    try:
        fd = open(path_to_file, 'a')
    except (OSError, PermissionError, FileNotFoundError) as e:
        message_info = f"Couldn't open file {path_to_file}: {e}!"
        general_function.print_info(message_info)
        sys.exit(1)

    if backup_type in config.supported_db_backup_type:
        job_type = 'databases'
    elif backup_type in config.supported_file_backup_type:
        job_type = 'files'
    else:
        job_type = 'external'

    for storage in storages:
        storage_template_path = f'{TEMPLATES_DIR}/storages/{storage}.conf'

        with open(storage_template_path, 'r', encoding='utf-8') as f:
            str_storage = f.read()

        str_storage = str_storage.replace('backup_type', backup_type)
        str_storage = str_storage.replace('job_type', job_type)

        if backup_type == 'inc_files':
            str_storage = str_storage.replace('inc_files/dump', 'inc')
            str_storage = re.sub(
                r"[ ]*store:[\s]*days: ''[\s]*weeks: ''[\s]*month: ''[\s]*",
                '', str_storage)

        if backup_type == 'desc_files':
            str_storage = str_storage.replace('desc_files/dump', 'desc/dump')

        if backup_type == 'external':
            str_storage = str_storage.replace('external/dump', 'dump')

        fd.write(str_storage)

    fd.close()

    os.chmod(path_to_file, 0o600)

    general_function.print_info(
        f"Successfully generated '{path_to_file}' configuration file!")
Example #2
0
def mount(current_storage_data):
    """ A function that is responsible for directly mounting a particular storage.
    The input receives a dictionary containing the necessary data for connecting storage.

    """

    try:
        (data_mount, pre_mount) = get_mount_data(current_storage_data)
    except MountError as e:
        raise general_function.MyError(f"{e}")

    if not data_mount:
        # if local storage
        return
    else:
        type_storage = data_mount.get('type_storage')
        packets = data_mount.get('packets')
        check_cmd = data_mount.get('check_cmd')
        mount_cmd = data_mount.get('mount_cmd')

        for i in packets:
            if i:
                check_packet = general_function.exec_cmd(f"{check_cmd} {i}")
                stdout_check = check_packet['stdout']

                if not stdout_check:
                    raise general_function.MyError(
                        f"Required package '{i}' not installed!")
            else:
                continue

        if pre_mount:
            for key in pre_mount:
                try:
                    f = globals()[key]
                    args = pre_mount[key]
                    f(args)
                except Exception as err:
                    raise general_function.MyError(
                        f"Impossible perform pre-mount operations for storage '{type_storage}': {err}"
                    )

        check_mount_cmd = f"mount | grep {mount_point}"
        check_mount = general_function.exec_cmd(check_mount_cmd)
        stdout_mount = check_mount['stdout']

        if stdout_mount:
            if mount_point == '/mnt/sshfs':
                remote_mount = stdout_mount.split()[0]
                if remote_mount not in mount_cmd:
                    raise general_function.MyError(
                        f"Mount point {mount_point} is busy by different remote resource! "
                        f"Requested mount: {mount_cmd}. "
                        f"Current mount: {stdout_mount}.")
            else:
                raise general_function.MyError(
                    f"Mount point {mount_point} is busy!")
        else:
            general_function.create_dirs(job_name='',
                                         dirs_pairs={mount_point: ''})
            data_mounting = general_function.exec_cmd(f"{mount_cmd}")
            stderr_mounting = data_mounting['stderr']
            code = data_mounting['code']

            if stderr_mounting:
                raise general_function.MyError(stderr_mounting)

            if code != 0:
                raise general_function.MyError(
                    f"Bad result code external process '{mount_cmd}':'{code}'")

            if type_storage == 's3':
                try:
                    os.chdir('/mnt/s3')
                except ConnectionAbortedError:
                    raise general_function.MyError(
                        "incorrect authentification data!")
                else:
                    os.chdir(
                        '/'
                    )  # fix error 'getcwd: cannot access parent directories: No such file or directory'

    return
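
The busy check above shells out to `mount | grep ...`. If exec_cmd is unavailable, roughly the same information can be obtained from the standard library and util-linux; a small sketch (not the project's code):

import os
import subprocess


def mount_point_is_busy(mount_point):
    """True if something is already mounted at mount_point."""
    return os.path.ismount(mount_point)


def current_mount_source(mount_point):
    """Best-effort lookup of what is mounted at mount_point (uses util-linux findmnt)."""
    result = subprocess.run(['findmnt', '-n', '-o', 'SOURCE', mount_point],
                            capture_output=True, text=True)
    return result.stdout.strip() or None
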
Example #3
0
def periodic_backup(full_tmp_path, general_local_dst_path, remote_dir, storage,
                    subdir_name, days_count, weeks_count, job_name, host,
                    share):

    daily_subdir_name = "daily"
    weekly_subdir_name = "weekly"
    monthly_subdir_name = "monthly"

    link_dict = {}

    dow = general_function.get_time_now("dow")
    backup_file_name = os.path.basename(full_tmp_path)
    full_dst_path = os.path.join(general_local_dst_path, subdir_name)

    dst_dirs = []
    daily_dir = os.path.join(general_local_dst_path, daily_subdir_name)
    weekly_dir = os.path.join(general_local_dst_path, weekly_subdir_name)
    monthly_dir = os.path.join(general_local_dst_path, monthly_subdir_name)

    if storage == 'local':
        if subdir_name == monthly_subdir_name:
            dst_dirs.append(monthly_dir)

            if dow == config.dow_backup and int(weeks_count):
                src_link = os.path.join(general_local_dst_path,
                                        monthly_subdir_name, backup_file_name)
                dst_link = os.path.join(general_local_dst_path,
                                        weekly_subdir_name, backup_file_name)
                dst_dirs.append(weekly_dir)
                link_dict[dst_link] = src_link

            if int(days_count):
                src_link = os.path.join(general_local_dst_path,
                                        monthly_subdir_name, backup_file_name)
                dst_link = os.path.join(general_local_dst_path,
                                        daily_subdir_name, backup_file_name)
                dst_dirs.append(daily_dir)
                link_dict[dst_link] = src_link
        elif subdir_name == weekly_subdir_name:

            dst_dirs.append(weekly_dir)

            if int(days_count):
                src_link = os.path.join(general_local_dst_path,
                                        weekly_subdir_name, backup_file_name)
                dst_link = os.path.join(general_local_dst_path,
                                        daily_subdir_name, backup_file_name)
                dst_dirs.append(daily_dir)
                link_dict[dst_link] = src_link
        else:
            dst_dirs.append(daily_dir)
    else:
        dst_dirs.append(full_dst_path)

    for dst_dir in set(dst_dirs):
        dirs_for_log = general_function.get_dirs_for_log(
            dst_dir, remote_dir, storage)
        general_function.create_dirs(job_name='',
                                     dirs_pairs={dst_dir: dirs_for_log})

    if storage == 'local':
        try:
            general_function.move_ofs(full_tmp_path, full_dst_path)
        except general_function.MyError as err:
            log_and_mail.writelog(
                'ERROR',
                "Can't move '%s' file '%s' -> '%s' on '%s' storage: %s" %
                (subdir_name, full_tmp_path, full_dst_path, storage, err),
                config.filelog_fd, job_name)
        else:
            log_and_mail.writelog(
                'INFO',
                "Successfully moved '%s' file '%s' -> '%s' on '%s' storage." %
                (subdir_name, full_tmp_path, full_dst_path, storage),
                config.filelog_fd, job_name)

        if link_dict:
            for key in link_dict.keys():
                src = link_dict[key]
                dst = key

                try:
                    general_function.create_symlink(src, dst)
                except general_function.MyError as err:
                    log_and_mail.writelog(
                        'ERROR',
                        "Can't create symlink '%s' -> '%s' on 'local' storage: %s"
                        % (src, dst, err), config.filelog_fd, job_name)
    else:
        dirs_for_log = general_function.get_dirs_for_log(
            full_dst_path, remote_dir, storage)

        try:
            general_function.copy_ofs(full_tmp_path, full_dst_path)
        except general_function.MyError as err:
            if storage != 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't copy '%s' file '%s' -> '%s' directory on '%s' storage(%s): %s"
                    % (subdir_name, full_tmp_path, dirs_for_log, storage, host,
                       err), config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't copy '%s' file '%s' -> '%s' directory in '%s' share on '%s' storage(%s): %s"
                    % (subdir_name, full_tmp_path, dirs_for_log, share,
                       storage, host, err), config.filelog_fd, job_name)
        else:
            if storage != 'smb':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully copied '%s' file '%s' -> '%s' directory on '%s' storage(%s)."
                    %
                    (subdir_name, full_tmp_path, dirs_for_log, storage, host),
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    "Successfully copied '%s' file '%s' -> '%s' directory in '%s' share on '%s' storage(%s)."
                    % (subdir_name, full_tmp_path, dirs_for_log, share,
                       storage, host), config.filelog_fd, job_name)
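
On local storage, periodic_backup() stores the archive once and fills link_dict with dst: src pairs so that the weekly and daily entries become symlinks to the monthly file. A self-contained sketch of that deduplication technique (paths and file names are illustrative, not the project's layout):

import os
import tempfile


def create_symlinks(link_dict):
    """Create every dst -> src symlink from a {dst: src} mapping."""
    for dst, src in link_dict.items():
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        if os.path.lexists(dst):
            os.remove(dst)
        os.symlink(src, dst)


base = tempfile.mkdtemp()
archive = os.path.join(base, 'monthly', 'backup.tar.gz')
os.makedirs(os.path.dirname(archive))
open(archive, 'wb').close()  # stand-in for the real archive

# The weekly and daily copies are just symlinks to the monthly file.
create_symlinks({
    os.path.join(base, 'weekly', 'backup.tar.gz'): archive,
    os.path.join(base, 'daily', 'backup.tar.gz'): archive,
})
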
Example #4
0
def set_cgroup(group, *args):
    pid = os.getpid()

    data_1 = general_function.exec_cmd(f"cat /proc/cgroups | grep {group}")
    stdout_1 = data_1['stdout']
    if not stdout_1:
        log_and_mail.writelog('WARNING', f"Your kernel doesn't support cgroup '{group}'.",
                              config.filelog_fd)
        return False

    data_2 = general_function.exec_cmd('mount | grep "/sys/fs/cgroup"')
    stdout_2 = data_2['stdout']

    if not stdout_2:
        general_function.exec_cmd(
            'mount -t tmpfs -o rw,nosuid,nodev,noexec,relatime,size=0k cgroup_root /sys/fs/cgroup/')
    _dir = f'/sys/fs/cgroup/{group}'

    data_3 = general_function.exec_cmd(f'mount | grep "{_dir}"')
    stdout_3 = data_3['stdout']

    if not (os.path.isdir(_dir) or not stdout_3):
        general_function.create_dirs(job_name='', dirs_pairs={_dir: ''})
        general_function.exec_cmd(f'mount -t cgroup -o rw,nosuid,nodev,noexec,relatime,{group} cgroup_{group} {_dir}/')

    general_function.create_dirs(job_name='', dirs_pairs={f'{_dir}/nixys_backup': ''})

    args_list = list(args)
    for index in args_list:
        if not os.path.isfile(os.path.join(_dir, index)):
            log_and_mail.writelog('WARNING', f"Your kernel does not support option '{index}' in subsystem '{group}'.",
                                  config.filelog_fd)
            return False
        _parametr = -100

        if group == 'blkio':
            directory_for_tmp_file = config.general_path_to_all_tmp_dir

            general_function.create_dirs(job_name='', dirs_pairs={directory_for_tmp_file: ''})

            data_4 = general_function.exec_cmd(f"df {directory_for_tmp_file} | tail -1 | awk '{{print $1}}'")
            stdout_4 = data_4['stdout']

            if re.match("/dev/disk/(by-id|by-path|by-uuid)", stdout_4):
                data_5 = general_function.exec_cmd(f"ls -l {stdout_4} | awk '{{print $11}}'")
                stdout_5 = data_5['stdout']
                device = os.path.basename(stdout_5)
            else:
                device = stdout_4

            raid = True
            if not re.match(".*/(md|dm).+", device):
                raid = False
                while re.match("^[0-9]$", str(device[len(device) - 1])):
                    device = device[0:-1]

            data_6 = general_function.exec_cmd(f"ls -l {device} | awk '{{print $5}}'")
            stdout_6 = data_6['stdout']
            major_device = stdout_6[0:-1]

            data_7 = general_function.exec_cmd(f"ls -l {device} | awk '{{print $6}}'")
            stdout_7 = data_7['stdout']
            minor_device = stdout_7

            if index != 'blkio.weight_device':
                if index == 'blkio.throttle.write_bps_device':
                    if not re.match("^([0-9]*)$", config.block_io_write, re.I):
                        log_and_mail.writelog(
                            'WARNING',
                            "Incorrect data in field 'block_io_write'! You must specify the write speed "
                            "in MB/s using only numbers!",
                            config.filelog_fd)
                        return False
                    _parametr = 1024 * 1024 * int(config.block_io_write)
                else:
                    if not re.match("^([0-9]*)$", config.block_io_read, re.I):
                        log_and_mail.writelog(
                            'WARNING',
                            "Incorrect data in field 'block_io_read'! You must specify the read speed "
                            "in MB/s using only numbers!",
                            config.filelog_fd)
                        return False
                    _parametr = 1024 * 1024 * int(config.block_io_read)

            else:
                if not raid:
                    if not (re.match("^([0-9]*)$", config.block_io_weight, re.I) and
                            100 <= int(config.block_io_weight) <= 1000):
                        log_and_mail.writelog(
                            'WARNING',
                            "Incorrect data in field 'blkio_weight'! Process must specify weight "
                            "in the range from 100 to 1000!",
                            config.filelog_fd)
                        return False
                    _parametr = config.block_io_weight
                else:
                    log_and_mail.writelog(
                        'WARNING', "You can not use option 'blkio.weight_device' with the raid!",
                        config.filelog_fd)
                    return False

            general_function.exec_cmd(f'echo {major_device}:{minor_device} {_parametr} > {_dir}/nixys_backup/{index}')

            data_8 = general_function.exec_cmd(f"cat {_dir}/nixys_backup/{index}")
            stdout_8 = data_8['stdout']
            _flag = stdout_8

            if len(_flag) < 3:
                log_and_mail.writelog('WARNING', f"Incorrect data in file '{_dir}/nixys_backup/{index}'!",
                                      config.filelog_fd)
                return False

        if group == 'cpu':
            if index == 'cpu.shares':
                if not re.match("^([0-9]*)$", config.cpu_shares, re.I):
                    log_and_mail.writelog(
                        'WARNING',
                        "Incorrect data in field 'cpu_shares'! You must specify  weight "
                        "in the range from 1 to cpu_count*1000!",
                        config.filelog_fd)
                    return False

                _parametr = int(config.cpu_shares)

                general_function.exec_cmd(f"echo {_parametr} > {_dir}/nixys_backup/{index}")

                data_9 = general_function.exec_cmd(f"cat {_dir}/nixys_backup/{index}")
                stdout_9 = data_9['stdout']
                _flag = stdout_9

                if not _flag:
                    log_and_mail.writelog('WARNING', f"Incorrect data in file '{_dir}/nixys_backup/{index}'!",
                                          config.filelog_fd)
                    return False

    general_function.exec_cmd(f"echo {pid} > {_dir}/nixys_backup/tasks")
    return True
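
set_cgroup() derives the backing device's major and minor numbers by parsing `ls -l` output. The same numbers are available via os.stat; a sketch of building the cgroup v1 blkio throttle rule that the echo command above writes (the device path and limit are assumptions):

import os


def blkio_device_numbers(device_path):
    """Return (major, minor) for a block device node such as /dev/sda."""
    st = os.stat(device_path)
    return os.major(st.st_rdev), os.minor(st.st_rdev)


major, minor = blkio_device_numbers('/dev/sda')   # assumed device
limit_bps = 10 * 1024 * 1024                      # 10 MB/s, converted to bytes as above
rule = f"{major}:{minor} {limit_bps}"
print(rule)
# Writing the rule requires root and a mounted cgroup v1 blkio hierarchy, e.g.:
# /sys/fs/cgroup/blkio/nixys_backup/blkio.throttle.write_bps_device
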
Example #5
0
def mount(current_storage_data):
    ''' Directly mounts a particular storage.
    Receives a dictionary with the data required to connect the storage.

    '''

    try:
        (data_mount, pre_mount) = get_mount_data(current_storage_data)
    except MountError as e:
        raise general_function.MyError("%s" % e)

    if not data_mount:
        # if local storage
        return 0
    else:
        type_storage = data_mount.get('type_storage')
        packets = data_mount.get('packets')
        check_cmd = data_mount.get('check_cmd')
        mount_cmd = data_mount.get('mount_cmd')

        for i in packets:
            if i:
                check_packet = general_function.exec_cmd("%s %s" %
                                                         (check_cmd, i))
                stdout_check = check_packet['stdout']

                if not stdout_check:
                    raise general_function.MyError(
                        "Required package '%s' not installed!" % (i))
            else:
                continue

        if pre_mount:
            for key in pre_mount:
                try:
                    f = globals()[key]
                    args = pre_mount[key]
                    f(args)
                except Exception as err:
                    raise general_function.MyError(
                        "Impossible perform pre-mount operations for storage '%s': %s"
                        % (type_storage, err))

        check_mount_cmd = "mount | grep %s" % (mount_point)
        check_mount = general_function.exec_cmd(check_mount_cmd)
        stdout_mount = check_mount['stdout']

        if stdout_mount:
            raise general_function.MyError("Mount point %s is busy!" %
                                           (mount_point))
        else:
            general_function.create_dirs(job_name='',
                                         dirs_pairs={mount_point: ''})
            data_mounting = general_function.exec_cmd("%s" % (mount_cmd))
            stderr_mounting = data_mounting['stderr']
            code = data_mounting['code']

            if stderr_mounting:
                raise general_function.MyError(stderr_mounting)

            if code != 0:
                raise general_function.MyError(
                    "Bad result code external process '%s':'%s'" %
                    (mount_cmd, code))

            if type_storage == 's3':
                try:
                    os.chdir('/mnt/s3')
                except ConnectionAbortedError:
                    raise general_function.MyError(
                        "incorrect authentification data!")

    return 1
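
The pre_mount block above resolves helper functions by name through globals() and calls each with its arguments. A minimal sketch of the same dispatch pattern with an explicit registry, which fails loudly on unknown names (handler and key names are made up for illustration):

def prepare_sshfs(args):
    print(f"preparing sshfs with {args}")


def prepare_nfs(args):
    print(f"preparing nfs with {args}")


PRE_MOUNT_HANDLERS = {
    'prepare_sshfs': prepare_sshfs,
    'prepare_nfs': prepare_nfs,
}


def run_pre_mount(pre_mount):
    """Call every configured pre-mount helper with its arguments."""
    for name, args in pre_mount.items():
        handler = PRE_MOUNT_HANDLERS.get(name)
        if handler is None:
            raise KeyError(f"Unknown pre-mount operation: {name}")
        handler(args)


run_pre_mount({'prepare_sshfs': {'port': 22}})
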
Example #6
0
def create_inc_file(local_dst_dirname, remote_dir, part_of_dir_path,
                    backup_file_name, target, exclude_list, gzip, job_name,
                    storage, host, share):
    ''' The function determines whether to collect a full or an incremental backup
    and prepares all the necessary information.

    '''

    date_year = general_function.get_time_now('year')
    date_month = general_function.get_time_now('moy')
    date_day = general_function.get_time_now('dom')

    if int(date_day) < 11:
        daily_prefix = 'day_01'
    elif int(date_day) < 21:
        daily_prefix = 'day_11'
    else:
        daily_prefix = 'day_21'

    year_dir = os.path.join(local_dst_dirname, part_of_dir_path, date_year)
    initial_dir = os.path.join(year_dir, 'year')  # Path to full backup
    month_dir = os.path.join(year_dir, 'month_%s' % (date_month), 'monthly')
    daily_dir = os.path.join(year_dir, 'month_%s' % (date_month), 'daily',
                             daily_prefix)

    year_inc_file = os.path.join(initial_dir, 'year.inc')
    month_inc_file = os.path.join(month_dir, 'month.inc')
    daily_inc_file = os.path.join(daily_dir, 'daily.inc')

    link_dict = {}  # dict for symlink with pairs like dst: src
    copy_dict = {}  # dict for copy with pairs like dst: src

    # Before we proceed to collect a copy, we need to delete the copies for the same month
    # of last year, if they exist, so as not to keep extra archives

    old_year = int(date_year) - 1
    old_year_dir = os.path.join(local_dst_dirname, part_of_dir_path,
                                str(old_year))
    if os.path.isdir(old_year_dir):
        old_month_dir = os.path.join(old_year_dir, 'month_%s' % (date_month))
        del_old_inc_file(old_year_dir, old_month_dir)

    if not os.path.isfile(year_inc_file):
        # There is no original index file, so we need to check the existence of a year directory
        if os.path.isdir(year_dir):
            # The directory exists, but the index file itself is missing, so something went wrong.
            # We delete this directory with all the data inside, because even if backups are there,
            # it will not be possible to continue collecting incremental copies
            general_function.del_file_objects(job_name, year_dir)
            dirs_for_log = general_function.get_dirs_for_log(
                year_dir, remote_dir, storage)
            file_for_log = os.path.join(dirs_for_log,
                                        os.path.basename(year_inc_file))
            log_and_mail.writelog('ERROR', "The file %s not found, so the directory %s is cleared." +\
                                  "Incremental backup will be reinitialized " %(file_for_log, dirs_for_log),
                                  config.filelog_fd, job_name)

        # Initialize the incremental backup, i.e. collect a full copy
        dirs_for_log = general_function.get_dirs_for_log(
            initial_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name,
                                     dirs_pairs={initial_dir: dirs_for_log})

        # Get the current list of files and write to the year inc file
        meta_info = get_index(target, exclude_list)
        with open(year_inc_file, "w") as index_file:
            json.dump(meta_info, index_file)

        full_backup_path = general_function.get_full_path(
            initial_dir, backup_file_name, 'tar', gzip)

        general_files_func.create_tar('files', full_backup_path, target, gzip,
                                      'inc_files', job_name, remote_dir,
                                      storage, host, share)

        # After creating the full copy, we need to create symlinks to the inc. file and
        # the newly collected copy in the directory of the current month,
        # as well as in the decade directory, if the storage is local or scp,
        # and copy the inc. file for other storage types that do not support symlinks.

        month_dirs_for_log = general_function.get_dirs_for_log(
            month_dir, remote_dir, storage)
        daily_dirs_for_log = general_function.get_dirs_for_log(
            daily_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name,
                                     dirs_pairs={
                                         month_dir: month_dirs_for_log,
                                         daily_dir: daily_dirs_for_log
                                     })

        if storage in ('local', 'scp'):
            link_dict[month_inc_file] = year_inc_file
            link_dict[os.path.join(
                month_dir,
                os.path.basename(full_backup_path))] = full_backup_path
            link_dict[daily_inc_file] = year_inc_file
            link_dict[os.path.join(
                daily_dir,
                os.path.basename(full_backup_path))] = full_backup_path
        else:
            copy_dict[month_inc_file] = year_inc_file
            copy_dict[daily_inc_file] = year_inc_file
    else:
        symlink_dir = ''
        if int(date_day) == 1:
            # It is necessary to collect monthly incremental backup relative to the year copy
            old_meta_info = specific_function.parser_json(year_inc_file)
            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = month_dir

            # It is also necessary to make a symlink for inc files and backups to the directory with the first decade
            symlink_dir = daily_dir

            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            symlink_dirs_for_log = general_function.get_dirs_for_log(
                symlink_dir, remote_dir, storage)
            general_function.create_dirs(job_name=job_name,
                                         dirs_pairs={
                                             general_inc_backup_dir:
                                             general_dirs_for_log,
                                             symlink_dir: symlink_dirs_for_log
                                         })

            with open(month_inc_file, "w") as index_file:
                json.dump(new_meta_info, index_file)

        elif int(date_day) == 11 or int(date_day) == 21:
            # It is necessary to collect a ten-day incremental backup relative to a monthly copy
            try:
                old_meta_info = specific_function.parser_json(month_inc_file)
            except general_function.MyError as e:
                log_and_mail.writelog(
                    'ERROR',
                    "Couldn't open old month meta info file '%s': %s!" %
                    (month_inc_file, e), config.filelog_fd, job_name)
                return 2

            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = daily_dir
            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            general_function.create_dirs(
                job_name=job_name,
                dirs_pairs={general_inc_backup_dir: general_dirs_for_log})

            with open(daily_inc_file, "w") as index_file:
                json.dump(new_meta_info, index_file)
        else:
            # It is necessary to collect a normal daily incremental backup relative to a ten-day copy
            try:
                old_meta_info = specific_function.parser_json(daily_inc_file)
            except general_function.MyError as e:
                log_and_mail.writelog(
                    'ERROR',
                    "Couldn't open old decade meta info file '%s': %s!" %
                    (daily_inc_file, e), config.filelog_fd, job_name)
                return 2

            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = daily_dir
            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            general_function.create_dirs(
                job_name=job_name,
                dirs_pairs={general_inc_backup_dir: general_dirs_for_log})

        # Calculate the difference between the old and new file states
        diff_json = compute_diff(new_meta_info, old_meta_info)

        inc_backup_path = general_function.get_full_path(
            general_inc_backup_dir, backup_file_name, 'tar', gzip)

        # Define the list of files that need to be included in the archive
        target_change_list = diff_json['modify']

        # Form GNU.dumpdir headers
        dict_directory = {}  # Dict to store pairs like dir:GNU.dumpdir

        excludes = r'|'.join([
            fnmatch.translate(x)[:-7] for x in general_files_func.EXCLUDE_FILES
        ]) or r'$.'

        for dir_name, dirs, files in os.walk(target):
            first_level_files = []

            if re.match(excludes, dir_name):
                continue

            for file in files:
                if re.match(excludes, os.path.join(dir_name, file)):
                    continue

                first_level_files.append(file)

            first_level_subdirs = dirs
            dict_directory[dir_name] = get_gnu_dumpdir_format(
                diff_json, dir_name, target, excludes, first_level_subdirs,
                first_level_files)

        create_inc_tar(inc_backup_path, remote_dir, dict_directory,
                       target_change_list, gzip, job_name, storage, host,
                       share)

        if symlink_dir:
            if storage in ('local', 'scp'):
                link_dict[daily_inc_file] = month_inc_file
            else:
                copy_dict[daily_inc_file] = month_inc_file

    if link_dict:
        for key in link_dict.keys():
            src = link_dict[key]
            dst = key

            try:
                general_function.create_symlink(src, dst)
            except general_function.MyError as err:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't create symlink %s -> %s: %s" % (src, dst, err),
                    config.filelog_fd, job_name)

    if copy_dict:
        for key in copy_dict.keys():
            src = copy_dict[key]
            dst = key

            try:
                general_function.copy_ofs(src, dst)
            except general_function.MyError as err:
                log_and_mail.writelog(
                    'ERROR', "Can't copy %s -> %s: %s" % (src, dst, err),
                    config.filelog_fd, job_name)
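
The exclude regex above strips the last seven characters of each fnmatch.translate() result, which matches the `\Z(?ms)` suffix emitted by older Python versions; newer versions return a `(?s:...)\Z` form instead. A small, version-independent sketch of combining glob patterns into one compiled exclude regex (the patterns are illustrative):

import fnmatch
import re


def build_exclude_regex(patterns):
    """Combine shell-style glob patterns into a single compiled regex."""
    if not patterns:
        return re.compile(r'$.')  # matches nothing, as in the code above
    return re.compile('|'.join(fnmatch.translate(p) for p in patterns))


excludes = build_exclude_regex(['*.tmp', '*/cache/*'])
print(bool(excludes.match('/var/www/cache/session.dat')))  # True
print(bool(excludes.match('/var/www/index.html')))         # False
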
Example #7
0
def set_cgroup(group, *args):

    PID = os.getpid()

    data_1 = general_function.exec_cmd("cat /proc/cgroups | grep %s" %(group))
    stdout_1 = data_1['stdout']
    if not stdout_1:
        log_and_mail.writelog('WARNING', "Your kernel doesn't support cgroup '%s'." %(group),
                 config.filelog_fd)
        return False

    data_2 = general_function.exec_cmd('mount | grep "/sys/fs/cgroup"')
    stdout_2 = data_2['stdout']

    if not stdout_2:
        general_function.exec_cmd('mount -t tmpfs -o rw,nosuid,nodev,noexec,relatime,size=0k cgroup_root /sys/fs/cgroup/')
    _dir = '/sys/fs/cgroup/%s' %(group)

    data_3 = general_function.exec_cmd('mount | grep "%s"' %(_dir))
    stdout_3 = data_3['stdout']

    if not (os.path.isdir(_dir) or not stdout_3):
        general_function.create_dirs(job_name='', dirs_pairs={_dir:''})
        general_function.exec_cmd('mount -t cgroup -o rw,nosuid,nodev,noexec,relatime,%s cgroup_%s %s/' %(group, group, _dir))

    general_function.create_dirs(job_name='', dirs_pairs={'%s/nixys_backup' %(_dir):''})

    l = list(args)
    for index in l:
        if not os.path.isfile(os.path.join(_dir, index)):
            log_and_mail.writelog('WARNING',"Your kernel does not support option '%s' in subsystem '%s'." %(index, group),
                     config.filelog_fd)
            return False
        _parametr = -100

        if group == 'blkio':
            device = ''
            DIRECTORY_FOR_TMP_FILE = config.general_path_to_all_tmp_dir

            general_function.create_dirs(job_name='', dirs_pairs={DIRECTORY_FOR_TMP_FILE:''})

            data_4 = general_function.exec_cmd("df %s | tail -1 | awk '{{print $1}}'" %(DIRECTORY_FOR_TMP_FILE))
            stdout_4 = data_4['stdout']

            if re.match("/dev/disk/(by-id|by-path|by-uuid)", stdout_4):
                data_5 = general_function.exec_cmd("ls -l %s | awk '{{print $11}}'" %(stdout_4))
                stdout_5 = data_5['stdout']
                device = os.path.basename(stdout_5)
            else:
                device = stdout_4

            raid = True
            if not re.match(".*/(md|dm).+", device):
                raid = False
                while re.match("^[0-9]$", str(device[len(device)-1])):
                    device = device[0:-1]

            data_6 = general_function.exec_cmd("ls -l %s | awk '{{print $5}}'" %(device))
            stdout_6 = data_6['stdout']
            major_device = stdout_6[0:-1]

            data_7 = general_function.exec_cmd("ls -l %s | awk '{{print $6}}'" %(device))
            stdout_7 = data_7['stdout']
            minor_device = stdout_7

            if index != 'blkio.weight_device':
                if index == 'blkio.throttle.write_bps_device':
                    if not re.match("^([0-9]*)$", config.block_io_write, re.I):
                        log_and_mail.writelog('WARNING', "Incorrect data in field 'block_io_write'! You must specify the write speed in MB/s using only numbers!",
                                 config.filelog_fd)
                        return False
                    _parametr = 1024 * 1024 * int(config.block_io_write)
                else:
                    if not re.match("^([0-9]*)$", config.block_io_read, re.I):
                        log_and_mail.writelog('WARNING', "Incorrect data in field 'block_io_read'! You must specify the read speed in MB/s using only numbers!",
                                 config.filelog_fd)
                        return False
                    _parametr = 1024 * 1024 * int(config.block_io_read)

            else:
                if not raid:
                    if not (re.match("^([0-9]*)$", config.blkio_weight, re.I) and
                            int(config.blkio_weight) >= 100 and
                            int(config.blkio_weight) <= 1000):
                        log_and_mail.writelog('WARNING', "Incorrect data in field 'blkio_weight'! Process must specify weight in the range from 100 to 1000!",
                                 config.filelog_fd)
                        return False
                    _parametr = config.blkio_weight
                else:
                    log_and_mail.writelog('WARNING', "You can not use option 'blkio.weight_device' with the raid!",
                              config.filelog_fd)
                    return False

            general_function.exec_cmd('echo %s:%s %s > %s/nixys_backup/%s' %(major_device, minor_device, _parametr, _dir, index))

            data_8 = general_function.exec_cmd("cat %s/nixys_backup/%s" %(_dir, index))
            stdout_8 = data_8['stdout']
            _flag = stdout_8

            if len(_flag) < 3:
                log_and_mail.writelog('WARNING',"Incorrect data in file '%s/nixys_backup/%s'!" %(_dir, index),
                         config.filelog_fd)
                return False

        if group == 'cpu':
            if index == 'cpu.shares':
                if not re.match("^([0-9]*)$", config.cpu_shares, re.I):
                    log_and_mail.writelog('WARNING', "Incorrect data in field 'cpu_shares'! You must specify  weight in the range from 1 to cpu_count*1000!",
                             config.filelog_fd)
                    return False

                _parametr = int(config.cpu_shares)

                general_function.exec_cmd("echo %s > %s/nixys_backup/%s" %(_parametr, _dir, index))

                data_9 = general_function.exec_cmd("cat %s/nixys_backup/%s" %(_dir, index))
                stdout_9 = data_9['stdout']
                _flag = stdout_9 

                if not _flag:
                    log_and_mail.writelog('WARNING',"Incorrect data in file '%s/nixys_backup/%s'!" %(_dir, index),
                             config.filelog_fd)
                    return False

    general_function.exec_cmd("echo %s > %s/nixys_backup/tasks" %(PID, _dir))
    return True
Example #8
0
def mount(current_storage_data):
    ''' Directly mounts a particular storage.
    Receives a dictionary with the data required to connect the storage.

    '''

    try:
        (data_mount, pre_mount) = get_mount_data(current_storage_data)
    except MountError as e:
        raise general_function.MyError("%s" % e)

    if not data_mount:
        # if local storage
        return 0
    else:
        packets = data_mount.get('packets')
        update_cmd = data_mount.get('update_cmd')
        check_cmd = data_mount.get('check_cmd')
        install_cmd = data_mount.get('install_cmd')
        mount_cmd = data_mount.get('mount_cmd')
        pre_install_cmd = data_mount.get('pre_install_cmd')

        if packets:
            command = general_function.exec_cmd(update_cmd)
            code = command['code']

            if code != 0:
                raise general_function.MyError(
                    "Bad result code external process '%s':'%s'" %
                    (update_cmd, code))

        for i in packets:
            check_packet = general_function.exec_cmd("%s %s" % (check_cmd, i))
            stdout_check = check_packet['stdout']

            if not stdout_check:
                if pre_install_cmd:
                    pre_install = general_function.exec_cmd(pre_install_cmd)
                    stderr_pre_install = pre_install['stderr']
                    code = pre_install['code']

                    if stderr_pre_install:
                        raise general_function.MyError(
                            "Package '%s' can't installed:%s" %
                            (i, stderr_pre_install))
                    if code != 0:
                        raise general_function.MyError(
                            "Bad result code external process '%s':'%s'" %
                            (pre_install_cmd, code))

                install_packet = general_function.exec_cmd("%s %s" %
                                                           (install_cmd, i))
                stderr_install = install_packet['stderr']
                code = install_packet['code']

                if stderr_install:
                    raise general_function.MyError(
                        "Package '%s' can't installed:%s" %
                        (i, stderr_install))

                if code != 0:
                    raise general_function.MyError(
                        "Bad result code external process '%s':'%s'" %
                        (install_cmd, code))

        if pre_mount:
            for key in pre_mount:
                try:
                    f = globals()[key]
                    args = pre_mount[key]
                    f(args)
                except Exception as err:
                    raise general_function.MyError(
                        "Impossible perform pre-mount operations for storage '%s': %s"
                        % (current_storage_data.get('storage'), err))

        check_mount_cmd = "mount | grep %s" % (mount_point)
        check_mount = general_function.exec_cmd(check_mount_cmd)
        stdout_mount = check_mount['stdout']

        if stdout_mount:
            raise general_function.MyError("Mount point %s is busy!" %
                                           (mount_point))
        else:
            general_function.create_dirs(job_name='',
                                         dirs_pairs={mount_point: ''})
            data_mounting = general_function.exec_cmd("%s" % (mount_cmd))
            stderr_mounting = data_mounting['stderr']
            code = data_mounting['code']

            if stderr_mounting:
                raise general_function.MyError(stderr_mounting)
            if code != 0:
                raise general_function.MyError(
                    "Bad result code external process '%s':'%s'" %
                    (mount_cmd, code))
    return 1
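
exec_cmd itself is not shown in these examples; judging by its use it returns a dict with 'stdout', 'stderr' and 'code'. A minimal sketch of such a wrapper on top of subprocess, offered as an assumption about its shape rather than a copy of the real helper:

import subprocess


def exec_cmd(cmd):
    """Run a shell command and return its stdout, stderr and exit code."""
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    return {
        'stdout': proc.stdout.strip(),
        'stderr': proc.stderr.strip(),
        'code': proc.returncode,
    }


result = exec_cmd('mount | grep /mnt')
if not result['stdout']:
    print('nothing is mounted under /mnt')
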
Example #9
0
def create_inc_backup(local_dst_dirname, remote_dir, part_of_dir_path, backup_file_name,
                      target, exclude_list, gzip, job_name, storage, host, share, months_to_store):
    """ The function determines whether to collect a full backup or incremental,
    prepares all the necessary information.

    """
    date_year = general_function.get_time_now('year')
    date_month = general_function.get_time_now('moy')
    date_day = general_function.get_time_now('dom')

    dated_paths = get_dated_paths(local_dst_dirname, part_of_dir_path, date_year, date_month, date_day)

    # Before we proceed to collect a copy, we need to delete the copies for the same month
    # of last year, if they exist, so as not to keep extra archives
    old_month_dirs = []
    if os.path.isdir(dated_paths['old_year_dir']) or months_to_store < 12:
        if months_to_store < 12:
            int_date_month = int(date_month)
            last_month = int_date_month - months_to_store
            if last_month <= 0:
                m_range = list(range(last_month+12, 13))
                m_range.extend(list(range(1, int_date_month)))
            else:
                m_range = list(range(last_month, int_date_month))
            for i in range(1, 13):
                if i not in m_range:
                    date = str(i).zfill(2)
                    if i < int(date_month):
                        year_to_cleanup = dated_paths['year_dir']
                    else:
                        year_to_cleanup = dated_paths['old_year_dir']
                    old_month_dirs.append(os.path.join(year_to_cleanup, f'month_{date}'))
        else:
            old_month_dirs.append(os.path.join(dated_paths['old_year_dir'], f'month_{date_month}'))
        del_old_inc_file(dated_paths['old_year_dir'], old_month_dirs)

    link_dict = {}  # dict for symlink with pairs like dst: src
    copy_dict = {}  # dict for copy with pairs like dst: src

    # Get the current list of files
    new_meta_info = get_index(target, exclude_list)

    if not os.path.isfile(dated_paths['year_inc_file']):
        # There is no original index file, so we need to check the existence of a year directory
        if os.path.isdir(dated_paths['year_dir']):
            # The directory exists, but the index file itself is missing, so something went wrong.
            # We delete this directory with all the data inside, because even if backups are there,
            # it will not be possible to continue collecting incremental copies
            general_function.del_file_objects(job_name, dated_paths['year_dir'])
            dirs_for_log = general_function.get_dirs_for_log(dated_paths['year_dir'], remote_dir, storage)
            file_for_log = os.path.join(dirs_for_log, os.path.basename(dated_paths['year_inc_file']))
            log_and_mail.writelog('ERROR',
                                  f"The file {file_for_log} not found, so the directory {dirs_for_log} is cleared. "
                                  f"Incremental backup will be reinitialized ",
                                  config.filelog_fd, job_name)

        # Initialize the incremental backup, i.e. collect a full copy
        remote_dir_for_logs = general_function.get_dirs_for_log(dated_paths['initial_dir'], remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={dated_paths['initial_dir']: remote_dir_for_logs})

        write_meta_info(dated_paths['year_inc_file'], new_meta_info)

        full_backup_path = general_function.get_full_path(dated_paths['initial_dir'],
                                                          backup_file_name,
                                                          'tar',
                                                          gzip)

        general_files_func.create_tar('files', full_backup_path, target,
                                      gzip, 'inc_files', job_name,
                                      remote_dir, storage, host, share)

        daily_dirs_remote = general_function.get_dirs_for_log(dated_paths['daily_dir'], remote_dir, storage)
        month_dirs_remote = general_function.get_dirs_for_log(dated_paths['month_dir'], remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={dated_paths['daily_dir']: daily_dirs_remote,
                                                                    dated_paths['month_dir']: month_dirs_remote})

        if storage == 'local':
            link_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = full_backup_path
            link_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = full_backup_path
        elif storage in ('scp', 'nfs'):
            copy_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = \
                full_backup_path.replace(local_dst_dirname, remote_dir)
            copy_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = \
                full_backup_path.replace(local_dst_dirname, remote_dir)
        else:
            copy_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            copy_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = full_backup_path
            copy_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            copy_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = full_backup_path

    else:
        symlink_dir = ''
        meta_path = ''
        if int(date_day) == 1:
            meta_path = dated_paths['month_inc_file']
            old_meta_path = dated_paths['year_inc_file']
            general_inc_backup_dir = dated_paths['month_dir']
            symlink_dir = dated_paths['daily_dir']
        elif int(date_day) == 11 or int(date_day) == 21:
            meta_path = dated_paths['daily_inc_file']
            old_meta_path = dated_paths['month_inc_file']
            general_inc_backup_dir = dated_paths['daily_dir']
        else:
            old_meta_path = dated_paths['daily_inc_file']
            general_inc_backup_dir = dated_paths['daily_dir']

        try:
            old_meta_info = specific_function.parser_json(old_meta_path)
        except general_function.MyError as e:
            log_and_mail.writelog('ERROR',
                                  f"Couldn't open old meta info file '{old_meta_path}': {e}!",
                                  config.filelog_fd, job_name)
            return 2

        general_dirs_for_log = general_function.get_dirs_for_log(general_inc_backup_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={general_inc_backup_dir: general_dirs_for_log})
        if meta_path:
            write_meta_info(meta_path, new_meta_info)

        # Calculate the difference between the old and new file states
        diff_json = compute_diff(new_meta_info, old_meta_info)

        # Define the list of files that need to be included in the archive
        target_change_list = diff_json['modify']

        dict_directory = get_dict_directory(target, diff_json)

        inc_backup_path = general_function.get_full_path(general_inc_backup_dir, backup_file_name, 'tar', gzip)
        create_inc_tar(
            inc_backup_path, remote_dir, dict_directory, target_change_list, gzip, job_name, storage, host, share
        )

        if symlink_dir:
            symlink_dirs_for_log = general_function.get_dirs_for_log(symlink_dir, remote_dir, storage)
            general_function.create_dirs(job_name=job_name, dirs_pairs={symlink_dir: symlink_dirs_for_log})
            if storage == 'local':
                link_dict[dated_paths['daily_inc_file']] = dated_paths['month_inc_file']
            elif storage in ('scp', 'nfs'):
                copy_dict[dated_paths['daily_inc_file'].replace(local_dst_dirname, remote_dir)] = \
                    dated_paths['month_inc_file'].replace(local_dst_dirname, remote_dir)
            else:
                copy_dict[dated_paths['daily_inc_file']] = dated_paths['month_inc_file']

    create_links_and_copies(link_dict, copy_dict, job_name)
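
The cleanup block at the top of create_inc_backup() keeps only the last months_to_store months, wrapping around the year boundary, and removes every other month_* directory from the current or previous year. A self-contained sketch of that range computation with a concrete case (the function name is ours, not the project's):

def months_in_retention_window(current_month, months_to_store):
    """Month numbers (1-12) inside the retention window preceding current_month."""
    last_month = current_month - months_to_store
    if last_month <= 0:
        kept = list(range(last_month + 12, 13)) + list(range(1, current_month))
    else:
        kept = list(range(last_month, current_month))
    return kept


# Example: in February with months_to_store=3 the window is November..January,
# so month_02 .. month_10 directories outside the window are scheduled for
# deletion (from the previous year in this case).
print(months_in_retention_window(2, 3))  # [11, 12, 1]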