Example #1
0
def is_success_mysql_xtrabackup(extra_keys, str_auth, backup_full_path, gzip, job_name):
    date_now = general_function.get_time_now('backup')
    tmp_status_file = f'/tmp/xtrabackup_status/{date_now}.log'

    dom = int(general_function.get_time_now('dom'))
    if dom == 1:
        dir_for_status_file = os.path.dirname(tmp_status_file)
        if os.path.isdir(dir_for_status_file):
            listing = glob.glob(dir_for_status_file)
            periodic_backup.delete_oldest_files(listing, 31, job_name)

    general_function.create_files(job_name, tmp_status_file)

    if gzip:
        dump_cmd = f"innobackupex {str_auth} {extra_keys} 2>{tmp_status_file} | gzip > {backup_full_path}"
    else:
        dump_cmd = f"innobackupex {str_auth} {extra_keys} > {backup_full_path} 2>{tmp_status_file} "

    command = general_function.exec_cmd(dump_cmd)
    code = command['code']

    if not is_success_status_xtrabackup(tmp_status_file, job_name):
        log_and_mail.writelog(
            'ERROR', f"Can't create xtrabackup in tmp directory! More information in status file {tmp_status_file}.",
            config.filelog_fd, job_name)
        return False
    elif code != 0:
        log_and_mail.writelog('ERROR', f"Bad result code external process '{dump_cmd}':'{code}'",
                              config.filelog_fd, job_name)
        return False
    else:
        log_and_mail.writelog('INFO', "Successfully created xtrabackup in tmp directory.",
                              config.filelog_fd, job_name)
        return True
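In the gzip branch above, the shell reports the exit status of the last command in the pipeline (gzip), so a failing innobackupex can still end with code 0; that is why the status file is inspected as well. The helper is_success_status_xtrabackup is not shown in this example; a minimal hypothetical sketch, assuming it simply scans the redirected stderr log for xtrabackup's usual "completed OK!" marker:

import os


def is_success_status_xtrabackup(status_file, job_name):
    # Hypothetical sketch only: innobackupex/xtrabackup normally ends its stderr
    # output with a line containing "completed OK!" on success, so scanning the
    # status file redirected above is enough to tell success from failure.
    # job_name is accepted only to mirror the call site and is not used here.
    if not os.path.isfile(status_file):
        return False
    with open(status_file, errors='replace') as f:
        return any('completed OK!' in line for line in f)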
Example #2
0
def is_time_to_backup(job_data):
    """ A function that determines whether or not to run copy collection according to the plan.
    It receives a dictionary with data for a particular section at the input.

    """

    job_name = job_data['job']
    job_type = job_data['type']
    storages = job_data['storages']

    result = True

    if job_type == 'inc_files':
        return result

    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    day_flag = False
    week_flag = False
    month_flag = False

    for i in range(len(storages)):
        if storages[i]['enable']:
            if storages[i]['store']['days'] or storages[i]['store'][
                    'weeks'] or storages[i]['store']['month']:
                if int(storages[i]['store']['days']) > 0:
                    day_flag = True
                if int(storages[i]['store']['weeks']) > 0:
                    week_flag = True
                if int(storages[i]['store']['month']) > 0:
                    month_flag = True
            else:
                log_and_mail.writelog(
                    'ERROR',
                    f'There are no stores data for storage {job_type} in the job {job_name}!',
                    config.filelog_fd, job_name)
                continue
    if not day_flag:
        if not week_flag:
            if not month_flag:
                result = False
            else:
                if dom == config.dom_backup:
                    result = True
                else:
                    result = False
        else:
            if dow == config.dow_backup:
                result = True
            else:
                if not month_flag:
                    result = False
                else:
                    if dom == config.dom_backup:
                        result = True
    else:
        result = True

    return result
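Read as a whole, the nested checks above implement a simple rule: a run is due if any enabled storage keeps daily copies, or keeps weekly copies and today is config.dow_backup, or keeps monthly copies and today is config.dom_backup (note that because result starts as True, the path where weekly and monthly retention are both set but neither day matches also ends up returning True). A flattened sketch of that rule, with the configured days passed in explicitly:

def is_backup_due(day_flag, week_flag, month_flag, dow, dom, dow_backup, dom_backup):
    # Sketch of the decision above: daily retention always triggers a run,
    # weekly retention only on the configured day of week, monthly retention
    # only on the configured day of month.
    return (day_flag
            or (week_flag and dow == dow_backup)
            or (month_flag and dom == dom_backup))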
Example #3
0
def is_time_to_backup(job_data):
    ''' A function that determines whether or not to run copy collection according to the plan.
    It receives a dictionary with the data for a particular section as input.

    '''

    job_name = job_data['job']
    job_type = job_data['type']
    storages = job_data['storages']

    if job_type == 'inc_files':
        return True

    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    day_flag = False
    week_flag = False
    month_flag = False

    for i in range(len(storages)):
        if storages[i]['enable']:
            if storages[i]['store']['days'] or storages[i]['store'][
                    'weeks'] or storages[i]['store']['month']:
                if int(storages[i]['store']['days']) > 0:
                    day_flag = True
                if int(storages[i]['store']['weeks']) > 0:
                    week_flag = True
                if int(storages[i]['store']['month']) > 0:
                    month_flag = True
            else:
                log_and_mail.writelog(
                    'ERROR',
                    f'There are no stores data for storage {job_type} in the job {job_name}!',
                    config.filelog_fd, job_name)
                continue
    if not day_flag:
        if not week_flag:
            if not month_flag:
                result = False
            else:
                if dom == config.dom_backup:
                    result = True
                else:
                    result = False
        else:
            if dow == config.dow_backup:
                result = True
            else:
                if not month_flag:
                    result = False
                else:
                    if dom == config.dom_backup:
                        result = True
    else:
        result = True

    return result
Example #4
0
def is_success_mysql_xtrabackup(extra_keys, str_auth, backup_full_path, gzip,
                                job_name):

    date_now = general_function.get_time_now('backup')
    tmp_status_file = '/tmp/xtrabackup_status/%s.log' % (date_now)

    dom = int(general_function.get_time_now('dom'))
    if dom == 1:
        dir_for_status_file = os.path.dirname(tmp_status_file)
        if os.path.isdir(dir_for_status_file):
            listing = glob.glob(dir_for_status_file)
            periodic_backup.delete_oldest_files(listing, 31, job_name)

    general_function.create_files(job_name, tmp_status_file)

    if gzip:
        dump_cmd = "innobackupex %s %s 2>%s | gzip > %s" % (
            str_auth, extra_keys, tmp_status_file, backup_full_path)
    else:
        dump_cmd = "innobackupex %s %s > %s 2>%s " % (
            str_auth, extra_keys, backup_full_path, tmp_status_file)

    command = general_function.exec_cmd(dump_cmd)
    code = command['code']

    if code != 0:
        log_and_mail.writelog(
            'ERROR',
            "Bad result code external process '%s':'%s'" % (dump_cmd, code),
            config.filelog_fd, job_name)
        return False

    if not is_success_status_xtrabackup(tmp_status_file, job_name):
        log_and_mail.writelog(
            'ERROR',
            "Can't create xtrabackup in tmp directory! More information in status file %s."
            % (tmp_status_file), config.filelog_fd, job_name)
        return False
    log_and_mail.writelog(
        'INFO', "Successfully created xtrabackup in tmp directory.",
        config.filelog_fd, job_name)
    return True
Example #5
0
def del_old_inc_file(old_year_dir, old_month_dirs):
    """

    :param str old_year_dir:
    :param list old_month_dirs:
    """
    for old_month_dir in old_month_dirs:
        general_function.del_file_objects('inc_files', old_month_dir)

    if os.path.isdir(old_year_dir):
        list_subdir_in_old_dir = os.listdir(old_year_dir)

        if len(list_subdir_in_old_dir) == 1 and \
                list_subdir_in_old_dir[0] == 'year' and \
                old_year_dir != general_function.get_time_now('year'):
            general_function.del_file_objects('inc_files', old_year_dir)
Example #6
0
def get_log(log_level, log_message, type_message=''):
    """ The function of forming a string for writing to a log file.
    The input is given the following values:
     log_level - event level (error, info, warning);
     log_message - message;
     type_message is the section in the configuration file to which the event belongs.

    """

    time_now = general_function.get_time_now('log')

    if type_message:
        result_str = f"{log_level} [{type_message}] [{time_now}]: {log_message}\n"
    else:
        result_str = f"{log_level} [{time_now}]: {log_message}\n"

    return result_str
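A short usage sketch of the resulting format (the timestamp placeholder stands for the value returned by general_function.get_time_now('log')):

# get_log('ERROR', "Can't mount remote storage", 'mysql')
#   -> "ERROR [mysql] [<log timestamp>]: Can't mount remote storage\n"
# get_log('INFO', 'Backup finished')
#   -> "INFO [<log timestamp>]: Backup finished\n"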
Example #7
0
def get_log(log_level, log_message, type_message=''):
    ''' Forms a string to be written to the log file.
    It takes the following input values:
     log_level - event level (error, info, warning);
     log_message - the message;
     type_message - the section of the configuration file to which the event belongs.

    '''

    time_now = general_function.get_time_now('log')

    if type_message:
        result_str = "%s [%s] [%s]: %s\n" % (log_level, type_message, time_now,
                                             log_message)
    else:
        result_str = "%s [%s]: %s\n" % (log_level, time_now, log_message)

    return result_str
Example #8
0
def control_old_files(full_dir_path,
                      store_backup_count,
                      storage,
                      job_name,
                      host='',
                      full_path_for_log='',
                      share=''):

    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    files_grabbed_list = []

    for extension in config.backup_extenstion:
        full_glob_path = os.path.join(full_dir_path, extension)
        files_grabbed_list.extend(glob.glob(full_glob_path))

    count_file = len(files_grabbed_list)
    time_period = os.path.split(full_dir_path)[1]

    if int(store_backup_count):
        delta_count_file = int(count_file) - int(store_backup_count)

        if ((time_period == 'weekly' and dow != config.dow_backup)
                or (time_period == 'monthly' and dom != config.dom_backup)):
            result_delete_count = delta_count_file
        else:
            result_delete_count = delta_count_file + 1

        if result_delete_count < 1:
            return 1

        try:
            delete_oldest_files(files_grabbed_list, result_delete_count,
                                job_name)
        except general_function.MyError as err:
            if storage == 'local':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' on '%s' storage:%s"
                    % (time_period, full_dir_path, storage, err),
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' in '%s' share on '%s' storage(%s):%s"
                    % (time_period, full_path_for_log, share, storage, host,
                       err), config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' on '%s' storage(%s):%s"
                    % (time_period, full_path_for_log, storage, host, err),
                    config.filelog_fd, job_name)
        else:
            if storage == 'local':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files  in directory '%s' on '%s' storage."
                    % (time_period, full_dir_path, storage), config.filelog_fd,
                    job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' in '%s' share on '%s' storage(%s)."
                    % (time_period, full_path_for_log, share, storage, host),
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' on '%s' storage(%s)."
                    % (time_period, full_path_for_log, storage, host),
                    config.filelog_fd, job_name)
    else:
        try:
            for i in files_grabbed_list:
                general_function.del_file_objects(job_name, i)
        except general_function.MyError as err:
            if storage == 'local':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' on '%s' storage:%s"
                    % (time_period, full_dir_path, storage, err),
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' in '%s' share on '%s' storage(%s):%s"
                    % (time_period, full_path_for_log, share, storage, host,
                       err), config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' on '%s' storage(%s):%s"
                    % (time_period, full_path_for_log, storage, host, err),
                    config.filelog_fd, job_name)
        else:
            if storage == 'local':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' on '%s' storage."
                    % (time_period, full_dir_path, storage), config.filelog_fd,
                    job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' in '%s' share on '%s' storage(%s)."
                    % (time_period, full_path_for_log, share, storage, host),
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' on '%s' storage(%s)."
                    % (time_period, full_path_for_log, storage, host),
                    config.filelog_fd, job_name)
Example #9
0
def periodic_backup(full_tmp_path, general_local_dst_path, remote_dir, storage,
                    subdir_name, days_count, weeks_count, job_name, host,
                    share):

    daily_subdir_name = "daily"
    weekly_subdir_name = "weekly"
    monthly_subdir_name = "monthly"

    link_dict = {}

    dow = general_function.get_time_now("dow")
    backup_file_name = os.path.basename(full_tmp_path)
    full_dst_path = os.path.join(general_local_dst_path, subdir_name)

    dst_dirs = []
    daily_dir = os.path.join(general_local_dst_path, daily_subdir_name)
    weekly_dir = os.path.join(general_local_dst_path, weekly_subdir_name)
    monthly_dir = os.path.join(general_local_dst_path, monthly_subdir_name)

    if storage == 'local':
        if subdir_name == monthly_subdir_name:
            dst_dirs.append(monthly_dir)

            if dow == config.dow_backup and int(weeks_count):
                src_link = os.path.join(general_local_dst_path,
                                        monthly_subdir_name, backup_file_name)
                dst_link = os.path.join(general_local_dst_path,
                                        weekly_subdir_name, backup_file_name)
                dst_dirs.append(weekly_dir)
                link_dict[dst_link] = src_link

            if int(days_count):
                src_link = os.path.join(general_local_dst_path,
                                        monthly_subdir_name, backup_file_name)
                dst_link = os.path.join(general_local_dst_path,
                                        daily_subdir_name, backup_file_name)
                dst_dirs.append(daily_dir)
                link_dict[dst_link] = src_link
        elif subdir_name == weekly_subdir_name and storage == 'local':

            dst_dirs.append(weekly_dir)

            if int(days_count):
                src_link = os.path.join(general_local_dst_path,
                                        weekly_subdir_name, backup_file_name)
                dst_link = os.path.join(general_local_dst_path,
                                        daily_subdir_name, backup_file_name)
                dst_dirs.append(daily_dir)
                link_dict[dst_link] = src_link
        else:
            dst_dirs.append(daily_dir)
    else:
        dst_dirs.append(full_dst_path)

    for dst_dir in set(dst_dirs):
        dirs_for_log = general_function.get_dirs_for_log(
            dst_dir, remote_dir, storage)
        general_function.create_dirs(job_name='',
                                     dirs_pairs={dst_dir: dirs_for_log})

    if storage == 'local':
        try:
            general_function.move_ofs(full_tmp_path, full_dst_path)
        except general_function.MyError as err:
            log_and_mail.writelog(
                'ERROR',
                "Can't move '%s' file '%s' -> '%s' on '%s' storage: %s" %
                (subdir_name, full_tmp_path, full_dst_path, storage, err),
                config.filelog_fd, job_name)
        else:
            log_and_mail.writelog(
                'INFO',
                "Successfully moved '%s' file '%s' -> '%s' on '%s' storage." %
                (subdir_name, full_tmp_path, full_dst_path, storage),
                config.filelog_fd, job_name)

        if link_dict:
            for key in link_dict.keys():
                src = link_dict[key]
                dst = key

                try:
                    general_function.create_symlink(src, dst)
                except general_function.MyError as err:
                    log_and_mail.writelog(
                        'ERROR',
                        "Can't create symlink '%s' -> '%s' on 'local' storage: %s"
                        % (src, dst, err), config.filelog_fd, job_name)
    else:
        # Compute the path used for logging before the copy so that it is
        # defined in the except branch as well.
        dirs_for_log = general_function.get_dirs_for_log(
            full_dst_path, remote_dir, storage)

        try:
            general_function.copy_ofs(full_tmp_path, full_dst_path)
        except general_function.MyError as err:
            if storage != 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't copy '%s' file '%s' -> '%s' directory on '%s' storage(%s): %s"
                    % (subdir_name, full_tmp_path, dirs_for_log, storage, host,
                       err), config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't copy '%s' file '%s' -> '%s' directory in '%s' share on '%s' storage(%s): %s"
                    % (subdir_name, full_tmp_path, dirs_for_log, share,
                       storage, host, err), config.filelog_fd, job_name)
        else:
            if storage != 'smb':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully copied '%s' file '%s' -> '%s' directory on '%s' storage(%s)."
                    %
                    (subdir_name, full_tmp_path, dirs_for_log, storage, host),
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    "Successfully copied '%s' file '%s' -> '%s' directory in '%s' share on '%s' storage(%s)."
                    % (subdir_name, full_tmp_path, dirs_for_log, share,
                       storage, host), config.filelog_fd, job_name)
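On local storage the function above keeps a single physical copy and exposes it in the other retention directories through symlinks. A sketch of the resulting layout after a backup taken on a day that is both config.dom_backup and config.dow_backup, assuming daily and weekly retention are enabled (the file name is illustrative):

# <general_local_dst_path>/
#   monthly/mysql_2019-01-01.tar.gz                 # real file, moved from tmp
#   weekly/mysql_2019-01-01.tar.gz   -> symlink to the file in monthly/
#   daily/mysql_2019-01-01.tar.gz    -> symlink to the file in monthly/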
Example #10
0
def general_desc_iteration(full_tmp_path, storages, part_of_dir_path,
                           job_name):

    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    index_local_storage = -1
    for i in range(len(storages)):
        if storages[i]['storage'] == 'local':
            index_local_storage = i
            break
    if index_local_storage != -1:
        storages += [storages.pop(index_local_storage)]

    for i in range(len(storages)):
        if specific_function.is_save_to_storage(job_name, storages[i]):
            try:
                current_storage_data = mount_fuse.get_storage_data(
                    job_name, storages[i])
            except general_function.MyError as err:
                log_and_mail.writelog('ERROR', '%s' % (err), config.filelog_fd,
                                      job_name)
                continue
            else:
                storage = current_storage_data['storage']
                backup_dir = current_storage_data['backup_dir']

                try:
                    mount_fuse.mount(current_storage_data)
                except general_function.MyError as err:
                    log_and_mail.writelog(
                        'ERROR',
                        "Can't mount remote '%s' storage :%s" % (storage, err),
                        config.filelog_fd, job_name)
                    continue
                else:
                    remote_dir = ''  # for logging

                    if storage != 'local':
                        remote_dir = backup_dir
                        local_dst_dirname = mount_fuse.mount_point
                    else:
                        local_dst_dirname = backup_dir

                    days_count = storages[i]['store']['days']
                    weeks_count = storages[i]['store']['weeks']
                    month_count = storages[i]['store']['month']

                    store_dict = {
                        'daily': days_count,
                        'weekly': weeks_count,
                        'monthly': month_count
                    }

                    if storage != 'local':
                        if storage != 's3':
                            host = current_storage_data['host']
                        else:
                            host = ''

                        if storage != 'smb':
                            share = ''
                        else:
                            share = current_storage_data['share']

                        for j in list(store_dict.keys()):
                            # For sshfs/nfs storages, backup_dir is the mount point and must already exist before mounting.
                            # For ftp, smb, webdav and s3 storages, backup_dir is NOT a mount point but a path relative to the mount point.
                            if storage in ('scp', 'nfs'):
                                full_path = os.path.join(
                                    local_dst_dirname, part_of_dir_path, j)
                                remote_path_to_backup_dir = os.path.join(
                                    backup_dir, part_of_dir_path, j)
                            else:
                                full_path = os.path.join(
                                    local_dst_dirname, backup_dir.lstrip('/'),
                                    part_of_dir_path, j)
                                remote_path_to_backup_dir = os.path.join(
                                    backup_dir.lstrip('/'), part_of_dir_path,
                                    j)

                            store_backup_count = store_dict[j]

                            control_old_files(full_path, store_backup_count,
                                              storage, job_name, host,
                                              remote_path_to_backup_dir, share)
                    else:
                        host = ''
                        share = ''

                    if int(month_count) and dom == config.dom_backup:
                        subdir_name = 'monthly'
                    elif int(weeks_count) and dow == config.dow_backup:
                        subdir_name = 'weekly'
                    elif int(days_count):
                        subdir_name = 'daily'

                    # For sshfs/nfs storages, backup_dir is the mount point and must already exist before mounting.
                    # For ftp, smb, webdav and s3 storages, backup_dir is NOT a mount point but a path relative to the mount point.
                    if storage in ('local', 'scp', 'nfs'):
                        general_local_dst_path = os.path.join(
                            local_dst_dirname, part_of_dir_path)
                    else:
                        general_local_dst_path = os.path.join(
                            local_dst_dirname, backup_dir.lstrip('/'),
                            part_of_dir_path)

                    periodic_backup(full_tmp_path, general_local_dst_path,
                                    remote_dir, storage, subdir_name,
                                    days_count, weeks_count, job_name, host,
                                    share)

                    try:
                        mount_fuse.unmount()
                    except general_function.MyError as err:
                        log_and_mail.writelog(
                            'ERROR', "Can't umount remote '%s' storage :%s" %
                            (storage, err), config.filelog_fd, job_name)
                        continue
        else:
            continue
Example #11
0
def general_desc_iteration(full_tmp_path, storages, part_of_dir_path, job_name,
                           safety_backup):
    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    index_local_storage = -1
    for i in range(len(storages)):
        if storages[i]['storage'] == 'local':
            index_local_storage = i
            break
    if index_local_storage != -1:
        storages += [storages.pop(index_local_storage)]

    for i in range(len(storages)):
        if specific_function.is_save_to_storage(job_name, storages[i]):
            try:
                current_storage_data = mount_fuse.get_storage_data(
                    job_name, storages[i])
            except general_function.MyError as err:
                log_and_mail.writelog('ERROR', f'{err}', config.filelog_fd,
                                      job_name)
                continue
            else:
                storage = current_storage_data['storage']
                backup_dir = current_storage_data['backup_dir']

                try:
                    mount_fuse.mount(current_storage_data)
                except general_function.MyError as err:
                    log_and_mail.writelog(
                        'ERROR',
                        f"Can't mount remote '{storage}' storage :{err}",
                        config.filelog_fd, job_name)
                    continue
                else:
                    remote_dir = ''  # for logging

                    if storage != 'local':
                        remote_dir = backup_dir
                        local_dst_dirname = mount_fuse.mount_point + mount_fuse.mount_point_sub_dir
                    else:
                        local_dst_dirname = backup_dir

                    days_count = storages[i]['store']['days']
                    weeks_count = storages[i]['store']['weeks']
                    month_count = storages[i]['store']['month']

                    store_dict = {
                        'daily': days_count,
                        'weekly': weeks_count,
                        'monthly': month_count
                    }

                    if storage != 'local':
                        host, share = general_function.get_host_and_share(
                            storage, current_storage_data)

                        if not safety_backup:
                            remove_old_remote_files(store_dict, storage,
                                                    local_dst_dirname,
                                                    part_of_dir_path,
                                                    backup_dir, job_name, host,
                                                    share, safety_backup)

                    else:
                        host = ''
                        share = ''

                    subdir_name = ''
                    if int(month_count) and dom == config.dom_backup:
                        subdir_name = 'monthly'
                    elif int(weeks_count) and dow == config.dow_backup:
                        subdir_name = 'weekly'
                    elif int(days_count):
                        subdir_name = 'daily'

                    # For sshfs/nfs storages, backup_dir is the mount point and must already exist before mounting
                    # (it is created automatically if remote_mount_point is defined).
                    # For ftp, smb, webdav and s3 storages, backup_dir is NOT a mount point but a path relative
                    # to the mount point.
                    if storage in ('local', 'scp', 'nfs'):
                        general_local_dst_path = os.path.join(
                            local_dst_dirname, part_of_dir_path)
                    else:
                        general_local_dst_path = os.path.join(
                            local_dst_dirname, backup_dir.lstrip('/'),
                            part_of_dir_path)

                    periodic_backup(full_tmp_path, general_local_dst_path,
                                    remote_dir, storage, subdir_name,
                                    days_count, weeks_count, job_name, host,
                                    share)

                    if safety_backup and storage != 'local':
                        remove_old_remote_files(store_dict, storage,
                                                local_dst_dirname,
                                                part_of_dir_path, backup_dir,
                                                job_name, host, share,
                                                safety_backup)

                    try:
                        mount_fuse.unmount()
                    except general_function.MyError as err:
                        log_and_mail.writelog(
                            'ERROR',
                            f"Can't umount remote '{storage}' storage:{err}",
                            config.filelog_fd, job_name)
                        continue
        else:
            continue
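The mount-point comments above determine how the local destination path is built: for local, scp and nfs the backup_dir already is the directory to write into, while for ftp, smb, webdav and s3 it is appended, relative, under the FUSE mount point. A small sketch of the two cases (hypothetical helper, same join rules as in the function above):

import os


def build_local_dst(storage, mount_point, backup_dir, part_of_dir_path):
    # Hypothetical helper illustrating the path rules used above.
    if storage in ('local', 'scp', 'nfs'):
        base = backup_dir if storage == 'local' else mount_point
        return os.path.join(base, part_of_dir_path)
    # ftp, smb, webdav, s3: backup_dir is a path relative to the mount point
    return os.path.join(mount_point, backup_dir.lstrip('/'), part_of_dir_path)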
Example #12
0
def create_inc_file(local_dst_dirname, remote_dir, part_of_dir_path,
                    backup_file_name, target, exclude_list, gzip, job_name,
                    storage, host, share):
    ''' The function determines whether to collect a full backup or incremental,
    prepares all the necessary information.

    '''

    date_year = general_function.get_time_now('year')
    date_month = general_function.get_time_now('moy')
    date_day = general_function.get_time_now('dom')

    if int(date_day) < 11:
        daily_prefix = 'day_01'
    elif int(date_day) < 21:
        daily_prefix = 'day_11'
    else:
        daily_prefix = 'day_21'

    year_dir = os.path.join(local_dst_dirname, part_of_dir_path, date_year)
    initial_dir = os.path.join(year_dir, 'year')  # Path to full backup
    month_dir = os.path.join(year_dir, 'month_%s' % (date_month), 'monthly')
    daily_dir = os.path.join(year_dir, 'month_%s' % (date_month), 'daily',
                             daily_prefix)

    year_inc_file = os.path.join(initial_dir, 'year.inc')
    month_inc_file = os.path.join(month_dir, 'month.inc')
    daily_inc_file = os.path.join(daily_dir, 'daily.inc')

    link_dict = {}  # dict for symlink with pairs like dst: src
    copy_dict = {}  # dict for copy with pairs like dst: src

    # Before collecting a new copy, delete the copies for the same month of last year,
    # if any, so as not to keep extra archives

    old_year = int(date_year) - 1
    old_year_dir = os.path.join(local_dst_dirname, part_of_dir_path,
                                str(old_year))
    if os.path.isdir(old_year_dir):
        old_month_dir = os.path.join(old_year_dir, 'month_%s' % (date_month))
        del_old_inc_file(old_year_dir, old_month_dir)

    if not os.path.isfile(year_inc_file):
        # There is no original index file, so check whether the year directory exists
        if os.path.isdir(year_dir):
            # The directory exists but the index file does not, so something went wrong.
            # Delete the directory with all the data inside, because without the index
            # it is impossible to continue collecting incremental copies.
            general_function.del_file_objects(job_name, year_dir)
            dirs_for_log = general_function.get_dirs_for_log(
                year_dir, remote_dir, storage)
            file_for_log = os.path.join(dirs_for_log,
                                        os.path.basename(year_inc_file))
            log_and_mail.writelog('ERROR', "The file %s not found, so the directory %s is cleared." +\
                                  "Incremental backup will be reinitialized " %(file_for_log, dirs_for_log),
                                  config.filelog_fd, job_name)

        # Initialize the incremental backup, i.e. collect a full copy
        dirs_for_log = general_function.get_dirs_for_log(
            initial_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name,
                                     dirs_pairs={initial_dir: dirs_for_log})

        # Get the current list of files and write to the year inc file
        meta_info = get_index(target, exclude_list)
        with open(year_inc_file, "w") as index_file:
            json.dump(meta_info, index_file)

        full_backup_path = general_function.get_full_path(
            initial_dir, backup_file_name, 'tar', gzip)

        general_files_func.create_tar('files', full_backup_path, target, gzip,
                                      'inc_files', job_name, remote_dir,
                                      storage, host, share)

        # After creating the full copy, make symlinks to the inc file and the newly
        # collected copy in the current month's directory as well as in the ten-day
        # directory for local and scp storages, and copy the inc file for other
        # storage types that do not support symlinks.

        month_dirs_for_log = general_function.get_dirs_for_log(
            month_dir, remote_dir, storage)
        daily_dirs_for_log = general_function.get_dirs_for_log(
            daily_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name,
                                     dirs_pairs={
                                         month_dir: month_dirs_for_log,
                                         daily_dir: daily_dirs_for_log
                                     })

        if storage in 'local, scp':
            link_dict[month_inc_file] = year_inc_file
            link_dict[os.path.join(
                month_dir,
                os.path.basename(full_backup_path))] = full_backup_path
            link_dict[daily_inc_file] = year_inc_file
            link_dict[os.path.join(
                daily_dir,
                os.path.basename(full_backup_path))] = full_backup_path
        else:
            copy_dict[month_inc_file] = year_inc_file
            copy_dict[daily_inc_file] = year_inc_file
    else:
        symlink_dir = ''
        if int(date_day) == 1:
            # It is necessary to collect monthly incremental backup relative to the year copy
            old_meta_info = specific_function.parser_json(year_inc_file)
            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = month_dir

            # It is also necessary to make a symlink for inc files and backups to the directory of the first ten-day period
            symlink_dir = daily_dir

            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            symlink_dirs_for_log = general_function.get_dirs_for_log(
                symlink_dir, remote_dir, storage)
            general_function.create_dirs(job_name=job_name,
                                         dirs_pairs={
                                             general_inc_backup_dir:
                                             general_dirs_for_log,
                                             symlink_dir: symlink_dirs_for_log
                                         })

            with open(month_inc_file, "w") as index_file:
                json.dump(new_meta_info, index_file)

        elif int(date_day) == 11 or int(date_day) == 21:
            # It is necessary to collect a ten-day incremental backup relative to a monthly copy
            try:
                old_meta_info = specific_function.parser_json(month_inc_file)
            except general_function.MyError as e:
                log_and_mail.writelog(
                    'ERROR',
                    "Couldn't open old month meta info file '%s': %s!" %
                    (month_inc_file, e), config.filelog_fd, job_name)
                return 2

            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = daily_dir
            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            general_function.create_dirs(
                job_name=job_name,
                dirs_pairs={general_inc_backup_dir: general_dirs_for_log})

            with open(daily_inc_file, "w") as index_file:
                json.dump(new_meta_info, index_file)
        else:
            # It is necessary to collect a normal daily incremental backup relative to a ten-day copy
            try:
                old_meta_info = specific_function.parser_json(daily_inc_file)
            except general_function.MyError as e:
                log_and_mail.writelog(
                    'ERROR',
                    "Couldn't open old decade meta info file '%s': %s!" %
                    (daily_inc_file, e), config.filelog_fd, job_name)
                return 2

            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = daily_dir
            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            general_function.create_dirs(
                job_name=job_name,
                dirs_pairs={general_inc_backup_dir: general_dirs_for_log})

        # Calculate the difference between the old and new file states
        diff_json = compute_diff(new_meta_info, old_meta_info)

        inc_backup_path = general_function.get_full_path(
            general_inc_backup_dir, backup_file_name, 'tar', gzip)

        # Define the list of files that need to be included in the archive
        target_change_list = diff_json['modify']

        # Form GNU.dumpdir headers
        dict_directory = {}  # Dict to store pairs like dir:GNU.dumpdir

        excludes = r'|'.join([
            fnmatch.translate(x)[:-7] for x in general_files_func.EXCLUDE_FILES
        ]) or r'$.'

        for dir_name, dirs, files in os.walk(target):
            first_level_files = []

            if re.match(excludes, dir_name):
                continue

            for file in files:
                if re.match(excludes, os.path.join(dir_name, file)):
                    continue

                first_level_files.append(file)

            first_level_subdirs = dirs
            dict_directory[dir_name] = get_gnu_dumpdir_format(
                diff_json, dir_name, target, excludes, first_level_subdirs,
                first_level_files)

        create_inc_tar(inc_backup_path, remote_dir, dict_directory,
                       target_change_list, gzip, job_name, storage, host,
                       share)

        if symlink_dir:
            if storage in 'local, scp':
                link_dict[daily_inc_file] = month_inc_file
            else:
                copy_dict[daily_inc_file] = month_inc_file

    if link_dict:
        for key in link_dict.keys():
            src = link_dict[key]
            dst = key

            try:
                general_function.create_symlink(src, dst)
            except general_function.MyError as err:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't create symlink %s -> %s: %s" % (src, dst, err),
                    config.filelog_fd, job_name)

    if copy_dict:
        for key in copy_dict.keys():
            src = copy_dict[key]
            dst = key

            try:
                general_function.copy_ofs(src, dst)
            except general_function.MyError as err:
                log_and_mail.writelog(
                    'ERROR', "Can't copy %s -> %s: %s" % (src, dst, err),
                    config.filelog_fd, job_name)
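The branching on date_day above defines the incremental scheme: a full copy plus a year index on first run, a monthly increment against the year index on day 1, a ten-day increment against the month index on days 11 and 21, and a plain daily increment against the ten-day index otherwise. A condensed sketch of just that base-index selection (hypothetical helper; the index paths are the ones built in the function above):

def select_base_index(date_day, year_inc_file, month_inc_file, daily_inc_file):
    # Sketch: returns (index to diff against, index file to rewrite or None).
    day = int(date_day)
    if day == 1:
        return year_inc_file, month_inc_file    # monthly increment
    if day in (11, 21):
        return month_inc_file, daily_inc_file   # ten-day increment
    return daily_inc_file, None                 # plain daily increment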
Example #13
0
def control_files(full_dir_path, store_backup_count, storage, job_name, files_type,
                  host='', full_path_for_log='', share='', safety_backup=False):
    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    files_grabbed_list = []

    for extension in config.backup_extenstion:
        full_glob_path = os.path.join(full_dir_path, extension)
        files_grabbed_list.extend(glob.glob(full_glob_path))

    count_file = len(files_grabbed_list)
    time_period = os.path.split(full_dir_path)[1]

    if int(store_backup_count):
        delta_count_file = int(count_file) - int(store_backup_count)

        if ((time_period == 'weekly' and dow != config.dow_backup) or
                (time_period == 'monthly' and dom != config.dom_backup)):
            result_delete_count = delta_count_file
        else:
            result_delete_count = delta_count_file + 1

        if safety_backup:
            result_delete_count -= 1

        if result_delete_count < 1:
            return 1

        try:
            delete_oldest_files(files_grabbed_list, result_delete_count, job_name)
        except general_function.MyError as err:
            if storage == 'local':
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_dir_path}' on '{storage}' "
                    f"storage: {err}",
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_path_for_log}' in '{share}' "
                    f"share on '{storage}' storage({host}): {err}",
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_path_for_log}' on '{storage}' "
                    f"storage({host}): {err}",
                    config.filelog_fd, job_name)
        else:
            if storage == 'local':
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files  in directory '{full_dir_path}' on '{storage}' "
                    f"storage.",
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_path_for_log}' in '{share}' "
                    f"share on '{storage}' storage({host}).",
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_path_for_log}' on '{storage}' "
                    f"storage({host}).",
                    config.filelog_fd, job_name)
    else:
        try:
            for i in files_grabbed_list:
                general_function.del_file_objects(job_name, i)
        except general_function.MyError as err:
            if storage == 'local':
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_dir_path}' on '{storage}' "
                    f"storage:{err}",
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_path_for_log}' in '{share}' "
                    f"share on '{storage}' storage({host}):{err}",
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_path_for_log}' on '{storage}' "
                    f"storage({host}):{err}",
                    config.filelog_fd, job_name)
        else:
            if storage == 'local':
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_dir_path}' on '{storage}' "
                    f"storage.",
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_path_for_log}' in '{share}' "
                    f"share on '{storage}' storage({host}).",
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_path_for_log}' on '{storage}' "
                    f"storage({host}).",
                    config.filelog_fd, job_name)
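The retention arithmetic above reads as: the number of existing files minus the configured store count, plus one extra slot on days when a new copy of that period is about to be added, and one less when safety_backup postpones deletion until the new copy is already in place. A worked sketch with hypothetical numbers:

# 10 files in the 'weekly' directory, store_backup_count = 7:
#   on a non-backup weekday (dow != config.dow_backup): delete 10 - 7     = 3
#   on the weekly backup day:                           delete 10 - 7 + 1 = 4
#   on the weekly backup day with safety_backup=True:   delete 4 - 1      = 3
# If the resulting count is < 1 the function returns early and nothing is deleted.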
Example #14
0
def create_inc_backup(local_dst_dirname, remote_dir, part_of_dir_path, backup_file_name,
                      target, exclude_list, gzip, job_name, storage, host, share, months_to_store):
    """ The function determines whether to collect a full backup or incremental,
    prepares all the necessary information.

    """
    date_year = general_function.get_time_now('year')
    date_month = general_function.get_time_now('moy')
    date_day = general_function.get_time_now('dom')

    dated_paths = get_dated_paths(local_dst_dirname, part_of_dir_path, date_year, date_month, date_day)

    # Before collecting a new copy, delete the copies for the same month of last year,
    # if any, so as not to keep extra archives
    old_month_dirs = []
    if os.path.isdir(dated_paths['old_year_dir']) or months_to_store < 12:
        if months_to_store < 12:
            int_date_month = int(date_month)
            last_month = int_date_month - months_to_store
            if last_month <= 0:
                m_range = list(range(last_month+12, 13))
                m_range.extend(list(range(1, int_date_month)))
            else:
                m_range = list(range(last_month, int_date_month))
            for i in range(1, 13):
                if i not in m_range:
                    date = str(i).zfill(2)
                    if i < int(date_month):
                        year_to_cleanup = dated_paths['year_dir']
                    else:
                        year_to_cleanup = dated_paths['old_year_dir']
                    old_month_dirs.append(os.path.join(year_to_cleanup, f'month_{date}'))
        else:
            old_month_dirs.append(os.path.join(dated_paths['old_year_dir'], f'month_{date_month}'))
        del_old_inc_file(dated_paths['old_year_dir'], old_month_dirs)

    link_dict = {}  # dict for symlink with pairs like dst: src
    copy_dict = {}  # dict for copy with pairs like dst: src

    # Get the current list of files
    new_meta_info = get_index(target, exclude_list)

    if not os.path.isfile(dated_paths['year_inc_file']):
        # There is no original index file, so check whether the year directory exists
        if os.path.isdir(dated_paths['year_dir']):
            # The directory exists but the index file does not, so something went wrong.
            # Delete the directory with all the data inside, because without the index
            # it is impossible to continue collecting incremental copies.
            general_function.del_file_objects(job_name, dated_paths['year_dir'])
            dirs_for_log = general_function.get_dirs_for_log(dated_paths['year_dir'], remote_dir, storage)
            file_for_log = os.path.join(dirs_for_log, os.path.basename(dated_paths['year_inc_file']))
            log_and_mail.writelog('ERROR',
                                  f"The file {file_for_log} not found, so the directory {dirs_for_log} is cleared. "
                                  f"Incremental backup will be reinitialized ",
                                  config.filelog_fd, job_name)

        # Initialize the incremental backup, i.e. collect a full copy
        remote_dir_for_logs = general_function.get_dirs_for_log(dated_paths['initial_dir'], remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={dated_paths['initial_dir']: remote_dir_for_logs})

        write_meta_info(dated_paths['year_inc_file'], new_meta_info)

        full_backup_path = general_function.get_full_path(dated_paths['initial_dir'],
                                                          backup_file_name,
                                                          'tar',
                                                          gzip)

        general_files_func.create_tar('files', full_backup_path, target,
                                      gzip, 'inc_files', job_name,
                                      remote_dir, storage, host, share)

        daily_dirs_remote = general_function.get_dirs_for_log(dated_paths['daily_dir'], remote_dir, storage)
        month_dirs_remote = general_function.get_dirs_for_log(dated_paths['month_dir'], remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={dated_paths['daily_dir']: daily_dirs_remote,
                                                                    dated_paths['month_dir']: month_dirs_remote})

        if storage in 'local':
            link_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = full_backup_path
            link_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = full_backup_path
        elif storage in 'scp, nfs':
            copy_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = \
                full_backup_path.replace(local_dst_dirname, remote_dir)
            copy_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = \
                full_backup_path.replace(local_dst_dirname, remote_dir)
        else:
            copy_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            copy_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = full_backup_path
            copy_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            copy_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = full_backup_path

    else:
        symlink_dir = ''
        meta_path = ''
        if int(date_day) == 1:
            meta_path = dated_paths['month_inc_file']
            old_meta_path = dated_paths['year_inc_file']
            general_inc_backup_dir = dated_paths['month_dir']
            symlink_dir = dated_paths['daily_dir']
        elif int(date_day) == 11 or int(date_day) == 21:
            meta_path = dated_paths['daily_inc_file']
            old_meta_path = dated_paths['month_inc_file']
            general_inc_backup_dir = dated_paths['daily_dir']
        else:
            old_meta_path = dated_paths['daily_inc_file']
            general_inc_backup_dir = dated_paths['daily_dir']

        try:
            old_meta_info = specific_function.parser_json(old_meta_path)
        except general_function.MyError as e:
            log_and_mail.writelog('ERROR',
                                  f"Couldn't open old meta info file '{old_meta_path}': {e}!",
                                  config.filelog_fd, job_name)
            return 2

        general_dirs_for_log = general_function.get_dirs_for_log(general_inc_backup_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={general_inc_backup_dir: general_dirs_for_log})
        if meta_path:
            write_meta_info(meta_path, new_meta_info)

        # Calculate the difference between the old and new file states
        diff_json = compute_diff(new_meta_info, old_meta_info)

        # Define the list of files that need to be included in the archive
        target_change_list = diff_json['modify']

        dict_directory = get_dict_directory(target, diff_json)

        inc_backup_path = general_function.get_full_path(general_inc_backup_dir, backup_file_name, 'tar', gzip)
        create_inc_tar(
            inc_backup_path, remote_dir, dict_directory, target_change_list, gzip, job_name, storage, host, share
        )

        if symlink_dir:
            symlink_dirs_for_log = general_function.get_dirs_for_log(symlink_dir, remote_dir, storage)
            general_function.create_dirs(job_name=job_name, dirs_pairs={symlink_dir: symlink_dirs_for_log})
            if storage in 'local':
                link_dict[dated_paths['daily_inc_file']] = dated_paths['month_inc_file']
            elif storage in 'scp, nfs':
                copy_dict[dated_paths['daily_inc_file'].replace(local_dst_dirname, remote_dir)] = \
                    dated_paths['month_inc_file'].replace(local_dst_dirname, remote_dir)
            else:
                copy_dict[dated_paths['daily_inc_file']] = dated_paths['month_inc_file']

    create_links_and_copies(link_dict, copy_dict, job_name)
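The months_to_store handling above keeps only the month_XX directories of the last months_to_store months, spilling over into the previous year's directory when the window wraps around January. A worked sketch with hypothetical values:

# date_month = '06', months_to_store = 2
#   last_month = 6 - 2 = 4        -> m_range = [4, 5]  (months that are kept)
#   i = 1, 2, 3  (i < 6)          -> month_01..month_03 of the current year_dir
#   i = 6 .. 12  (i >= 6)         -> month_06..month_12 of old_year_dir
# all of these directories are collected in old_month_dirs and passed to del_old_inc_file.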