Example #1
def general_desc_iteration(full_tmp_path, storages, part_of_dir_path,
                           job_name):

    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    index_local_storage = -1
    for i in range(len(storages)):
        if storages[i]['storage'] == 'local':
            index_local_storage = i
            break
    if index_local_storage != -1:
        storages.append(storages.pop(index_local_storage))

    for i in range(len(storages)):
        if specific_function.is_save_to_storage(job_name, storages[i]):
            try:
                current_storage_data = mount_fuse.get_storage_data(
                    job_name, storages[i])
            except general_function.MyError as err:
                log_and_mail.writelog('ERROR', '%s' % (err), config.filelog_fd,
                                      job_name)
                continue
            else:
                storage = current_storage_data['storage']
                backup_dir = current_storage_data['backup_dir']

                try:
                    mount_fuse.mount(current_storage_data)
                except general_function.MyError as err:
                    log_and_mail.writelog(
                        'ERROR',
                        "Can't mount remote '%s' storage: %s" % (storage, err),
                        config.filelog_fd, job_name)
                    continue
                else:
                    remote_dir = ''  # for logging

                    if storage != 'local':
                        remote_dir = backup_dir
                        local_dst_dirname = mount_fuse.mount_point
                    else:
                        local_dst_dirname = backup_dir

                    days_count = storages[i]['store']['days']
                    weeks_count = storages[i]['store']['weeks']
                    month_count = storages[i]['store']['month']

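                    # Retention settings: how many daily/weekly/monthly copies to keep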
                    store_dict = {
                        'daily': days_count,
                        'weekly': weeks_count,
                        'monthly': month_count
                    }

                    if storage != 'local':
                        if storage != 's3':
                            host = current_storage_data['host']
                        else:
                            host = ''

                        if storage != 'smb':
                            share = ''
                        else:
                            share = current_storage_data['share']

                        for j in list(store_dict.keys()):
                            # For sshfs/nfs storages, backup_dir is the mount point itself
                            # and must exist before mounting. For ftp, smb, webdav and s3,
                            # backup_dir is NOT a mount point but a path relative to it.
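                            # Illustration: os.path.join() discards everything before an
                            # absolute component, e.g.
                            # os.path.join('/mnt/p', '/srv/b') == '/srv/b',
                            # which is why backup_dir is joined with lstrip('/') below.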
                            if storage in ('scp', 'nfs'):
                                full_path = os.path.join(
                                    local_dst_dirname, part_of_dir_path, j)
                                remote_path_to_backup_dir = os.path.join(
                                    backup_dir, part_of_dir_path, j)
                            else:
                                full_path = os.path.join(
                                    local_dst_dirname, backup_dir.lstrip('/'),
                                    part_of_dir_path, j)
                                remote_path_to_backup_dir = os.path.join(
                                    backup_dir.lstrip('/'), part_of_dir_path,
                                    j)

                            store_backup_count = store_dict[j]

                            control_old_files(full_path, store_backup_count,
                                              storage, job_name, host,
                                              remote_path_to_backup_dir, share)
                    else:
                        host = ''
                        share = ''

                    subdir_name = ''
                    if int(month_count) and dom == config.dom_backup:
                        subdir_name = 'monthly'
                    elif int(weeks_count) and dow == config.dow_backup:
                        subdir_name = 'weekly'
                    elif int(days_count):
                        subdir_name = 'daily'

                    # For sshfs/nfs storages, backup_dir is the mount point itself and must
                    # exist before mounting. For ftp, smb, webdav and s3, backup_dir is NOT
                    # a mount point but a path relative to it.
                    if storage in ('local', 'scp', 'nfs'):
                        general_local_dst_path = os.path.join(
                            local_dst_dirname, part_of_dir_path)
                    else:
                        general_local_dst_path = os.path.join(
                            local_dst_dirname, backup_dir.lstrip('/'),
                            part_of_dir_path)

                    periodic_backup(full_tmp_path, general_local_dst_path,
                                    remote_dir, storage, subdir_name,
                                    days_count, weeks_count, job_name, host,
                                    share)

                    try:
                        mount_fuse.unmount()
                    except general_function.MyError as err:
                        log_and_mail.writelog(
                            'ERROR', "Can't umount remote '%s' storage: %s" %
                            (storage, err), config.filelog_fd, job_name)
                        continue
        else:
            continue
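
Before iterating over the storages, the function moves the 'local' entry (if present) to the end of the list, so remote storages are handled first and the local copy last. A minimal standalone sketch of that idiom, with hypothetical sample data:

def move_local_last(storages):
    # Move the entry whose 'storage' field is 'local' to the end of the list.
    for i, st in enumerate(storages):
        if st['storage'] == 'local':
            storages.append(storages.pop(i))
            break
    return storages

print(move_local_last([{'storage': 'local'}, {'storage': 'scp'}]))
# -> [{'storage': 'scp'}, {'storage': 'local'}]
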
Example #2
def inc_files_backup(job_data):
    '''Collect an incremental backup for the specified partition.'''

    job_name = ''
    try:
        job_name = job_data['job']
        sources = job_data['sources']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', "Missing required key: '%s'!" % (e),
                              config.filelog_fd, job_name)
        return 1

    for i in range(len(sources)):
        target_list = sources[i]['target']
        exclude_list = sources[i].get('excludes', '')
        gzip = sources[i]['gzip']

        # Keep the exclude list in a global variable because of how it is
        # consumed by the `filter` argument of tarfile's `add` method
        general_files_func.EXCLUDE_FILES = general_files_func.get_exclude_ofs(
            target_list, exclude_list)

        # The backup name is derived from each glob pattern in the
        # list `target_list`
        for regex in target_list:
            target_ofs_list = general_files_func.get_ofs(regex)

            for ofs in target_ofs_list:
                if not general_files_func.is_excluded_ofs(ofs):
                    # Create a backup only if the directory is not in the exception list
                    # so as not to generate empty backups

                    # Get the backup name (WITHOUT extension and date)
                    # derived from the glob pattern
                    backup_file_name = general_files_func.get_name_files_backup(
                        regex, ofs)

                    # Get the part of the backup storage path for this archive relative to
                    # the backup dir
                    part_of_dir_path = backup_file_name.replace('___', '/')
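                    # e.g. a hypothetical name 'var___log___nginx' becomes 'var/log/nginx'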

                    for j in range(len(storages)):
                        if specific_function.is_save_to_storage(
                                job_name, storages[j]):
                            try:
                                current_storage_data = mount_fuse.get_storage_data(
                                    job_name, storages[j])
                            except general_function.MyError as err:
                                log_and_mail.writelog('ERROR', '%s' % (err),
                                                      config.filelog_fd,
                                                      job_name)
                                continue
                            else:
                                storage = current_storage_data['storage']
                                backup_dir = current_storage_data['backup_dir']
                                # If the storage is active, mount it
                                try:
                                    mount_fuse.mount(current_storage_data)
                                except general_function.MyError as err:
                                    log_and_mail.writelog(
                                        'ERROR',
                                        "Can't mount remote '%s' storage: %s" %
                                        (storage, err), config.filelog_fd,
                                        job_name)
                                    continue
                                else:
                                    remote_dir = ''  # Only for logging
                                    if storage != 'local':
                                        local_dst_dirname = mount_fuse.mount_point
                                        remote_dir = backup_dir
                                        if storage != 's3':
                                            host = current_storage_data['host']
                                        else:
                                            host = ''
                                        share = current_storage_data.get(
                                            'share')
                                    else:
                                        host = ''
                                        share = ''
                                        local_dst_dirname = backup_dir
                                    # Collect the incremental copy.
                                    # For local/sshfs/nfs storages, backup_dir is the mount point
                                    # itself and must exist before mounting. For ftp, smb, webdav
                                    # and s3, backup_dir is NOT a mount point but a path relative
                                    # to it.
                                    if storage not in ('local', 'scp', 'nfs'):
                                        local_dst_dirname = os.path.join(
                                            local_dst_dirname,
                                            backup_dir.lstrip('/'))

                                    create_inc_file(
                                        local_dst_dirname, remote_dir,
                                        part_of_dir_path, backup_file_name,
                                        ofs, exclude_list, gzip, job_name,
                                        storage, host, share)  # general_inc_iteration

                                    try:
                                        mount_fuse.unmount()
                                    except general_function.MyError as err:
                                        log_and_mail.writelog(
                                            'ERROR',
                                            "Can't umount remote '%s' storage: %s"
                                            % (storage, err),
                                            config.filelog_fd, job_name)
                                        continue
                        else:
                            continue
                else:
                    continue
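
The exclude handling above leans on the `filter` argument of `tarfile.TarFile.add()`: the callable receives each `TarInfo` and returning `None` drops that member. A minimal sketch of the pattern with hypothetical paths (the project's real exclude logic lives in `general_files_func`):

import tarfile

# Hypothetical exclude set; tarfile stores member names without a leading '/'.
EXCLUDE_FILES = {'data/tmp', 'data/cache'}

def exclude_filter(tarinfo):
    # Returning None makes add() skip this member; because add() applies the
    # filter before recursing, a skipped directory's contents are skipped too.
    if tarinfo.name in EXCLUDE_FILES:
        return None
    return tarinfo

with tarfile.open('backup.tar.gz', 'w:gz') as tar:
    tar.add('data', filter=exclude_filter)
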
Example #3
def general_desc_iteration(full_tmp_path, storages, part_of_dir_path, job_name,
                           safety_backup):
    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    index_local_storage = -1
    for i in range(len(storages)):
        if storages[i]['storage'] == 'local':
            index_local_storage = i
            break
    if index_local_storage != -1:
        storages.append(storages.pop(index_local_storage))

    for i in range(len(storages)):
        if specific_function.is_save_to_storage(job_name, storages[i]):
            try:
                current_storage_data = mount_fuse.get_storage_data(
                    job_name, storages[i])
            except general_function.MyError as err:
                log_and_mail.writelog('ERROR', f'{err}', config.filelog_fd,
                                      job_name)
                continue
            else:
                storage = current_storage_data['storage']
                backup_dir = current_storage_data['backup_dir']

                try:
                    mount_fuse.mount(current_storage_data)
                except general_function.MyError as err:
                    log_and_mail.writelog(
                        'ERROR',
                        f"Can't mount remote '{storage}' storage: {err}",
                        config.filelog_fd, job_name)
                    continue
                else:
                    remote_dir = ''  # for logging

                    if storage != 'local':
                        remote_dir = backup_dir
                        local_dst_dirname = mount_fuse.mount_point + mount_fuse.mount_point_sub_dir
                    else:
                        local_dst_dirname = backup_dir

                    days_count = storages[i]['store']['days']
                    weeks_count = storages[i]['store']['weeks']
                    month_count = storages[i]['store']['month']

                    store_dict = {
                        'daily': days_count,
                        'weekly': weeks_count,
                        'monthly': month_count
                    }

                    if storage != 'local':
                        host, share = general_function.get_host_and_share(
                            storage, current_storage_data)

                        if not safety_backup:
                            remove_old_remote_files(store_dict, storage,
                                                    local_dst_dirname,
                                                    part_of_dir_path,
                                                    backup_dir, job_name, host,
                                                    share, safety_backup)

                    else:
                        host = ''
                        share = ''

                    subdir_name = ''
                    if int(month_count) and dom == config.dom_backup:
                        subdir_name = 'monthly'
                    elif int(weeks_count) and dow == config.dow_backup:
                        subdir_name = 'weekly'
                    elif int(days_count):
                        subdir_name = 'daily'

                    # For sshfs/nfs storages, backup_dir is the mount point itself and must
                    # exist before mounting (it is created if remote_mount_point is defined).
                    # For ftp, smb, webdav and s3, backup_dir is NOT a mount point but a path
                    # relative to it.
                    if storage in ('local', 'scp', 'nfs'):
                        general_local_dst_path = os.path.join(
                            local_dst_dirname, part_of_dir_path)
                    else:
                        general_local_dst_path = os.path.join(
                            local_dst_dirname, backup_dir.lstrip('/'),
                            part_of_dir_path)

                    periodic_backup(full_tmp_path, general_local_dst_path,
                                    remote_dir, storage, subdir_name,
                                    days_count, weeks_count, job_name, host,
                                    share)

                    if safety_backup and storage != 'local':
                        remove_old_remote_files(store_dict, storage,
                                                local_dst_dirname,
                                                part_of_dir_path, backup_dir,
                                                job_name, host, share,
                                                safety_backup)

                    try:
                        mount_fuse.unmount()
                    except general_function.MyError as err:
                        log_and_mail.writelog(
                            'ERROR',
                            f"Can't umount remote '{storage}' storage: {err}",
                            config.filelog_fd, job_name)
                        continue
        else:
            continue
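
The daily/weekly/monthly cascade picks the retention tier for the new copy. A simplified, standalone sketch of that selection (the default `dom_backup`/`dow_backup` values below are assumptions standing in for the `config` values):

def choose_subdir(dom, dow, month_count, weeks_count, days_count,
                  dom_backup='01', dow_backup='7'):
    # Monthly wins on the configured day of month, then weekly on the
    # configured day of week, then daily; '' means no tier applies.
    if int(month_count) and dom == dom_backup:
        return 'monthly'
    if int(weeks_count) and dow == dow_backup:
        return 'weekly'
    if int(days_count):
        return 'daily'
    return ''

print(choose_subdir('01', '3', 6, 4, 7))  # -> 'monthly'
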
Example #4
def inc_files_backup(job_data):
    """Collect an incremental backup for the specified partition."""

    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    for i in range(len(sources)):
        target_list = sources[i]['target']
        exclude_list = sources[i].get('excludes', '')
        gzip = sources[i]['gzip']

        # Keep the exclude list in a global variable because of how it is
        # consumed by the `filter` argument of tarfile's `add` method
        general_files_func.EXCLUDE_FILES = general_files_func.get_exclude_ofs(target_list,
                                                                              exclude_list)

        # The backup name is derived from each glob pattern in the
        # list `target_list`
        for regex in target_list:
            target_ofs_list = general_files_func.get_ofs(regex)

            for ofs in target_ofs_list:
                if not general_files_func.is_excluded_ofs(ofs):
                    # Create a backup only if the directory is not in the exception list
                    # so as not to generate empty backups

                    # Get the backup name (WITHOUT extension and date)
                    # derived from the glob pattern
                    backup_file_name = general_files_func.get_name_files_backup(regex, ofs)

                    # Get the part of the backup storage path for this archive relative to
                    # the backup dir
                    part_of_dir_path = backup_file_name.replace('___', '/')

                    for j in range(len(storages)):
                        if specific_function.is_save_to_storage(job_name, storages[j]):
                            try:
                                current_storage_data = mount_fuse.get_storage_data(job_name,
                                                                                   storages[j])
                            except general_function.MyError as err:
                                log_and_mail.writelog('ERROR', f'{err}',
                                                      config.filelog_fd, job_name)
                                continue
                            else:
                                storage = current_storage_data['storage']
                                backup_dir = current_storage_data['backup_dir']
                                # If the storage is active, mount it
                                try:
                                    mount_fuse.mount(current_storage_data)
                                except general_function.MyError as err:
                                    log_and_mail.writelog('ERROR', f"Can't mount remote '{storage}' storage: {err}",
                                                          config.filelog_fd, job_name)
                                    continue
                                else:
                                    remote_dir = ''  # Only for logging
                                    if storage != 'local':
                                        local_dst_dirname = mount_fuse.mount_point
                                        remote_dir = backup_dir
                                        if storage != 's3':
                                            host = current_storage_data['host']
                                        else:
                                            host = ''
                                        share = current_storage_data.get('share')
                                    else:
                                        host = ''
                                        share = ''
                                        local_dst_dirname = backup_dir

                                    if storage not in ('local', 'scp', 'nfs'):
                                        local_dst_dirname = os.path.join(local_dst_dirname, backup_dir.lstrip('/'))

                                    create_inc_file(local_dst_dirname, remote_dir, part_of_dir_path, backup_file_name,
                                                    ofs, exclude_list, gzip, job_name, storage, host, share)

                                    try:
                                        mount_fuse.unmount()
                                    except general_function.MyError as err:
                                        log_and_mail.writelog('ERROR',
                                                              f"Can't umount remote '{storage}' storage: {err}",
                                                              config.filelog_fd, job_name)
                                        continue
                        else:
                            continue
                else:
                    continue
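
`get_ofs()` and `is_excluded_ofs()` are project helpers that expand each glob pattern into concrete filesystem objects and test them against the exclude list. A rough, self-contained stand-in under those assumptions (the exclude patterns below are invented for illustration):

import fnmatch
import glob

def get_ofs(pattern):
    # Expand one glob pattern into concrete filesystem objects.
    return glob.glob(pattern)

def is_excluded_ofs(path, exclude_patterns=('*/tmp/*', '*.cache')):
    # True if the path matches any of the (hypothetical) exclude patterns.
    return any(fnmatch.fnmatch(path, pat) for pat in exclude_patterns)

for ofs in get_ofs('/var/log/*'):
    if not is_excluded_ofs(ofs):
        print('would back up:', ofs)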