Example 1
def external_backup(job_data):
    """ Function, creates a external backup.
    At the entrance receives a dictionary with the data of the job.

    """

    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        dump_cmd = job_data['dump_cmd']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                              config.filelog_fd, job_name)
        return

    safety_backup = job_data.get('safety_backup', False)
    skip_backup_rotate = job_data.get('skip_backup_rotate', False)

    periodic_backup.remove_local_file(storages, '', job_name)

    command = general_function.exec_cmd(dump_cmd)
    stderr = command['stderr']
    stdout = command['stdout']
    code = command['code']

    if code != 0:
        log_and_mail.writelog('ERROR',
                              f"Bad result code external process '{dump_cmd}': '{code}' with next STDERR:\n"
                              f"'{stderr}'",
                              config.filelog_fd, job_name)
        return

    if skip_backup_rotate:
        log_and_mail.writelog('INFO', f"Command '{dump_cmd}' finished success with the next result:\n{stdout}",
                              config.filelog_fd, job_name)
        return

    source_dict = get_value_from_stdout(stderr, stdout, job_name)

    if source_dict is None:
        return

    full_tmp_path = source_dict['full_path']
    basename = source_dict['basename']
    extension = source_dict['extension']
    gzip = source_dict['gzip']

    new_name = os.path.basename(general_function.get_full_path('', basename, extension, gzip))
    new_full_tmp_path = os.path.join(os.path.dirname(full_tmp_path), new_name)

    general_function.move_ofs(full_tmp_path, new_full_tmp_path)

    periodic_backup.general_desc_iteration(new_full_tmp_path, storages, '',
                                           job_name, safety_backup)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type, '/var/cache/davfs2/*')
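For reference, a minimal sketch of the job dictionary that external_backup reads above. Only the key names come from the code; the concrete values and the layout of the 'storages' entries are hypothetical.

# Hypothetical job description for external_backup (illustrative values only).
job_data = {
    'job': 'site-dump-external',                 # required: job name used in log messages
    'type': 'external',                          # required: backup type
    'dump_cmd': '/usr/local/bin/make_dump.sh',   # required: external command to execute (made-up path)
    'storages': [],                              # required: storage definitions; their structure is not shown in this snippet
    'safety_backup': False,                      # optional, defaults to False
    'skip_backup_rotate': False,                 # optional, defaults to False
}

external_backup(job_data)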
Example 2
def del_old_inc_file(old_year_dir, old_month_dir):
    general_function.del_file_objects('inc_files', old_month_dir)

    list_subdir_in_old_dir = os.listdir(old_year_dir)

    if len(list_subdir_in_old_dir) == 1 and list_subdir_in_old_dir[0] == 'year':
        general_function.del_file_objects('inc_files', old_year_dir)
Example 3
def is_success_bgsave(str_auth, backup_full_tmp_path, gzip, job_name):

    backup_full_tmp_path_tmp = backup_full_tmp_path.split('.gz')[0]

    dump_cmd = f"redis-cli {str_auth} --rdb {backup_full_tmp_path_tmp}"

    command = general_function.exec_cmd(dump_cmd)
    stderr = command['stderr']
    code = command['code']

    # Check the exit code of the dump command itself; running a separate
    # "echo $?" in a new shell would always report 0.
    if code != 0:
        log_and_mail.writelog('ERROR',
                              f"Can't create redis database dump '{backup_full_tmp_path_tmp}' in tmp directory:{stderr}",
                              config.filelog_fd, job_name)
        return False
    else:
        if gzip:
            try:
                general_files_func.gzip_file(backup_full_tmp_path_tmp, backup_full_tmp_path)
            except general_function.MyError as err:
                log_and_mail.writelog('ERROR', f"Can't gzip redis database dump '{backup_full_tmp_path_tmp}' in tmp directory:{err}.",
                                      config.filelog_fd, job_name)
                return False
            else:
                log_and_mail.writelog('INFO', f"Successfully created redis database dump '{backup_full_tmp_path}' in tmp directory.",
                                      config.filelog_fd, job_name)
                return True
            finally:
                general_function.del_file_objects(job_name, backup_full_tmp_path_tmp)
        else:
            log_and_mail.writelog('INFO', f"Successfully created redis database dump '{backup_full_tmp_path_tmp}' in tmp directory.",
                              config.filelog_fd, job_name)
            return True
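The examples above and below all rely on general_function.exec_cmd returning a dictionary with 'stdout', 'stderr' and 'code'. A rough, self-contained approximation of that contract based on subprocess (the real helper may differ in details such as logging and shell handling):

import subprocess


def exec_cmd(cmd):
    """Approximation of general_function.exec_cmd: run a shell command and
    return its output in the dict shape the examples read from it."""
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    return {
        'stdout': proc.stdout.strip(),   # standard output of the command
        'stderr': proc.stderr.strip(),   # standard error of the command
        'code': proc.returncode,         # exit code; 0 means success
    }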
Example 4
def mysql_xtradb_backup(job_data):
    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        tmp_dir = job_data['tmp_dir']
        sources = job_data['sources']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', "Missing required key:'%s'!" % (e),
                              config.filelog_fd, job_name)
        return 1

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', "Missing required key:'%s'!" % (e),
                                  config.filelog_fd, job_name)
            continue

        db_user = connect.get('db_user')
        db_password = connect.get('db_password')
        path_to_conf = connect.get('path_to_conf')

        if not (path_to_conf and db_user and db_password):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill the required fields",
                config.filelog_fd, job_name)
            continue

        if not os.path.isfile(path_to_conf):
            log_and_mail.writelog('ERROR',
                                  "Configuration file '%s' not found!",
                                  config.filelog_fd, job_name)
            continue

        str_auth = '--defaults-file=%s --user=%s --password=%s' % (
            path_to_conf, db_user, db_password)

        backup_full_tmp_path = general_function.get_full_path(
            full_path_tmp_dir, 'xtrabackup', 'tar', gzip)

        periodic_backup.remove_old_local_file(storages, '', job_name)

        if is_success_mysql_xtrabackup(extra_keys, str_auth,
                                       backup_full_tmp_path, gzip, job_name):
            periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                   storages, '', job_name)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
Example 5
def external_backup(job_data):
    ''' Creates an external backup.

    Receives a dictionary with the job data.
    '''

    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        dump_cmd = job_data['dump_cmd']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', "Missing required key:'%s'!" % (e),
                              config.filelog_fd, job_name)
        return 1

    periodic_backup.remove_old_local_file(storages, '', job_name)

    command = general_function.exec_cmd(dump_cmd)
    stderr = command['stderr']
    stdout = command['stdout']
    code = command['code']

    if code != 0:
        log_and_mail.writelog(
            'ERROR',
            "Bad result code external process '%s': %s'" % (dump_cmd, code),
            config.filelog_fd, job_name)
        return 1

    source_dict = get_value_from_stdout(stderr, stdout, job_name)

    if source_dict is None:
        return 1

    full_tmp_path = source_dict['full_path']
    basename = source_dict['basename']
    extension = source_dict['extension']
    gzip = source_dict['gzip']

    new_name = os.path.basename(
        general_function.get_full_path('', basename, extension, gzip))
    new_full_tmp_path = os.path.join(os.path.dirname(full_tmp_path), new_name)

    general_function.move_ofs(full_tmp_path, new_full_tmp_path)

    periodic_backup.general_desc_iteration(new_full_tmp_path, storages, '',
                                           job_name)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type, '/var/cache/davfs2/*')
Example 6
def unmount(storage):
    if storage != 'local' and mount_point:
        umount_cmd = f"fusermount -uz {mount_point}"
        umount = general_function.exec_cmd(umount_cmd)
        stderr_umount = umount['stderr']
        code = umount['code']

        if stderr_umount:
            raise general_function.MyError(stderr_umount)
        elif code != 0:
            raise general_function.MyError(
                f"Bad result code external process '{umount_cmd}':'{code}'")
        else:
            general_function.del_file_objects('', mount_point)
    return
Example 7
def unmount():
    if mount_point:
        umount_cmd = "fusermount -uz %s" % (mount_point)
        umount = general_function.exec_cmd(umount_cmd)
        stderr_umount = umount['stderr']
        code = umount['code']

        if stderr_umount:
            raise general_function.MyError(stderr_umount)
        elif code != 0:
            raise general_function.MyError(
                "Bad result code external process '%s':'%s'" %
                (umount_cmd, code))
        else:
            general_function.del_file_objects('', mount_point)
    return 1
Example 8
def del_old_inc_file(old_year_dir, old_month_dirs):
    """

    :param str old_year_dir:
    :param list old_month_dirs:
    """
    for old_month_dir in old_month_dirs:
        general_function.del_file_objects('inc_files', old_month_dir)

    if os.path.isdir(old_year_dir):
        list_subdir_in_old_dir = os.listdir(old_year_dir)

        if len(list_subdir_in_old_dir) == 1 and \
                list_subdir_in_old_dir[0] == 'year' and \
                old_year_dir != general_function.get_time_now('year'):
            general_function.del_file_objects('inc_files', old_year_dir)
Example 9
def mysql_xtrabackup(job_data):
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!", config.filelog_fd, job_name)
            continue

        db_user = connect.get('db_user')
        db_password = connect.get('db_password')
        path_to_conf = connect.get('path_to_conf')

        if not (path_to_conf and db_user and db_password):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill the required fields",
                                  config.filelog_fd, job_name)
            continue

        if not os.path.isfile(path_to_conf):
            log_and_mail.writelog('ERROR', f"Configuration file '{path_to_conf}' not found!",
                                  config.filelog_fd, job_name)
            continue

        str_auth = f'--defaults-file={path_to_conf} --user={db_user} --password={db_password}'

        backup_full_tmp_path = general_function.get_full_path(full_path_tmp_dir, 'xtrabackup', 'tar', gzip)

        periodic_backup.remove_old_local_file(storages, '', job_name)

        if is_success_mysql_xtrabackup(extra_keys, str_auth, backup_full_tmp_path, gzip, job_name):
            periodic_backup.general_desc_iteration(backup_full_tmp_path, storages, '', job_name, safety_backup)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type, full_path_tmp_dir, '/var/cache/davfs2/*')
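For reference, a hypothetical element of job_data['sources'] as read by the xtrabackup jobs above; the credentials and paths are placeholders.

# Illustrative only: one entry of job_data['sources'] for mysql_xtrabackup.
source = {
    'connect': {
        'db_user': 'backup',                  # required together with db_password and path_to_conf
        'db_password': 'secret',              # placeholder value
        'path_to_conf': '/etc/mysql/my.cnf',  # must point to an existing file
    },
    'gzip': True,       # whether the resulting tar archive is gzipped
    'extra_keys': '',   # extra command-line options passed to xtrabackup
}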
Example 10
def delete_oldest_files(files_list, count, job_name):
    tmp_list = []

    for i in files_list:
        if os.path.exists(i):
            tmp_list.append(i)
        else:
            general_function.del_file_objects(job_name, i)
            count -= 1

    time_sorted_list = sorted(tmp_list, key=os.path.getmtime)
    length_list = len(time_sorted_list)

    if count <= 0:
        return 0
    elif count > length_list:
        count = length_list

    for i in time_sorted_list[0:count]:
        general_function.del_file_objects(job_name, i)
Example 11
def mongodb_backup(job_data):
    ''' Creates a MongoDB backup.

    Receives a dictionary with the job data.
    '''

    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        tmp_dir = job_data['tmp_dir']
        sources = job_data['sources']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', "Missing required key:'%s'!" %(e), config.filelog_fd, job_name)
        return 1

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        exclude_dbs_list = sources[i].get('exclude_dbs', [])
        exclude_collections_list = sources[i].get('exclude_collections', [])
        try:
            connect = sources[i]['connect']
            target_db_list = sources[i]['target_dbs']
            target_collection_list = sources[i]['target_collections']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', "Missing required key:'%s'!" %(e), config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_host and not (bool(db_user) ^ bool(db_password))):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill in the required fields", 
                                  config.filelog_fd, job_name) 
            continue

        if not db_port:
            db_port = general_function.get_default_port('mongodb')

        is_all_flag_db = is_all_flag_collection = False

        if 'all' in target_db_list:
            is_all_flag_db = True

        if 'all' in target_collection_list:
            is_all_flag_collection = True


        if db_user:
            uri = "mongodb://%s:%s@%s:%s/" % (db_user, db_password, db_host, db_port)  # for pymongo
            str_auth = " --host %s --port %s --username %s --password %s " %(db_host, db_port, db_user, db_password)  # for mongodump
        else:
            uri = "mongodb://%s:%s/" % (db_host, db_port)
            str_auth = " --host %s --port %s " %(db_host, db_port)


        if is_all_flag_db:
            try:
                client = pymongo.MongoClient(uri)
                target_db_list = client.database_names()
            except pymongo.errors.PyMongoError as err:
                log_and_mail.writelog('ERROR', "Can't connect to MongoDB instances with the following data host='%s', port='%s', user='******', passwd='%s':%s" %(db_host, db_port, db_user, db_password, err),
                                    config.filelog_fd, job_name)
                continue
            finally:
                client.close()

        for db in target_db_list:
            if db not in exclude_dbs_list:
                try:
                    client = pymongo.MongoClient(uri)
                    current_db = client[db]
                    collection_list = current_db.collection_names()
                except pymongo.errors.PyMongoError as err:
                    log_and_mail.writelog('ERROR', "Can't connect to MongoDB instances with the following data host='%s', port='%s', user='******', passwd='%s':%s" %(db_host, db_port, db_user, db_password, err),
                                          config.filelog_fd, job_name)
                    continue
                finally:
                    client.close()

                if is_all_flag_collection:
                    target_collection_list = collection_list

                for collection in target_collection_list:
                    if collection not in exclude_collections_list and collection in collection_list:
                        str_auth_finally = "%s --collection %s " %(str_auth, collection)

                        backup_full_tmp_path = general_function.get_full_path(
                                                                            full_path_tmp_dir,
                                                                            collection, 
                                                                            'mongodump',
                                                                            gzip)

                        part_of_dir_path = os.path.join(db, collection)
                        periodic_backup.remove_old_local_file(storages, part_of_dir_path, job_name)

                        if is_success_mongodump(collection, db, extra_keys, str_auth_finally, backup_full_tmp_path, gzip, job_name):
                            periodic_backup.general_desc_iteration(backup_full_tmp_path, 
                                                                   storages, part_of_dir_path,
                                                                   job_name)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type,
                                      full_path_tmp_dir, '/var/cache/davfs2/*') 
Example 12
def postgresql_basebackup(job_data):
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_user or db_host or db_password):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill in the required fields",
                config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('postgresql')

        try:
            connection = psycopg2.connect(dbname="postgres",
                                          user=db_user,
                                          password=db_password,
                                          host=db_host,
                                          port=db_port)
        except psycopg2.Error as err:
            log_and_mail.writelog(
                'ERROR',
                f"Can't connect to PostgreSQL instances with with following data host='{db_host}', "
                f"port='{db_port}', user='******', passwd='{db_password}':{err}",
                config.filelog_fd, job_name)
            continue
        else:
            connection.close()

        backup_full_tmp_path = general_function.get_full_path(
            full_path_tmp_dir, 'postgresq_hot', 'tar', gzip, i)

        periodic_backup.remove_old_local_file(storages, '', job_name)

        str_auth = f' --dbname=postgresql://{db_user}:{db_password}@{db_host}:{db_port}/ '

        if is_success_pgbasebackup(extra_keys, str_auth, backup_full_tmp_path,
                                   gzip, job_name):
            periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                   storages, '', job_name,
                                                   safety_backup)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
Example 13
def mongodb_backup(job_data):
    """ Function, creates a mongodb backup.
    At the entrance receives a dictionary with the data of the job.

    """
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    dumped_collections = {}
    for i in range(len(sources)):
        exclude_dbs_list = sources[i].get('exclude_dbs', [])
        exclude_collections_list = sources[i].get('exclude_collections', [])
        try:
            connect = sources[i]['connect']
            target_db_list = sources[i]['target_dbs']
            target_collection_list = sources[i]['target_collections']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!", config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_host and not (bool(db_user) ^ bool(db_password))):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill in the required fields",
                                  config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('mongodb')

        is_all_flag_db = is_all_flag_collection = False

        if 'all' in target_db_list:
            is_all_flag_db = True

        if 'all' in target_collection_list:
            is_all_flag_collection = True

        if db_user:
            uri = f"mongodb://{db_user}:{db_password}@{db_host}:{db_port}/"  # for pymongo
            str_auth = f" --host {db_host} --port {db_port} --username {db_user} --password {db_password} "
        else:
            uri = f"mongodb://{db_host}:{db_port}/"
            str_auth = f" --host {db_host} --port {db_port} "

        client = None
        if is_all_flag_db:
            try:
                client = pymongo.MongoClient(uri)
                target_db_list = client.list_database_names()
            except PyMongoError as err:
                log_and_mail.writelog('ERROR',
                                      f"Can't connect to MongoDB instances with the following data host='{db_host}', "
                                      f"port='{db_port}', user='******', passwd='{db_password}':{err}",
                                      config.filelog_fd, job_name)
                continue
            finally:
                if client:
                    client.close()

        for db in target_db_list:
            if db not in exclude_dbs_list:
                try:
                    client = pymongo.MongoClient(uri)
                    current_db = client[db]
                    collection_list = current_db.collection_names()
                except PyMongoError as err:
                    log_and_mail.writelog(
                        'ERROR',
                        f"Can't connect to MongoDB instances with the following data host='{db_host}', "
                        f"port='{db_port}', user='******', passwd='{db_password}':{err}", config.filelog_fd,
                        job_name)
                    continue
                finally:
                    if client:
                        client.close()

                if is_all_flag_collection:
                    target_collection_list = collection_list

                for collection in target_collection_list:
                    if collection not in exclude_collections_list and collection in collection_list:
                        str_auth_finally = f"{str_auth} --collection {collection} "

                        backup_full_tmp_path = general_function.get_full_path(
                            full_path_tmp_dir,
                            collection,
                            'mongodump',
                            gzip,
                            f'{i}-{db}-')

                        part_of_dir_path = os.path.join(db, collection)
                        periodic_backup.remove_old_local_file(storages, part_of_dir_path, job_name)

                        if is_success_mongodump(collection, db, extra_keys, str_auth_finally, backup_full_tmp_path,
                                                gzip, job_name):
                            dumped_collections[collection] = {'success': True,
                                                              'tmp_path': backup_full_tmp_path,
                                                              'part_of_dir_path': part_of_dir_path}
                        else:
                            dumped_collections[collection] = {'success': False}

                        if deferred_copying_level <= 0 and dumped_collections[collection]['success']:
                            periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                                   storages, part_of_dir_path,
                                                                   job_name, safety_backup)

                for collection, result in dumped_collections.items():
                    if deferred_copying_level == 1 and result['success']:
                        periodic_backup.general_desc_iteration(result['tmp_path'], storages,
                                                               result['part_of_dir_path'], job_name, safety_backup)

        for collection, result in dumped_collections.items():
            if deferred_copying_level == 2 and result['success']:
                periodic_backup.general_desc_iteration(result['tmp_path'], storages,
                                                       result['part_of_dir_path'], job_name, safety_backup)

    for collection, result in dumped_collections.items():
        if deferred_copying_level >= 3 and result['success']:
            periodic_backup.general_desc_iteration(result['tmp_path'], storages,
                                                   result['part_of_dir_path'], job_name, safety_backup)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type, full_path_tmp_dir, '/var/cache/davfs2/*')
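The deferred_copying_level branches above follow one pattern: 0 uploads each dump immediately, 1 defers the upload until a database is finished, 2 until a 'sources' entry is finished, and 3 or more until everything has been dumped. A stripped-down sketch of that idea with made-up names (dump and upload stand in for the dump helpers and periodic_backup.general_desc_iteration):

def deferred_upload_sketch(sources, deferred_copying_level, dump, upload):
    """Illustration only: the upload-deferral pattern used by the jobs above.

    level <= 0 -> upload right after each dump
    level == 1 -> upload after an inner group (e.g. one database) is finished
    level == 2 -> upload after one 'sources' entry is finished
    level >= 3 -> upload once everything has been dumped
    """
    dumped = {}
    for source in sources:
        for group in source['groups']:          # e.g. databases
            for item in group['items']:         # e.g. collections
                dumped[item] = dump(item)       # dump first, remember the result
                if deferred_copying_level <= 0 and dumped[item]['success']:
                    upload(dumped[item])
            for item, result in dumped.items():
                if deferred_copying_level == 1 and result['success']:
                    upload(result)
        for item, result in dumped.items():
            if deferred_copying_level == 2 and result['success']:
                upload(result)
    for item, result in dumped.items():
        if deferred_copying_level >= 3 and result['success']:
            upload(result)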
Example 14
def redis_backup(job_data):
    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        tmp_dir = job_data['tmp_dir']
        sources = job_data['sources']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', "Missing required key:'%s'!" %(e), config.filelog_fd, job_name)
        return 1

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
        except KeyError as e:
            log_and_mail.writelog('ERROR', "Missing required key:'%s'!" %(e), config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_password = connect.get('db_password')
        socket = connect.get('socket')

        if not (db_host or socket):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill in the required fields", 
                                  config.filelog_fd, job_name) 
            continue

        if not db_port:
            db_port = general_function.get_default_port('redis')

        try:
            if db_host:
                if db_password:
                    redis.StrictRedis(host=db_host, port=db_port, password=db_password)
                    str_auth = " -h %s -p %s -a '%s' " %(db_host, db_port, db_password)
                else:
                    redis.StrictRedis(host=db_host, port=db_port)
                    str_auth = " -h %s -p %s " %(db_host, db_port)
            else:
                if db_password:
                    redis.StrictRedis(unix_socket_path=socket, password=db_password)
                    str_auth = " -s %s -a '%s' " %(socket, db_password)
                else:
                    redis.StrictRedis(unix_socket_path=socket)
                    str_auth = " -s %s " %(socket)
        except (redis.exceptions.ConnectionError, ConnectionRefusedError) as err:
            log_and_mail.writelog('ERROR', "Can't connect to Redis instances with with following data host='%s', port='%s', passwd='%s', socket='%s' :%s" %(db_host, db_port, db_password, socket, err),
                                  config.filelog_fd, job_name) 
            continue
        else:
            backup_full_tmp_path = general_function.get_full_path(
                                                                full_path_tmp_dir,
                                                                'redis', 
                                                                'rdb',
                                                                gzip)

            periodic_backup.remove_old_local_file(storages, '', job_name)

            if is_success_bgsave(str_auth, backup_full_tmp_path, gzip, job_name):
                periodic_backup.general_desc_iteration(backup_full_tmp_path, 
                                                        storages, '',
                                                        job_name)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type,
                                      full_path_tmp_dir, '/var/cache/davfs2/*')
Example 15
def create_inc_file(local_dst_dirname, remote_dir, part_of_dir_path,
                    backup_file_name, target, exclude_list, gzip, job_name,
                    storage, host, share):
    ''' Determines whether a full or an incremental backup should be collected
    and prepares all the necessary information.
    '''

    date_year = general_function.get_time_now('year')
    date_month = general_function.get_time_now('moy')
    date_day = general_function.get_time_now('dom')

    if int(date_day) < 11:
        daily_prefix = 'day_01'
    elif int(date_day) < 21:
        daily_prefix = 'day_11'
    else:
        daily_prefix = 'day_21'

    year_dir = os.path.join(local_dst_dirname, part_of_dir_path, date_year)
    initial_dir = os.path.join(year_dir, 'year')  # Path to full backup
    month_dir = os.path.join(year_dir, 'month_%s' % (date_month), 'monthly')
    daily_dir = os.path.join(year_dir, 'month_%s' % (date_month), 'daily',
                             daily_prefix)

    year_inc_file = os.path.join(initial_dir, 'year.inc')
    month_inc_file = os.path.join(month_dir, 'month.inc')
    daily_inc_file = os.path.join(daily_dir, 'daily.inc')

    link_dict = {}  # dict for symlink with pairs like dst: src
    copy_dict = {}  # dict for copy with pairs like dst: src

    # Before collecting a new copy, delete last year's copies for the same month,
    # if any, so that extra archives are not kept

    old_year = int(date_year) - 1
    old_year_dir = os.path.join(local_dst_dirname, part_of_dir_path,
                                str(old_year))
    if os.path.isdir(old_year_dir):
        old_month_dir = os.path.join(old_year_dir, 'month_%s' % (date_month))
        del_old_inc_file(old_year_dir, old_month_dir)

    if not os.path.isfile(year_inc_file):
        # The year index file does not exist, so check whether the year directory exists
        if os.path.isdir(year_dir):
            # The directory exists but the index file does not, so something went wrong.
            # Delete the directory with all the data inside, because without the index
            # it is impossible to continue collecting incremental copies.
            general_function.del_file_objects(job_name, year_dir)
            dirs_for_log = general_function.get_dirs_for_log(
                year_dir, remote_dir, storage)
            file_for_log = os.path.join(dirs_for_log,
                                        os.path.basename(year_inc_file))
            log_and_mail.writelog('ERROR', "The file %s not found, so the directory %s is cleared." +\
                                  "Incremental backup will be reinitialized " %(file_for_log, dirs_for_log),
                                  config.filelog_fd, job_name)

        # Initialize the incremental backup, i.e. collect a full copy
        dirs_for_log = general_function.get_dirs_for_log(
            initial_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name,
                                     dirs_pairs={initial_dir: dirs_for_log})

        # Get the current list of files and write to the year inc file
        meta_info = get_index(target, exclude_list)
        with open(year_inc_file, "w") as index_file:
            json.dump(meta_info, index_file)

        full_backup_path = general_function.get_full_path(
            initial_dir, backup_file_name, 'tar', gzip)

        general_files_func.create_tar('files', full_backup_path, target, gzip,
                                      'inc_files', job_name, remote_dir,
                                      storage, host, share)

        # After creating the full copy, make symlinks to the inc file and the newly
        # collected copy in the current month's directory and in the decade directory
        # for local and scp storages; for other storage types that do not support
        # symlinks, copy the inc file instead.

        month_dirs_for_log = general_function.get_dirs_for_log(
            month_dir, remote_dir, storage)
        daily_dirs_for_log = general_function.get_dirs_for_log(
            daily_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name,
                                     dirs_pairs={
                                         month_dir: month_dirs_for_log,
                                         daily_dir: daily_dirs_for_log
                                     })

        if storage in ('local', 'scp'):
            link_dict[month_inc_file] = year_inc_file
            link_dict[os.path.join(
                month_dir,
                os.path.basename(full_backup_path))] = full_backup_path
            link_dict[daily_inc_file] = year_inc_file
            link_dict[os.path.join(
                daily_dir,
                os.path.basename(full_backup_path))] = full_backup_path
        else:
            copy_dict[month_inc_file] = year_inc_file
            copy_dict[daily_inc_file] = year_inc_file
    else:
        symlink_dir = ''
        if int(date_day) == 1:
            # It is necessary to collect monthly incremental backup relative to the year copy
            old_meta_info = specific_function.parser_json(year_inc_file)
            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = month_dir

            # Also make symlinks for the inc file and the backup in the directory of the first decade
            symlink_dir = daily_dir

            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            symlink_dirs_for_log = general_function.get_dirs_for_log(
                symlink_dir, remote_dir, storage)
            general_function.create_dirs(job_name=job_name,
                                         dirs_pairs={
                                             general_inc_backup_dir:
                                             general_dirs_for_log,
                                             symlink_dir: symlink_dirs_for_log
                                         })

            with open(month_inc_file, "w") as index_file:
                json.dump(new_meta_info, index_file)

        elif int(date_day) == 11 or int(date_day) == 21:
            # It is necessary to collect a ten-day incremental backup relative to a monthly copy
            try:
                old_meta_info = specific_function.parser_json(month_inc_file)
            except general_function.MyError as e:
                log_and_mail.writelog(
                    'ERROR',
                    "Couldn't open old month meta info file '%s': %s!" %
                    (month_inc_file, e), config.filelog_fd, job_name)
                return 2

            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = daily_dir
            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            general_function.create_dirs(
                job_name=job_name,
                dirs_pairs={general_inc_backup_dir: general_dirs_for_log})

            with open(daily_inc_file, "w") as index_file:
                json.dump(new_meta_info, index_file)
        else:
            # It is necessary to collect a normal daily incremental backup relative to a ten-day copy
            try:
                old_meta_info = specific_function.parser_json(daily_inc_file)
            except general_function.MyError as e:
                log_and_mail.writelog(
                    'ERROR',
                    "Couldn't open old decade meta info file '%s': %s!" %
                    (daily_inc_file, e), config.filelog_fd, job_name)
                return 2

            new_meta_info = get_index(target, exclude_list)

            general_inc_backup_dir = daily_dir
            general_dirs_for_log = general_function.get_dirs_for_log(
                general_inc_backup_dir, remote_dir, storage)
            general_function.create_dirs(
                job_name=job_name,
                dirs_pairs={general_inc_backup_dir: general_dirs_for_log})

        # Calculate the difference between the old and new file states
        diff_json = compute_diff(new_meta_info, old_meta_info)

        inc_backup_path = general_function.get_full_path(
            general_inc_backup_dir, backup_file_name, 'tar', gzip)

        # Define the list of files that need to be included in the archive
        target_change_list = diff_json['modify']

        # Form GNU.dumpdir headers
        dict_directory = {}  # Dict to store pairs like dir:GNU.dumpdir

        excludes = r'|'.join([
            fnmatch.translate(x)[:-7] for x in general_files_func.EXCLUDE_FILES
        ]) or r'$.'

        for dir_name, dirs, files in os.walk(target):
            first_level_files = []

            if re.match(excludes, dir_name):
                continue

            for file in files:
                if re.match(excludes, os.path.join(dir_name, file)):
                    continue

                first_level_files.append(file)

            first_level_subdirs = dirs
            dict_directory[dir_name] = get_gnu_dumpdir_format(
                diff_json, dir_name, target, excludes, first_level_subdirs,
                first_level_files)

        create_inc_tar(inc_backup_path, remote_dir, dict_directory,
                       target_change_list, gzip, job_name, storage, host,
                       share)

        if symlink_dir:
            if storage in ('local', 'scp'):
                link_dict[daily_inc_file] = month_inc_file
            else:
                copy_dict[daily_inc_file] = month_inc_file

    if link_dict:
        for key in link_dict.keys():
            src = link_dict[key]
            dst = key

            try:
                general_function.create_symlink(src, dst)
            except general_function.MyError as err:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't create symlink %s -> %s: %s" % (src, dst, err),
                    config.filelog_fd, job_name)

    if copy_dict:
        for key in copy_dict.keys():
            src = copy_dict[key]
            dst = key

            try:
                general_function.copy_ofs(src, dst)
            except general_function.MyError as err:
                log_and_mail.writelog(
                    'ERROR', "Can't copy %s -> %s: %s" % (src, dst, err),
                    config.filelog_fd, job_name)
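To make the incremental layout above concrete, a small self-contained sketch that rebuilds the decade prefix and the main directories for one date; the base path is made up, while the directory names follow the code.

import os


def inc_backup_dirs(base, year, month, day):
    """Illustration only: the paths create_inc_file works with for one date."""
    if int(day) < 11:
        daily_prefix = 'day_01'
    elif int(day) < 21:
        daily_prefix = 'day_11'
    else:
        daily_prefix = 'day_21'
    year_dir = os.path.join(base, year)
    return {
        'initial_dir': os.path.join(year_dir, 'year'),                      # full copy + year.inc
        'month_dir': os.path.join(year_dir, f'month_{month}', 'monthly'),   # monthly increment + month.inc
        'daily_dir': os.path.join(year_dir, f'month_{month}', 'daily', daily_prefix),  # decade/daily increments
    }


# The 15th falls into the second decade:
print(inc_backup_dirs('/backup/inc_files', '2024', '07', '15')['daily_dir'])
# -> /backup/inc_files/2024/month_07/daily/day_11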
Example 16
def mysql_backup(job_data):
    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        tmp_dir = job_data['tmp_dir']
        sources = job_data['sources']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                              config.filelog_fd, job_name)
        return 1

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        exclude_list = sources[i].get('excludes', [])
        try:
            connect = sources[i]['connect']
            target_list = sources[i]['target']
            gzip = sources[i]['gzip']
            is_slave = sources[i]['is_slave']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        socket = connect.get('socket')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')
        auth_file = connect.get('auth_file')

        if not (auth_file or
                ((db_host or socket) and db_user and db_password)):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill in the required fields",
                config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('mysql')

        is_all_flag = False

        if 'all' in target_list:
            is_all_flag = True

        try:
            (connection_1,
             str_auth) = get_connection(db_host, db_port, db_user, db_password,
                                        auth_file, socket, job_name)
        except Exception:
            continue

        cur_1 = connection_1.cursor()

        if is_all_flag:
            cur_1.execute("SHOW DATABASES")
            target_list = [i[0] for i in cur_1.fetchall()]

        if is_slave:
            try:
                cur_1.execute("STOP SLAVE")
            except MySQLdb.Error as err:
                log_and_mail.writelog('ERROR', f"Can't stop slave: {err}",
                                      config.filelog_fd, job_name)

        connection_1.close()

        for db in target_list:
            if db not in exclude_list:
                backup_full_tmp_path = general_function.get_full_path(
                    full_path_tmp_dir, db, 'sql', gzip)

                periodic_backup.remove_old_local_file(storages, db, job_name)

                if is_success_mysqldump(db, extra_keys, str_auth,
                                        backup_full_tmp_path, gzip, job_name):
                    periodic_backup.general_desc_iteration(
                        backup_full_tmp_path, storages, db, job_name)

        if is_slave:
            connection_2 = None
            try:
                (connection_2,
                 str_auth) = get_connection(db_host, db_port, db_user,
                                            db_password, auth_file, socket,
                                            job_name)
                cur_2 = connection_2.cursor()
                cur_2.execute("START SLAVE")
            except MySQLdb.Error as err:
                log_and_mail.writelog('ERROR', f"Can't start slave: {err} ",
                                      config.filelog_fd, job_name)
            finally:
                if connection_2:
                    connection_2.close()

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
Example 17
def control_files(full_dir_path, store_backup_count, storage, job_name, files_type,
                  host='', full_path_for_log='', share='', safety_backup=False):
    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    files_grabbed_list = []

    for extension in config.backup_extenstion:
        full_glob_path = os.path.join(full_dir_path, extension)
        files_grabbed_list.extend(glob.glob(full_glob_path))

    count_file = len(files_grabbed_list)
    time_period = os.path.split(full_dir_path)[1]

    if int(store_backup_count):
        delta_count_file = int(count_file) - int(store_backup_count)

        if ((time_period == 'weekly' and dow != config.dow_backup) or
                (time_period == 'monthly' and dom != config.dom_backup)):
            result_delete_count = delta_count_file
        else:
            result_delete_count = delta_count_file + 1

        if safety_backup:
            result_delete_count -= 1

        if result_delete_count < 1:
            return 1

        try:
            delete_oldest_files(files_grabbed_list, result_delete_count, job_name)
        except general_function.MyError as err:
            if storage == 'local':
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_dir_path}' on '{storage}' "
                    f"storage: {err}",
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_path_for_log}' in '{share}' "
                    f"share on '{storage}' storage({host}): {err}",
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_path_for_log}' on '{storage}' "
                    f"storage({host}): {err}",
                    config.filelog_fd, job_name)
        else:
            if storage == 'local':
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files  in directory '{full_dir_path}' on '{storage}' "
                    f"storage.",
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_path_for_log}' in '{share}' "
                    f"share on '{storage}' storage({host}).",
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_path_for_log}' on '{storage}' "
                    f"storage({host}).",
                    config.filelog_fd, job_name)
    else:
        try:
            for i in files_grabbed_list:
                general_function.del_file_objects(job_name, i)
        except general_function.MyError as err:
            if storage == 'local':
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_dir_path}' on '{storage}' "
                    f"storage:{err}",
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_path_for_log}' in '{share}' "
                    f"share on '{storage}' storage({host}):{err}",
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't delete {files_type} '{time_period}' files in directory '{full_path_for_log}' on '{storage}' "
                    f"storage({host}):{err}",
                    config.filelog_fd, job_name)
        else:
            if storage == 'local':
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_dir_path}' on '{storage}' "
                    f"storage.",
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_path_for_log}' in '{share}' "
                    f"share on '{storage}' storage({host}).",
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    f"Successfully deleted {files_type} '{time_period}' files in directory '{full_path_for_log}' on '{storage}' "
                    f"storage({host}).",
                    config.filelog_fd, job_name)
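A worked example of the retention arithmetic in control_files above, with made-up numbers:

# 7 backups on disk, 5 should be kept (store_backup_count = 5).
count_file = 7
store_backup_count = 5
delta_count_file = count_file - store_backup_count   # 2 files over the limit

# On the day a new backup for this period is about to be written, one extra
# slot is freed in advance, so delta_count_file + 1 = 3 files are deleted.
result_delete_count = delta_count_file + 1

# With safety_backup the old copies are removed only after the new one is in
# place, so one fewer file is deleted now: 3 - 1 = 2.
safety_backup = True
if safety_backup:
    result_delete_count -= 1

print(result_delete_count)   # 2 oldest files get passed to delete_oldest_files()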
Example 18
def desc_files_backup(job_data):
    """ Function, creates a desc backup of directories.
    At the entrance receives a dictionary with the data of the job.

    """
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    dumped_ofs = {}
    for i in range(len(sources)):
        exclude_list = sources[i].get('excludes', '')
        try:
            target_list = sources[i]['target']
            gzip = sources[i]['gzip']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        # Keep the exclusion list in a global variable because of how the
        # `filter` argument of the `add` method of the `tarfile` class is used
        general_files_func.EXCLUDE_FILES = general_files_func.get_exclude_ofs(
            target_list, exclude_list)

        # The backup name is selected depending on the particular glob patterns from
        # the list `target_list`
        for regex in target_list:
            target_ofs_list = general_files_func.get_ofs(regex)

            if not target_ofs_list:
                log_and_mail.writelog(
                    'ERROR', "No file system objects found that" +
                    f"match the regular expression '{regex}'!",
                    config.filelog_fd, job_name)
                continue

            for ofs in target_ofs_list:
                # Create a backup only if the directory is not in the exception list
                # so as not to generate empty backups
                if not general_files_func.is_excluded_ofs(ofs):
                    # A function that derives the backup name from the glob
                    # pattern, WITHOUT extension and date
                    backup_file_name = general_files_func.get_name_files_backup(
                        regex, ofs)
                    # Get the part of the backup storage path for this archive relative to
                    # the backup dir
                    part_of_dir_path = backup_file_name.replace('___', '/')

                    backup_full_tmp_path = general_function.get_full_path(
                        full_path_tmp_dir, backup_file_name, 'tar', gzip)

                    periodic_backup.remove_old_local_file(
                        storages, part_of_dir_path, job_name)

                    if general_files_func.create_tar('files',
                                                     backup_full_tmp_path, ofs,
                                                     gzip, backup_type,
                                                     job_name):
                        dumped_ofs[ofs] = {
                            'success': True,
                            'tmp_path': backup_full_tmp_path,
                            'part_of_dir_path': part_of_dir_path
                        }
                    else:
                        dumped_ofs[ofs] = {'success': False}

                    if deferred_copying_level <= 0 and dumped_ofs[ofs][
                            'success']:
                        periodic_backup.general_desc_iteration(
                            backup_full_tmp_path, storages, part_of_dir_path,
                            job_name, safety_backup)
                else:
                    continue

            for ofs, result in dumped_ofs.items():
                if deferred_copying_level == 1 and result['success']:
                    periodic_backup.general_desc_iteration(
                        result['tmp_path'], storages,
                        result['part_of_dir_path'], job_name, safety_backup)

        for ofs, result in dumped_ofs.items():
            if deferred_copying_level == 2 and result['success']:
                periodic_backup.general_desc_iteration(
                    result['tmp_path'], storages, result['part_of_dir_path'],
                    job_name, safety_backup)

    for ofs, result in dumped_ofs.items():
        if deferred_copying_level >= 3 and result['success']:
            periodic_backup.general_desc_iteration(result['tmp_path'],
                                                   storages,
                                                   result['part_of_dir_path'],
                                                   job_name, safety_backup)

    # After all operations, delete the temporary directory that was created and
    # the data inside the davfs cache directory, but not the cache directory itself!
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
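A small illustration of how the backup name maps to the storage sub-path in the loop above; the concrete name is made up, only the '___' to '/' convention comes from the code.

# get_name_files_backup() is assumed here to join path components with '___':
backup_file_name = 'var___www___site'
part_of_dir_path = backup_file_name.replace('___', '/')
print(part_of_dir_path)   # -> var/www/site, the sub-directory used on the storage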
Example 19
def mysql_backup(job_data):
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    dumped_dbs = {}
    for i in range(len(sources)):
        exclude_list = sources[i].get('excludes', [])
        try:
            connect = sources[i]['connect']
            target_list = sources[i]['target']
            gzip = sources[i]['gzip']
            is_slave = sources[i]['is_slave']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        socket = connect.get('socket')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')
        auth_file = connect.get('auth_file')

        if not (auth_file or ((db_host or socket) or db_user or db_password)):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill in the required fields",
                config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('mysql')

        is_all_flag = False

        if 'all' in target_list:
            is_all_flag = True

        connection_1, str_auth = get_connection(db_host, db_port, db_user,
                                                db_password, auth_file, socket,
                                                job_name)
        if connection_1 is None:
            continue

        cur_1 = connection_1.cursor()

        if is_all_flag:
            cur_1.execute("SHOW DATABASES")
            target_list = [i[0] for i in cur_1.fetchall()]

        if is_slave:
            try:
                cur_1.execute("STOP SLAVE")
            except MySQLdb.Error as err:
                log_and_mail.writelog('ERROR', f"Can't stop slave: {err}",
                                      config.filelog_fd, job_name)

        connection_1.close()

        for db in target_list:
            if db not in exclude_list:
                backup_full_tmp_path = general_function.get_full_path(
                    full_path_tmp_dir, db, 'sql', gzip, i)

                periodic_backup.remove_old_local_file(storages, db, job_name)

                if is_success_mysqldump(db, extra_keys, str_auth,
                                        backup_full_tmp_path, gzip, job_name):
                    dumped_dbs[db] = {
                        'success': True,
                        'tmp_path': backup_full_tmp_path
                    }
                else:
                    dumped_dbs[db] = {'success': False}

                if deferred_copying_level <= 0 and dumped_dbs[db]['success']:
                    periodic_backup.general_desc_iteration(
                        backup_full_tmp_path, storages, db, job_name,
                        safety_backup)

        if is_slave:
            connection_2, str_auth = get_connection(db_host, db_port, db_user,
                                                    db_password, auth_file,
                                                    socket, job_name)
            if connection_2 is None:
                log_and_mail.writelog(
                    'ERROR', f"Can't start slave: Can't connect to MySQL.",
                    config.filelog_fd, job_name)
                return
            cur_2 = connection_2.cursor()
            try:
                cur_2.execute("START SLAVE")
            except MySQLdb.Error as err:
                log_and_mail.writelog('ERROR', f"Can't start slave: {err} ",
                                      config.filelog_fd, job_name)
            connection_2.close()

        for db, result in dumped_dbs.items():
            if deferred_copying_level == 1 and result['success']:
                periodic_backup.general_desc_iteration(result['tmp_path'],
                                                       storages, db, job_name,
                                                       safety_backup)

    for db, result in dumped_dbs.items():
        if deferred_copying_level >= 2 and result['success']:
            periodic_backup.general_desc_iteration(result['tmp_path'],
                                                   storages, db, job_name,
                                                   safety_backup)

    # After all the manipulations, delete the created temporary directory and
    # data inside the directory with cache davfs, but not the directory itself!
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
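For reference, a hypothetical job_data dictionary for mysql_backup. The per-source keys mirror what the loop above reads; the top-level key names ('job', 'type', 'tmp_dir', ...) follow the other job functions in this collection and are an assumption here, since get_job_parameters itself is not shown. The internal structure of the storage entries is consumed by periodic_backup and is left out.

# Hypothetical job_data for mysql_backup; illustrative values only.
job_data = {
    'job': 'mysql_nightly',
    'type': 'mysql',
    'tmp_dir': '/var/tmp/backup',
    'safety_backup': False,
    'deferred_copying_level': 0,
    'sources': [
        {
            'connect': {
                'db_host': '127.0.0.1',
                'db_port': '',                 # empty -> the default MySQL port is used
                'db_user': 'backup',
                'db_password': 'secret',
                # alternatively: 'socket': '/var/run/mysqld/mysqld.sock'
                # or:            'auth_file': '/root/.my.cnf'
            },
            'target': ['all'],                 # 'all' is expanded via SHOW DATABASES
            'excludes': ['information_schema', 'performance_schema'],
            'gzip': True,
            'is_slave': False,
            'extra_keys': '--single-transaction',
        },
    ],
    'storages': [],                            # storage definitions are handled by periodic_backup
}
# mysql_backup(job_data)  # invoked by the tool's scheduler in the real setup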
Esempio n. 20
0
def create_inc_backup(local_dst_dirname, remote_dir, part_of_dir_path, backup_file_name,
                      target, exclude_list, gzip, job_name, storage, host, share, months_to_store):
    """ The function determines whether to collect a full backup or incremental,
    prepares all the necessary information.

    """
    date_year = general_function.get_time_now('year')
    date_month = general_function.get_time_now('moy')
    date_day = general_function.get_time_now('dom')

    dated_paths = get_dated_paths(local_dst_dirname, part_of_dir_path, date_year, date_month, date_day)

    # Before collecting a new copy, delete the copies for the same month of last year,
    # if any, so as not to keep extra archives
    old_month_dirs = []
    if os.path.isdir(dated_paths['old_year_dir']) or months_to_store < 12:
        if months_to_store < 12:
            int_date_month = int(date_month)
            last_month = int_date_month - months_to_store
            if last_month <= 0:
                m_range = list(range(last_month+12, 13))
                m_range.extend(list(range(1, int_date_month)))
            else:
                m_range = list(range(last_month, int_date_month))
            for i in range(1, 13):
                if i not in m_range:
                    date = str(i).zfill(2)
                    if i < int(date_month):
                        year_to_cleanup = dated_paths['year_dir']
                    else:
                        year_to_cleanup = dated_paths['old_year_dir']
                    old_month_dirs.append(os.path.join(year_to_cleanup, f'month_{date}'))
        else:
            old_month_dirs.append(os.path.join(dated_paths['old_year_dir'], f'month_{date_month}'))
        del_old_inc_file(dated_paths['old_year_dir'], old_month_dirs)

    link_dict = {}  # dict for symlink with pairs like dst: src
    copy_dict = {}  # dict for copy with pairs like dst: src

    # Get the current list of files
    new_meta_info = get_index(target, exclude_list)

    if not os.path.isfile(dated_paths['year_inc_file']):
        # There is no original index file, so check whether a year directory exists
        if os.path.isdir(dated_paths['year_dir']):
            # The directory exists but the index file does not, so something went wrong;
            # delete the directory with all the data inside, because without the index file
            # it is impossible to continue collecting incremental copies
            general_function.del_file_objects(job_name, dated_paths['year_dir'])
            dirs_for_log = general_function.get_dirs_for_log(dated_paths['year_dir'], remote_dir, storage)
            file_for_log = os.path.join(dirs_for_log, os.path.basename(dated_paths['year_inc_file']))
            log_and_mail.writelog('ERROR',
                                  f"The file {file_for_log} not found, so the directory {dirs_for_log} is cleared. "
                                  f"Incremental backup will be reinitialized ",
                                  config.filelog_fd, job_name)

        # Initialize the incremental backup, i.e. collect a full copy
        remote_dir_for_logs = general_function.get_dirs_for_log(dated_paths['initial_dir'], remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={dated_paths['initial_dir']: remote_dir_for_logs})

        write_meta_info(dated_paths['year_inc_file'], new_meta_info)

        full_backup_path = general_function.get_full_path(dated_paths['initial_dir'],
                                                          backup_file_name,
                                                          'tar',
                                                          gzip)

        general_files_func.create_tar('files', full_backup_path, target,
                                      gzip, 'inc_files', job_name,
                                      remote_dir, storage, host, share)

        daily_dirs_remote = general_function.get_dirs_for_log(dated_paths['daily_dir'], remote_dir, storage)
        month_dirs_remote = general_function.get_dirs_for_log(dated_paths['month_dir'], remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={dated_paths['daily_dir']: daily_dirs_remote,
                                                                    dated_paths['month_dir']: month_dirs_remote})

        if storage == 'local':
            link_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = full_backup_path
            link_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = full_backup_path
        elif storage in ('scp', 'nfs'):
            copy_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = \
                full_backup_path.replace(local_dst_dirname, remote_dir)
            copy_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            link_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = \
                full_backup_path.replace(local_dst_dirname, remote_dir)
        else:
            copy_dict[dated_paths['month_inc_file']] = dated_paths['year_inc_file']
            copy_dict[os.path.join(dated_paths['month_dir'], os.path.basename(full_backup_path))] = full_backup_path
            copy_dict[dated_paths['daily_inc_file']] = dated_paths['year_inc_file']
            copy_dict[os.path.join(dated_paths['daily_dir'], os.path.basename(full_backup_path))] = full_backup_path

    else:
        symlink_dir = ''
        meta_path = ''
        if int(date_day) == 1:
            meta_path = dated_paths['month_inc_file']
            old_meta_path = dated_paths['year_inc_file']
            general_inc_backup_dir = dated_paths['month_dir']
            symlink_dir = dated_paths['daily_dir']
        elif int(date_day) == 11 or int(date_day) == 21:
            meta_path = dated_paths['daily_inc_file']
            old_meta_path = dated_paths['month_inc_file']
            general_inc_backup_dir = dated_paths['daily_dir']
        else:
            old_meta_path = dated_paths['daily_inc_file']
            general_inc_backup_dir = dated_paths['daily_dir']

        try:
            old_meta_info = specific_function.parser_json(old_meta_path)
        except general_function.MyError as e:
            log_and_mail.writelog('ERROR',
                                  f"Couldn't open old meta info file '{old_meta_path}': {e}!",
                                  config.filelog_fd, job_name)
            return 2

        general_dirs_for_log = general_function.get_dirs_for_log(general_inc_backup_dir, remote_dir, storage)
        general_function.create_dirs(job_name=job_name, dirs_pairs={general_inc_backup_dir: general_dirs_for_log})
        if meta_path:
            write_meta_info(meta_path, new_meta_info)

        # Calculate the difference between the old and new file states
        diff_json = compute_diff(new_meta_info, old_meta_info)

        # Define the list of files that need to be included in the archive
        target_change_list = diff_json['modify']

        dict_directory = get_dict_directory(target, diff_json)

        inc_backup_path = general_function.get_full_path(general_inc_backup_dir, backup_file_name, 'tar', gzip)
        create_inc_tar(
            inc_backup_path, remote_dir, dict_directory, target_change_list, gzip, job_name, storage, host, share
        )

        if symlink_dir:
            symlink_dirs_for_log = general_function.get_dirs_for_log(symlink_dir, remote_dir, storage)
            general_function.create_dirs(job_name=job_name, dirs_pairs={symlink_dir: symlink_dirs_for_log})
            if storage == 'local':
                link_dict[dated_paths['daily_inc_file']] = dated_paths['month_inc_file']
            elif storage in ('scp', 'nfs'):
                copy_dict[dated_paths['daily_inc_file'].replace(local_dst_dirname, remote_dir)] = \
                    dated_paths['month_inc_file'].replace(local_dst_dirname, remote_dir)
            else:
                copy_dict[dated_paths['daily_inc_file']] = dated_paths['month_inc_file']

    create_links_and_copies(link_dict, copy_dict, job_name)
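The month-range arithmetic at the top of create_inc_backup is the trickiest part of the function. The helper below is a hypothetical, standalone restatement of that logic, not part of the tool, assuming the same two directories ('year_dir' for the current year, 'old_year_dir' for the previous one); it lets you check directly which month_XX directories are scheduled for deletion.

# Standalone restatement of the months_to_store cleanup arithmetic.
def months_to_delete(current_month: int, months_to_store: int):
    last_month = current_month - months_to_store
    if last_month <= 0:
        keep = set(range(last_month + 12, 13)) | set(range(1, current_month))
    else:
        keep = set(range(last_month, current_month))
    doomed = []
    for m in range(1, 13):
        if m in keep:
            continue
        where = 'year_dir' if m < current_month else 'old_year_dir'
        doomed.append((where, f'month_{m:02d}'))
    return doomed

# With months_to_store=3 in May, Feb-Apr of the current year are kept; January of
# this year and May-December of last year are scheduled for deletion:
print(months_to_delete(5, 3))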
Esempio n. 21
0
def control_old_files(full_dir_path,
                      store_backup_count,
                      storage,
                      job_name,
                      host='',
                      full_path_for_log='',
                      share=''):

    dow = general_function.get_time_now("dow")
    dom = general_function.get_time_now("dom")

    files_grabbed_list = []

    for extension in config.backup_extenstion:
        full_glob_path = os.path.join(full_dir_path, extension)
        files_grabbed_list.extend(glob.glob(full_glob_path))

    count_file = len(files_grabbed_list)
    time_period = os.path.split(full_dir_path)[1]

    if int(store_backup_count):
        delta_count_file = int(count_file) - int(store_backup_count)

        if ((time_period == 'weekly' and dow != config.dow_backup)
                or (time_period == 'monthly' and dom != config.dom_backup)):
            result_delete_count = delta_count_file
        else:
            result_delete_count = delta_count_file + 1

        if result_delete_count < 1:
            return 1

        try:
            delete_oldest_files(files_grabbed_list, result_delete_count,
                                job_name)
        except general_function.MyError as err:
            if storage == 'local':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' on '%s' storage:%s"
                    % (time_period, full_dir_path, storage, err),
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' in '%s' share on '%s' storage(%s):%s"
                    % (time_period, full_path_for_log, share, storage, host,
                       err), config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' on '%s' storage(%s):%s"
                    % (time_period, full_path_for_log, storage, host, err),
                    config.filelog_fd, job_name)
        else:
            if storage == 'local':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files  in directory '%s' on '%s' storage."
                    % (time_period, full_dir_path, storage), config.filelog_fd,
                    job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' in '%s' share on '%s' storage(%s)."
                    % (time_period, full_path_for_log, share, storage, host),
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' on '%s' storage(%s)."
                    % (time_period, full_path_for_log, storage, host),
                    config.filelog_fd, job_name)
    else:
        try:
            for i in files_grabbed_list:
                general_function.del_file_objects(job_name, i)
        except general_function.MyError as err:
            if storage == 'local':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' on '%s' storage:%s"
                    % (time_period, full_dir_path, storage, err),
                    config.filelog_fd, job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' in '%s' share on '%s' storage(%s):%s"
                    % (time_period, full_path_for_log, share, storage, host,
                       err), config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'ERROR',
                    "Can't delete old '%s' files in directory '%s' on '%s' storage(%s):%s"
                    % (time_period, full_path_for_log, storage, host, err),
                    config.filelog_fd, job_name)
        else:
            if storage == 'local':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' on '%s' storage."
                    % (time_period, full_dir_path, storage), config.filelog_fd,
                    job_name)
            elif storage == 'smb':
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' in '%s' share on '%s' storage(%s)."
                    % (time_period, full_path_for_log, share, storage, host),
                    config.filelog_fd, job_name)
            else:
                log_and_mail.writelog(
                    'INFO',
                    "Successfully deleted old '%s' files in directory '%s' on '%s' storage(%s)."
                    % (time_period, full_path_for_log, storage, host),
                    config.filelog_fd, job_name)
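The rotation count in control_old_files is easier to reason about in isolation. The hypothetical helper below restates the arithmetic: how many of the already collected files must go so that at most store_backup_count remain, with one extra slot freed on the day a new weekly or monthly copy is about to be collected; the caller above skips deletion when the result is less than 1.

# Standalone restatement of the rotation arithmetic in control_old_files.
def files_to_delete(existing_count: int, store_backup_count: int, is_collection_day: bool) -> int:
    delta = existing_count - store_backup_count
    return delta + 1 if is_collection_day else delta

print(files_to_delete(4, 4, True))    # 1 -> the oldest copy is rotated out today
print(files_to_delete(4, 4, False))   # 0 -> nothing is deleted (results < 1 are skipped)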
Esempio n. 22
0
def desc_files_backup(job_data):
    ''' Creates a desc backup of directories.
    Receives a dictionary with the job data as its only argument.

    '''

    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        tmp_dir = job_data['tmp_dir']
        sources = job_data['sources']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                              config.filelog_fd, job_name)
        return 1

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        exclude_list = sources[i].get('excludes', '')
        try:
            target_list = sources[i]['target']
            gzip = sources[i]['gzip']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        # The exclude list is kept in a global variable because of how the `filter`
        # argument of the tarfile `add()` method is invoked
        general_files_func.EXCLUDE_FILES = general_files_func.get_exclude_ofs(target_list,
                                                                              exclude_list)

        # The backup name is derived from the particular glob pattern from
        # the list `target_list`
        for regex in target_list:
            target_ofs_list = general_files_func.get_ofs(regex)

            if not target_ofs_list:
                log_and_mail.writelog('ERROR', "No file system objects found that" +\
                                      f"match the regular expression '{regex}'!",
                                      config.filelog_fd, job_name)
                continue

            for ofs in target_ofs_list:
                # Create a backup only if the object is not in the exclude list,
                # so as not to generate empty backups
                if not general_files_func.is_excluded_ofs(ofs):
                    # Returns the name of the backup derived from the glob pattern,
                    # WITHOUT EXTENSION AND DATE
                    backup_file_name = general_files_func.get_name_files_backup(regex, ofs)
                    # Get the part of the backup storage path for this archive relative to
                    # the backup dir
                    part_of_dir_path = backup_file_name.replace('___', '/')

                    backup_full_tmp_path = general_function.get_full_path(full_path_tmp_dir,
                                                                          backup_file_name,
                                                                          'tar',
                                                                          gzip)

                    periodic_backup.remove_old_local_file(storages, part_of_dir_path, job_name)

                    if general_files_func.create_tar('files', backup_full_tmp_path, ofs,
                                                     gzip, backup_type, job_name):
                        # If the archive was collected in the temporary directory successfully,
                        # transfer the data to the specified storages
                        periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                               storages, part_of_dir_path,
                                                               job_name)
                else:
                    continue

    # After all the manipulations, delete the created temporary directory and
    # data inside the directory with cache davfs, but not the directory itself!
    general_function.del_file_objects(backup_type,
                                      full_path_tmp_dir, '/var/cache/davfs2/*')
Esempio n. 23
0
def postgresql_basebackup(job_data):
    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        tmp_dir = job_data['tmp_dir']
        sources = job_data['sources']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', "Missing required key:'%s'!" %(e), config.filelog_fd, job_name)
        return 1

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', "Missing required key:'%s'!" %(e), config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_user and db_host and db_password):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill in the required fields", 
                                  config.filelog_fd, job_name) 
            continue

        if not db_port:
            db_port = general_function.get_default_port('postgresql')

        try:
            connection = psycopg2.connect(dbname="postgres", user=db_user, password=db_password, host=db_host, port=db_port)
        except psycopg2.Error as err:
            log_and_mail.writelog('ERROR', "Can't connect to PostgreSQL instances with with following data host='%s', port='%s', user='******', passwd='%s':%s" %(db_host, db_port, db_user, db_password, err),
                                  config.filelog_fd, job_name)
            continue
        else:
            connection.close()

        backup_full_tmp_path = general_function.get_full_path(full_path_tmp_dir,
                                                              'postgresq_hot',
                                                              'tar',
                                                              gzip)

        periodic_backup.remove_old_local_file(storages, '', job_name)

        str_auth = ' --dbname=postgresql://%s:%s@%s:%s/ ' %(db_user, db_password, db_host, db_port)

        if is_success_pgbasebackup(extra_keys, str_auth, backup_full_tmp_path, gzip, job_name):
            periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                   storages, '',
                                                   job_name)

    # After all the manipulations, delete the created temporary directory and
    # data inside the directory with cache davfs, but not the directory itself!
    general_function.del_file_objects(backup_type,
                                      full_path_tmp_dir, '/var/cache/davfs2/*')
Esempio n. 24
0
def redis_backup(job_data):
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!", config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_password = connect.get('db_password')
        socket = connect.get('socket')

        if not (db_host or socket):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill in the required fields",
                                  config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('redis')

        try:
            # Constructing the client does not open a connection by itself,
            # so ping() is called to actually verify that the instance is reachable
            if db_host:
                if db_password:
                    redis.StrictRedis(host=db_host, port=db_port, password=db_password).ping()
                    str_auth = f" -h {db_host} -p {db_port} -a '{db_password}' "
                else:
                    redis.StrictRedis(host=db_host, port=db_port).ping()
                    str_auth = f" -h {db_host} -p {db_port} "
            else:
                if db_password:
                    redis.StrictRedis(unix_socket_path=socket, password=db_password).ping()
                    str_auth = f" -s {socket} -a '{db_password}' "
                else:
                    redis.StrictRedis(unix_socket_path=socket).ping()
                    str_auth = f" -s {socket} "
        except (redis.exceptions.ConnectionError, ConnectionRefusedError) as err:
            log_and_mail.writelog('ERROR',
                                  f"Can't connect to Redis instances with with following data host='{db_host}', "
                                  f"port='{db_port}', passwd='{db_password}', socket='{socket}': {err}",
                                  config.filelog_fd, job_name)
            continue
        else:
            backup_full_tmp_path = general_function.get_full_path(
                full_path_tmp_dir,
                'redis',
                'rdb',
                gzip)
            periodic_backup.remove_old_local_file(storages, '', job_name)

            if is_success_bgsave(str_auth, backup_full_tmp_path, gzip, job_name):
                periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                       storages, '',
                                                       job_name, safety_backup)

    # After all the manipulations, delete the created temporary directory and
    # data inside the directory with cache davfs, but not the directory itself!
    general_function.del_file_objects(backup_type,
                                      full_path_tmp_dir, '/var/cache/davfs2/*')
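The host/socket/password branching in redis_backup boils down to building a single argument string for redis-cli. The helper below is a hypothetical, condensed restatement of that logic for illustration only; it is not part of the tool's API, and the default port value is an assumption standing in for general_function.get_default_port('redis').

# Condensed restatement of the redis-cli auth string construction above.
def build_redis_cli_auth(db_host=None, db_port=6379, db_password=None, socket=None) -> str:
    if db_host:
        auth = f" -h {db_host} -p {db_port} "
    elif socket:
        auth = f" -s {socket} "
    else:
        raise ValueError("either db_host or socket is required")
    if db_password:
        auth += f"-a '{db_password}' "
    return auth

print(build_redis_cli_auth(db_host='127.0.0.1', db_password='secret'))
# ->  -h 127.0.0.1 -p 6379 -a 'secret'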
Esempio n. 25
0
def postgresql_backup(job_data):
    is_prams_read, job_name, options = general_function.get_job_parameters(
        job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(options['tmp_dir'],
                                                     options['backup_type'])

    dumped_dbs = {}
    for i in range(len(options['sources'])):
        exclude_list = options['sources'][i].get('excludes', [])
        try:
            connect = options['sources'][i]['connect']
            target_list = options['sources'][i]['target']
            gzip = options['sources'][i]['gzip']
            extra_keys = options['sources'][i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_user or db_host or db_password):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill in the required fields",
                config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('postgresql')

        is_all_flag = False

        if 'all' in target_list:
            is_all_flag = True

        if is_all_flag:
            try:
                connection = psycopg2.connect(dbname="postgres",
                                              user=db_user,
                                              password=db_password,
                                              host=db_host,
                                              port=db_port)
            except psycopg2.Error as err:
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't connect to PostgreSQL instances with with following data host='{db_host}', "
                    f"port='{db_port}', user='******', passwd='{db_password}':{err}",
                    config.filelog_fd, job_name)
                continue

            cur = connection.cursor()
            cur.execute("select datname from pg_database;")
            target_list = [i[0] for i in cur.fetchall()]
            connection.close()

        for db in target_list:
            if db not in exclude_list:
                backup_full_tmp_path = general_function.get_full_path(
                    full_path_tmp_dir, db, 'pgdump.sql', gzip, i)

                periodic_backup.remove_local_file(options['storages'], db,
                                                  job_name)

                str_auth = f' --dbname=postgresql://{db_user}:{db_password}@{db_host}:{db_port}/{db} '

                if is_success_pgdump(db, extra_keys, str_auth,
                                     backup_full_tmp_path, gzip, job_name):
                    dumped_dbs[db] = {
                        'success': True,
                        'tmp_path': backup_full_tmp_path
                    }
                else:
                    dumped_dbs[db] = {'success': False}

                if options['deferred_copying_level'] <= 0 and dumped_dbs[db][
                        'success']:
                    periodic_backup.general_desc_iteration(
                        backup_full_tmp_path, options['storages'], db,
                        job_name, options['safety_backup'])
        for db, result in dumped_dbs.items():
            if options['deferred_copying_level'] == 1 and result['success']:
                periodic_backup.general_desc_iteration(
                    result['tmp_path'], options['storages'], db, job_name,
                    options['safety_backup'])

    for db, result in dumped_dbs.items():
        if options['deferred_copying_level'] >= 2 and result['success']:
            periodic_backup.general_desc_iteration(result['tmp_path'],
                                                   options['storages'], db,
                                                   job_name,
                                                   options['safety_backup'])

    # After all the manipulations, delete the created temporary directory and
    # data inside the directory with cache davfs, but not the directory itself!
    general_function.del_file_objects(options['backup_type'],
                                      full_path_tmp_dir, '/var/cache/davfs2/*')
Esempio n. 26
0
def postgresql_backup(job_data):
    job_name = 'undefined'
    try:
        job_name = job_data['job']
        backup_type = job_data['type']
        tmp_dir = job_data['tmp_dir']
        sources = job_data['sources']
        storages = job_data['storages']
    except KeyError as e:
        log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                              config.filelog_fd, job_name)
        return 1

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        exclude_list = sources[i].get('excludes', [])
        try:
            connect = sources[i]['connect']
            target_list = sources[i]['target']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_user and db_host and db_password):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill in the required fields",
                config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('postgresql')

        is_all_flag = False

        if 'all' in target_list:
            is_all_flag = True

        if is_all_flag:
            try:
                connection = psycopg2.connect(dbname="postgres",
                                              user=db_user,
                                              password=db_password,
                                              host=db_host,
                                              port=db_port)
            except psycopg2.Error as err:
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't connect to PostgreSQL instances with with following data host='{db_host}', port='{db_port}', user='******', passwd='{db_password}':{err}",
                    config.filelog_fd, job_name)
                continue

            cur = connection.cursor()
            cur.execute("select datname from pg_database;")
            target_list = [i[0] for i in cur.fetchall()]
            connection.close()

        for db in target_list:
            if db not in exclude_list:
                backup_full_tmp_path = general_function.get_full_path(
                    full_path_tmp_dir, db, 'pgdump', gzip)

                periodic_backup.remove_old_local_file(storages, db, job_name)

                str_auth = f' --dbname=postgresql://{db_user}:{db_password}@{db_host}:{db_port}/{db} '

                if is_success_pgdump(db, extra_keys, str_auth,
                                     backup_full_tmp_path, gzip, job_name):
                    periodic_backup.general_desc_iteration(
                        backup_full_tmp_path, storages, db, job_name)

    # After all the manipulations, delete the created temporary directory and
    # data inside the directory with cache davfs, but not the directory itself!
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
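For comparison with the MySQL job above, a hypothetical job_data for this postgresql_backup variant, again limited to the keys the function visibly reads: 'all' in target is expanded from pg_database and names listed in excludes are then skipped. The real configuration comes from the tool's own parser, and the storage entries are consumed by periodic_backup, so their structure is not shown.

# Hypothetical job_data for the postgresql_backup variant above; illustrative values only.
job_data = {
    'job': 'pgsql_nightly',
    'type': 'postgresql',
    'tmp_dir': '/var/tmp/backup',
    'sources': [
        {
            'connect': {
                'db_host': 'localhost',
                'db_port': '',                 # empty -> the default PostgreSQL port is used
                'db_user': 'backup',
                'db_password': 'secret',
            },
            'target': ['all'],                 # expanded via "select datname from pg_database"
            'excludes': ['template0', 'template1'],
            'gzip': True,
            'extra_keys': '',
        },
    ],
    'storages': [],                            # storage definitions are handled by periodic_backup
}
# postgresql_backup(job_data)  # invoked by the tool's scheduler in the real setup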