Example #1
def mysql_xtrabackup(job_data):
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!", config.filelog_fd, job_name)
            continue

        db_user = connect.get('db_user')
        db_password = connect.get('db_password')
        path_to_conf = connect.get('path_to_conf')

        if not (path_to_conf and db_user and db_password):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill the required fields",
                                  config.filelog_fd, job_name)
            continue

        if not os.path.isfile(path_to_conf):
            log_and_mail.writelog('ERROR', f"Configuration file '{path_to_conf}' not found!",
                                  config.filelog_fd, job_name)
            continue

        str_auth = f'--defaults-file={path_to_conf} --user={db_user} --password={db_password}'

        backup_full_tmp_path = general_function.get_full_path(full_path_tmp_dir, 'xtrabackup', 'tar', gzip)

        periodic_backup.remove_old_local_file(storages, '', job_name)

        if is_success_mysql_xtrabackup(extra_keys, str_auth, backup_full_tmp_path, gzip, job_name):
            periodic_backup.general_desc_iteration(backup_full_tmp_path, storages, '', job_name, safety_backup)

    # After all operations, delete the created temporary directory and the
    # data inside the davfs cache directory, but not the cache directory itself.
    general_function.del_file_objects(backup_type, full_path_tmp_dir, '/var/cache/davfs2/*')
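The loop above reads only three keys from each `sources` entry. A hypothetical single entry, with field names taken from the keys the example accesses (all values are placeholders):

source_entry = {
    'connect': {
        'db_user': 'backup',                  # placeholder credentials
        'db_password': 'secret',
        'path_to_conf': '/etc/mysql/my.cnf',  # must be an existing file
    },
    'gzip': True,
    'extra_keys': '',
}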
Example #2
def mongodb_backup(job_data):
    """ Function, creates a mongodb backup.
    At the entrance receives a dictionary with the data of the job.

    """
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    dumped_collections = {}
    for i in range(len(sources)):
        exclude_dbs_list = sources[i].get('exclude_dbs', [])
        exclude_collections_list = sources[i].get('exclude_collections', [])
        try:
            connect = sources[i]['connect']
            target_db_list = sources[i]['target_dbs']
            target_collection_list = sources[i]['target_collections']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!", config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_host and not (bool(db_user) ^ bool(db_password))):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill in the required fields",
                                  config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('mongodb')

        is_all_flag_db = is_all_flag_collection = False

        if 'all' in target_db_list:
            is_all_flag_db = True

        if 'all' in target_collection_list:
            is_all_flag_collection = True

        if db_user:
            uri = f"mongodb://{db_user}:{db_password}@{db_host}:{db_port}/"  # for pymongo
            str_auth = f" --host {db_host} --port {db_port} --username {db_user} --password {db_password} "
        else:
            uri = f"mongodb://{db_host}:{db_port}/"
            str_auth = f" --host {db_host} --port {db_port} "

        client = None
        if is_all_flag_db:
            try:
                client = pymongo.MongoClient(uri)
                target_db_list = client.list_database_names()
            except PyMongoError as err:
                log_and_mail.writelog('ERROR',
                                      f"Can't connect to MongoDB instances with the following data host='{db_host}', "
                                      f"port='{db_port}', user='******', passwd='{db_password}':{err}",
                                      config.filelog_fd, job_name)
                continue
            finally:
                if client:
                    client.close()

        for db in target_db_list:
            if db not in exclude_dbs_list:
                try:
                    client = pymongo.MongoClient(uri)
                    current_db = client[db]
                    collection_list = current_db.collection_names()
                except PyMongoError as err:
                    log_and_mail.writelog(
                        'ERROR',
                        f"Can't connect to MongoDB instances with the following data host='{db_host}', "
                        f"port='{db_port}', user='******', passwd='{db_password}':{err}", config.filelog_fd,
                        job_name)
                    continue
                finally:
                    if client:
                        client.close()

                if is_all_flag_collection:
                    target_collection_list = collection_list

                for collection in target_collection_list:
                    if collection not in exclude_collections_list and collection in collection_list:
                        str_auth_finally = f"{str_auth} --collection {collection} "

                        backup_full_tmp_path = general_function.get_full_path(
                            full_path_tmp_dir,
                            collection,
                            'mongodump',
                            gzip,
                            f'{i}-{db}-')

                        part_of_dir_path = os.path.join(db, collection)
                        periodic_backup.remove_old_local_file(storages, part_of_dir_path, job_name)

                        if is_success_mongodump(collection, db, extra_keys, str_auth_finally, backup_full_tmp_path,
                                                gzip, job_name):
                            dumped_collections[collection] = {'success': True,
                                                              'tmp_path': backup_full_tmp_path,
                                                              'part_of_dir_path': part_of_dir_path}
                        else:
                            dumped_collections[collection] = {'success': False}

                        if deferred_copying_level <= 0 and dumped_collections[collection]['success']:
                            periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                                   storages, part_of_dir_path,
                                                                   job_name, safety_backup)

                for collection, result in dumped_collections.items():
                    if deferred_copying_level == 1 and result['success']:
                        periodic_backup.general_desc_iteration(result['tmp_path'], storages,
                                                               result['part_of_dir_path'], job_name, safety_backup)

        for collection, result in dumped_collections.items():
            if deferred_copying_level == 2 and result['success']:
                periodic_backup.general_desc_iteration(result['tmp_path'], storages,
                                                       result['part_of_dir_path'], job_name, safety_backup)

    for collection, result in dumped_collections.items():
        if deferred_copying_level >= 3 and result['success']:
            periodic_backup.general_desc_iteration(result['tmp_path'], storages,
                                                   result['part_of_dir_path'], job_name, safety_backup)

    # After all operations, delete the created temporary directory and the
    # data inside the davfs cache directory, but not the cache directory itself.
    general_function.del_file_objects(backup_type, full_path_tmp_dir, '/var/cache/davfs2/*')
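When 'all' is requested, the example enumerates databases and collections through pymongo. A minimal standalone sketch of that enumeration step with placeholder connection details; `list_collection_names()` is the non-deprecated counterpart of the `collection_names()` call used above:

import pymongo

exclude_dbs = {'admin', 'local'}
client = pymongo.MongoClient('mongodb://127.0.0.1:27017/')
try:
    for db_name in client.list_database_names():
        if db_name in exclude_dbs:
            continue
        # One round trip per database, as in the example above.
        print(db_name, client[db_name].list_collection_names())
finally:
    client.close()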
Example #3
def postgresql_basebackup(job_data):
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_user or db_host or db_password):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill in the required fields",
                config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('postgresql')

        try:
            connection = psycopg2.connect(dbname="postgres",
                                          user=db_user,
                                          password=db_password,
                                          host=db_host,
                                          port=db_port)
        except psycopg2.Error as err:
            log_and_mail.writelog(
                'ERROR',
                f"Can't connect to PostgreSQL instances with with following data host='{db_host}', "
                f"port='{db_port}', user='******', passwd='{db_password}':{err}",
                config.filelog_fd, job_name)
            continue
        else:
            connection.close()

        backup_full_tmp_path = general_function.get_full_path(
            full_path_tmp_dir, 'postgresql_hot', 'tar', gzip, i)

        periodic_backup.remove_old_local_file(storages, '', job_name)

        str_auth = f' --dbname=postgresql://{db_user}:{db_password}@{db_host}:{db_port}/ '

        if is_success_pgbasebackup(extra_keys, str_auth, backup_full_tmp_path,
                                   gzip, job_name):
            periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                   storages, '', job_name,
                                                   safety_backup)

    # After all operations, delete the created temporary directory and the
    # data inside the davfs cache directory, but not the cache directory itself.
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
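The `str_auth` built above is a libpq connection URI passed via `--dbname`, which `pg_basebackup` accepts. A hedged sketch of how the final command might be assembled; the real flags live in `is_success_pgbasebackup`, which is not shown in this listing, so everything after `str_auth` here is an assumption:

import subprocess

# Placeholders standing in for the values built in the example above.
str_auth = ' --dbname=postgresql://backup:secret@127.0.0.1:5432/ '
extra_keys = ''

# Illustrative flags only: stream a tar-format base backup to stdout.
cmd = f"pg_basebackup {extra_keys} {str_auth} --format=tar --pgdata=-"
result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
print(result.returncode == 0)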
Example #4
def redis_backup(job_data):
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    for i in range(len(sources)):
        try:
            connect = sources[i]['connect']
            gzip = sources[i]['gzip']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!", config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_password = connect.get('db_password')
        socket = connect.get('socket')

        if not (db_host or socket):
            log_and_mail.writelog('ERROR', "Can't find the authentication data, please fill in the required fields",
                                  config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('redis')

        try:
            if db_host:
                if db_password:
                    redis.StrictRedis(host=db_host, port=db_port, password=db_password)
                    str_auth = f" -h {db_host} -p {db_port} -a '{db_password}' "
                else:
                    redis.StrictRedis(host=db_host, port=db_port)
                    str_auth = f" -h {db_host} -p {db_port} "
            else:
                if db_password:
                    redis.StrictRedis(unix_socket_path=socket, password=db_password)
                    str_auth = f" -s {socket} -a '{db_password}' "
                else:
                    redis.StrictRedis(unix_socket_path=socket)
                    str_auth = f" -s {socket} "
        except (redis.exceptions.ConnectionError, ConnectionRefusedError) as err:
            log_and_mail.writelog('ERROR',
                                  f"Can't connect to Redis instances with with following data host='{db_host}', "
                                  f"port='{db_port}', passwd='{db_password}', socket='{socket}': {err}",
                                  config.filelog_fd, job_name)
            continue
        else:
            backup_full_tmp_path = general_function.get_full_path(
                full_path_tmp_dir,
                'redis',
                'rdb',
                gzip)
            periodic_backup.remove_old_local_file(storages, '', job_name)

            if is_success_bgsave(str_auth, backup_full_tmp_path, gzip, job_name):
                periodic_backup.general_desc_iteration(backup_full_tmp_path,
                                                       storages, '',
                                                       job_name, safety_backup)

    # After all operations, delete the created temporary directory and the
    # data inside the davfs cache directory, but not the cache directory itself.
    general_function.del_file_objects(backup_type,
                                      full_path_tmp_dir, '/var/cache/davfs2/*')
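Note that constructing `redis.StrictRedis(...)` does not open a connection by itself, so the try/except above may never see a `ConnectionError` until a command is issued. A minimal reachability check, assuming placeholder connection details:

import redis

client = redis.StrictRedis(host='127.0.0.1', port=6379, password=None)
try:
    client.ping()  # forces an actual round trip to the server
except redis.exceptions.ConnectionError as err:
    print(f"Redis is unreachable: {err}")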
Example #5
def inc_files_backup(job_data):
    """ The function collects an incremental backup for the specified partition.

    """

    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    for i in range(len(sources)):
        target_list = sources[i]['target']
        exclude_list = sources[i].get('excludes', '')
        gzip = sources[i]['gzip']

        # Keep the exclusion list in a global variable because of how the
        # `filter` argument of the `add` method of the `tarfile` class works
        general_files_func.EXCLUDE_FILES = general_files_func.get_exclude_ofs(target_list,
                                                                              exclude_list)

        # The backup name is derived from the particular glob pattern taken from
        # the `target_list` list
        for regex in target_list:
            target_ofs_list = general_files_func.get_ofs(regex)

            for ofs in target_ofs_list:
                if not general_files_func.is_excluded_ofs(ofs):
                    # Create a backup only if the directory is not in the exception list
                    # so as not to generate empty backups

                    # Get the backup name derived from the pattern,
                    # WITHOUT EXTENSION AND DATE
                    backup_file_name = general_files_func.get_name_files_backup(regex, ofs)

                    # Get the part of the backup storage path for this archive relative to
                    # the backup dir
                    part_of_dir_path = backup_file_name.replace('___', '/')

                    for j in range(len(storages)):
                        if specific_function.is_save_to_storage(job_name, storages[j]):
                            try:
                                current_storage_data = mount_fuse.get_storage_data(job_name,
                                                                                   storages[j])
                            except general_function.MyError as err:
                                log_and_mail.writelog('ERROR', f'{err}',
                                                      config.filelog_fd, job_name)
                                continue
                            else:
                                storage = current_storage_data['storage']
                                backup_dir = current_storage_data['backup_dir']
                                # If the storage is active, mount it
                                try:
                                    mount_fuse.mount(current_storage_data)
                                except general_function.MyError as err:
                                    log_and_mail.writelog('ERROR', f"Can't mount remote '{storage}' storage :{err}",
                                                          config.filelog_fd, job_name)
                                    continue
                                else:
                                    remote_dir = ''  # Only for logging
                                    if storage != 'local':
                                        local_dst_dirname = mount_fuse.mount_point
                                        remote_dir = backup_dir
                                        if storage != 's3':
                                            host = current_storage_data['host']
                                        else:
                                            host = ''
                                        share = current_storage_data.get('share')
                                    else:
                                        host = ''
                                        share = ''
                                        local_dst_dirname = backup_dir

                                    if storage not in ('local', 'scp', 'nfs'):
                                        local_dst_dirname = os.path.join(local_dst_dirname, backup_dir.lstrip('/'))

                                    create_inc_file(local_dst_dirname, remote_dir, part_of_dir_path, backup_file_name,
                                                    ofs, exclude_list, gzip, job_name, storage, host, share)

                                    try:
                                        mount_fuse.unmount()
                                    except general_function.MyError as err:
                                        log_and_mail.writelog('ERROR',
                                                              f"Can't umount remote '{storage}' storage :{err}",
                                                              config.filelog_fd, job_name)
                                        continue
                        else:
                            continue
                else:
                    continue
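The comments above refer to the `filter` argument of `tarfile`'s `add` method, which is what `general_files_func.EXCLUDE_FILES` feeds into. A minimal sketch of that mechanism with placeholder paths; returning None from the filter drops the member from the archive:

import tarfile

EXCLUDE_NAMES = {'cache', 'tmp'}

def exclude_filter(tarinfo):
    # Member names are archive-relative; drop anything whose path contains
    # an excluded component.
    if any(part in EXCLUDE_NAMES for part in tarinfo.name.split('/')):
        return None
    return tarinfo

with tarfile.open('/tmp/example.tar.gz', 'w:gz') as tar:
    tar.add('/var/www', filter=exclude_filter)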
Example #6
def desc_files_backup(job_data):
    """ Function, creates a desc backup of directories.
    At the entrance receives a dictionary with the data of the job.

    """
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    dumped_ofs = {}
    for i in range(len(sources)):
        exclude_list = sources[i].get('excludes', '')
        try:
            target_list = sources[i]['target']
            gzip = sources[i]['gzip']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        # Keep the exclusion list in a global variable because of how the
        # `filter` argument of the `add` method of the `tarfile` class works
        general_files_func.EXCLUDE_FILES = general_files_func.get_exclude_ofs(
            target_list, exclude_list)

        # The backup name is derived from the particular glob pattern taken from
        # the `target_list` list
        for regex in target_list:
            target_ofs_list = general_files_func.get_ofs(regex)

            if not target_ofs_list:
                log_and_mail.writelog(
                    'ERROR', "No file system objects found that" +
                    f"match the regular expression '{regex}'!",
                    config.filelog_fd, job_name)
                continue

            for ofs in target_ofs_list:
                # Create a backup only if the directory is not in the exception list
                # so as not to generate empty backups
                if not general_files_func.is_excluded_ofs(ofs):
                    # Get the backup name derived from the pattern,
                    # WITHOUT EXTENSION AND DATE
                    backup_file_name = general_files_func.get_name_files_backup(
                        regex, ofs)
                    # Get the part of the backup storage path for this archive relative to
                    # the backup dir
                    part_of_dir_path = backup_file_name.replace('___', '/')

                    backup_full_tmp_path = general_function.get_full_path(
                        full_path_tmp_dir, backup_file_name, 'tar', gzip)

                    periodic_backup.remove_old_local_file(
                        storages, part_of_dir_path, job_name)

                    if general_files_func.create_tar('files',
                                                     backup_full_tmp_path, ofs,
                                                     gzip, backup_type,
                                                     job_name):
                        dumped_ofs[ofs] = {
                            'success': True,
                            'tmp_path': backup_full_tmp_path,
                            'part_of_dir_path': part_of_dir_path
                        }
                    else:
                        dumped_ofs[ofs] = {'success': False}

                    if deferred_copying_level <= 0 and dumped_ofs[ofs][
                            'success']:
                        periodic_backup.general_desc_iteration(
                            backup_full_tmp_path, storages, part_of_dir_path,
                            job_name, safety_backup)
                else:
                    continue

            for ofs, result in dumped_ofs.items():
                if deferred_copying_level == 1 and result['success']:
                    periodic_backup.general_desc_iteration(
                        result['tmp_path'], storages,
                        result['part_of_dir_path'], job_name, safety_backup)

        for ofs, result in dumped_ofs.items():
            if deferred_copying_level == 2 and result['success']:
                periodic_backup.general_desc_iteration(
                    result['tmp_path'], storages, result['part_of_dir_path'],
                    job_name, safety_backup)

    for ofs, result in dumped_ofs.items():
        if deferred_copying_level >= 3 and result['success']:
            periodic_backup.general_desc_iteration(result['tmp_path'],
                                                   storages,
                                                   result['part_of_dir_path'],
                                                   job_name, safety_backup)

    # After all operations, delete the created temporary directory and the
    # data inside the davfs cache directory, but not the cache directory itself.
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
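Each entry of `target_list` is treated as a glob pattern and expanded to concrete filesystem objects by `general_files_func.get_ofs` (not shown in this listing). A minimal sketch of that expansion step, assuming standard glob semantics and placeholder patterns:

import glob

target_list = ['/var/www/*', '/etc/nginx/**/*.conf']
for pattern in target_list:
    for path in glob.glob(pattern, recursive=True):
        print(path)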
Example #7
def mysql_backup(job_data):
    is_prams_read, job_name, backup_type, tmp_dir, sources, storages, safety_backup, deferred_copying_level = \
        general_function.get_job_parameters(job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(tmp_dir, backup_type)

    dumped_dbs = {}
    for i in range(len(sources)):
        exclude_list = sources[i].get('excludes', [])
        try:
            connect = sources[i]['connect']
            target_list = sources[i]['target']
            gzip = sources[i]['gzip']
            is_slave = sources[i]['is_slave']
            extra_keys = sources[i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        socket = connect.get('socket')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')
        auth_file = connect.get('auth_file')

        if not (auth_file or ((db_host or socket) or db_user or db_password)):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill in the required fields",
                config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('mysql')

        is_all_flag = False

        if 'all' in target_list:
            is_all_flag = True

        connection_1, str_auth = get_connection(db_host, db_port, db_user,
                                                db_password, auth_file, socket,
                                                job_name)
        if connection_1 is None:
            continue

        cur_1 = connection_1.cursor()

        if is_all_flag:
            cur_1.execute("SHOW DATABASES")
            target_list = [i[0] for i in cur_1.fetchall()]

        if is_slave:
            try:
                cur_1.execute("STOP SLAVE")
            except MySQLdb.Error as err:
                log_and_mail.writelog('ERROR', f"Can't stop slave: {err}",
                                      config.filelog_fd, job_name)

        connection_1.close()

        for db in target_list:
            if db not in exclude_list:
                backup_full_tmp_path = general_function.get_full_path(
                    full_path_tmp_dir, db, 'sql', gzip, i)

                periodic_backup.remove_old_local_file(storages, db, job_name)

                if is_success_mysqldump(db, extra_keys, str_auth,
                                        backup_full_tmp_path, gzip, job_name):
                    dumped_dbs[db] = {
                        'success': True,
                        'tmp_path': backup_full_tmp_path
                    }
                else:
                    dumped_dbs[db] = {'success': False}

                if deferred_copying_level <= 0 and dumped_dbs[db]['success']:
                    periodic_backup.general_desc_iteration(
                        backup_full_tmp_path, storages, db, job_name,
                        safety_backup)

        if is_slave:
            connection_2, str_auth = get_connection(db_host, db_port, db_user,
                                                    db_password, auth_file,
                                                    socket, job_name)
            if connection_2 is None:
                log_and_mail.writelog(
                    'ERROR', f"Can't start slave: Can't connect to MySQL.",
                    config.filelog_fd, job_name)
                return
            cur_2 = connection_2.cursor()
            try:
                cur_2.execute("START SLAVE")
            except MySQLdb.Error as err:
                log_and_mail.writelog('ERROR', f"Can't start slave: {err} ",
                                      config.filelog_fd, job_name)
            connection_2.close()

        for db, result in dumped_dbs.items():
            if deferred_copying_level == 1 and result['success']:
                periodic_backup.general_desc_iteration(result['tmp_path'],
                                                       storages, db, job_name,
                                                       safety_backup)

    for db, result in dumped_dbs.items():
        if deferred_copying_level >= 2 and result['success']:
            periodic_backup.general_desc_iteration(result['tmp_path'],
                                                   storages, db, job_name,
                                                   safety_backup)

    # After all operations, delete the created temporary directory and the
    # data inside the davfs cache directory, but not the cache directory itself.
    general_function.del_file_objects(backup_type, full_path_tmp_dir,
                                      '/var/cache/davfs2/*')
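The `connect` section of each source is read key by key above. A hypothetical `connect` block with the field names this example expects (either `auth_file` or the host/user/password triple can supply credentials); values are placeholders:

connect = {
    'db_host': '127.0.0.1',
    'db_port': '3306',        # falls back to the default MySQL port if empty
    'socket': '',             # alternative to db_host
    'db_user': 'backup',
    'db_password': 'secret',
    'auth_file': '',          # alternative to user/password
}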
Example #8
def postgresql_backup(job_data):
    is_prams_read, job_name, options = general_function.get_job_parameters(
        job_data)
    if not is_prams_read:
        return

    full_path_tmp_dir = general_function.get_tmp_dir(options['tmp_dir'],
                                                     options['backup_type'])

    dumped_dbs = {}
    for i in range(len(options['sources'])):
        exclude_list = options['sources'][i].get('excludes', [])
        try:
            connect = options['sources'][i]['connect']
            target_list = options['sources'][i]['target']
            gzip = options['sources'][i]['gzip']
            extra_keys = options['sources'][i]['extra_keys']
        except KeyError as e:
            log_and_mail.writelog('ERROR', f"Missing required key:'{e}'!",
                                  config.filelog_fd, job_name)
            continue

        db_host = connect.get('db_host')
        db_port = connect.get('db_port')
        db_user = connect.get('db_user')
        db_password = connect.get('db_password')

        if not (db_user or db_host or db_password):
            log_and_mail.writelog(
                'ERROR',
                "Can't find the authentication data, please fill in the required fields",
                config.filelog_fd, job_name)
            continue

        if not db_port:
            db_port = general_function.get_default_port('postgresql')

        is_all_flag = False

        if 'all' in target_list:
            is_all_flag = True

        if is_all_flag:
            try:
                connection = psycopg2.connect(dbname="postgres",
                                              user=db_user,
                                              password=db_password,
                                              host=db_host,
                                              port=db_port)
            except psycopg2.Error as err:
                log_and_mail.writelog(
                    'ERROR',
                    f"Can't connect to PostgreSQL instances with with following data host='{db_host}', "
                    f"port='{db_port}', user='******', passwd='{db_password}':{err}",
                    config.filelog_fd, job_name)
                continue

            cur = connection.cursor()
            cur.execute("select datname from pg_database;")
            target_list = [i[0] for i in cur.fetchall()]
            connection.close()

        for db in target_list:
            if db not in exclude_list:
                backup_full_tmp_path = general_function.get_full_path(
                    full_path_tmp_dir, db, 'pgdump.sql', gzip, i)

                periodic_backup.remove_local_file(options['storages'], db,
                                                  job_name)

                str_auth = f' --dbname=postgresql://{db_user}:{db_password}@{db_host}:{db_port}/{db} '

                if is_success_pgdump(db, extra_keys, str_auth,
                                     backup_full_tmp_path, gzip, job_name):
                    dumped_dbs[db] = {
                        'success': True,
                        'tmp_path': backup_full_tmp_path
                    }
                else:
                    dumped_dbs[db] = {'success': False}

                if options['deferred_copying_level'] <= 0 and dumped_dbs[db][
                        'success']:
                    periodic_backup.general_desc_iteration(
                        backup_full_tmp_path, options['storages'], db,
                        job_name, options['safety_backup'])
        for db, result in dumped_dbs.items():
            if options['deferred_copying_level'] == 1 and result['success']:
                periodic_backup.general_desc_iteration(
                    result['tmp_path'], options['storages'], db, job_name,
                    options['safety_backup'])

    for db, result in dumped_dbs.items():
        if options['deferred_copying_level'] >= 2 and result['success']:
            periodic_backup.general_desc_iteration(result['tmp_path'],
                                                   options['storages'], db,
                                                   job_name,
                                                   options['safety_backup'])

    # After all operations, delete the created temporary directory and the
    # data inside the davfs cache directory, but not the cache directory itself.
    general_function.del_file_objects(options['backup_type'],
                                      full_path_tmp_dir, '/var/cache/davfs2/*')
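When 'all' is requested, this example reads database names from `pg_database`. A standalone sketch of that step with placeholder credentials; filtering out template databases is an extra assumption here, since the example itself relies on the `excludes` list from the job config:

import psycopg2

conn = psycopg2.connect(dbname='postgres', user='backup', password='secret',
                        host='127.0.0.1', port=5432)
try:
    with conn.cursor() as cur:
        cur.execute("SELECT datname FROM pg_database WHERE NOT datistemplate;")
        databases = [row[0] for row in cur.fetchall()]
        print(databases)
finally:
    conn.close()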