Ejemplo n.º 1
0
def run(dry_run=False):
    """Delete volumes from the catalog whose backing file no longer exists.

    Queries the catalog for every (volume, storage) pair, resolves each
    volume's archive-device path via the parsed storage-daemon config and,
    when the volume file is missing on disk, removes the volume from the
    catalog through bconsole.

    :param dry_run: Dry-run flag; see the NOTE below about how it combines
        with CONF('DRY_RUN').
    """
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;"
        )
        media_storage = cur.fetchall(
        )  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
        # BUG FIX: without the query result there is nothing to iterate;
        # previously execution fell through and raised NameError on
        # `media_storage`.
        return
    storages_conf_parsed = bacula_parse(BACULA_DIR_BIN)
    for volname, storagename in media_storage:
        for storage_name, storage_value in storages_conf_parsed[
                "Storage"].items():
            hn = storage_value["Address"]
            # Parse the SD config locally or remotely, depending on where
            # the storage daemon runs.
            if not islocal(hn):
                sd_conf_parsed = bacula_parse(BACULA_SD_BIN, hn=hn)
            else:
                sd_conf_parsed = bacula_parse(BACULA_SD_BIN)
            if storagename == storage_name:
                device = storage_value["Device"]
                ad = get_archive_device_of_device(device, sd_conf_parsed)
                if ad:
                    volpath = os.path.join(ad, volname)
                else:
                    continue
                if CONF('VERBOSE'):
                    log.debug("hn: %s" % hn)
                    # A mountpoint of "/" means the archive device is not
                    # mounted, so a missing file would be a false positive.
                    if not find_mountpoint(ad, hn) == "/":
                        if not _isfile(volpath, hn):
                            log.info(
                                "Deleted volume %s from catalog, because file doesn't exist."
                                % volpath)
                            with open(CONF('LOG'), 'a') as f:
                                time = datetime.now().strftime(
                                    '%Y-%m-%d %H:%M:%S')
                                f.write("{0} {1}\n".format(time, volpath))
                            # NOTE(review): with `or`, deletion runs unless
                            # BOTH dry-run flags are set; `and` was probably
                            # intended - confirm before changing.
                            if not dry_run or not CONF('DRY_RUN'):
                                p1 = Popen(
                                    ["echo",
                                     "delete volume=%s yes" % volname],
                                    stdout=PIPE)
                                p2 = Popen(["bconsole"],
                                           stdin=p1.stdout,
                                           stdout=PIPE)
                                p1.stdout.close()
                                out, err = p2.communicate()
                                log.debug("out: %s, err: %s" % (out, err))
                        elif CONF('VERBOSE') is True:
                            log.info('File exists for %s' % volpath)
Ejemplo n.º 2
0
def run(dry_run=False):
    """Delete catalog jobs (and their volume files) matching DEL_* filters.

    Builds a SELECT over job/media/jobmedia/storage from the DEL_* settings
    in /etc/bacula-scripts/bacula_del_jobs_conf.py, combines the filters
    with CONF('OPERATOR') (AND/OR), resolves each hit's volume path and
    hands the result to del_backups().

    :param dry_run: When truthy, stored in CONF('DRY_RUN') so downstream
        deletion helpers can skip destructive actions.
    """
    if dry_run:
        CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        query = """
SELECT DISTINCT j.jobid, j.name, m.volumename, s.name
FROM job j, media m, jobmedia jm, storage s
WHERE m.mediaid=jm.mediaid
AND j.jobid=jm.jobid
AND s.storageid=m.storageid
"""
        data = []
        if CONF('OPERATOR').lower() == "or":
            operator2 = " OR "
        else:
            operator2 = " AND "
        # BUG FIX: guard each directive before all() - all(None) raises
        # TypeError for unset directives.  Matches the newer variant of this
        # function in this project.
        if CONF('DEL_JOB_NAMES') and all(CONF('DEL_JOB_NAMES')):
            data += CONF('DEL_JOB_NAMES')
            query2 = "j.name IN (%s)" % jobnames_placeholders
            query += operator2 + query2
        if CONF('DEL_STORAGE_NAMES') and all(CONF('DEL_STORAGE_NAMES')):
            data += CONF("DEL_STORAGE_NAMES")
            query2 = "s.name IN (%s)" % storagenames_placeholders
            query += operator2 + query2
        if CONF('DEL_NEWER') and all(CONF('DEL_NEWER')):
            data += CONF('DEL_NEWER')
            query += operator2 + "j.starttime >= %s::timestamp"
        if CONF('DEL_OLDER') and all(CONF('DEL_OLDER')):
            data += CONF('DEL_OLDER')
            query += operator2 + "j.starttime <= %s::timestamp"
        print("Query: %s %s" % (query, str(data)))
        # Refuse to run an unfiltered deletion when nothing is configured.
        directives = ["DEL_JOB_NAMES", "DEL_STORAGE_NAMES", "DEL_NEWER", "DEL_OLDER"]
        if all(CONF(directive) is None for directive in directives):
            print("No deletion rule configured. Exiting")
            return
        query += ";"
        cur.execute(query, data)
        del_job_media_jm_storage = cur.fetchall()
        print(del_job_media_jm_storage)
    except Exception as e:
        print(format_exception(e))
        print(
            "\n\nYour config /etc/bacula-scripts/bacula_del_jobs_conf.py has an error.\n"\
            "Check if all your configured values are in the tuple format. E.g.:\n"\
            "DEL_NEWER = ('',) and not DEL_NEWER = ('')"
        )
        return
    sd_conf_parsed = bacula_parse("bareos-sd")
    storages_conf_parsed = bacula_parse("bareos-dir")
    # Resolve each volume's on-disk path ONCE (previously build_volpath ran
    # twice per row) and drop rows whose path cannot be resolved.
    resolved = []
    for jobid, jobname, volname, storagename in del_job_media_jm_storage:
        volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        if volpath:
            resolved.append((jobid, jobname, volpath, storagename))
    del_backups(resolved)
def get_archive_device_of_device(device):
    """Return the ArchiveDevice path configured for *device*, else None.

    Looks *device* up in the parsed storage-daemon config; if it is not a
    plain device, falls back to treating it as an autochanger and returns
    the archive device of its first member device.

    2021-05 Deprecated - specify path manually. Deprecated, because would need to parse
    autochanger for device name, then get each autochanger device and check if the volume
    is in one of the autochanger archive device paths. Those are just 3-4 lines of code,
    but it's not needed for this setup. Keep it simple and just specify the archive device
    manually where to look at. In this setup there won't be any redundant file names, so
    it's safe to do.
    """
    sd_conf_parsed = bacula_parse(BACULA_SD_BIN)
    if not device:
        return None
    # .get() instead of direct indexing: a direct lookup raised KeyError for
    # unknown devices, making the autochanger fallback below unreachable.
    device2 = sd_conf_parsed["Device"].get(device)
    print("devdev: %s" % device)
    print(sd_conf_parsed["Device"])
    print("dev2: %s" % device2)
    if device2:
        return device2["ArchiveDevice"]
    # Fallback: the name may denote an autochanger; use its first member.
    try:
        device2 = sd_conf_parsed["Autochanger"][device]["Device"]
    except (KeyError, TypeError):
        return None
    if device2:
        # BUG FIX: previously referenced the undefined name `autochanger`
        # here, which raised NameError whenever this path was taken.
        device2 = device2.split(",")[0].strip()
        return sd_conf_parsed["Device"][device2]["ArchiveDevice"]
    return None
def run(dry_run=False):
    """Delete volumes from the catalog whose backing file no longer exists.

    Same contract as the multi-line variant above: query (volume, storage)
    pairs, resolve the archive-device path and delete catalog entries for
    volumes whose file is gone.

    :param dry_run: Dry-run flag; see the NOTE below about how it combines
        with CONF('DRY_RUN').
    """
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;")
        media_storage = cur.fetchall()  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
        # BUG FIX: previously fell through and raised NameError on
        # `media_storage` when the query failed.
        return
    storages_conf_parsed = bacula_parse("bareos-dir")
    for volname, storagename in media_storage:
        for storage_name, storage_value in storages_conf_parsed["Storage"].items():
            hn = storage_value["Address"]
            if not islocal(hn):
                sd_conf_parsed = bacula_parse("bareos-sd", hn=hn)
            else:
                sd_conf_parsed = bacula_parse("bareos-sd")
            if storagename == storage_name:
                device = storage_value["Device"]
                ad = get_archive_device_of_device(device, sd_conf_parsed)
                if ad:
                    volpath = os.path.join(ad, volname)
                else:
                    continue
                if CONF('VERBOSE'):
                    log.debug("hn: %s" % hn)
                    # Mountpoint "/" means the archive device is not mounted;
                    # a missing file would then be a false positive.
                    if not find_mountpoint(ad, hn) == "/":
                        if not _isfile(volpath, hn):
                            log.info("Deleted volume %s from catalog, because file doesn't exist." % volpath)
                            with open(CONF('LOG'), 'a') as f:
                                time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                                f.write("{0} {1}\n".format(time, volpath))
                            # NOTE(review): with `or`, deletion runs unless
                            # BOTH dry-run flags are set; confirm intent.
                            if not dry_run or not CONF('DRY_RUN'):
                                p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                                p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                                p1.stdout.close()
                                out, err = p2.communicate()
                                log.debug("out: %s, err: %s" % (out, err))
                        elif CONF('VERBOSE') is True:
                            log.info('File exists for %s' % volpath)
Ejemplo n.º 5
0
 def run(
         self,
         dry_run=None, 
         fd_fqdn=None,
         os_type=None,
         create_client_job=None,
         create_client_copy_job=None,
         create_bconsole=None
     ):
     """Provision configuration and key material for a Bareos client.

     Stores the supplied options on the instance, prepares the per-client
     data directory, downloads the filedaemon binaries, writes the
     director-side client resource (plus optional job / copy-job / bconsole
     resources) and finally reloads the director.

     :param dry_run: Forwarded flag; presumably suppresses destructive
         steps downstream - TODO confirm in user_input()/writer methods.
     :param fd_fqdn: FQDN of the client filedaemon.
     :param os_type: Client operating system - presumably selects
         Windows-specific handling; confirm against create_windows_bat().
     :param create_client_job: When truthy, also write a backup job.
     :param create_client_copy_job: When truthy, also write a copy job.
     :param create_bconsole: When truthy, also write a bconsole config.
     """
     # User Input
     self.dry_run = dry_run
     self.fd_fqdn = fd_fqdn
     self.os_type = os_type
     self.create_client_job = create_client_job
     self.create_client_copy_job = create_client_copy_job
     self.create_bconsole = create_bconsole
     self.user_input()
     # Per-client directory that collects certs and config files.
     self.fd_data_dir = os.path.join(
         CONF('STORE_CLIENT_FILES'),
         self.fd_fqdn
     )
     self.bareos_bins_dir = os.path.join(
         CONF('STORE_CLIENT_FILES'),
         "bareos-bins"
     )
     subprocess.call(("mkdir -p %s" % self.bareos_bins_dir).split())
     subprocess.call(("mkdir -p %s" % self.fd_data_dir).split())
     self.try_download_bareos_bins()
     # Write client and/or job/copy-job resources
     self.dir_conf = bacula_parse("bareos-dir")
     self.fd_password = self.generate_password()
     self.write_client_resource()
     self.create_windows_bat()
     if self.create_bconsole:
         self.create_bconsole_conf()
     if self.create_client_job:
         self.write_job()
     if self.create_client_copy_job:
         self.write_copy_job()
     # Write filedaemon resources and create client keys
     # NOTE(review): self.client_exists is presumably set by user_input()
     # or write_client_resource() - confirm.
     if not self.client_exists:
         self.create_filedaemon_files()
         print(
             "\nPlease add following certs and config files to your client %s:\n%s" %
             (self.fd_fqdn, self.fd_data_dir)
         )
     self.reload_director()
     print("..DONE")
def get_archive_device_of_device(device):
    """Return the ArchiveDevice path configured for *device*, else None.

    Looks *device* up in the parsed bareos-sd config; if it is not a plain
    device, falls back to treating it as an autochanger and returns the
    archive device of its first member device.
    """
    sd_conf_parsed = bacula_parse("bareos-sd")
    if not device:
        return None
    # .get() instead of direct indexing: a direct lookup raised KeyError for
    # unknown devices, making the autochanger fallback below unreachable.
    device2 = sd_conf_parsed["Device"].get(device)
    if device2:
        return device2["ArchiveDevice"]
    # Fallback: the name may denote an autochanger; use its first member.
    try:
        device2 = sd_conf_parsed["Autochanger"][device]["Device"]
    except (KeyError, TypeError):
        return None
    if device2:
        # BUG FIX: previously referenced the undefined name `autochanger`
        # here, which raised NameError whenever this path was taken.
        device2 = device2.split(",")[0].strip()
        return sd_conf_parsed["Device"][device2]["ArchiveDevice"]
    return None
Ejemplo n.º 7
0
 def run(self,
         dry_run=None,
         fd_fqdn=None,
         fileset=None,
         os_type=None,
         create_client_job=None,
         create_client_copy_job=None,
         create_bconsole=None):
     """Provision configuration and key material for a Bareos client.

     Stores the supplied options on the instance, prepares the per-client
     data directory, downloads the filedaemon binaries, writes the
     director-side client resource (plus optional job / copy-job / bconsole
     resources) and finally reloads the director.

     :param dry_run: Forwarded flag; presumably suppresses destructive
         steps downstream - TODO confirm in user_input()/writer methods.
     :param fd_fqdn: FQDN of the client filedaemon.
     :param fileset: Fileset name used when writing the job resources -
         TODO confirm exact use in write_job().
     :param os_type: Client operating system - presumably selects
         Windows-specific handling; confirm against create_windows_bat().
     :param create_client_job: When truthy, also write a backup job.
     :param create_client_copy_job: When truthy, also write a copy job.
     :param create_bconsole: When truthy, also write a bconsole config.
     """
     # User Input
     self.dry_run = dry_run
     self.dir_conf = bacula_parse("bareos-dir")
     self.fd_fqdn = fd_fqdn
     self.fileset = fileset
     self.os_type = os_type
     self.create_client_job = create_client_job
     self.create_client_copy_job = create_client_copy_job
     self.create_bconsole = create_bconsole
     self.user_input()
     # Per-client directory that collects certs and config files.
     self.fd_data_dir = os.path.join(CONF('STORE_CLIENT_FILES'),
                                     self.fd_fqdn)
     self.bareos_bins_dir = os.path.join(CONF('STORE_CLIENT_FILES'),
                                         "bareos-bins")
     subprocess.call(("mkdir -p %s" % self.bareos_bins_dir).split())
     subprocess.call(("mkdir -p %s" % self.fd_data_dir).split())
     self.try_download_bareos_bins()
     # Write client and/or job/copy-job resources
     self.fd_password = self.generate_password()
     self.write_client_resource()
     self.create_windows_bat()
     if self.create_bconsole:
         self.create_bconsole_conf()
     if self.create_client_job:
         self.write_job()
     if self.create_client_copy_job:
         self.write_copy_job()
     # Write filedaemon resources and create client keys
     # NOTE(review): self.client_exists is presumably set by user_input()
     # or write_client_resource() - confirm.
     if not self.client_exists:
         self.create_filedaemon_files()
         print(
             "\nPlease add following certs and config files to your client %s:\n%s"
             % (self.fd_fqdn, self.fd_data_dir))
     self.reload_director()
     print("..DONE")
Ejemplo n.º 8
0
 def __init__(self, dry_run):
     """Remember the dry-run flag and parse the director config once."""
     self.dry_run = dry_run
     self.dir_conf = bacula_parse("bareos-dir")
"""
import argparse
import os
import psycopg2
import re
import sys
from argparse import RawDescriptionHelpFormatter
from subprocess import Popen, PIPE

from helputils.core import format_exception, systemd_services_up
from helputils.defaultlog import log
sys.path.append("/etc/bacula-scripts")
from general_conf import db_host, db_user, db_name, db_password, services
from bacula_scripts.bacula_parser import bacula_parse

storages_conf_parsed = bacula_parse("bareos-dir")
sd_conf_parsed = bacula_parse("bareos-sd")


def get_archive_device_of_device(device):
    sd_conf_parsed = bacula_parse("bareos-sd")
    if device:
        device2 = sd_conf_parsed["Device"][device]
        if device2:
            ad = device2["ArchiveDevice"]
            return ad
        elif not device2:
            try:
                device2 = sd_conf_parsed["Autochanger"][device]["Device"]
            except:
                return None
"""
import argparse
import os
import psycopg2
import re
import sys
from argparse import RawDescriptionHelpFormatter
from subprocess import Popen, PIPE

from helputils.core import format_exception, systemd_services_up
from helputils.defaultlog import log
sys.path.append("/etc/bacula-scripts")
from general_conf import db_host, db_user, db_name, db_password, services
from bacula_scripts.bacula_parser import bacula_parse

# Parse the director and storage-daemon configs once at import time so the
# functions below can share them.  NOTE(review): several functions in this
# file re-parse and shadow these locally - confirm these globals are still
# needed.
storages_conf_parsed = bacula_parse("bareos-dir")
sd_conf_parsed = bacula_parse("bareos-sd")


def has_catalog_entry(volume_name):
    """Return True if the catalog has a media row for *volume_name*.

    Returns False when no row exists, and None (falsy) when the database
    query fails; the failure is logged.
    """
    con = None
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT * FROM media WHERE volumename=%s", (volume_name,))
        # Any row means the entry exists; bool([]) is False.
        return bool(cur.fetchall())
    except Exception as e:
        log.error(format_exception(e))
    finally:
        # BUG FIX: the connection was previously never closed (leak).
        if con is not None:
            con.close()
Ejemplo n.º 11
0
def run(args):
    """Delete jobs matching the configured DEL_* filters from catalog/disk.

    Builds a catalog query from the DEL_* directives, resolves each matched
    job's volume path and hands the list to del_backups().  When a storage
    cannot be resolved: args.default_storage assumes the path, or
    args.force_del_catalog deletes the catalog entry only.

    :param args: argparse namespace with dry_run, default_storage and
        force_del_catalog attributes.
    """
    if args.dry_run:
        CONF_SET('DRY_RUN', args.dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        query = """
SELECT DISTINCT j.jobid, j.name, m.volumename, s.name
FROM job j, media m, jobmedia jm, storage s
WHERE m.mediaid=jm.mediaid
AND j.jobid=jm.jobid
AND s.storageid=m.storageid
"""
        data = []
        if CONF('OPERATOR').lower() == "or":
            operator2 = " OR "
        else:
            operator2 = " AND "
        #if all([False] if not CONF('DEL_CLIENTS') else CONF('DEL_CLIENTS')):
        if CONF('DEL_CLIENTS') and all(CONF('DEL_CLIENTS')):
            data += CONF('DEL_CLIENTS')
            query2 = "j.clientid IN (SELECT clientid FROM client WHERE name IN (%s))" % clients_placeholders
            query += operator2 + query2
        if CONF('DEL_FILESETS') and all(CONF('DEL_FILESETS')):
            data += CONF('DEL_FILESETS')
            query2 = "j.filesetid IN (SELECT filesetid FROM fileset WHERE fileset IN (%s))" % filesets_placeholders
            query += operator2 + query2
        if CONF('DEL_NOT_FILESETS') and all(CONF('DEL_NOT_FILESETS')):
            data += CONF('DEL_NOT_FILESETS')
            query2 = "j.filesetid NOT IN (SELECT filesetid FROM fileset WHERE fileset IN (%s))" % filesets_not_placeholders
            query += operator2 + query2
        if CONF('DEL_JOB_NAMES') and all(CONF('DEL_JOB_NAMES')):
            data += CONF('DEL_JOB_NAMES')
            query2 = "j.name IN (%s)" % jobnames_placeholders
            query += operator2 + query2
        if CONF('DEL_STORAGE_NAMES') and all(CONF('DEL_STORAGE_NAMES')):
            data += CONF("DEL_STORAGE_NAMES")
            query2 = "s.name IN (%s)" % storagenames_placeholders
            query += operator2 + query2
        if CONF('DEL_NEWER') and all(CONF('DEL_NEWER')):
            data += CONF('DEL_NEWER')
            query += operator2 + "j.starttime >= %s::timestamp"
        if CONF('DEL_OLDER') and all(CONF('DEL_OLDER')):
            data += CONF('DEL_OLDER')
            query += operator2 + "j.starttime <= %s::timestamp"
        print("Query: %s %s" % (query, str(data)))
        # BUG FIX: DEL_FILESETS, DEL_NOT_FILESETS and DEL_OLDER were missing
        # from this list, so configs using only those directives exited as
        # "no deletion rule configured" even though a filter was set.
        directives = [
            "DEL_CLIENTS", "DEL_FILESETS", "DEL_NOT_FILESETS",
            "DEL_JOB_NAMES", "DEL_STORAGE_NAMES", "DEL_NEWER", "DEL_OLDER"
        ]
        if all(CONF(directive) is None for directive in directives):
            print("No deletion rule configured. Exiting")
            sys.exit()
        query += ";"
        cur.execute(query, data)
        select_job_media_jm_storage = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        print(
            "\n\nYour config /etc/bacula-scripts/bacula_del_jobs_conf.py has an error.\n"\
            "Check if all your configured values are in the tuple format. E.g.:\n"\
            "DEL_NEWER = ('',) and not DEL_NEWER = ('')"
        )
        return
    sd_conf_parsed = bacula_parse("bareos-sd")
    storages_conf_parsed = bacula_parse("bareos-dir")

    del_job_media_jm_storage = list()
    for jobid, jobname, volname, storagename in select_job_media_jm_storage:
        # Resolve the path once; previously build_volpath was evaluated up
        # to three times per row.
        volpath = build_volpath(volname, storagename, sd_conf_parsed,
                                storages_conf_parsed)
        if volpath:
            print("Storage found: %s" % volpath)
            del_job_media_jm_storage.append((jobid, jobname, volname, volpath))
        elif args.default_storage:
            print("Storage not found. Specified default_storage: %s" %
                  args.default_storage)
            del_job_media_jm_storage.append(
                (jobid, jobname, volname,
                 os.path.join(args.default_storage, volname)))
        elif args.force_del_catalog:
            # Setting path to None. This way only the catalog entry will be deleted
            print(
                "Storage not found. force_del_catalog: True. Deleting catalog entries"
            )
            del_job_media_jm_storage.append((jobid, jobname, volname, None))
        else:
            # Neither deleting file nor catalog
            print("Storage not found. Skipping")
    print("Deleting: %s" % del_job_media_jm_storage)
    del_backups(del_job_media_jm_storage)
Ejemplo n.º 12
0
def run(dry_run=False):
    """Delete purged volumes that no other backup in the chain depends on.

    Fetches all volumes plus the subset with volstatus 'Purged' from the
    catalog, classifies the purged ones by job level (full/diff/inc) and
    deletes only those volumes whose removal cannot break a restore chain
    (no dependent incremental/diff backups, minimum retention counts kept).

    :param dry_run: Stored in CONF('DRY_RUN') so del_backups() can skip the
        destructive steps.
    """
    CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        # BUG FIX: previously fell through and raised NameError on
        # `volumes` when the query failed.
        return
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]

    sd_conf_parsed = bacula_parse(BACULA_SD_BIN)
    storages_conf_parsed = bacula_parse(BACULA_DIR_BIN)

    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        elif not islocal(hn):
            log.info("content of %s" % hn)
            remote_sd_conf_parsed = bacula_parse(BACULA_SD_BIN, hn=hn)
            if remote_sd_conf_parsed:
                volpath = build_volpath(
                    volname,
                    storagename,
                    remote_sd_conf_parsed,
                    storages_conf_parsed,
                    hn
                )
            else:
                log.warning("Skipping this purged volume, because host %s is down" % hn)
                continue
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif _isfile(volpath, hn) is False:
            # `is False` keeps the original tri-state semantics: a None
            # result (indeterminate) still falls through to the final else.
            # The former `and volpath` was redundant - volpath is truthy here.
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if CONF('DEL_VOLS_WITH_NO_METADATA'):
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        # Workaround for copy volumes, which don't store the right job level. Notice
        #  this works only if your pool names include the job level (e.g. full, inc or diff)
        if mt in CONF('OFFSITE_MT'):
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        # Proper statements instead of the former `append(...) if ... else ""`
        # expression-statement idiom.
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # Keep a full volume while any newer inc/diff backup depends on it,
        # and always keep the newest full backup of each chain.
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
            continue
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            log.info("Not removing {0}, because we have less than three backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        # An incremental can go once no unpurged incremental of the same
        # chain sits between the surrounding full/diff backups.
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if newer_full_diff_backups:
                next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] < next_full_backup[3])
            if older_full_diff_backups:
                prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                # BUG FIX: message previously claimed "less than four full
                # backups" although the condition checks diff backups < 2.
                log.info("Not removing {0}, because we have less than two diff backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)
Ejemplo n.º 13
0
def run(dry_run=False):
    """Delete purged Bacula volume files (and catalog records) that no
    backup chain depends on anymore.

    Loads all volumes with their job metadata from the catalog, sorts the
    purged ones into full / diff / inc groups, then per group decides which
    are safe to remove:
      * full: no dependent diff/inc backups remain and at least three
        unpurged full backups of the same chain exist,
      * inc:  no unpurged inc backups remain inside the same
        full-to-full (or diff) interval,
      * diff: no dependent inc backups remain and at least two unpurged
        diff backups of the same chain exist.

    :param dry_run: forwarded to CONF('DRY_RUN'); when truthy, del_backups()
                    only simulates the deletion.
    """
    CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        # Every volume joined with its job/fileset/client/mediatype metadata.
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
        # Only the volumes the catalog already marks as purged.
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        # Fix: without catalog data the code below raised a confusing
        # NameError on `volumes`; abort cleanly instead.
        return
    # Row layout of `volumes` (matches the SELECT column order):
    # 0 volumename, 1 storage name, 2 volstatus, 3 jobtdate, 4 filesetid,
    # 5 clientid, 6 level, 7 client name, 8 fileset, 9 job name, 10 mediatype
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]

    sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"))
    storages_conf_parsed = bacula_parse(CONF("bacula_dir_bin"))

    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        else:
            # Fix: the original logged the not-yet-assigned variable `fn`
            # here, raising NameError on the first remote storage.
            log.info("Parsing remote storage %s for volume %s" % (hn, volname))
            remote_sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"), hn=hn)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif _isfile(volpath, hn) is False:
            # `is False` keeps the original strict comparison: a None result
            # from _isfile (e.g. remote check failed) must NOT trigger
            # catalog deletion. (`and volpath` was redundant: volpath is
            # known truthy after the guard above.)
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if CONF('DEL_VOLS_WITH_NO_METADATA'):
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    # Equivalent of `echo "delete volume=... yes" | bconsole`.
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        # Workaround for copy volumes, which don't store the right job level. Notice
        #  this works only if your pool names include the job level (e.g. full, inc or diff)
        if mt in CONF('OFFSITE_MT'):
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        # Fix: plain statements instead of conditional expressions abused
        # for their side effects.
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # Unpurged full backups of the same client/fileset/job/mediatype
        # that are newer than this purged one.
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # Non-empty because it is a superset of newer_full_backups.
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        # Incrementals that fall between this full and the next full/diff
        # still depend on it.
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
            continue
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            # Fix: the message used to say "less than four three backups".
            log.info("Not removing {0}, because we have fewer than three full backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # Hoisted out of the loop below: both bounds are loop-invariant.
        next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3]) if newer_full_diff_backups else None
        prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3]) if older_full_diff_backups else None
        # Unpurged incrementals inside the same full/diff-to-full/diff
        # interval as this purged one; they share its chain.
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if next_full_backup is not None:
                inc_filter.append(x3[3] < next_full_backup[3])
            if prev_full_backup is not None:
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                # Fix: the message used to claim "less than four full
                # backups" while the condition checks diff backups < 2.
                log.info("Not removing {0}, because we have fewer than two diff backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)
import argparse
import os
import psycopg2
import re
import sys
from argparse import RawDescriptionHelpFormatter
from subprocess import Popen, PIPE

from helputils.core import format_exception, systemd_services_up
from helputils.defaultlog import log
sys.path.append("/etc/bacula-scripts")
from general_conf import (BACULA_DIR_BIN, BACULA_SD_BIN, db_host, db_name,
                          db_password, db_user, services)
from bacula_scripts.bacula_parser import bacula_parse

# Parsed bacula director and storage-daemon configurations, loaded once as a
# module-level side effect at import time (requires the bacula binaries from
# general_conf to be present on this host).
storages_conf_parsed = bacula_parse(BACULA_DIR_BIN)
sd_conf_parsed = bacula_parse(BACULA_SD_BIN)


def has_catalog_entry(volume_name):
    """Return True if the Bacula catalog has a media row for *volume_name*.

    Returns False when no row exists and None when the database query fails
    (the error is logged).

    :param volume_name: Name of the volume to look up in the media table.
    """
    # Fix: the block was truncated (`try` without `except`, `else:` with no
    # body), which is a SyntaxError as written; completed with the obvious
    # False branch and the file's usual log-the-exception error handling.
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        # Parameterized query: volume_name must never be string-formatted in.
        cur.execute("SELECT * FROM media WHERE volumename=%s", (volume_name, ))
        res = cur.fetchall()
        if len(res) > 0:
            return True
        else:
            return False
    except Exception as e:
        log.error(format_exception(e))
        return None
Ejemplo n.º 15
0
 def __init__(self, dry_run):
     self.dir_conf = bacula_parse(BACULA_DIR_BIN)
     self.dry_run = dry_run