Example #1
def parse_vol(volume, hn=False):
    """Parses volume with bls and returns jobname and timestamp of job. Make sure to have bls in your $PATH and add
       `user ALL=NOPASSWD: /usr/bin/timeout 0.1 bls -jv` to sudoers"""
    log.debug("Run `/usr/bin/timeout 0.1 bls -jv %s` (should be absolute path)" % volume)
    cmd = ["/usr/bin/timeout", "0.1", "bls", "-jv", volume]
    if hn and not islocal(hn):
        cmd = ["ssh", hn, "sudo"] + cmd
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    out = str(out)
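    # NOTE: str() on the bytes from communicate() gives their repr, so real newlines
    # show up as the two characters "\n"; the doubled backslashes below match exactly that.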
    vol = os.path.basename(volume)
    try:
        cn = re.search(r"\\nClientName\s+:\s(.*?)\\n", out).group(1)
        fn = re.search(r"\\nFileSet\s+:\s(.*?)\\n", out).group(1)
        jl = re.search(r"\\nJobLevel\s+:\s(.*?)\\n", out).group(1)
        ti = re.search(r"\\nDate written\s+:\s(.*?)\\n", out).group(1)
        jn = re.search(r"\\nJobName\s+:\s(.*?)\\n", out).group(1)
        mt = re.search(r"\\nMediaType\s+:\s(.*?)\\n", out).group(1)
        pn = re.search(r"\\nPoolName\s+:\s(.*?)\\n", out).group(1)
    except AttributeError:
        log.info("Deleting volume, because no metadata found: %s " % vol)
        return None
    log.info("cn:{0}, fn:{1}, jl:{2}, ti:{3}, mt:{4}, vol:{5}, jn:{6}, pn:{7}".format(cn, fn, jl, ti, mt, vol, jn, pn))
    try:
        ti = ti.replace("\\xc3\\xa4", "ä")  # Temp fix for backups that were made while my locales were broken
        dt = datetime.strptime(ti, "%d-%b-%Y %H:%M")
    except ValueError:
        setlocals()
        dt = datetime.strptime(ti, "%d-%b-%Y %H:%M")
    ts = time.mktime(dt.timetuple())
    return (cn, fn, ts, jl, jn, mt, pn)
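For reference, a minimal sketch of how these patterns behave against a hypothetical `bls -jv` output (the field layout below is assumed for illustration, not taken from a real volume):
import re
from datetime import datetime

# Hypothetical bls -jv output, as Popen.communicate() would return it (bytes).
sample = (b"Volume Label: Full-0001\n"
          b"ClientName       : host1-fd\n"
          b"FileSet          : LinuxAll\n"
          b"Date written     : 06-Mar-2017 21:32\n")
out = str(sample)  # repr of the bytes: real newlines become the two characters "\n"
cn = re.search(r"\\nClientName\s+:\s(.*?)\\n", out).group(1)
ti = re.search(r"\\nDate written\s+:\s(.*?)\\n", out).group(1)
dt = datetime.strptime(ti, "%d-%b-%Y %H:%M")  # needs an English %b locale, hence setlocals()
print(cn, dt)  # -> host1-fd 2017-03-06 21:32:00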
def main():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;"
        )
        media_storage = cur.fetchall()  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
    with open(storages_conf, "r") as myfile:
        storages_conf_parsed = parse_conf(myfile)
    for volname, storagename in media_storage:
        for storage in storages_conf_parsed:
            hn = storage["Address"]
            if not islocal(hn):
                remote_sd_conf = remote_file_content(hn, sd_conf)
                sd_conf_parsed = parse_conf(remote_sd_conf)
            else:
                with open(sd_conf, "r") as myfile:
                    sd_conf_parsed = parse_conf(myfile)
            if storagename == storage["Name"]:
                devicename = storage["Device"]
                for device in sd_conf_parsed:
                    if devicename == device["Name"]:
                        volpath = os.path.join(device["Archive Device"],
                                               volname)
                        if verbose:
                            log.debug("hn: %s" % hn)
                        if find_mountpoint(device["Archive Device"], hn) != "/":
                            if not _isfile(volpath, hn):
                                log.info(
                                    "Deleted volume %s from catalog, because file doesn't exist."
                                    % volpath)
                                with open(del_orphan_log, 'a') as f:
                                    time = datetime.now().strftime(
                                        '%Y-%m-%d %H:%M:%S')
                                    f.write("{0} {1}\n".format(time, volpath))
                                if not dry_run:
                                    p1 = Popen([
                                        "echo",
                                        "delete volume=%s yes" % volname
                                    ],
                                               stdout=PIPE)
                                    p2 = Popen(["bconsole"],
                                               stdin=p1.stdout,
                                               stdout=PIPE)
                                    p1.stdout.close()
                                    out, err = p2.communicate()
                                    log.debug("out: %s, err: %s" % (out, err))
                            elif verbose:
                                log.info('File exists for %s' % volpath)
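The loops above assume parse_conf() returns one dict per resource, keyed by directive name. A purely illustrative sketch of that shape (all names and paths are made up):
# Hypothetical parsed configs, shaped the way main() indexes them.
storages_conf_parsed = [
    {"Name": "s8tb01", "Address": "storage01.example.org", "Device": "FileStorage8tb01"},
]
sd_conf_parsed = [
    {"Name": "FileStorage8tb01", "Archive Device": "/mnt/8tb01"},
]
for storage in storages_conf_parsed:
    for device in sd_conf_parsed:
        if storage["Device"] == device["Name"]:
            print(device["Archive Device"])  # -> /mnt/8tb01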
Example #3
def run(dry_run=False):
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;"
        )
        media_storage = cur.fetchall()  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
    storages_conf_parsed = bacula_parse(BACULA_DIR_BIN)
    for volname, storagename in media_storage:
        for storage_name, storage_value in storages_conf_parsed[
                "Storage"].items():
            hn = storage_value["Address"]
            if not islocal(hn):
                sd_conf_parsed = bacula_parse(BACULA_SD_BIN, hn=hn)
            else:
                sd_conf_parsed = bacula_parse(BACULA_SD_BIN)
            if storagename == storage_name:
                device = storage_value["Device"]
                ad = get_archive_device_of_device(device, sd_conf_parsed)
                if ad:
                    volpath = os.path.join(ad, volname)
                else:
                    continue
                if CONF('VERBOSE'):
                    log.debug("hn: %s" % hn)
                if find_mountpoint(ad, hn) != "/":
                    if not _isfile(volpath, hn):
                        log.info(
                            "Deleted volume %s from catalog, because file doesn't exist."
                            % volpath)
                        with open(CONF('LOG'), 'a') as f:
                            time = datetime.now().strftime(
                                '%Y-%m-%d %H:%M:%S')
                            f.write("{0} {1}\n".format(time, volpath))
                        if not dry_run and not CONF('DRY_RUN'):
                            p1 = Popen(
                                ["echo",
                                 "delete volume=%s yes" % volname],
                                stdout=PIPE)
                            p2 = Popen(["bconsole"],
                                       stdin=p1.stdout,
                                       stdout=PIPE)
                            p1.stdout.close()
                            out, err = p2.communicate()
                            log.debug("out: %s, err: %s" % (out, err))
                    elif CONF('VERBOSE'):
                        log.info('File exists for %s' % volpath)
def del_backups(remove_backup):
    """Deletes list of backups from disk and catalog.

    Make sure to add to your sudoers file something like:
    `user ALL=NOPASSWD: /usr/bin/rm /mnt/8tb01/offsite01/*`. Notice that I added the offsite's path with the
    wildcard after the rm command, so that the user can only use rm for that directory.
    """
    for volpath, hn in remove_backup:
        volname = os.path.basename(volpath)
        log.info("Deleting %s:%s" % (hn, volpath))
        if not CONF('DRY_RUN'):
            if islocal(hn):
                try:
                    os.remove(volpath)
                    log.info("Deleted %s" % volpath)
                except Exception as e:
                    log.error(format_exception(e))
                    log.info("Deleting failed, apparently volpath %s doesn't exist." % volpath)
            elif not islocal(hn):
                try:
                    p = Popen(["ssh", hn, "sudo", CONF("rm_bin"), volpath], stdout=PIPE, stderr=PIPE)
                    o, e = p.communicate()
                    if e:
                        if "ssh: Could not resolve hostname" in e.decode("UTF-8"):
                            log.error(e)
                            log.error("Please setup ssh keys for the storage host, so that this script can ssh to the "
                                      "host %s" % hn)
                            continue
                except Exception as e:
                    log.error(format_exception(e))
                    log.info("Deleting failed, apparently volpath %s doesn't exist (remote delete)." % volpath)
            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
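The `echo "delete volume=... yes" | bconsole` pipe recurs in every function here; a minimal sketch of factoring it into a helper (the helper name is illustrative, not from the source):
from subprocess import PIPE, Popen

def bconsole_delete_volume(volname):
    """Hypothetical helper: delete a volume from the Bareos/Bacula catalog via bconsole."""
    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
    p1.stdout.close()  # let p1 receive SIGPIPE if bconsole exits early
    out, err = p2.communicate()
    return out, err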
def run(dry_run=False):
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;")
        media_storage = cur.fetchall()  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
    storages_conf_parsed = bacula_parse("bareos-dir")
    for volname, storagename in media_storage:
        for storage_name, storage_value in storages_conf_parsed["Storage"].items():
            hn = storage_value["Address"]
            if not islocal(hn):
                sd_conf_parsed = bacula_parse("bareos-sd", hn=hn)
            else:
                sd_conf_parsed = bacula_parse("bareos-sd")            
            if storagename == storage_name:
                device = storage_value["Device"]
                ad = get_archive_device_of_device(device, sd_conf_parsed)
                if ad:
                    volpath = os.path.join(ad, volname)
                else:
                    continue
                if CONF('VERBOSE'):
                    log.debug("hn: %s" % hn)
                if find_mountpoint(ad, hn) != "/":
                    if not _isfile(volpath, hn):
                        log.info("Deleted volume %s from catalog, because file doesn't exist." % volpath)
                        with open(CONF('LOG'), 'a') as f:
                            time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                            f.write("{0} {1}\n".format(time, volpath))
                        if not dry_run and not CONF('DRY_RUN'):
                            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                            p1.stdout.close()
                            out, err = p2.communicate()
                            log.debug("out: %s, err: %s" % (out, err))
                    elif CONF('VERBOSE'):
                        log.info('File exists for %s' % volpath)
def parse_vol(volume, hn=False):
    """
    Parses volume with bls and returns jobname and timestamp of job.

    Make sure to have bls in your $PATH and add e.g. `user ALL=NOPASSWD: /usr/bin/timeout 0.1
    bls -jv` to sudoers. Check with `type timeout` timeout's binary path on your system.
    """
    log.debug(
        "Run `%s 0.1 %s -jv %s` (should be absolute path)" % (
            CONF("timeout_bin"), CONF("bls_bin"), volume
        )
    )
    cmd = [CONF("timeout_bin"), "0.1", CONF("bls_bin"), "-jv", volume]
    if hn and not islocal(hn):
        cmd = ["ssh", hn, "sudo"] + cmd
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    out = str(out)
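    # str() on bytes gives the repr, so the patterns below match the literal "\n" sequences.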
    vol = os.path.basename(volume)
    try:
        cn = re.search(r"\\nClientName\s+:\s(.*?)\\n", out).group(1)
        fn = re.search(r"\\nFileSet\s+:\s(.*?)\\n", out).group(1)
        jl = re.search(r"\\nJobLevel\s+:\s(.*?)\\n", out).group(1)
        ti = re.search(r"\\nDate written\s+:\s(.*?)\\n", out).group(1)
        jn = re.search(r"\\nJobName\s+:\s(.*?)\\n", out).group(1)
        mt = re.search(r"\\nMediaType\s+:\s(.*?)\\n", out).group(1)
        pn = re.search(r"\\nPoolName\s+:\s(.*?)\\n", out).group(1)
    except AttributeError:
        log.info("Deleting volume, because no metadata found: %s " % vol)
        return None
    log.info("cn:{0}, fn:{1}, jl:{2}, ti:{3}, mt:{4}, vol:{5}, jn:{6}, pn:{7}".format(cn, fn, jl, ti, mt, vol, jn, pn))
    try:
        ti = ti.replace("\\xc3\\xa4", "ä")  # Temp fix for backups that were made while my locales were broken
        dt = datetime.strptime(ti, "%d-%b-%Y %H:%M")
    except ValueError:
        setlocals()
        dt = datetime.strptime(ti, "%d-%b-%Y %H:%M")
    ts = time.mktime(dt.timetuple())
    return (cn, fn, ts, jl, jn, mt, pn)
Example #8
def main():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
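        # Row layout (used as x3[i] below): 0 volumename, 1 storage name, 2 volstatus,
        # 3 jobtdate, 4 filesetid, 5 clientid, 6 level, 7 client name, 8 fileset,
        # 9 job name, 10 mediatype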
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]
    with open(sd_conf, "r") as f:
        sd_conf_parsed = parse_conf(f)
    with open(storages_conf, "r") as f:
        storages_conf_parsed = parse_conf(f)
    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        elif not islocal(hn):
            remote_sd_conf = remote_file_content(hn, sd_conf)
            remote_sd_conf_parsed = parse_conf(remote_sd_conf)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif not _isfile(volpath, hn) and volpath:
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if del_vols_with_no_metadata:
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        # Workaround for copy volumes, which don't store the right job level. Notice that
        # this works only if your pool names include the job level (e.g. full, inc or diff).
        if mt in offsite_mt:
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # log.debug("\n\nDeciding which purged full vols to delete: cn: {0}, fn: {1}, backup_time: {2}, volpath:
        #            {3}".format(cn, fn, backup_time, volpath))
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        # log.info("newer_full_backups %s" % str(newer_full_backups))
        # log.info("newer_full_diff_backups %s" % str(newer_full_diff_backups))
        # log.info("next_full_diff_backup %s" % str(next_full_diff_backup))
        # log.info("inc_backups %s" % inc_backups)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            log.info("Not removing {0}, because we have fewer than three full backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if newer_full_diff_backups:
                next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] < next_full_backup[3])
            if older_full_diff_backups:
                prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            # log.info("newer_full_or_diff_backups %s" % str(newer_full_or_diff_backups))
            # log.info("next_full_or_diff_backup %s" % str(next_full_or_diff_backup))
            # log.info("inc_backups %s" % inc_backups)
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                log.info("Not removing {0}, because we have fewer than two diff backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initiating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)
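To make the dependency check concrete, a tiny hypothetical walk-through of the full-volume rule (rows follow the column layout noted above; all values are made up):
unpurged_backups = [
    ("Full-0002", "s1", "Used", 200, 1, 1, "F", "c1", "fs1", "job1", "File"),
    ("Inc-0003", "s1", "Used", 250, 1, 1, "I", "c1", "fs1", "job1", "File"),
]
backup_time = 100  # a purged full volume written at t=100
newer_full_backups = [x for x in unpurged_backups if x[6] == "F" and x[3] > backup_time]
next_full_backup = min(newer_full_backups, key=lambda x: x[3])  # the full at t=200
inc_backups = [x for x in unpurged_backups
               if x[6] == "I" and backup_time < x[3] < next_full_backup[3]]
print(inc_backups)  # [] -> the incremental at t=250 depends on the t=200 full, not on this one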
def run(dry_run=False):
    CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
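        # Same row layout as above: index 6 is level, 7 client name, 8 fileset, 9 job name, 10 mediatype.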
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]

    sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"))
    storages_conf_parsed = bacula_parse(CONF("bacula_dir_bin"))

    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        elif not islocal(hn):
            log.info("Parsing storage daemon config of remote host %s" % hn)
            remote_sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"), hn=hn)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif not _isfile(volpath, hn) and volpath:
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if CONF('DEL_VOLS_WITH_NO_METADATA'):
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        # Workaround for copy volumes, which don't store the right job level. Notice
        #  this works only if your pool names include the job level (e.g. full, inc or diff)
        if mt in CONF('OFFSITE_MT'):
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # log.debug("\n\nDeciding which purged full vols to delete: cn: {0}, fn: {1}, backup_time: {2}, volpath:
        #            {3}".format(cn, fn, backup_time, volpath))
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        # log.info("newer_full_backups %s" % str(newer_full_backups))
        # log.info("newer_full_diff_backups %s" % str(newer_full_diff_backups))
        # log.info("next_full_diff_backup %s" % str(next_full_diff_backup))
        # log.info("inc_backups %s" % inc_backups)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            log.info("Not removing {0}, because we have fewer than three full backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if newer_full_diff_backups:
                next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] < next_full_backup[3])
            if older_full_diff_backups:
                prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            # log.info("newer_full_or_diff_backups %s" % str(newer_full_or_diff_backups))
            # log.info("next_full_or_diff_backup %s" % str(next_full_or_diff_backup))
            # log.info("inc_backups %s" % inc_backups)
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                log.info("Not removing {0}, because we have fewer than two diff backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initiating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)