def run(dry_run=True):
    systemd_services_up(services)
    failed_job_jm_media = []
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT DISTINCT j.name, j.jobid, m.volumename FROM job j, jobmedia jm, "
            "media m WHERE j.JobStatus "
            "IN ('E', 'A', 'f', 't', 's') AND j.jobid=jm.jobid AND jm.mediaid=m.mediaid "
            "AND j.realendtime < NOW() - INTERVAL '4 days';")
        # Selecting older than 30 days, so that running jobs won't be selected
        failed_job_jm_media = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
    for jname, jobid, volname in failed_job_jm_media:
        volume_path = get_volpath(jname, volname)
        log.info("Deleting catalog entries for job (id: %s, volname: %s)." %
                 (jobid, volname))
        if not dry_run:
            print("volume_path: %s" % volume_path)
            if volume_path:
                log.info("Removing volume from disk %s" % volume_path)
                os.remove(volume_path)
                del_catalog(volname, jobid)
Example #2
def send_mail(event, subject, message, sender=sender, recipient=recipient, username=username, password=password, smtp_address=smtp_address):
    if not (sender and recipient and username and password and smtp_address):
        raise ValueError("Missing setting/parameter.")
    msg = MIMEMultipart('alternative')
    msg['From'] = sender
    msg['To'] = recipient
    # Escape literal CSS braces with double curly brackets so str.format only fills in the color.
    style = '<style type="text/css">body {{ background-color: {0};}} p {{ color: black; font-size: 28px;}}</style>'
    error_style = style.format('red')
    warning_style = style.format('yellow')
    info_style = style.format('green')
    template = "<html>{0}<body><p>{1}</p></body></html>"
    if event == "error":
        html = template.format(error_style, message)
        msg['Subject'] = "error: " + subject
        log.error("Sending %s mail." % event)
    elif event == "warning":
        html = template.format(warning_style, message)
        msg['Subject'] = "warning: " + subject
        log.warning("Sending %s mail." % event)
    elif event == "info":
        html = template.format(info_style, message)
        msg['Subject'] = "info: " + subject
        log.info("Sending %s mail." % event)
    part1 = MIMEText(message, 'plain')
    part2 = MIMEText(html, 'html')
    msg.attach(part1)
    msg.attach(part2)
    s = smtplib.SMTP(smtp_address)
    s.starttls()
    s.login(username, password)
    s.sendmail(sender, recipient, msg.as_string())
    s.quit()
Example #3
def main():
    task = sys.argv[1]
    dev = "/dev/%s" % sys.argv[2] if len(sys.argv) > 2 else None
    log.info("offsiteudev: task: %s, dev: %s" % (task, dev))
    if task == "mount" and dev:
        mkdir_p(mp)
        if mount(dev=dev, mp=mp):
            log.info("Mounted %s to %s" % (dev, mp))
    elif task == "add" and dev:
        mkdir_p(mp)
        if mount(dev=dev, mp=mp):
            for x in backup_dirs:
                x = os.path.join(mp, x)
                mkdir_p(x)
                uid, gid = pwd.getpwnam(chown_user).pw_uid, grp.getgrnam(
                    chown_group).gr_gid
                os.chown(x, uid, gid)
            log.info("Running job now")
            for x in copy_jobs:
                try_func(run_job, x, ssh_alias)
            log.info("Run jobs: %s" % ", ".join(copy_jobs))
        else:
            msg = "Couldn't mount offsite hdd, thus offsite backup not initiated."
            log.error(msg)
            send_mail(event="error",
                      subject=os.path.basename(__file__),
                      message=msg)
    elif task == "umount":
        umount(mp)
        msg = "Offsite backup completed successfully."
        log.info(msg)
        send_mail(event="info",
                  subject=os.path.basename(__file__),
                  message=msg)
    log.info("Done")
Example #4
def main():
    systemd_services_up(services)
    failed_job_jm_media = []
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT DISTINCT j.jobid, m.volumename FROM job j, jobmedia jm, media m WHERE j.JobStatus "
            "IN ('E', 'A', 'f') AND j.jobid=jm.jobid AND jm.mediaid=m.mediaid;"
        )
        failed_job_jm_media = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
    for jobid, volname in failed_job_jm_media:
        log.info("Deleting catalog entries for job (id: %s, volname: %s)." %
                 (jobid, volname))
        if not dry_run:
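            # Pipe the command into bconsole, i.e. echo "delete volume=<name> yes" | bconsole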
            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            p1 = Popen(["echo", "delete volume=%s yes" % jobid], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
Example #5
def has_catalog_entry(volume_name):
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT * FROM media WHERE volumename=%s", (volume_name,))
        res = cur.fetchall()
        return len(res) > 0
    except Exception as e:
        log.error(format_exception(e))
        return None
Example #6
def main():
    systemd_services_up(services)
    volnames = []
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(sql)
        volnames = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
    for (vn,) in volnames:
        print("Pruning volname %s." % vn)
        if not dry_run:
            p1 = Popen(["echo", "prune volume=%s yes" % vn], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
Example #7
def validate_yaml():
    CONFIGPATH = "/etc/bacula_stats.conf"

    if not os.path.isfile(CONFIGPATH):
        log.error("Provide a /etc/bacula_stats.conf. Exiting.")
        sys.exit(1)
    with open(CONFIGPATH, "r") as stream:
        cf = yaml.safe_load(stream)
    schema = Schema({
        Required('bacula_config_path'): str,
        Required('port'): int,
        Required('db_name'): str,
        Required('db_user'): str,
        Required('db_host'): str,
        'timeouts': Schema({int:
                            [str]})  # If timeouts not set, use default value.
    })
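    # A config with the following shape (hypothetical values) would pass validation:
    #   bacula_config_path: /etc/bacula
    #   port: 9101
    #   db_name: bacula
    #   db_user: bacula
    #   db_host: localhost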
    try:
        schema(cf)
    except MultipleInvalid as e:
        raise AssertionError(e)
    return cf
Example #8
def send_mail(event, subject, message, priority="low"):
    try:
        # Create message container; the correct MIME type is multipart/alternative.
        msg = MIMEMultipart('alternative')
        msg['From'] = conf["SENDER"]
        msg['To'] = conf["RECIPIENT"]
        if priority == "high":
            msg['X-Priority'] = '2'
        # Escape with double curly brackets. Alternatively switch to %s string format
        style = '<style type="text/css">body {{ background-color: {0};}} p {{ color: black; font-size: 28px;}}</style>'
        error_style = style.format('red')
        warning_style = style.format('yellow')
        info_style = style.format('green')
        template = "<html>{0}<body><p>{1}</p></body></html>"
        if event.lower() in ["error", "e"]:
            html = template.format(error_style, message)
            msg['Subject'] = "error: " + subject
            log.error("Sending %s mail." % event)
        elif event.lower() in ["warning", "w"]:
            html = template.format(warning_style, message)
            msg['Subject'] = "warning: " + subject
            log.warning("Sending %s mail." % event)
        elif event.lower() in ["info", "i"]:
            html = template.format(info_style, message)
            msg['Subject'] = "info: " + subject
            log.info("Sending %s mail." % event)
        part1 = MIMEText(message, 'plain')
        part2 = MIMEText(html, 'html')
        msg.attach(part1)
        msg.attach(part2)
        s = smtplib.SMTP(conf["SMTP"])
        s.starttls()
        s.login(conf["USERNAME"], conf["PASSWORD"])
        s.sendmail(conf["SENDER"], conf["RECIPIENT"], msg.as_string())
        s.quit()
    except Exception as e:
        log.error(format_exception(e))
Example #9
def del_backups(remove_backup):
    """Deletes list of backups from disk and catalog.

    Make sure to add to your sudoers file something like:
    `user ALL=NOPASSWD: /usr/bin/rm /mnt/8tb01/offsite01/*`. Notice that I added the offsite's path with the
    wildcard after the rm command, so that the user can only use rm for that directory.
    """
    for volpath, hn in remove_backup:
        volname = os.path.basename(volpath)
        log.info("Deleting %s:%s" % (hn, volpath))
        if not CONF('DRY_RUN'):
            if islocal(hn):
                try:
                    os.remove(volpath)
                    log.info("Deleted %s" % volpath)
                except Exception as e:
                    log.error(format_exception(e))
                    log.info(
                        "Deleting failed, apparently volpath %s doesn't exist."
                        % volpath)
            elif not islocal(hn):
                try:
                    p = Popen(["ssh", hn, "sudo", CONF("rm_bin"), volpath])
                    o, e = p.communicate()
                    if e:
                        if "ssh: Could not resolve hostname" in e.decode(
                                "UTF-8"):
                            log.error(e)
                            log.error(
                                "Please set up ssh keys for the storage host, so that this script can ssh to the "
                                "host %s" % hn)
                            continue
                except Exception as e:
                    log.error(format_exception(e))
                    log.info(
                        "Deleting failed, apparently volpath %s doesn't exist (remote delete)."
                        % volpath)
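            # Also remove the volume's catalog record: echo "delete volume=<name> yes" | bconsole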
            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
Example #11
def encfs_mount(jobid=None):
    if os.path.ismount(CONF('MOUNT_DIR')):
        log.info("Already mounted. Trying to unmount")
        umount(CONF('MOUNT_DIR'), fuser=True)
        if os.path.ismount(CONF('MOUNT_DIR')):
            log.warning("Still mounted. Trying lazy unmount.")
            umount(CONF('MOUNT_DIR'), lazy=True, fuser=True)
            if os.path.ismount(CONF('MOUNT_DIR')):
                log.error("Couldn't be unmounted. Canceling job.")
                cancle_job(jobid)
                sys.exit()
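    # Run the EncFS mount command; the passphrase is fed to it on stdin so it never
    # appears in the process arguments.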
    p1 = subprocess.Popen(CONF('cmd_mount'), stdin=PIPE)
    out, err = p1.communicate(input="{0}\n".format(CONF('ENCFS_PASSPHRASE')).encode())
    if p1.returncode != 0:
        log.error("failed: out: %s err: %s" % (out, err))
        cancle_job(jobid)
        return
    log.debug("out: %s, err %s" % (out, err))
    log.info("Mounted encfs")
    if not os.path.ismount(CONF('MOUNT_DIR')):
        log.error("(E) encfs couldn't be mounted. Exiting %s")
        cancle_job(jobid)
        sys.exit()
Example #12
def main():
    systemd_services_up(services)
    volumes, purged_vols = [], []
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
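        # Each row: (volumename, storagename, volstatus, jobtdate, filesetid, clientid,
        #            level, clientname, fileset, jobname, mediatype)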
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]
    with open(sd_conf, "r") as f:
        sd_conf_parsed = parse_conf(f)
    with open(storages_conf, "r") as f:
        storages_conf_parsed = parse_conf(f)
    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        elif not islocal(hn):
            remote_sd_conf = remote_file_content(hn, sd_conf)
            remote_sd_conf_parsed = parse_conf(remote_sd_conf)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif volpath and not _isfile(volpath, hn):
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if del_vols_with_no_metadata:
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        if mt in offsite_mt:  # This is a workaround for copy volumes, which don't store the right job level. Notice
            # this works only if your pool names include the job level (e.g. full, inc or diff).
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # log.debug("\n\nDeciding which purged full vols to delete: cn: {0}, fn: {1}, backup_time: {2}, volpath:
        #            {3}".format(cn, fn, backup_time, volpath))
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        # log.info("newer_full_backups %s" % str(newer_full_backups))
        # log.info("newer_full_diff_backups %s" % str(newer_full_diff_backups))
        # log.info("next_full_diff_backup %s" % str(next_full_diff_backup))
        # log.info("inc_backups %s" % inc_backups)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            log.info("Not removing {0}, because we have less than four three backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if newer_full_diff_backups:
                next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] < next_full_backup[3])
            if older_full_diff_backups:
                prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            # log.info("newer_full_or_diff_backups %s" % str(newer_full_or_diff_backups))
            # log.info("next_full_or_diff_backup %s" % str(next_full_or_diff_backup))
            # log.info("inc_backups %s" % inc_backups)
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                log.info("Not removing {0}, because we have less than four full backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)
Example #13
def run(dry_run=False):
    CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    volumes, purged_vols = [], []
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
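        # Each row: (volumename, storagename, volstatus, jobtdate, filesetid, clientid,
        #            level, clientname, fileset, jobname, mediatype)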
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]

    sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"))
    storages_conf_parsed = bacula_parse(CONF("bacula_dir_bin"))

    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        elif not islocal(hn):
            log.info("content of %s:%s (hn:filename)" % (hn, fn))
            remote_sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"), hn=hn)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif volpath and not _isfile(volpath, hn):
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if CONF('DEL_VOLS_WITH_NO_METADATA'):
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        # Workaround for copy volumes, which don't store the right job level. Notice
        #  this works only if your pool names include the job level (e.g. full, inc or diff)
        if mt in CONF('OFFSITE_MT'):
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # log.debug("\n\nDeciding which purged full vols to delete: cn: {0}, fn: {1}, backup_time: {2}, volpath:
        #            {3}".format(cn, fn, backup_time, volpath))
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        # log.info("newer_full_backups %s" % str(newer_full_backups))
        # log.info("newer_full_diff_backups %s" % str(newer_full_diff_backups))
        # log.info("next_full_diff_backup %s" % str(next_full_diff_backup))
        # log.info("inc_backups %s" % inc_backups)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            log.info("Not removing {0}, because we have less than four three backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if newer_full_diff_backups:
                next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] < next_full_backup[3])
            if older_full_diff_backups:
                prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            # log.info("newer_full_or_diff_backups %s" % str(newer_full_or_diff_backups))
            # log.info("next_full_or_diff_backup %s" % str(next_full_or_diff_backup))
            # log.info("inc_backups %s" % inc_backups)
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                log.info("Not removing {0}, because we have less than four full backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)