Beispiel #1
0
def run(dry_run=False):
    """Cull redundant consecutive same-level backups, keeping an even spread.

    Fetches backup rows from the catalog, marks every backup that is
    immediately followed by another backup of the same level as cullable,
    then keeps an evenly spread subset (one per 24 days of covered timespan)
    and prunes the rest.

    :param dry_run: If True, only report what would be pruned (fix: the
        parameter was previously accepted but ignored).
    """
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host,
                               password=db_password)
        cur = con.cursor()
        print(query)
        cur.execute(query)
        res = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return
    cull = []
    prevlevel = prevx = None
    for i, x in enumerate(res):
        na = x[1]
        # Two consecutive backups of the same level: the older one is redundant.
        if i != 0 and prevlevel == backuplevel(na):
            cull.append(prevx)
        prevlevel = backuplevel(na)
        prevx = x
    if not cull:
        # fix: empty cull list previously raised IndexError at cull[0][0]
        print("Nothing to cull.")
        return
    # Timespan (in days) between first and last cullable backup decides how
    # many evenly spread backups to keep: one per full 24 days, rounded up.
    ts1 = cull[0][0]
    ts2 = cull[-1][0]
    timespan = (ts2 - ts1) / 60 / 60 / 24
    num = ceil(timespan / 24)
    print(num)
    keep = list(evenspread(cull, num))
    keep_names = {y[1] for y in keep}  # fix: set lookup instead of O(n^2) rescan
    prunes = [x for x in cull if x[1] not in keep_names]
    print("in total %s" % len(res))
    print("keeping %s" % len(keep))
    print("purging %s" % len(prunes))
    if dry_run:
        print("Dry run: not pruning.")
        return
    prune(prunes)
def run(dry_run=True):
    """Delete failed/canceled jobs older than 4 days: remove the volume file
    from disk and its catalog entries.

    :param dry_run: If True, only log what would be deleted.
    """
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        # Only jobs older than 4 days, so that running jobs won't be selected.
        # (fix: the old comment claimed 30 days, contradicting the query)
        cur.execute(
            "SELECT DISTINCT j.name, j.jobid, m.volumename FROM job j, jobmedia jm, "
            "media m WHERE j.JobStatus "
            "IN ('E', 'A', 'f', 't', 's') AND j.jobid=jm.jobid AND jm.mediaid=m.mediaid "
            "AND j.realendtime < NOW() - INTERVAL '4 days';")
        failed_job_jm_media = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return  # fix: falling through left failed_job_jm_media undefined (NameError)
    for jname, jobid, volname in failed_job_jm_media:
        volume_path = get_volpath(jname, volname)
        log.info("Deleting catalog entries for job (id: %s, volname: %s)." %
                 (jobid, volname))
        if not dry_run:
            print("volume_path: %s" % volume_path)
            if volume_path:
                log.info("Removing volume from disk %s" % volume_path)
                os.remove(volume_path)
                del_catalog(volname, jobid)
def main():
    """Delete failed jobs ('E', 'A', 'f') and their volumes from the catalog
    via bconsole."""
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT DISTINCT j.jobid, m.volumename FROM job j, jobmedia jm, media m WHERE j.JobStatus "
            "IN ('E', 'A', 'f') AND j.jobid=jm.jobid AND jm.mediaid=m.mediaid;"
        )
        failed_job_jm_media = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return  # fix: falling through left failed_job_jm_media undefined (NameError)
    for jobid, volname in failed_job_jm_media:
        log.info("Deleting catalog entries for job (id: %s, volname: %s)." %
                 (jobid, volname))
        # NOTE(review): dry_run is presumably a module-level flag -- confirm it
        # is defined, otherwise this raises NameError.
        if not dry_run:
            # Delete the volume record first ...
            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            # ... then the job record (fix: previously sent "delete volume="
            # with a jobid, a copy-paste error).
            p1 = Popen(["echo", "delete jobid=%s yes" % jobid], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
Beispiel #4
0
def run(storage_dir, dry_run=True):
    """Remove volume files under *storage_dir* that have no catalog entry.

    :param storage_dir: Directory holding the volume files.
    :param dry_run: If True, only print what would be deleted.
    """
    systemd_services_up(services)
    for entry in os.listdir(storage_dir):
        if has_catalog_entry(entry):
            continue
        path = os.path.join(storage_dir, entry)
        print("Delete %s" % path)
        if dry_run:
            continue
        os.remove(path)
def main():
    """Delete catalog records for volumes whose file no longer exists on the
    owning storage daemon's archive device (skipping unmounted devices)."""
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;"
        )
        media_storage = cur.fetchall(
        )  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
        return  # fix: falling through left media_storage undefined (NameError)
    with open(storages_conf, "r") as myfile:
        storages_conf_parsed = parse_conf(myfile)
    sd_conf_cache = {}  # fix: parse each sd config once, not per volume/storage pair
    for volname, storagename in media_storage:
        for storage in storages_conf_parsed:
            hn = storage["Address"]
            if hn not in sd_conf_cache:
                if not islocal(hn):
                    remote_sd_conf = remote_file_content(hn, sd_conf)
                    sd_conf_cache[hn] = parse_conf(remote_sd_conf)
                else:
                    with open(sd_conf, "r") as myfile:
                        sd_conf_cache[hn] = parse_conf(myfile)
            sd_conf_parsed = sd_conf_cache[hn]
            if storagename == storage["Name"]:
                devicename = storage["Device"]
                for device in sd_conf_parsed:
                    if devicename == device["Name"]:
                        volpath = os.path.join(device["Archive Device"],
                                               volname)
                        if verbose:
                            log.debug("hn: %s" % hn)
                        # "/" as mountpoint means the archive device is not
                        # mounted, so a missing file proves nothing.
                        if not find_mountpoint(device["Archive Device"],
                                               hn) == "/":
                            if not _isfile(volpath, hn):
                                log.info(
                                    "Deleted volume %s from catalog, because file doesn't exist."
                                    % volpath)
                                with open(del_orphan_log, 'a') as f:
                                    timestamp = datetime.now().strftime(
                                        '%Y-%m-%d %H:%M:%S')
                                    f.write("{0} {1}\n".format(timestamp, volpath))
                                if not dry_run:
                                    p1 = Popen([
                                        "echo",
                                        "delete volume=%s yes" % volname
                                    ],
                                               stdout=PIPE)
                                    p2 = Popen(["bconsole"],
                                               stdin=p1.stdout,
                                               stdout=PIPE)
                                    p1.stdout.close()
                                    out, err = p2.communicate()
                                    log.debug("out: %s, err: %s" % (out, err))
                            elif verbose is True:
                                log.info('File exists for %s' % volpath)
def run():
    """Delete the job ids selected by the configured QUERY from the catalog."""
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute(CONF('QUERY'))
        del_jobids = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return  # fix: falling through left del_jobids undefined (NameError)
    del_from_catalog(del_jobids)
Beispiel #7
0
def run(dry_run=False):
    """Delete volumes from the catalog when their file no longer exists on the
    storage daemon's mounted archive device.

    :param dry_run: If True, only log what would be deleted.
    """
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;"
        )
        media_storage = cur.fetchall(
        )  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
        return  # fix: falling through left media_storage undefined (NameError)
    storages_conf_parsed = bacula_parse(BACULA_DIR_BIN)
    for volname, storagename in media_storage:
        for storage_name, storage_value in storages_conf_parsed[
                "Storage"].items():
            hn = storage_value["Address"]
            if not islocal(hn):
                sd_conf_parsed = bacula_parse(BACULA_SD_BIN, hn=hn)
            else:
                sd_conf_parsed = bacula_parse(BACULA_SD_BIN)
            if storagename == storage_name:
                device = storage_value["Device"]
                ad = get_archive_device_of_device(device, sd_conf_parsed)
                if ad:
                    volpath = os.path.join(ad, volname)
                else:
                    continue
                if CONF('VERBOSE'):
                    log.debug("hn: %s" % hn)
                # fix: the existence check was accidentally nested under the
                # VERBOSE flag, so deletion only ran with verbose logging on
                # (the parallel parse_conf-based implementation runs it
                # unconditionally).
                if not find_mountpoint(ad, hn) == "/":
                    if not _isfile(volpath, hn):
                        log.info(
                            "Deleted volume %s from catalog, because file doesn't exist."
                            % volpath)
                        with open(CONF('LOG'), 'a') as f:
                            timestamp = datetime.now().strftime(
                                '%Y-%m-%d %H:%M:%S')
                            f.write("{0} {1}\n".format(timestamp, volpath))
                        # fix: delete only when *neither* flag requests a dry
                        # run (the old `not a or not b` deleted when either
                        # flag was off).
                        if not (dry_run or CONF('DRY_RUN')):
                            p1 = Popen(
                                ["echo",
                                 "delete volume=%s yes" % volname],
                                stdout=PIPE)
                            p2 = Popen(["bconsole"],
                                       stdin=p1.stdout,
                                       stdout=PIPE)
                            p1.stdout.close()
                            out, err = p2.communicate()
                            log.debug("out: %s, err: %s" % (out, err))
                    elif CONF('VERBOSE') is True:
                        log.info('File exists for %s' % volpath)
def run(dry_run=False):
    """Delete jobs matching the configured filters and their volumes.

    Builds a parameterized catalog query from DEL_JOB_NAMES, DEL_STORAGE_NAMES,
    DEL_NEWER and DEL_OLDER, joined with the configured OPERATOR, resolves each
    hit's volume path and hands the result to del_backups().

    :param dry_run: If True, store the dry-run flag in the config.
    """
    if dry_run:
        CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        query = """
SELECT DISTINCT j.jobid, j.name, m.volumename, s.name
FROM job j, media m, jobmedia jm, storage s
WHERE m.mediaid=jm.mediaid
AND j.jobid=jm.jobid
AND s.storageid=m.storageid
"""
        data = []
        if CONF('OPERATOR').lower() == "or":
            operator2 = " OR "
        else:
            operator2 = " AND "
        # fix: each filter also checks truthiness, because all(()) is True and
        # an empty config tuple would add a clause with no bound parameter.
        if CONF('DEL_JOB_NAMES') and all(CONF('DEL_JOB_NAMES')):
            data += CONF('DEL_JOB_NAMES')
            query2 = "j.name IN (%s)" % jobnames_placeholders
            query += operator2 + query2
        if CONF('DEL_STORAGE_NAMES') and all(CONF('DEL_STORAGE_NAMES')):
            data += CONF("DEL_STORAGE_NAMES")
            query2 = "s.name IN (%s)" % storagenames_placeholders
            query += operator2 + query2
        if CONF('DEL_NEWER') and all(CONF('DEL_NEWER')):
            data += CONF('DEL_NEWER')
            query += operator2 + "j.starttime >= %s::timestamp"
        if CONF('DEL_OLDER') and all(CONF('DEL_OLDER')):
            data += CONF('DEL_OLDER')
            query += operator2 + "j.starttime <= %s::timestamp"
        print("Query: %s %s" % (query, str(data)))
        query += ";"
        cur.execute(query, data)
        del_job_media_jm_storage = cur.fetchall()
        print(del_job_media_jm_storage)
    except Exception as e:
        print(format_exception(e))
        print(
            "\n\nYour config /etc/bacula-scripts/bacula_del_jobs_conf.py has an error.\n"\
            "Check if all your configured values are in the tuple format. E.g.:\n"\
            "DEL_NEWER = ('',) and not DEL_NEWER = ('')"
        )
        return
    sd_conf_parsed = bacula_parse("bareos-sd")
    storages_conf_parsed = bacula_parse("bareos-dir")
    # Resolve the volume path for each row; drop rows with no resolvable path.
    del_job_media_jm_storage = [
        (w, x, build_volpath(y, z, sd_conf_parsed, storages_conf_parsed), z) for w, x, y, z in
        del_job_media_jm_storage if build_volpath(y, z, sd_conf_parsed, storages_conf_parsed)
    ]
    del_backups(del_job_media_jm_storage)
Beispiel #9
0
def main():
    """Delete jobs and their volumes selected by the configured filter
    (job name, storage name, both, or start time)."""
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        query = "select distinct j.jobid, j.name, m.volumename, s.name from job j, media m, jobmedia jm, storage s " \
                "WHERE m.mediaid=jm.mediaid " \
                "AND j.jobid=jm.jobid " \
                "AND s.storageid=m.storageid "
        if filters == "jobname":
            data = jobnames
            query = query + " AND j.name IN (%s);" % (jobnames_placeholders)
        elif filters == "or_both":
            data = storagenames + jobnames
            query = query + " AND (s.name IN (%s) OR j.name IN (%s));" % (
                storagenames_placeholders, jobnames_placeholders)
        elif filters == "and_both":
            data = storagenames + jobnames
            # fix: "and_both" previously used OR, making it identical to "or_both"
            query = query + " AND (s.name IN (%s) AND j.name IN (%s));" % (
                storagenames_placeholders, jobnames_placeholders)
        elif filters == "storage":
            data = storagenames
            query = query + " AND s.name IN (%s);" % (
                storagenames_placeholders)
        elif filters == "newer_than_starttime":
            data = starttime
            query = query + " AND j.starttime >= %s::timestamp;"
        elif filters == "older_than_starttime":
            data = starttime
            query = query + " AND j.starttime <= %s::timestamp;"
        else:
            log.error("Wrong filter or filter not defined.")
            sys.exit()
        print("Query: %s %s" % (query, str(data)))
        # fix: removed `print(query % str(data))`, which raised TypeError for
        # queries containing more than one %s placeholder.
        cur.execute(query, data)
        del_job_media_jm_storage = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return  # fix: falling through left del_job_media_jm_storage undefined
    with open(sd_conf, 'r') as f:
        sd_conf_parsed = parse_conf(f)
    with open(storages_conf, 'r') as f:
        storages_conf_parsed = parse_conf(f)
    # Resolve the volume path for each row; drop rows with no resolvable path.
    del_job_media_jm_storage = [
        (w, x, build_volpath(y, z, sd_conf_parsed, storages_conf_parsed), z)
        for w, x, y, z in del_job_media_jm_storage
        if build_volpath(y, z, sd_conf_parsed, storages_conf_parsed)
    ]
    del_backups(del_job_media_jm_storage)
def run():
    """Alert by mail when the newest offsite (copy) backup is older than the
    configured maximum age in days."""
    systemd_services_up(services)
    offsite_ts = newest_offsite_backup()
    if not offsite_ts:
        print("No copy backup found")
        return
    current_ts = int(time.time())
    # fix: age is now - backup time (was reversed, yielding a negative value
    # that could never exceed the threshold, so the alert never fired)
    offsite_days = (current_ts - offsite_ts) / (60 * 60 * 24)
    max_age = CONF('MAX_OFFSITE_AGE_DAYS')
    if offsite_days > max_age:
        # fix: old format string had one placeholder but two undefined args
        # (host, mp) -> TypeError
        msg = "Offsite backups are too old: %.1f days" % offsite_days
        send_mail(event="error", subject=os.path.basename(__file__), message=msg)
    else:
        # fix: supply a tuple for both placeholders (was a single scalar)
        print("Last copy job is %.1f days old, younger than %s days"
              % (offsite_days, max_age))
def run():
    """Delete the job ids selected by the configured QUERY from the catalog."""
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(CONF('QUERY'))
        del_jobids = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return  # fix: falling through left del_jobids undefined (NameError)
    del_from_catalog(del_jobids)
Beispiel #12
0
def main():
    """Alert by mail when the newest offsite (copy) backup is older than
    max_offsite_days."""
    systemd_services_up(services)
    offsite_ts = newest_offsite_backup()
    if not offsite_ts:
        print("No copy backup found")
        return
    current_ts = int(time.time())
    # fix: age is now - backup time (was reversed, yielding a negative value
    # that could never exceed the threshold, so the alert never fired)
    offsite_days = (current_ts - offsite_ts) / (60 * 60 * 24)
    if offsite_days > max_offsite_days:
        # fix: old format string had one placeholder but two undefined args
        # (host, mp) -> TypeError
        msg = "Offsite backups are too old: %.1f days" % offsite_days
        send_mail(event="error", subject=os.path.basename(__file__), message=msg)
    else:
        # fix: supply a tuple for both placeholders (was a single scalar)
        print("Last copy job is %.1f days old, younger than %s days"
              % (offsite_days, max_offsite_days))
Beispiel #13
0
def run():
    """Collect last-backup info for the director and each configured monitor
    job, then send it as JSON to the monitoring endpoint."""
    systemd_services_up(services)
    monitor_jobs = None
    jobs = CONF("MONITOR_JOBS")
    if jobs:
        monitor_jobs = {job_name: last_backup(job_name) for job_name in jobs}
    data = {
        "fqdn": CONF('DIRECTOR_FQDN'),
        "last_backup": last_backup(),
        "monitor_jobs": monitor_jobs,
    }
    print(data)
    message = json.dumps(data)
    print(message)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(tcp_echo_client(message, loop))
    loop.close()
def run():
    """Gather last-backup timestamps (director-wide plus each MONITOR_JOBS
    entry) and ship them as a JSON message over the TCP echo client."""
    systemd_services_up(services)
    configured_jobs = CONF("MONITOR_JOBS")
    if configured_jobs:
        job_status = {name: last_backup(name) for name in configured_jobs}
    else:
        job_status = None
    payload = {
        "fqdn": CONF('DIRECTOR_FQDN'),
        "last_backup": last_backup(),
        "monitor_jobs": job_status,
    }
    print(payload)
    message = json.dumps(payload)
    print(message)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(tcp_echo_client(message, loop))
    loop.close()
def main():
    """Prune the volumes returned by the configured SQL query via bconsole."""
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(sql)
        volnames = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return  # fix: falling through left volnames undefined (NameError)
    for vn in volnames:
        print("Pruning volname %s." % (vn))
        # NOTE(review): dry_run is presumably a module-level flag -- confirm it
        # is defined, otherwise this raises NameError.
        if not dry_run:
            p1 = Popen(["echo", "prune volume=%s yes" % vn], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
def run(dry_run=False):
    """Delete volumes from the catalog when their file no longer exists on the
    storage daemon's mounted archive device (bareos variant).

    :param dry_run: If True, only log what would be deleted.
    """
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;")
        media_storage = cur.fetchall()  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
        return  # fix: falling through left media_storage undefined (NameError)
    storages_conf_parsed = bacula_parse("bareos-dir")
    for volname, storagename in media_storage:
        for storage_name, storage_value in storages_conf_parsed["Storage"].items():
            hn = storage_value["Address"]
            if not islocal(hn):
                sd_conf_parsed = bacula_parse("bareos-sd", hn=hn)
            else:
                sd_conf_parsed = bacula_parse("bareos-sd")
            if storagename == storage_name:
                device = storage_value["Device"]
                ad = get_archive_device_of_device(device, sd_conf_parsed)
                if ad:
                    volpath = os.path.join(ad, volname)
                else:
                    continue
                if CONF('VERBOSE'):
                    log.debug("hn: %s" % hn)
                # fix: the existence check was accidentally nested under the
                # VERBOSE flag, so deletion only ran with verbose logging on.
                if not find_mountpoint(ad, hn) == "/":
                    if not _isfile(volpath, hn):
                        log.info("Deleted volume %s from catalog, because file doesn't exist." % volpath)
                        with open(CONF('LOG'), 'a') as f:
                            timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                            f.write("{0} {1}\n".format(timestamp, volpath))
                        # fix: delete only when *neither* flag requests a dry
                        # run (the old `not a or not b` deleted when either
                        # flag was off).
                        if not (dry_run or CONF('DRY_RUN')):
                            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                            p1.stdout.close()
                            out, err = p2.communicate()
                            log.debug("out: %s, err: %s" % (out, err))
                    elif CONF('VERBOSE') is True:
                        log.info('File exists for %s' % volpath)
Beispiel #17
0
def run(dry_run=False):
    """Run the Bacula monitor, then check the required systemd services.

    :param dry_run: Forwarded to BaculaMonitor.
    """
    monitor = BaculaMonitor(dry_run)
    monitor.run()
    systemd_services_up(services)
def run(dry_run=False):
    """Prune purged volumes whose backup chain no longer needs them.

    Fetches all volumes and all purged volumes from the catalog, sorts the
    purged ones into full/diff/inc lists, and deletes a purged volume only
    when no unpurged backup (same client, fileset, job name and mediatype)
    still depends on it and enough newer backups exist.

    :param dry_run: Stored into the config as DRY_RUN; the actual deletion is
        delegated to del_backups().
    """
    CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return  # fix: falling through left volumes/purged_vols undefined
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]

    sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"))
    storages_conf_parsed = bacula_parse(CONF("bacula_dir_bin"))

    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        elif not islocal(hn):
            # fix: the old log line referenced `fn`, which is not defined
            # until parse_vol() runs below -> NameError on any remote storage.
            log.info("Parsing remote sd config of %s" % hn)
            remote_sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"), hn=hn)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif _isfile(volpath, hn) == False and volpath:
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if CONF('DEL_VOLS_WITH_NO_METADATA'):
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        # Workaround for copy volumes, which don't store the right job level. Notice
        #  this works only if your pool names include the job level (e.g. full, inc or diff)
        if mt in CONF('OFFSITE_MT'):
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        # fix: replaced conditional-expression statements (`append(...) if ... else ""`)
        # with plain branches.
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # A full volume may be removed only if a newer full exists and nothing
        # (inc/diff) still depends on it.
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
            continue
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            # fix: old message said "less than four three backups"
            log.info("Not removing {0}, because we have fewer than three full backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        # An inc volume is removable once no unpurged inc in the same
        # full/diff "bracket" remains.
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if newer_full_diff_backups:
                next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] < next_full_backup[3])
            if older_full_diff_backups:
                prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                # fix: old message said "less than four full backups" for a
                # `diff_backups < 2` condition
                log.info("Not removing {0}, because we have fewer than two diff backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)
Beispiel #19
0
def run(args):
    """Delete backup jobs (volume files and/or catalog entries) matching the
    deletion rules configured in /etc/bacula-scripts/bacula_del_jobs_conf.py.

    Rules (DEL_CLIENTS, DEL_FILESETS, DEL_NOT_FILESETS, DEL_JOB_NAMES,
    DEL_STORAGE_NAMES, DEL_NEWER, DEL_OLDER) are combined with the configured
    OPERATOR ("or"/"and") into one parameterized SELECT, then the matching
    jobs are handed to del_backups().

    :param args: argparse namespace; uses args.dry_run, args.default_storage
                 and args.force_del_catalog.
    """
    if args.dry_run:
        CONF_SET('DRY_RUN', args.dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        query = """
SELECT DISTINCT j.jobid, j.name, m.volumename, s.name
FROM job j, media m, jobmedia jm, storage s
WHERE m.mediaid=jm.mediaid
AND j.jobid=jm.jobid
AND s.storageid=m.storageid
"""
        data = []
        conditions = []
        # How multiple deletion rules are combined with each other.
        if CONF('OPERATOR').lower() == "or":
            operator2 = " OR "
        else:
            operator2 = " AND "
        if CONF('DEL_CLIENTS') and all(CONF('DEL_CLIENTS')):
            data += CONF('DEL_CLIENTS')
            conditions.append(
                "j.clientid IN (SELECT clientid FROM client WHERE name IN (%s))"
                % clients_placeholders)
        if CONF('DEL_FILESETS') and all(CONF('DEL_FILESETS')):
            data += CONF('DEL_FILESETS')
            conditions.append(
                "j.filesetid IN (SELECT filesetid FROM fileset WHERE fileset IN (%s))"
                % filesets_placeholders)
        if CONF('DEL_NOT_FILESETS') and all(CONF('DEL_NOT_FILESETS')):
            data += CONF('DEL_NOT_FILESETS')
            conditions.append(
                "j.filesetid NOT IN (SELECT filesetid FROM fileset WHERE fileset IN (%s))"
                % filesets_not_placeholders)
        if CONF('DEL_JOB_NAMES') and all(CONF('DEL_JOB_NAMES')):
            data += CONF('DEL_JOB_NAMES')
            conditions.append("j.name IN (%s)" % jobnames_placeholders)
        if CONF('DEL_STORAGE_NAMES') and all(CONF('DEL_STORAGE_NAMES')):
            data += CONF("DEL_STORAGE_NAMES")
            conditions.append("s.name IN (%s)" % storagenames_placeholders)
        if CONF('DEL_NEWER') and all(CONF('DEL_NEWER')):
            data += CONF('DEL_NEWER')
            conditions.append("j.starttime >= %s::timestamp")
        if CONF('DEL_OLDER') and all(CONF('DEL_OLDER')):
            data += CONF('DEL_OLDER')
            conditions.append("j.starttime <= %s::timestamp")
        # Check *all* deletion directives, not just a subset, so that e.g. a
        # config using only DEL_OLDER is not wrongly rejected.
        directives = [
            "DEL_CLIENTS", "DEL_FILESETS", "DEL_NOT_FILESETS",
            "DEL_JOB_NAMES", "DEL_STORAGE_NAMES", "DEL_NEWER", "DEL_OLDER"
        ]
        if all(CONF(directive) is None for directive in directives):
            print("No deletion rule configured. Exiting")
            sys.exit()
        if conditions:
            # Parenthesize the rule conditions: SQL's AND binds tighter than
            # OR, so an unparenthesized "... AND joins OR rule" would bypass
            # the join predicates and match (and delete!) unrelated jobs.
            query += " AND (" + operator2.join(conditions) + ")"
        print("Query: %s %s" % (query, str(data)))
        query += ";"
        cur.execute(query, data)
        select_job_media_jm_storage = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        print(
            "\n\nYour config /etc/bacula-scripts/bacula_del_jobs_conf.py has an error.\n"\
            "Check if all your configured values are in the tuple format. E.g.:\n"\
            "DEL_NEWER = ('',) and not DEL_NEWER = ('')"
        )
        return
    sd_conf_parsed = bacula_parse("bareos-sd")
    storages_conf_parsed = bacula_parse("bareos-dir")

    del_job_media_jm_storage = list()
    for jobid, jobname, volname, storagename in select_job_media_jm_storage:
        # Resolve the on-disk volume path once (the original resolved it
        # three times per row with identical arguments).
        storage_path = build_volpath(volname, storagename, sd_conf_parsed,
                                     storages_conf_parsed)
        if storage_path:
            print("Storage found: %s" % storage_path)
            del_job_media_jm_storage.append(
                (jobid, jobname, volname, storage_path))
        elif args.default_storage:
            print("Storage not found. Specified default_storage: %s" %
                  args.default_storage)
            del_job_media_jm_storage.append(
                (jobid, jobname, volname,
                 os.path.join(args.default_storage, volname)))
        elif args.force_del_catalog:
            # Path None => only the catalog entry is deleted, no file removal.
            print(
                "Storage not found. force_del_catalog: True. Deleting catalog entries"
            )
            del_job_media_jm_storage.append((jobid, jobname, volname, None))
        else:
            # Neither deleting file nor catalog
            print("Storage not found. Skipping")
    print("Deleting: %s" % del_job_media_jm_storage)
    del_backups(del_job_media_jm_storage)
Beispiel #20
0
def main():
    """Prune purged Bacula/Bareos volumes whose removal cannot break a
    backup chain.

    Queries the catalog for all volumes and for purged ones, sorts the purged
    volumes into full/diff/inc buckets (using volume metadata parsed from the
    volume file itself), then for each bucket decides whether deleting the
    volume would orphan any still-unpurged backup that depends on it. Volumes
    judged safe are handed to del_backups().
    """
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        # Without query results there is nothing to decide; previously the
        # code fell through here and crashed with a NameError on `volumes`.
        return
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]
    with open(sd_conf, "r") as f:
        sd_conf_parsed = parse_conf(f)
    with open(storages_conf, "r") as f:
        storages_conf_parsed = parse_conf(f)
    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        else:
            # Remote storage daemon: parse its sd config over the wire.
            remote_sd_conf = remote_file_content(hn, sd_conf)
            remote_sd_conf_parsed = parse_conf(remote_sd_conf)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif _isfile(volpath, hn) == False and volpath:
            # Volume file vanished: only the stale catalog record remains.
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if del_vols_with_no_metadata:
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    # echo "delete volume=..." | bconsole
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        if mt in offsite_mt:  # This is a workaround for copy volumes, which don't store the right job level. Notice
            # this works only if your pool names include the job level (e.g. full, inc or diff).
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        # Bucket by job level (plain if/elif instead of the original's
        # conditional-expression-as-statement misuse).
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    # Unpurged-backup tuple layout: x3[3]=jobtdate, x3[6]=level,
    # x3[7]=client, x3[8]=fileset, x3[9]=jobname, x3[10]=mediatype.
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        # Incrementals between this full and the next full/diff still need it.
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # Diffs between this full and the next full still need it.
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            # Message now matches the check (len < 3).
            log.info("Not removing {0}, because we have fewer than three full backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # Unpurged incrementals in the same full/diff "window" as this volume
        # still depend on it (restore chains run through every inc in between).
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if newer_full_diff_backups:
                next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] < next_full_backup[3])
            if older_full_diff_backups:
                prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                # Message now matches the check (len(diff_backups) < 2).
                log.info("Not removing {0}, because we have fewer than two diff backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)