Example 1
def run(dry_run=False):
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        print(query)
        cur.execute(query)
        res = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return
    cull = list()
    for i, x in enumerate(res):
        ts = x[0]
        na = x[1]
        # and (ts - prevts) < 8*24*60*60:
        if i != 0 and prevlevel == backuplevel(na):
            cull.append(prevx)
        prevlevel = backuplevel(na)
        prevts = ts
        prevna = na
        prevx = x
    if not cull:
        return
    # Get the first and last culled entries to determine the overall timespan
    ts1 = cull[0][0]
    ts2 = cull[-1][0]
    timespan = (ts2 - ts1)/60/60/24  # timespan in days
    num = ceil(timespan/24)  # number of 24-day periods, rounded up (one full backup kept per 24 days)
    print(num)
    keep = list(evenspread(cull, num))
    prunes = [x for x in cull if x[1] not in [y[1] for y in keep]]
    print("in total %s" % len(res))
    print("keeping %s" % len(keep))
    print("purging %s" % len(prunes))
    prune(prunes)
def last_backup(job_name=None):
    if job_name:
        sql_job_name = " AND j.name='%s'" % job_name
        SQL = "%s %s %s" % (SQL_SUCCESSFUL_JOBS, sql_job_name, SQL_ORDER_BY)
    else:
        SQL = "%s %s" % (SQL_SUCCESSFUL_JOBS, SQL_ORDER_BY)
    try:
        con = psycopg2.connect(
            database=db_name,
            user=db_user,
            host=db_host,
            password=db_password
        )
        cur = con.cursor()
        cur.execute(SQL)
        volnames = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return None
    realendtime = liget(liget(volnames, 0), 0)
    print(realendtime)
    if not realendtime:
        log.error("realendtime is None")
        return None
    last_backup = (datetime.datetime.now() - realendtime).total_seconds()
    return last_backup
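# liget is not defined in these excerpts; it presumably is a safe list indexer
# that returns a default instead of raising IndexError. A minimal sketch under
# that assumption:
def liget(lst, index, default=None):
    """Return lst[index], or `default` when lst is None or the index is out of range."""
    try:
        return lst[index]
    except (IndexError, TypeError):
        return default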
def main():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT DISTINCT j.jobid, m.volumename FROM job j, jobmedia jm, media m WHERE j.JobStatus "
            "IN ('E', 'A', 'f') AND j.jobid=jm.jobid AND jm.mediaid=m.mediaid;"
        )
        failed_job_jm_media = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return
    for jobid, volname in failed_job_jm_media:
        log.info("Deleting catalog entries for job (id: %s, volname: %s)." %
                 (jobid, volname))
        if not dry_run:
            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            p1 = Popen(["echo", "delete jobid=%s yes" % jobid], stdout=PIPE)  # delete the job record itself
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
def run(dry_run=True):
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT DISTINCT j.name, j.jobid, m.volumename FROM job j, jobmedia jm, "
            "media m WHERE j.JobStatus "
            "IN ('E', 'A', 'f', 't', 's') AND j.jobid=jm.jobid AND jm.mediaid=m.mediaid "
            "AND j.realendtime < NOW() - INTERVAL '4 days';")
        # Select jobs older than 4 days, so that currently running jobs won't be selected
        failed_job_jm_media = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return
    for jname, jobid, volname in failed_job_jm_media:
        volume_path = get_volpath(jname, volname)
        log.info("Deleting catalog entries for job (id: %s, volname: %s)." %
                 (jobid, volname))
        if not dry_run:
            print("volume_path: %s" % volume_path)
            if volume_path:
                log.info("Removing volume from disk %s" % volume_path)
                os.remove(volume_path)
                del_catalog(volname, jobid)
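# The `echo "delete volume=... yes" | bconsole` pipe recurs throughout these
# examples. A minimal helper sketch (hypothetical, not part of the original
# scripts) that wraps the pattern, using the same Popen/PIPE imports as above:
def bconsole_command(command):
    """Pipe a single command string into bconsole and return (stdout, stderr)."""
    p1 = Popen(["echo", command], stdout=PIPE)
    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
    p1.stdout.close()  # let p1 receive SIGPIPE if bconsole exits early
    return p2.communicate()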
Example 5
    def run(self):
        job_conf = self.dir_conf.get('Job', None)
        job_excludes = CONF("job_excludes")
        if isinstance(job_excludes, (str, int)):
            print("Config error: job_excludes has to be a list.")
            return
        if job_excludes:
            for job in job_excludes:
                job_conf.pop(job, None)
        jobs = list(job_conf)
        job_patterns = CONF("job_patterns")
        if isinstance(job_patterns, (str, int)):
            print("Config error: job_patterns has to be a list.")
            return
        if job_patterns and jobs:
            job_patterns = "(" + ")|(".join(job_patterns) + ")"
            jobs = [job for job in jobs if not re.match(job_patterns, job)]


        query = """
SELECT DISTINCT j.name, max(j.realendtime)
FROM job j, media m, jobmedia jm, storage s
WHERE m.mediaid=jm.mediaid
AND j.jobid=jm.jobid
AND s.storageid=m.storageid
AND j.name IN {0}
AND j.jobstatus IN ('T', 'W')
AND j.level IN ('F', 'I', 'D')
AND j.type IN ('B', 'C', 'c')
GROUP BY j.name;
""".format(tuple(jobs))
        try:
            con = psycopg2.connect(database=db_name,
                                   user=db_user,
                                   host=db_host,
                                   password=db_password)
            cur = con.cursor()
            cur.execute(query)
            res = cur.fetchall()
        except Exception as e:
            print(format_exception(e))
            return
        for job in res:
            name = job[0]
            realendtime = job[1]
            age = (datetime.datetime.now() - realendtime).days
            if age > CONF("max_days"):
                print(
                    "Warning: Backup job '{0}' is older than {2} days. Last backup is from "
                    "{1}.".format(name, realendtime, CONF("max_days")))
                if not self.dry_run:
                    send_mail(
                        "Error",
                        "HIGH: Backup older than %s days" % CONF("max_days"),
                        "Backup job '{0}' is older than {2} days. Last backup is from "
                        "{1}. Check the backup-server for errors ASAP."
                        "".format(name, realendtime, CONF("max_days")))
            else:
                print("OK: Backup job '{0}' is fine. Last backup is from "
                      "{1}.".format(name, realendtime))
def main():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;"
        )
        media_storage = cur.fetchall(
        )  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
        return
    with open(storages_conf, "r") as myfile:
        storages_conf_parsed = parse_conf(myfile)
    for volname, storagename in media_storage:
        for storage in storages_conf_parsed:
            hn = storage["Address"]
            if not islocal(hn):
                remote_sd_conf = remote_file_content(hn, sd_conf)
                sd_conf_parsed = parse_conf(remote_sd_conf)
            else:
                with open(sd_conf, "r") as myfile:
                    sd_conf_parsed = parse_conf(myfile)
            if storagename == storage["Name"]:
                devicename = storage["Device"]
                for device in sd_conf_parsed:
                    if devicename == device["Name"]:
                        volpath = os.path.join(device["Archive Device"],
                                               volname)
                        if verbose:
                            log.debug("hn: %s" % hn)
                        if find_mountpoint(device["Archive Device"],
                                           hn) != "/":
                            if not _isfile(volpath, hn):
                                log.info(
                                    "Deleted volume %s from catalog, because file doesn't exist."
                                    % volpath)
                                with open(del_orphan_log, 'a') as f:
                                    time = datetime.now().strftime(
                                        '%Y-%m-%d %H:%M:%S')
                                    f.write("{0} {1}\n".format(time, volpath))
                                if not dry_run:
                                    p1 = Popen([
                                        "echo",
                                        "delete volume=%s yes" % volname
                                    ],
                                               stdout=PIPE)
                                    p2 = Popen(["bconsole"],
                                               stdin=p1.stdout,
                                               stdout=PIPE)
                                    p1.stdout.close()
                                    out, err = p2.communicate()
                                    log.debug("out: %s, err: %s" % (out, err))
                            elif verbose is True:
                                log.info('File exists for %s' % volpath)
def run():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute(CONF('QUERY'))
        del_jobids = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return
    del_from_catalog(del_jobids)
Example 8
def run(dry_run=False):
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(
            "SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;"
        )
        media_storage = cur.fetchall(
        )  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
        return
    storages_conf_parsed = bacula_parse(BACULA_DIR_BIN)
    for volname, storagename in media_storage:
        for storage_name, storage_value in storages_conf_parsed[
                "Storage"].items():
            hn = storage_value["Address"]
            if not islocal(hn):
                sd_conf_parsed = bacula_parse(BACULA_SD_BIN, hn=hn)
            else:
                sd_conf_parsed = bacula_parse(BACULA_SD_BIN)
            if storagename == storage_name:
                device = storage_value["Device"]
                ad = get_archive_device_of_device(device, sd_conf_parsed)
                if ad:
                    volpath = os.path.join(ad, volname)
                else:
                    continue
                if CONF('VERBOSE'):
                    log.debug("hn: %s" % hn)
                if find_mountpoint(ad, hn) != "/":
                    if not _isfile(volpath, hn):
                        log.info(
                            "Deleted volume %s from catalog, because file doesn't exist."
                            % volpath)
                        with open(CONF('LOG'), 'a') as f:
                            time = datetime.now().strftime(
                                '%Y-%m-%d %H:%M:%S')
                            f.write("{0} {1}\n".format(time, volpath))
                        if not dry_run and not CONF('DRY_RUN'):
                            p1 = Popen(
                                ["echo",
                                 "delete volume=%s yes" % volname],
                                stdout=PIPE)
                            p2 = Popen(["bconsole"],
                                       stdin=p1.stdout,
                                       stdout=PIPE)
                            p1.stdout.close()
                            out, err = p2.communicate()
                            log.debug("out: %s, err: %s" % (out, err))
                    elif CONF('VERBOSE') is True:
                        log.info('File exists for %s' % volpath)
def del_backups(remove_backup):
    """Deletes list of backups from disk and catalog.

    Make sure to add to your sudoers file something like:
    `user ALL=NOPASSWD: /usr/bin/rm /mnt/8tb01/offsite01/*`. Notice that I added the offsite's path with the
    wildcard after the rm command, so that the user can only use rm for that directory.
    """
    for volpath, hn in remove_backup:
        volname = os.path.basename(volpath)
        log.info("Deleting %s:%s" % (hn, volpath))
        if not CONF('DRY_RUN'):
            if islocal(hn):
                try:
                    os.remove(volpath)
                    log.info("Deleted %s" % volpath)
                except Exception as e:
                    log.error(format_exception(e))
                    log.info(
                        "Deleting failed, apparently volpath %s doesn't exist."
                        % volpath)
            elif not islocal(hn):
                try:
                    p = Popen(["ssh", hn, "sudo", CONF("rm_bin"), volpath],
                              stdout=PIPE, stderr=PIPE)
                    o, e = p.communicate()  # stderr must be piped for the check below
                    if e:
                        if "ssh: Could not resolve hostname" in e.decode(
                                "UTF-8"):
                            log.error(e)
                            log.error(
                                "Please setup ssh keys for the storage host, so that this script can ssh to the "
                                "host %s" % hn)
                            continue
                except Exception as e:
                    log.error(format_exception(e))
                    log.info(
                        "Deleting failed, apparently volpath %s doesn't exist (remote delete)."
                        % volpath)
            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
Example 10
def run(dry_run=False):
    if dry_run:
        CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        query = """
SELECT DISTINCT j.jobid, j.name, m.volumename, s.name
FROM job j, media m, jobmedia jm, storage s
WHERE m.mediaid=jm.mediaid
AND j.jobid=jm.jobid
AND s.storageid=m.storageid
"""
        data = []
        filters = []
        if CONF('OPERATOR').lower() == "or":
            operator2 = " OR "
        else:
            operator2 = " AND "
        if all(CONF('DEL_JOB_NAMES')):
            data += CONF('DEL_JOB_NAMES')
            filters.append("j.name IN (%s)" % jobnames_placeholders)
        if all(CONF('DEL_STORAGE_NAMES')):
            data += CONF("DEL_STORAGE_NAMES")
            filters.append("s.name IN (%s)" % storagenames_placeholders)
        if all(CONF('DEL_NEWER')):
            data += CONF('DEL_NEWER')
            filters.append("j.starttime >= %s::timestamp")
        if all(CONF('DEL_OLDER')):
            data += CONF('DEL_OLDER')
            filters.append("j.starttime <= %s::timestamp")
        if filters:
            # AND the combined filters onto the join conditions, so the
            # configured operator only applies between the filters themselves.
            query += " AND (" + operator2.join(filters) + ")"
        print("Query: %s %s" % (query, str(data)))
        query += ";"
        cur.execute(query, data)
        del_job_media_jm_storage = cur.fetchall()
        print(del_job_media_jm_storage)
    except Exception as e:
        print(format_exception(e))
        print(
            "\n\nYour config /etc/bacula-scripts/bacula_del_jobs_conf.py has an error.\n"\
            "Check if all your configured values are in the tuple format. E.g.:\n"\
            "DEL_NEWER = ('',) and not DEL_NEWER = ('')"
        )
        return
    sd_conf_parsed = bacula_parse("bareos-sd")
    storages_conf_parsed = bacula_parse("bareos-dir")
    del_job_media_jm_storage = [
        (w, x, build_volpath(y, z, sd_conf_parsed, storages_conf_parsed), z) for w, x, y, z in
        del_job_media_jm_storage if build_volpath(y, z, sd_conf_parsed, storages_conf_parsed)
    ]
    del_backups(del_job_media_jm_storage)
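# jobnames_placeholders and storagenames_placeholders are not defined in these
# excerpts; presumably they are comma-separated "%s" markers so psycopg2 can
# bind one value per configured name. A sketch under that assumption:
jobnames = ("backup-host1", "backup-host2")  # hypothetical values
jobnames_placeholders = ", ".join(["%s"] * len(jobnames))  # -> "%s, %s"
# e.g.: cur.execute("... AND j.name IN (%s);" % jobnames_placeholders, jobnames)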
Example 11
def main():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        query = "select distinct j.jobid, j.name, m.volumename, s.name from job j, media m, jobmedia jm, storage s " \
                "WHERE m.mediaid=jm.mediaid " \
                "AND j.jobid=jm.jobid " \
                "AND s.storageid=m.storageid "
        if filters == "jobname":
            data = jobnames
            query = query + " AND j.name IN (%s);" % (jobnames_placeholders)
        elif filters == "or_both":
            data = storagenames + jobnames
            query = query + " AND (s.name IN (%s) OR j.name IN (%s));" % (
                storagenames_placeholders, jobnames_placeholders)
        elif filters == "and_both":
            data = storagenames + jobnames
            query = query + " AND (s.name IN (%s) OR j.name IN (%s));" % (
                storagenames_placeholders, jobnames_placeholders)
        elif filters == "storage":
            data = storagenames
            query = query + " AND s.name IN (%s);" % (
                storagenames_placeholders)
        elif filters == "newer_than_starttime":
            data = starttime
            query = query + " AND j.starttime >= %s::timestamp;"
        elif filters == "older_than_starttime":
            data = starttime
            query = query + " AND j.starttime <= %s::timestamp;"
        else:
            log.error("Wrong filter or filter not defined.")
            sys.exit()
        print("Query: %s %s" % (query, str(data)))
        print(query % str(data))
        cur.execute(query, data)
        del_job_media_jm_storage = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return
    with open(sd_conf, 'r') as f:
        sd_conf_parsed = parse_conf(f)
    with open(storages_conf, 'r') as f:
        storages_conf_parsed = parse_conf(f)
    del_job_media_jm_storage = [
        (w, x, build_volpath(y, z, sd_conf_parsed, storages_conf_parsed), z)
        for w, x, y, z in del_job_media_jm_storage
        if build_volpath(y, z, sd_conf_parsed, storages_conf_parsed)
    ]
    del_backups(del_job_media_jm_storage)
Example 12
def has_catalog_entry(volume_name):
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT * FROM media WHERE volumename=%s", (volume_name,))
        res = cur.fetchall()
        return len(res) > 0
    except Exception as e:
        log.error(format_exception(e))
        return None
def run():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(CONF('QUERY'))
        del_jobids = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return
    del_from_catalog(del_jobids)
    def main(self):
        self.get_config()
        for app, app_config in self.conf.items():
            app_ages = None
            if app in self.registered_app():
                print("(RUN APP) %s" % app)
                _app = getattr(self, app)
                try:
                    app_ages = _app(app_config)
                except Exception as e:
                    sbj = "(ERROR) %s failed" % (app)
                    msg = "Exception message: %s" % format_exception(e)
                    send_mail(event="error", subject=sbj, message=msg)
                    continue
            if app_config["evaluate_method"] in self.registered_evaluation():
                print("(RUN EVAL) %s" % app_config["evaluate_method"])
                evaluate_method = getattr(
                    self,
                    self.registered_evaluation().get(
                        app_config["evaluate_method"]))
                try:
                    evaluate_method(app_ages, app_config)
                except Exception as e:
                    sbj = "(ERROR) %s failed" % app_config["evaluate_method"]
                    msg = "Exception message: %s" % format_exception(e)
                    send_mail(event="error", subject=sbj, message=msg)
def del_backups(remove_backup):
    """Deletes list of backups from disk and catalog.

    Make sure to add to your sudoers file something like:
    `user ALL=NOPASSWD: /usr/bin/rm /mnt/8tb01/offsite01/*`. Notice that I added the offsite's path with the
    wildcard after the rm command, so that the user can only use rm for that directory.
    """
    for volpath, hn in remove_backup:
        volname = os.path.basename(volpath)
        log.info("Deleting %s:%s" % (hn, volpath))
        if not CONF('DRY_RUN'):
            if islocal(hn):
                try:
                    os.remove(volpath)
                    log.info("Deleted %s" % volpath)
                except Exception as e:
                    log.error(format_exception(e))
                    log.info("Deleting failed, apparently volpath %s doesn't exist." % volpath)
            elif not islocal(hn):
                try:
                    p = Popen(["ssh", hn, "sudo", CONF("rm_bin"), volpath],
                              stdout=PIPE, stderr=PIPE)
                    o, e = p.communicate()  # stderr must be piped for the check below
                    if e:
                        if "ssh: Could not resolve hostname" in e.decode("UTF-8"):
                            log.error(e)
                            log.error("Please setup ssh keys for the storage host, so that this script can ssh to the "
                                      "host %s" % hn)
                            continue
                except Exception as e:
                    log.error(format_exception(e))
                    log.info("Deleting failed, apparently volpath %s doesn't exist (remote delete)." % volpath)
            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
Example 16
def newest_offsite_backup():
    """Returns for newest offsite backup"""
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        query = "SELECT distinct j.jobtdate "\
                "FROM media m, job j, jobmedia jm "\
                "WHERE jm.mediaid=m.mediaid "\
                "AND jm.jobid=j.jobid "\
                "AND j.name IN (%s) " \
                "ORDER BY j.jobtdate DESC;" % jobnames_placeholders
        cur.execute(query, jobnames)
        time = cur.fetchall()
        if not time:
            return None
        else:
            return int(time[0][0])
    except Exception as e:
        print(format_exception(e))
        return None
def newest_offsite_backup():
    """Returns for newest offsite backup"""
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        query = "SELECT distinct j.jobtdate "\
                "FROM media m, job j, jobmedia jm "\
                "WHERE jm.mediaid=m.mediaid "\
                "AND jm.jobid=j.jobid "\
                "AND j.name IN (%s) " \
                "ORDER BY j.jobtdate DESC;" % jobnames_placeholders
        cur.execute(query, CONF('JOB_NAMES'))
        time = cur.fetchall()
        if not time:
            return None
        else:
            return int(time[0][0])
    except Exception as e:
        print(format_exception(e))
        return None
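# j.jobtdate appears to hold seconds since the epoch, so the age of the newest
# offsite backup can be derived like this (sketch, assuming the
# newest_offsite_backup() defined above):
import time

newest = newest_offsite_backup()
if newest is not None:
    age_days = (time.time() - newest) / 60 / 60 / 24
    print("Newest offsite backup is %.1f days old" % age_days)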
Example 18
def main():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(sql)
        volnames = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return
    for vn in volnames:
        print("Pruning volname %s." % (vn))
        if not dry_run:
            p1 = Popen(["echo", "prune volume=%s yes" % vn], stdout=PIPE)
            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
            p1.stdout.close()
            out, err = p2.communicate()
            log.debug("out: %s, err: %s" % (out, err))
def run(dry_run=False):
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT m.volumename, s.name FROM media m, storage s WHERE m.storageid=s.storageid;")
        media_storage = cur.fetchall()  # e.g. [('Incremental-ST-0126', 's8tb01'), ('Full-ST-0031', 's8tb01'), ..]
    except Exception as e:
        print(format_exception(e))
        return
    storages_conf_parsed = bacula_parse("bareos-dir")
    for volname, storagename in media_storage:
        for storage_name, storage_value in storages_conf_parsed["Storage"].items():
            hn = storage_value["Address"]
            if not islocal(hn):
                sd_conf_parsed = bacula_parse("bareos-sd", hn=hn)
            else:
                sd_conf_parsed = bacula_parse("bareos-sd")
            if storagename == storage_name:
                device = storage_value["Device"]
                ad = get_archive_device_of_device(device, sd_conf_parsed)
                if ad:
                    volpath = os.path.join(ad, volname)
                else:
                    continue
                if CONF('VERBOSE'):
                    log.debug("hn: %s" % hn)
                if find_mountpoint(ad, hn) != "/":
                    if not _isfile(volpath, hn):
                        log.info("Deleted volume %s from catalog, because file doesn't exist." % volpath)
                        with open(CONF('LOG'), 'a') as f:
                            time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                            f.write("{0} {1}\n".format(time, volpath))
                        if not dry_run and not CONF('DRY_RUN'):
                            p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                            p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                            p1.stdout.close()
                            out, err = p2.communicate()
                            log.debug("out: %s, err: %s" % (out, err))
                    elif CONF('VERBOSE') is True:
                        log.info('File exists for %s' % volpath)
Example 20
def last_backup(job_name=None):
    if job_name:
        sql_job_name = " AND j.name='%s'" % job_name
        SQL = "%s %s %s" % (SQL_SUCCESSFUL_JOBS, sql_job_name, SQL_ORDER_BY)
    else:
        SQL = "%s %s" % (SQL_SUCCESSFUL_JOBS, SQL_ORDER_BY)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        cur.execute(SQL)
        volnames = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return None
    realendtime = liget(liget(volnames, 0), 0)
    print(realendtime)
    if not realendtime:
        log.error("realendtime is None")
        return None
    last_backup = (datetime.datetime.now() - realendtime).total_seconds()
    return last_backup
Example 21
def send_mail(event, subject, message, priority="low"):
    try:
        # Create message container; the correct MIME type is multipart/alternative.
        msg = MIMEMultipart('alternative')
        msg['From'] = conf["SENDER"]
        msg['To'] = conf["RECIPIENT"]
        if priority == "high":
            msg['X-Priority'] = '2'
        # Escape with double curly brackets. Alternatively switch to %s string format
        style = '<style type="text/css">body {{ background-color: {0};}} p {{ color: black; font-size: 28px;}}</style>'
        error_style = style.format('red')
        warning_style = style.format('yellow')
        info_style = style.format('green')
        template = "<html>{0}<body><p>{1}</p></body></html>"
        if event.lower() in ["error", "e"]:
            html = template.format(error_style, message)
            msg['Subject'] = "error: " + subject
            log.error("Sending %s mail." % event)
        elif event.lower() in ["warning", "w"]:
            html = template.format(warning_style, message)
            msg['Subject'] = "warning: " + subject
            log.warning("Sending %s mail." % event)
        elif event.lower() in ["info", "i"]:
            html = template.format(info_style, message)
            msg['Subject'] = "info: " + subject
            log.info("Sending %s mail." % event)
        part1 = MIMEText(message, 'plain')
        part2 = MIMEText(html, 'html')
        msg.attach(part1)
        msg.attach(part2)
        s = smtplib.SMTP(conf["SMTP"])
        s.starttls()
        s.login(conf["USERNAME"], conf["PASSWORD"])
        s.sendmail(conf["SENDER"], conf["RECIPIENT"], msg.as_string())
        s.quit()
    except Exception as e:
        log.error(format_exception(e))
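# A minimal usage sketch for send_mail, assuming conf provides SENDER,
# RECIPIENT, SMTP, USERNAME and PASSWORD (the values below are hypothetical):
send_mail(
    event="warning",
    subject="Backup older than 14 days",
    message="Backup job 'example-job' has not run since 2018-01-01.",
    priority="high",
)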
Example 22
def preprocess_config(daemon, hn=False):
    """Parse bareos-dir, bareos-sd or bareos-fd config and return as dictionary"""
    cmd = ("%s -xc" % daemon).split()
    if hn:
        cmd = ["ssh", "-tt", hn, "sudo"] + cmd
    p1 = Popen(cmd, stdout=PIPE)
    try:
        text2 = p1.communicate()[0].decode("UTF-8")
    except Exception as e:
        print(format_exception(e))
        print("""\n---------\n
Failed to decode config. Try `bareos-dir -xc`, `bareos-fd -xc`, `bareos-sd -xc`
manually. There could be an error in your bareos config.
---------\n
""")
        return None
    if p1.returncode != 0:
        print("Bareos config error")
        return None
    # Remove spaces
    text2 = "{".join(
        list(filter(None, [x.strip(" ") for x in text2.split("{")])))
    text2 = "}".join(
        list(filter(None, [x.strip(" ") for x in text2.split("}")])))
    text2 = "\n".join(
        list(filter(None, [x.strip() for x in text2.split("\n")])))
    # Add quotes and remove lines containing commas or multiple equal signs
    quote_open = False
    text3 = list()
    unescaped_quotes = r'(?<!\\)(?:\\\\)*"'
    has_comma = r'(,)(?=(?:[^"]|"[^"]*")*$)'
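    # unescaped_quotes matches a double quote preceded by an even number of
    # backslashes (i.e. a quote that is not escaped); has_comma matches a
    # comma that sits outside of any double-quoted string.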
    for line in text2.split("\n"):
        if line.lower().startswith("device =") and not quote_open:
            # Add Device preprocessing for multiple Autochanger devices 10/2018
            line = line.replace('"', "")
            # Transform to one quoted string
            splitted = line.split("=")
            line = '"%s" = "%s"' % (splitted[0].strip(), splitted[1].lstrip())
        elif "=" in line or quote_open:
            if not quote_open:
                # Split only on the first equal sign occurrence
                directive = line.split("=", 1)
                directive_name = directive[0]
                directive_value = directive[1]
            else:
                directive_value += " %s" % line
            # Omit Lines with Comma
            comma_count = len(re.findall(has_comma, line))
            if not quote_open and comma_count != 0:
                continue
            # Omit Lines with multiple equal signs
            equal_count = line.count("=")
            if equal_count >= 2 and not quote_open:
                continue
            else:
                quote_count = len(re.findall(unescaped_quotes, line))
                if quote_count >= 5 and not quote_open:
                    continue
                directive_name = directive_name.strip()
                directive_value = directive_value.strip()
                quote_count_value = len(
                    re.findall(unescaped_quotes, directive_value))
                if quote_count_value >= 3:
                    continue
                if quote_count == 2:
                    # quote_count implies that directive_name has no quotes
                    line = "\"%s\" = %s" % (directive_name, directive_value)
                elif quote_count == 0 and not quote_open:
                    directive_value = directive_value.strip()
                    line = "\"%s\" = \"%s\"" % (directive_name,
                                                directive_value)
                elif quote_count == 0 and quote_open:
                    continue
                elif quote_count == 1 and quote_open:
                    quote_open = False
                    directive_value = directive_value.strip()
                    line = "\"%s\" = %s" % (directive_name, directive_value)
                elif quote_count == 1 and not quote_open:
                    quote_open = True
                    continue
        text3.append(line)

    # Add Quotes to Resource type
    text4 = list()
    for line in text3:
        if "{" in line:
            left, right = line.split("{", 1)
            left = "\"%s\"" % left
            line = left + "{"
        text4.append(line)

    # Put it back into a string and standardize last character by adding a newline in the end
    text4 = "\n".join(text4)
    if not text4.endswith("\n"):
        text4 += "\n"
    return text4
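# Hypothetical usage sketch: normalize the local director config, or fetch and
# normalize a remote storage daemon config over ssh (the hostname below is an
# assumption):
dir_text = preprocess_config("bareos-dir")
sd_text = preprocess_config("bareos-sd", hn="storage01.example.com")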
Example 23
                      '# PASSWORD = "******"\n'
                      '# SMTP = "smtp.gmail.com:587"')
    if not os.path.isfile(toml_file):
        print("(Create) example config in %s" % toml_file)
        print("(Info) Please edit the config file, then run ledger-mv")
        with open(toml_file, "w") as _file:
            _file.write(example_config)


create_example_config()
try:
    conf = toml.loads(pathlib.Path(toml_file).read_text())
except Exception as e:
    print("(Error) Please edit the config file, then run ledger-mv %s" %
          toml_file)
    print(format_exception(e))
    sys.exit()


def send_mail(event, subject, message, priority="low"):
    try:
        # Create message container; the correct MIME type is multipart/alternative.
        msg = MIMEMultipart('alternative')
        msg['From'] = conf["SENDER"]
        msg['To'] = conf["RECIPIENT"]
        if priority == "high":
            msg['X-Priority'] = '2'
        # Escape with double curly brackets. Alternatively switch to %s string format
        style = '<style type="text/css">body {{ background-color: {0};}} p {{ color: black; font-size: 28px;}}</style>'
        error_style = style.format('red')
        warning_style = style.format('yellow')
Example 24
def main():
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
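    # Column layout of each row in `volumes` / `unpurged_backups`, following the
    # SELECT above: 0 volumename, 1 storage name, 2 volstatus, 3 jobtdate,
    # 4 filesetid, 5 clientid, 6 job level, 7 client name, 8 fileset,
    # 9 job name, 10 mediatype.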
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]
    with open(sd_conf, "r") as f:
        sd_conf_parsed = parse_conf(f)
    with open(storages_conf, "r") as f:
        storages_conf_parsed = parse_conf(f)
    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        elif not islocal(hn):
            remote_sd_conf = remote_file_content(hn, sd_conf)
            remote_sd_conf_parsed = parse_conf(remote_sd_conf)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif volpath and not _isfile(volpath, hn):
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if del_vols_with_no_metadata:
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        if mt in offsite_mt:  # This is a workaround for copy volumes, which don't store the right job level. Notice
            # this works only if your pool names include the job level (e.g. full, inc or diff).
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # log.debug("\n\nDeciding which purged full vols to delete: cn: {0}, fn: {1}, backup_time: {2}, volpath:
        #            {3}".format(cn, fn, backup_time, volpath))
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        # log.info("newer_full_backups %s" % str(newer_full_backups))
        # log.info("newer_full_diff_backups %s" % str(newer_full_diff_backups))
        # log.info("next_full_diff_backup %s" % str(next_full_diff_backup))
        # log.info("inc_backups %s" % inc_backups)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            log.info("Not removing {0}, because we have less than four three backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if newer_full_diff_backups:
                next_full_backup = min(newer_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] < next_full_backup[3])
            if older_full_diff_backups:
                prev_full_backup = max(older_full_diff_backups, key=lambda x: x[3])
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            # log.info("newer_full_or_diff_backups %s" % str(newer_full_or_diff_backups))
            # log.info("next_full_or_diff_backup %s" % str(next_full_or_diff_backup))
            # log.info("inc_backups %s" % inc_backups)
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                log.info("Not removing {0}, because we have less than four full backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)
Example 25
def preprocess_config(daemon, hn=False):
    """Parse bareos-dir, bareos-sd or bareos-fd config and return as dictionary"""
    cmd = ("%s -xc" % daemon).split()
    if hn:
        cmd = ["ssh", "-tt", hn, "sudo"] + cmd
    p1 = Popen(cmd, stdout=PIPE)
    try:
        text2 = p1.communicate()[0].decode("UTF-8")
    except Exception as e:
        print(format_exception(e))
        print("""\n---------\n
Failed to decode config. Try `bareos-dir -xc`, `bareos-fd -xc`, `bareos-sd -xc`
manually. There could be an error in your bareos config.
---------\n
""")
        return None
    if p1.returncode != 0:
        print("Bareos config error")
        return None
    # Remove spaces
    text2 = "{".join(list(filter(None, [x.strip(" ") for x in text2.split("{")])))
    text2 = "}".join(list(filter(None, [x.strip(" ") for x in text2.split("}")])))
    text2 = "\n".join(list(filter(None, [x.strip() for x in text2.split("\n")])))
    # Add quotes and remove lines containing commas or multiple equal signs
    quote_open = False
    text3 = list()
    unescaped_quotes = r'(?<!\\)(?:\\\\)*"'
    has_comma = r'(,)(?=(?:[^"]|"[^"]*")*$)'
    for line in text2.split("\n"):
        if line.lower().startswith("device =") and not quote_open:
            # Add Device preprocessing for multiple Autochanger devices 10/2018
            line = line.replace('"', "")
            # Transform to one quoted string
            splitted = line.split("=")
            line = '"%s" = "%s"' % (splitted[0].strip(), splitted[1].lstrip())
        elif "=" in line or quote_open:
            if not quote_open:
                # Split only on the first equal sign occurrence
                directive = line.split("=", 1)
                directive_name = directive[0]
                directive_value = directive[1]
            else:
                directive_value += " %s" % line
            # Omit Lines with Comma
            comma_count = len(re.findall(has_comma, line))
            if not quote_open and comma_count != 0:
                continue
            # Omit Lines with multiple equal signs
            equal_count = line.count("=")
            if equal_count >= 2 and not quote_open:
                continue
            else:
                quote_count = len(re.findall(unescaped_quotes, line))
                if quote_count >= 5 and not quote_open:
                    continue
                directive_name = directive_name.strip()
                directive_value = directive_value.strip()
                quote_count_value = len(re.findall(unescaped_quotes, directive_value))
                if quote_count_value >= 3:
                    continue
                if quote_count == 2:
                    # quote_count implies that directive_name has no quotes
                    line = "\"%s\" = %s" % (directive_name, directive_value)
                elif quote_count == 0 and not quote_open:
                    directive_value = directive_value.strip()
                    line = "\"%s\" = \"%s\"" % (directive_name, directive_value)
                elif quote_count == 0 and quote_open:
                    continue
                elif quote_count == 1 and quote_open:
                    quote_open = False
                    directive_value = directive_value.strip()
                    line = "\"%s\" = %s" % (directive_name, directive_value)
                elif quote_count == 1 and not quote_open:
                    quote_open = True
                    continue
        text3.append(line)

    # Add Quotes to Resource type
    text4 = list()
    for line in text3:
        if "{" in line:
            left, right = line.split("{", 1)
            left = "\"%s\"" % left
            line = left + "{"
        text4.append(line)

    # Put it back into a string and standardize last character by adding a newline in the end
    text4 = "\n".join(text4)
    if not text4.endswith("\n"):
        text4 += "\n"
    return text4
def run(dry_run=False):
    CONF_SET('DRY_RUN', dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name, user=db_user, host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute("SELECT distinct m.volumename, s.name, m.volstatus, j.jobtdate, j.filesetid, j.clientid, j.level, "
                    "c.name, f.fileset, j.name, mt.mediatype "
                    "FROM media m, storage s, job j, jobmedia jm, fileset f, client c, mediatype mt "
                    "WHERE m.storageid=s.storageid "
                    "AND jm.mediaid=m.mediaid "
                    "AND jm.jobid=j.jobid "
                    "AND f.filesetid=j.filesetid "
                    "AND j.clientid=c.clientid "
                    "AND mt.mediatype=m.mediatype;")
        volumes = cur.fetchall()
        cur.execute("SELECT distinct m.volumename, s.name "
                    "FROM media m, storage s "
                    "WHERE m.storageid=s.storageid "
                    "AND m.volstatus='Purged';")
        purged_vols = cur.fetchall()
    except Exception as e:
        log.error(format_exception(e))
        return
    unpurged_backups = [x for x in volumes if x[2] != "Purged"]
    full_purged, diff_purged, inc_purged, remove_backup = [list() for x in range(4)]

    sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"))
    storages_conf_parsed = bacula_parse(CONF("bacula_dir_bin"))

    log.info("\n\n\n\nSorting purged volumes to full_purged, diff_purged and inc_purged.\n\n")
    log.info("There are %s purged_vols and %s unpurged_backups" % (len(purged_vols), len(unpurged_backups)))
    for volname, storagename in purged_vols:
        hn = storagehostname(storages_conf_parsed, storagename)
        if islocal(hn):
            volpath = build_volpath(volname, storagename, sd_conf_parsed, storages_conf_parsed)
        elif not islocal(hn):
            log.info("content of %s:%s (hn:filename)" % (hn, fn))
            remote_sd_conf_parsed = bacula_parse(CONF("bacula_sd_bin"), hn=hn)
            volpath = build_volpath(volname, storagename, remote_sd_conf_parsed, storages_conf_parsed, hn)
        if not volpath:
            log.info("Skipping this purged volume, because storage device is not mounted. %s:%s" % (hn, volpath))
            continue
        elif volpath and not _isfile(volpath, hn):
            log.info("Deleting backup from catalog, because volume doesn't exist anymore: %s:%s" % (hn, volpath))
            del_backups([(volpath, hn)])
            continue
        elif _isfile(volpath, hn):
            vol_parsed = parse_vol(volpath, hn)
            if vol_parsed:
                cn, fn, ts, jl, jn, mt, pn = vol_parsed
            else:
                if CONF('DEL_VOLS_WITH_NO_METADATA'):
                    log.info("Removing volume, because it has no metadata. Removing both file and catalog record.")
                    os.remove(volpath)
                    p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
                    p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
                    p1.stdout.close()
                    out, err = p2.communicate()
                    log.debug("out: %s, err: %s" % (out, err))
                continue
        else:
            continue
        x1 = (volpath, cn, fn, ts, hn, jn, mt)
        # Workaround for copy volumes, which don't store the right job level. Notice
        #  this works only if your pool names include the job level (e.g. full, inc or diff)
        if mt in CONF('OFFSITE_MT'):
            pnl = pn.lower()
            if "full" in pnl:
                jl = "F"
            elif "diff" in pnl:
                jl = "D"
            elif "inc" in pnl:
                jl = "I"
        if jl == "F":
            full_purged.append(x1)
        elif jl == "D":
            diff_purged.append(x1)
        elif jl == "I":
            inc_purged.append(x1)
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged full vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in full_purged:
        # log.debug("\n\nDeciding which purged full vols to delete: cn: {0}, fn: {1}, backup_time: {2}, volpath:
        #            {3}".format(cn, fn, backup_time, volpath))
        newer_full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and x3[3] > backup_time and cn == x3[7] and
                              fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if len(newer_full_backups) == 0:
            log.info("Skipping and not removing {0}, because it's the newest full backup.".format(volpath))
            continue
        next_full_backup = min(newer_full_backups, key=lambda x: x[3])
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        next_full_diff_backup = min(newer_full_diff_backups, key=lambda x: x[3])
        inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                       next_full_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # here we use next_full_backup
        diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and x3[3] > backup_time and x3[3] <
                        next_full_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        full_backups = [x3 for x3 in unpurged_backups if x3[6] == "F" and cn == x3[7] and fn == x3[8] and
                        jn == x3[9] and mt == x3[10]]
        # log.info("newer_full_backups %s" % str(newer_full_backups))
        # log.info("newer_full_diff_backups %s" % str(newer_full_diff_backups))
        # log.info("next_full_diff_backup %s" % str(next_full_diff_backup))
        # log.info("inc_backups %s" % inc_backups)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still incremental backups dependent on it.".format(volpath))
        elif len(diff_backups) > 0:
            log.info("Not removing {0}, because there are still diff backups dependent on it.".format(volpath))
            continue
        elif len(full_backups) < 3:
            log.info("Not removing {0}, because we have less than four three backups in total.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged incremental vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in inc_purged:
        newer_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        older_full_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] < backup_time and
                                   cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        # The chain boundaries do not depend on x3, so compute them once before the loop.
        next_full_backup = (min(newer_full_diff_backups, key=lambda x: x[3])
                            if newer_full_diff_backups else None)
        prev_full_backup = (max(older_full_diff_backups, key=lambda x: x[3])
                            if older_full_diff_backups else None)
        inc_backups = list()
        for x3 in unpurged_backups:
            inc_filter = [x3[6] == "I", cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            if next_full_backup:
                inc_filter.append(x3[3] < next_full_backup[3])
            if prev_full_backup:
                inc_filter.append(x3[3] > prev_full_backup[3])
            if all(inc_filter):
                inc_backups.append(x3)
        if len(inc_backups) > 0:
            log.info("Not removing {0}, because there are still chained inc backups that are not "
                     "purged.".format(volpath))
            continue
        else:
            log.info("Adding backup to remove list")
            remove_backup.append((volpath, hn))
    log.info("\n\n\n")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~~*~*~*~~~~~*~*~*~")
    log.info("\n\n\nDeciding which purged diff vols to delete")
    for volpath, cn, fn, backup_time, hn, jn, mt in diff_purged:
        newer_full_or_diff_backups = [x3 for x3 in unpurged_backups if x3[6] in ["F", "D"] and x3[3] > backup_time and
                                      cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
        if newer_full_or_diff_backups:
            next_full_or_diff_backup = min(newer_full_or_diff_backups, key=lambda x: x[3])
            inc_backups = [x3 for x3 in unpurged_backups if x3[6] == "I" and x3[3] > backup_time and x3[3] <
                           next_full_or_diff_backup[3] and cn == x3[7] and fn == x3[8] and jn == x3[9] and mt == x3[10]]
            diff_backups = [x3 for x3 in unpurged_backups if x3[6] == "D" and cn == x3[7] and fn == x3[8] and jn ==
                            x3[9] and mt == x3[10]]
            # log.info("newer_full_or_diff_backups %s" % str(newer_full_or_diff_backups))
            # log.info("next_full_or_diff_backup %s" % str(next_full_or_diff_backup))
            # log.info("inc_backups %s" % inc_backups)
            if len(inc_backups) > 0:
                log.info("Not removing {0}, because there are still incremental backups dependent on "
                         "it.".format(volpath))
                continue
            elif len(diff_backups) < 2:
                log.info("Not removing {0}, because we have fewer than two diff backups in total.".format(volpath))
                continue
            else:
                log.info("Adding backup to remove list")
                remove_backup.append((volpath, hn))
    log.info("\n\n\n\nDecisions made. Initating deletion.")
    log.info("remove_backup list: %s" % remove_backup)
    if len(remove_backup) == 0:
        log.info("Nothing to delete")
    del_backups(remove_backup)
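Note: del_backups() is referenced by these examples but never shown. Below is a minimal
sketch of what it could look like for the (volpath, hostname) tuples built above,
assuming the catalog entry is pruned through bconsole and the volume file is then
removed from disk; the helper body is an assumption, not the original code. Example 27
below passes 4-tuples (jobid, jobname, volname, volpath) instead, so the real helper
must handle more than one entry shape.

import os
from subprocess import Popen, PIPE

def del_backups(backups):
    """Hypothetical sketch: drop the catalog entry, then delete the volume file."""
    for volpath, hostname in backups:  # entry shape as used in this example
        volname = os.path.basename(volpath)
        # "delete volume=<name> yes" removes the volume from the Bacula catalog.
        p1 = Popen(["echo", "delete volume=%s yes" % volname], stdout=PIPE)
        p2 = Popen(["bconsole"], stdin=p1.stdout, stdout=PIPE)
        p1.stdout.close()
        out, err = p2.communicate()
        # Remove the file itself; a remote hostname would need e.g. ssh instead.
        if os.path.exists(volpath):
            os.remove(volpath)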
Example no. 27
0
def run(args):
    if args.dry_run:
        CONF_SET('DRY_RUN', args.dry_run)
    systemd_services_up(services)
    try:
        con = psycopg2.connect(database=db_name,
                               user=db_user,
                               host=db_host,
                               password=db_password)
        cur = con.cursor()
        query = """
SELECT DISTINCT j.jobid, j.name, m.volumename, s.name
FROM job j, media m, jobmedia jm, storage s
WHERE m.mediaid=jm.mediaid
AND j.jobid=jm.jobid
AND s.storageid=m.storageid
"""
        data = []
        if CONF('OPERATOR').lower() == "or":
            operator2 = " OR "
        else:
            operator2 = " AND "
        if CONF('DEL_CLIENTS') and all(CONF('DEL_CLIENTS')):
            data += CONF('DEL_CLIENTS')
            query2 = "j.clientid IN (SELECT clientid FROM client WHERE name IN (%s))" % clients_placeholders
            query += operator2 + query2
        if CONF('DEL_FILESETS') and all(CONF('DEL_FILESETS')):
            data += CONF('DEL_FILESETS')
            query2 = "j.filesetid IN (SELECT filesetid FROM fileset WHERE fileset IN (%s))" % filesets_placeholders
            query += operator2 + query2
        if CONF('DEL_NOT_FILESETS') and all(CONF('DEL_NOT_FILESETS')):
            data += CONF('DEL_NOT_FILESETS')
            query2 = "j.filesetid NOT IN (SELECT filesetid FROM fileset WHERE fileset IN (%s))" % filesets_not_placeholders
            query += operator2 + query2
        if CONF('DEL_JOB_NAMES') and all(CONF('DEL_JOB_NAMES')):
            data += CONF('DEL_JOB_NAMES')
            query2 = "j.name IN (%s)" % jobnames_placeholders
            query += operator2 + query2
        if CONF('DEL_STORAGE_NAMES') and all(CONF('DEL_STORAGE_NAMES')):
            data += CONF("DEL_STORAGE_NAMES")
            query2 = "s.name IN (%s)" % storagenames_placeholders
            query += operator2 + query2
        if CONF('DEL_NEWER') and all(CONF('DEL_NEWER')):
            data += CONF('DEL_NEWER')
            query += operator2 + "j.starttime >= %s::timestamp"
        if CONF('DEL_OLDER') and all(CONF('DEL_OLDER')):
            data += CONF('DEL_OLDER')
            query += operator2 + "j.starttime <= %s::timestamp"
        print("Query: %s %s" % (query, str(data)))
        directives = [
            "DEL_CLIENTS", "DEL_FILESETS", "DEL_NOT_FILESETS", "DEL_JOB_NAMES",
            "DEL_STORAGE_NAMES", "DEL_NEWER", "DEL_OLDER"
        ]
        if all(CONF(directive) is None for directive in directives):
            print("No deletion rule configured. Exiting")
            sys.exit()
        query += ";"
        cur.execute(query, data)
        select_job_media_jm_storage = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        print(
            "\n\nYour config /etc/bacula-scripts/bacula_del_jobs_conf.py has an error.\n"
            "Check if all your configured values are in the tuple format. E.g.:\n"
            "DEL_NEWER = ('',) and not DEL_NEWER = ('')"
        )
        return
    sd_conf_parsed = bacula_parse("bareos-sd")
    storages_conf_parsed = bacula_parse("bareos-dir")

    del_job_media_jm_storage = list()
    for jobid, jobname, volname, storagename in select_job_media_jm_storage:
        storage_path = build_volpath(volname, storagename, sd_conf_parsed,
                                     storages_conf_parsed)
        if storage_path:
            print("Storage found: %s" % storage_path)
            del_job_media_jm_storage.append(
                (jobid, jobname, volname, storage_path))
        elif args.default_storage:
            print("Storage not found. Specified default_storage: %s" %
                  args.default_storage)
            del_job_media_jm_storage.append(
                (jobid, jobname, volname,
                 os.path.join(args.default_storage, volname)))
        elif args.force_del_catalog:
            # Setting path to None. This way only the catalog entry will be deleted
            print(
                "Storage not found. force_del_catalog: True. Deleting catalog entries"
            )
            del_job_media_jm_storage.append((jobid, jobname, volname, None))
        else:
            # Neither the volume file nor the catalog entry gets deleted.
            print("Storage not found. Skipping")
    print("Deleting: %s" % del_job_media_jm_storage)
    del_backups(del_job_media_jm_storage)
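The *_placeholders variables (clients_placeholders, filesets_placeholders, and so on)
are not defined in this excerpt. Presumably each expands to one "%s" marker per
configured value, so that cur.execute() binds the values as parameters instead of
interpolating them into the SQL string. A minimal sketch under that assumption, with
a hypothetical helper name:

def sql_placeholders(values):
    """Hypothetical helper: build '%s, %s, ...' with one marker per bound value."""
    return ", ".join(["%s"] * len(values))

clients_placeholders = sql_placeholders(CONF('DEL_CLIENTS') or ())
filesets_placeholders = sql_placeholders(CONF('DEL_FILESETS') or ())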
Example no. 28
0
def all_backups():
    """List all jobs by client and fileset."""
    con = None
    jobs = defaultdict(lambda: defaultdict(defaultdict))
    hosts = dict(host_up())
    total_size = 0.0  # Defined before the try block so the final return works even if the query fails.
    try:
        con = psycopg2.connect(database=cf["db_name"],
                               user=cf["db_user"],
                               host=cf["db_host"])
        con.set_session(readonly=True)
        cur = con.cursor()
        cur.execute("""
            SELECT c.name, p.name, j.jobbytes, j.realendtime, j.starttime, j.jobfiles, f.fileset,
            m.volumename, j.jobid
            FROM client c, job j, fileset f, pool p, media m, jobmedia jm
            WHERE j.jobstatus IN ('T', 'W') AND j.level IN ('F', 'I', 'D') AND j.type IN ('B', 'C')
            AND j.clientid=c.clientid AND j.poolid=p.poolid AND j.filesetid=f.filesetid AND
            jm.mediaid=m.mediaid AND jm.jobid=j.jobid;
        """)
        tuples = cur.fetchall()
        for (client, pool, jobbytes, realendtime, starttime, jobfiles,
             fileset, volname, jobid) in tuples:
            try:
                duration = realendtime - starttime
            except Exception as e:
                logger.debug(format_exception(e))
                continue
            seconds = duration.total_seconds()
            minutes = int((seconds % 3600) // 60)  # Minutes component of the duration (hours discarded).
            endtime = realendtime.strftime("%d.%m.%y %H:%M")
            jobgigabytes = int(jobbytes / 1000000000)  # Truncates to whole gigabytes.
            pool_entry = (volname, jobid, jobgigabytes, endtime, minutes,
                          jobfiles)
            try:
                j = jobs[client][fileset][pool]
            except KeyError:
                jobs[client][fileset][pool] = set()
                j = jobs[client][fileset][pool]
            j.add(pool_entry)
    except Exception as e:
        logger.debug(format_exception(e))
    jobs = default_to_regular(jobs)  # Convert nested defaultdicts to plain dicts (see the sketch after this function).
    for jck, jcv in iteritems(jobs):
        for jfk, jfv in iteritems(jcv):
            jobs[jck][jfk] = OrderedDict(sorted(iteritems(jobs[jck][jfk])))
            for jpk, jpv in iteritems(jfv):
                for jpe in jpv:
                    # outputs: (92, 85, '22.05.15 21:23', 16, 384467, 'Full-LT-0007')
                    total_size += jpe[2]
                jobs[jck][jfk][jpk] = sorted(jpv)
    jobs = OrderedDict(sorted(iteritems(jobs)))
    return {'jobs': jobs, 'hosts': hosts, 'total_size': total_size}
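default_to_regular() is used above but not shown. A common recipe, assumed here, is to
recursively convert the nested defaultdicts into plain dicts so they print and compare
like ordinary mappings:

from collections import defaultdict

def default_to_regular(d):
    """Recursively turn nested defaultdicts into ordinary dicts."""
    if isinstance(d, defaultdict):
        d = {k: default_to_regular(v) for k, v in d.items()}
    return d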