Example #1
0
def listJobs(sort):
    """Print a table of all backup jobs with their estimated total backup
    size and average backup size increase.

    sort -- 'average' sorts rows by average backup size increase;
            any other value sorts by estimated total backup size.
    """
    with Pidfile(config().lockfile, logger().debug, logger().error):
        # Run director
        directorInstance = director()
        jobs = directorInstance.getJobArray()
        sizes = {}
        averages = {}
        tot_size = 0
        tot_avg = 0
        for job in jobs:
            sizes[job.hostname], averages[
                job.hostname] = director().getBackupsSize(job)
        # Pick the sort source once instead of sorting `sizes`
        # unconditionally and throwing that result away when
        # sort == 'average' (the original sorted twice).
        source = averages if sort == 'average' else sizes
        aux = sorted(source.items(), key=lambda x: x[1], reverse=True)
        x = PrettyTable([
            'Hostname', 'Estimated total backup size',
            'Average backup size increase'
        ])
        # Reuse one formatter instance instead of constructing
        # jinjafilters() for every cell.
        filters = jinjafilters()
        for hostname, _metric in aux:
            tot_size += sizes[hostname]
            tot_avg += averages[hostname]
            size = filters._bytesToReadableStr(sizes[hostname])
            avg = filters._bytesToReadableStr(averages[hostname])
            x.add_row([hostname, size, avg])
        # Grand-total row across all hosts
        x.add_row(['Total',
                   filters._bytesToReadableStr(tot_size),
                   filters._bytesToReadableStr(tot_avg)])
        x.align = "l"
        x.padding_width = 1
        print(x)
def listJobs(sort):
    """Render every backup job as a table row showing the estimated
    total backup size and the average size increase per backup, ordered
    by the metric selected via ``sort``."""
    with Pidfile(config().lockfile, logger().debug, logger().error):
        # Ask the director for all configured jobs and their sizes.
        directorInstance = director()
        sizes = {}
        averages = {}
        for job in directorInstance.getJobArray():
            sizes[job.hostname], averages[job.hostname] = \
                director().getBackupsSize(job)
        # Sort descending on the requested metric.
        metric = averages if sort == 'average' else sizes
        ordered = sorted(metric.items(), key=lambda item: item[1],
                         reverse=True)
        table = PrettyTable(['Hostname', 'Estimated total backup size',
                             'Average backup size increase'])
        total_size = 0
        total_avg = 0
        for hostname, _value in ordered:
            total_size += sizes[hostname]
            total_avg += averages[hostname]
            readable_size = jinjafilters()._bytesToReadableStr(sizes[hostname])
            readable_avg = jinjafilters()._bytesToReadableStr(averages[hostname])
            table.add_row([hostname, readable_size, readable_avg])
        table.add_row(['Total',
                       jinjafilters()._bytesToReadableStr(total_size),
                       jinjafilters()._bytesToReadableStr(total_avg)])
        table.align = "l"
        table.padding_width = 1
        print(table)
 def getSizes(self, jobs):
     """Return a (sizes, averages) pair of hostname-keyed dicts built
     from the director's per-job backup size report."""
     from lib.director import director
     sizes, averages = {}, {}
     for job in jobs:
         size, average = director().getBackupsSize(job)
         sizes[job.hostname] = size
         averages[job.hostname] = average
     return sizes, averages
 def getSizes(self, jobs):
     """Collect backup size statistics for each job.

     Returns two dicts keyed by hostname: estimated total size and
     average size increase, as reported by the director."""
     from lib.director import director
     # One director query per job; each yields a (size, average) pair.
     reports = {job.hostname: director().getBackupsSize(job)
                for job in jobs}
     sizes = {host: pair[0] for host, pair in reports.items()}
     averages = {host: pair[1] for host, pair in reports.items()}
     return sizes, averages
Example #5
0
def runBackup(jobpath, dryrun):
    """Start a backup run.

    jobpath -- path handed to director().getJobArray() to select jobs
    dryrun  -- when true, only remote-host checks are performed: no
               worker threads are started, nothing is queued, and no
               housekeeping or status mail happens
    """
    exitFlag = threading.Event()
    queueLock = threading.Lock()
    workQueue = queue.Queue(0)

    try:
        with Pidfile(config().lockfile, logger().debug, logger().error):
            # Run director
            directorInstance = director()
            jobs = directorInstance.getJobArray(jobpath)
            # Start worker threads (skipped entirely on a dry run)
            threads = []
            if not dryrun:
                for i in range(0, config().jobworkers):
                    thread = jobThread(i, exitFlag, queueLock,
                                       directorInstance, workQueue)
                    thread.start()
                    threads.append(thread)

            # Execute jobs
            durationstats = {}
            durationstats['backupstartdatetime'] = int(time.time())
            # BUGFIX: use `with queueLock` instead of a bare
            # acquire()/release() pair — if checkRemoteHost() raised,
            # the lock was never released and the workers deadlocked.
            with queueLock:
                for job in jobs:
                    if job.enabled:
                        if directorInstance.checkRemoteHost(job):
                            if not dryrun:
                                # Reachable: add to the work queue
                                workQueue.put(job)
                        else:
                            # Unreachable host: record the failure now
                            jobrunhistory().insertJob(job.backupstatus, None)
            # Wait for the queue to drain (workers consume jobs)
            while not workQueue.empty():
                time.sleep(0.1)

            # Notify threads it's time to exit
            exitFlag.set()

            # Wait for all threads to complete
            for t in threads:
                t.join()
            durationstats['backupenddatetime'] = int(time.time())

            if not dryrun:
                # Housekeeping: rotate successfully completed backups
                # and prune old history entries
                durationstats['housekeepingstartdatetime'] = int(time.time())
                for job in jobs:
                    if job.enabled:
                        if job.backupstatus['rsync_backup_status'] == 1:
                            directorInstance.backupRotate(job)
                jobrunhistory().deleteHistory()
                durationstats['housekeepingenddatetime'] = int(time.time())

                # Send status report
                statusemail().sendStatusEmail(jobs, durationstats)
    except ProcessRunningException as m:
        # Another instance holds the pidfile: report and bail out
        statusemail().sendSuddenDeath(m)
        logger().error(m)
Example #6
0
def checkRemoteHost(jobpath):
    """Check the remote host of the first job at *jobpath*; the result
    maps to exit code 0 (reachable) or 1 (error)."""
    # Probe only the first job found and invert: a reachable host
    # yields False, which the caller uses as exit code 0.
    directorInstance = director()
    first_job = directorInstance.getJobArray(jobpath)[0]
    reachable = directorInstance.checkRemoteHost(first_job)
    return not reachable
def runBackup(jobpath, dryrun):
    """ Start backup run

    jobpath -- path given to director().getJobArray() to select jobs
    dryrun  -- when true, only remote-host checks run: no worker
               threads are started, nothing is queued, and no
               housekeeping or status mail is sent
    """
    exitFlag = threading.Event()
    queueLock = threading.Lock()
    workQueue = queue.Queue(0)

    try:
        # Pidfile guards against concurrent runs; raises
        # ProcessRunningException if another instance is active.
        with Pidfile(config().lockfile, logger().debug, logger().error):
            # Run director
            directorInstance = director()
            jobs = directorInstance.getJobArray(jobpath)

            # Start worker threads (skipped entirely on a dry run)
            threads = []
            if not dryrun:
                for i in range(0, config().jobworkers):
                    thread = jobThread(i, exitFlag, queueLock, directorInstance, workQueue)
                    thread.start()
                    threads.append(thread)

            # Execute jobs
            # NOTE(review): if checkRemoteHost() raises, this lock is
            # never released — consider `with queueLock:` here.
            queueLock.acquire()
            durationstats = {}
            durationstats['backupstartdatetime'] = int(time.time())
            for job in jobs:
                if(job.enabled):
                    if directorInstance.checkRemoteHost(job):
                        if not dryrun:
                            # Add to queue
                            workQueue.put(job)
                    else:
                        # Unreachable host: record the failed status now
                        jobrunhistory().insertJob(job.backupstatus, None)
            queueLock.release()
            # Wait for queue to empty (workers consume jobs)
            while not workQueue.empty():
                time.sleep(0.1)

            # Notify threads it's time to exit
            exitFlag.set()

            # Wait for all threads to complete
            for t in threads:
                t.join()
            durationstats['backupenddatetime'] = int(time.time())

            if not dryrun:
                # Do housekeeping: rotate successful backups, prune history
                durationstats['housekeepingstartdatetime'] = int(time.time())
                for job in jobs:
                    if(job.enabled):
                        if job.backupstatus['rsync_backup_status'] == 1:
                            directorInstance.backupRotate(job)
                jobrunhistory().deleteHistory()
                durationstats['housekeepingenddatetime'] = int(time.time())

                # Sent status report
                statusemail().sendStatusEmail(jobs, durationstats)
#            else:
#                for job in jobs:
#                    job.showjob()
    except ProcessRunningException as m:
        # Another instance holds the pidfile: report and log
        statusemail().sendSuddenDeath(m)
        logger().error(m)
def checkRemoteHost(jobpath):
    """Probe the remote host of the first job found at *jobpath*.

    Returns a falsy value on success (exit code 0) and a truthy one on
    error (exit code 1)."""
    inst = director()
    job = inst.getJobArray(jobpath)[0]
    return not inst.checkRemoteHost(job)