def processBackupStatus(self, job):
    job.backupstatus['hostname'] = job.hostname
    job.backupstatus['username'] = job.username
    if job.ssh:
        ssh = 'True'
    else:
        ssh = 'False'
    job.backupstatus['ssh'] = ssh
    job.backupstatus['share'] = job.share
    job.backupstatus['fileset'] = ':'.join(job.fileset)
    job.backupstatus['backupdir'] = job.backupdir
    job.backupstatus['speedlimitkb'] = job.speedlimitkb
    job.backupstatus['type'] = self.getWorkingDirectory()
    p = re.compile(r"^(.*)\s*?Number of files: (\d+)\s*"
                   r"Number of files transferred: (\d+)\s*"
                   r"Total file size: (\d+) bytes\s*"
                   r"Total transferred file size: (\d+)\s* bytes\s*"
                   r"Literal data: (\d+) bytes\s*"
                   r"Matched data: (\d+) bytes\s*"
                   r"File list size: (\d+)\s*"
                   r"File list generation time: (\S+)\s* seconds?\s*"
                   r"File list transfer time: (\S+)\s*seconds?\s*"
                   r"Total bytes sent: (\d+)\s*"
                   r"Total bytes received: (\d+)(\s|\S)*$",
                   re.MULTILINE | re.DOTALL)
    m = p.match(job.backupstatus['rsync_stdout'])
    if m:
        # Limit output to at most 10,000 characters, in case thousands of
        # vanished files would otherwise fill up the SQLite db / e-mail output
        job.backupstatus['rsync_stdout'] = job.backupstatus['rsync_stdout'][:10000]
        job.backupstatus['rsync_pre_stdout'] = m.group(1)[:10000]
        # Set backupstatus vars via regexp group capture
        job.backupstatus['rsync_number_of_files'] = m.group(2)
        job.backupstatus['rsync_number_of_files_transferred'] = m.group(3)
        job.backupstatus['rsync_total_file_size'] = m.group(4)
        job.backupstatus['rsync_total_transferred_file_size'] = m.group(5)
        job.backupstatus['rsync_literal_data'] = m.group(6)
        job.backupstatus['rsync_matched_data'] = m.group(7)
        job.backupstatus['rsync_file_list_size'] = m.group(8)
        job.backupstatus['rsync_file_list_generation_time'] = float(m.group(9))
        job.backupstatus['rsync_file_list_transfer_time'] = float(m.group(10))
        job.backupstatus['rsync_total_bytes_sent'] = m.group(11)
        job.backupstatus['rsync_total_bytes_received'] = m.group(12)
    else:
        if job.backupstatus['rsync_backup_status'] == 1:
            logger().error("Error unhandled output in rsync command (%s)"
                           % job.backupstatus['rsync_stdout'])
    jobrunhistory().insertJob(job.backupstatus)
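# A standalone, hedged sketch of the kind of rsync "--stats" summary the
# pattern in processBackupStatus() is written against: counters without
# thousands separators, one per line. The values below are invented for
# illustration; the pattern is repeated here only to make the snippet
# self-contained. processBackupStatus() maps group 2 to
# rsync_number_of_files, group 3 to rsync_number_of_files_transferred,
# and so on through group 12.
import re

sample_rsync_stdout = (
    "Number of files: 3278\n"
    "Number of files transferred: 1790\n"
    "Total file size: 6249777 bytes\n"
    "Total transferred file size: 6213437 bytes\n"
    "Literal data: 6213437 bytes\n"
    "Matched data: 0 bytes\n"
    "File list size: 80871\n"
    "File list generation time: 0.001 seconds\n"
    "File list transfer time: 0.000 seconds\n"
    "Total bytes sent: 39317\n"
    "Total bytes received: 6430608\n"
)

pattern = re.compile(r"^(.*)\s*?Number of files: (\d+)\s*"
                     r"Number of files transferred: (\d+)\s*"
                     r"Total file size: (\d+) bytes\s*"
                     r"Total transferred file size: (\d+)\s* bytes\s*"
                     r"Literal data: (\d+) bytes\s*"
                     r"Matched data: (\d+) bytes\s*"
                     r"File list size: (\d+)\s*"
                     r"File list generation time: (\S+)\s* seconds?\s*"
                     r"File list transfer time: (\S+)\s*seconds?\s*"
                     r"Total bytes sent: (\d+)\s*"
                     r"Total bytes received: (\d+)(\s|\S)*$",
                     re.MULTILINE | re.DOTALL)

m = pattern.match(sample_rsync_stdout)
assert m is not None
assert m.group(2) == "3278"        # becomes rsync_number_of_files
assert m.group(12) == "6430608"    # becomes rsync_total_bytes_received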
def test_openDbHandler_exception():
    path = '/non-existent'
    with pytest.raises(SystemExit) as e:
        jobrunhistory(path, check=True)
    assert e.type == SystemExit
    assert e.value.code == 1
def test_printOutput_no_history(test_config, tmp_path, capsys):
    config().jobspooldirectory = str(tmp_path)
    jobrunhistory(str(tmp_path), check=True)
    sc = statuscli()
    sc.printOutput('localhost')
    captured = capsys.readouterr()
    assert 'Could not find hostname' in captured.out
def test_identifyJob_error(tmp_path, caplog):
    logger().debuglevel = 3
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'hostname': 'localhost',
        'startdatetime': time.time(),
        'rsync_total_file_size': 1337,
        'rsync_literal_data': 42,
    }
    hooks = []
    jrh.insertJob(backupstatus, hooks)
    path = os.path.join(
        os.path.dirname(__file__),
        'etc/localhost.job',
    )
    j = job(path)
    directory = datetime.datetime.today().strftime(
        "%Y-%m-%d_%H-%M-%S_backup.0")
    i = jrh.identifyJob(j, directory)
    assert 'cannot identify job for' in caplog.text
    assert i is None
def getOverallBackupState(self, jobs):
    """Overall backup state = 'ok' unless there is at least one failed backup"""
    ret = "ok"
    for j in jobrunhistory().getJobHistory(self.getBackupHosts(jobs)):
        if j['rsync_backup_status'] != 1:
            ret = "error"
    return ret
def test_getJobHistory_none_hosts(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=True)
    history = jrh.getJobHistory(None)
    assert history is not None
    assert history == []
def getMissingHosts(self, jobs):
    """Add all configured hosts and remove every host that ran; any
    elements left over are the 'missing' hosts (see the sketch below)"""
    hosts = []
    for i in jobs:
        if i.enabled:
            hosts.append(i.hostname)
    for j in jobrunhistory().getJobHistory(self.getBackupHosts(jobs)):
        hosts.remove(j['hostname'])
    return hosts
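# A minimal worked example of the idea in getMissingHosts(), with made-up
# hostnames: every enabled host that has no row in the job history is left
# over and therefore reported as missing.
hosts = ['alpha', 'beta', 'gamma']      # enabled job hostnames
for ran in ['alpha', 'gamma']:          # hostnames present in the job history
    hosts.remove(ran)
assert hosts == ['beta']                # 'beta' never ran, so it is "missing"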
def test_closeDbHandler(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=True)
    assert jrh.closeDbHandler() is None
    with pytest.raises(sqlite3.ProgrammingError) as e:
        jrh.conn.in_transaction
    assert e.type == sqlite3.ProgrammingError
    assert 'Cannot operate on a closed database' in str(e.value)
def sendStatusEmail(self, jobs, durationstats):
    state = self.getOverallBackupState(jobs)
    hosts = self.getBackupHosts(jobs)
    missinghosts = self.getMissingHosts(jobs)
    stats = self.getStats(jobs)
    jrh = jobrunhistory().getJobHistory(self.getBackupHosts(jobs))
    subject = "%d jobs OK - %d jobs FAILED - %s" % (
        stats['total_backups_success'],
        stats['total_backups_failed'],
        datetime.datetime.today().strftime("%a, %d/%m/%Y"))
    body = self.getHtmlEmailBody(state, hosts, missinghosts, stats,
                                 durationstats, jrh, jobs)
    self._send(subject=subject, htmlbody=body)
def test_deleteHistory_exception(tmp_path, caplog):
    logger().debuglevel = 3
    jrh = jobrunhistory(str(tmp_path), check=True)
    jrh.closeDbHandler()
    jrh.deleteHistory()
    assert 'Cannot operate on a closed database' in caplog.text
def test_init(tmp_path):
    jrh = jobrunhistory(str(tmp_path))
    db_file = os.path.join(tmp_path, 'autorsyncbackup.db')
    assert os.path.exists(db_file)
    c = jrh.conn.cursor()
    for table in tables:
        assert get_table_sql(c, table) is None
def test_checkTables_twice(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=True)
    jrh.checkTables()
    c = jrh.conn.cursor()
    for table in tables:
        sql = get_table_sql(c, table)
        assert sql is not None
        assert re.search(r' %s ' % table, sql)
def test_createTableJobcommandhistoryTable(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=False)
    jrh.createTableJobcommandhistoryTable()
    c = jrh.conn.cursor()
    table = 'jobcommandhistory'
    sql = get_table_sql(c, table)
    assert sql is not None
    assert re.search(r' %s ' % table, sql)
def test_getJobHistory_exception(tmp_path, caplog):
    logger().debuglevel = 3
    jrh = jobrunhistory(str(tmp_path), check=True)
    jrh.closeDbHandler()
    history = jrh.getJobHistory(['localhost'])
    assert 'Cannot operate on a closed database' in caplog.text
    assert history is not None
    assert history == []
def test_init_check(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=True)
    db_file = os.path.join(tmp_path, 'autorsyncbackup.db')
    assert os.path.exists(db_file)
    c = jrh.conn.cursor()
    for table in tables:
        sql = get_table_sql(c, table)
        assert sql is not None
        assert re.search(r' %s ' % table, sql)
def test_printOutput(test_config, tmp_path, capsys):
    config().jobspooldirectory = str(tmp_path)
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'integrity_id': str(uuid.uuid1()),
        'hostname': 'localhost',
        'startdatetime': time.time(),
        'enddatetime': time.time(),
        'username': '******',
        'ssh': 'False',
        'share': 'backup',
        'include': '/etc',
        'exclude': '*.bak:.cache/*',
        'backupdir': '/tmp',
        'speedlimitkb': 1600,
        'filesrotate': None,
        'type': 'daily',
        'rsync_backup_status': 1,
        'rsync_return_code': 0,
        'rsync_pre_stdout': None,
        'rsync_stdout': 'foo\nbar\n',
        'rsync_number_of_files': 3278,
        'rsync_number_of_files_transferred': 1790,
        'rsync_total_file_size': 6249777,
        'rsync_total_transferred_file_size': 6213437,
        'rsync_literal_data': 6213437,
        'rsync_matched_data': 0,
        'rsync_file_list_size': 80871,
        'rsync_file_list_generation_time': 0.001,
        'rsync_file_list_transfer_time': 0,
        'rsync_total_bytes_sent': 39317,
        'rsync_total_bytes_received': 6430608,
        'sanity_check': 1,
    }
    hooks = []
    jrh.insertJob(backupstatus, hooks)
    sc = statuscli()
    sc.printOutput(backupstatus['hostname'])
    captured = capsys.readouterr()
    assert 'localhost' in captured.out
    assert 'Ok' in captured.out
def test_insertJob_exception(tmp_path, caplog):
    logger().debuglevel = 3
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'hostname': 'localhost',
        'non_existing_column': None,
    }
    jrh.insertJob(backupstatus, None)
    assert 'Could not insert job details for host' in caplog.text
    assert ('table jobrunhistory has no column'
            ' named non_existing_column') in caplog.text
def getStats(self, jobs):
    """Produce total/average stats out of database/jobs data"""
    result = jobrunhistory().getJobHistory(self.getBackupHosts(jobs))
    ret = {}
    ret['total_host_count'] = len(result)
    ret['total_backups_failed'] = 0
    ret['total_rsync_duration'] = 0
    ret['total_number_of_files'] = 0
    ret['total_number_of_files_transferred'] = 0
    ret['total_file_size'] = 0
    ret['total_transferred_file_size'] = 0
    ret['total_literal_data'] = 0
    ret['total_matched_data'] = 0
    ret['total_file_list_size'] = 0
    ret['total_file_list_generation_time'] = 0
    ret['total_file_list_transfer_time'] = 0
    ret['total_bytes_sent'] = 0
    ret['total_bytes_received'] = 0
    ret['total_speed_limit_kb'] = 0
    ret['average_backup_duration'] = 0
    ret['average_speed_limit_kb'] = 0
    for i in result:
        if i['rsync_backup_status'] == 1:
            ret['total_rsync_duration'] += i['enddatetime'] - i['startdatetime']
            ret['total_number_of_files'] += i['rsync_number_of_files']
            ret['total_number_of_files_transferred'] += i['rsync_number_of_files_transferred']
            ret['total_file_size'] += i['rsync_total_file_size']
            ret['total_transferred_file_size'] += i['rsync_total_transferred_file_size']
            ret['total_literal_data'] += i['rsync_literal_data']
            ret['total_matched_data'] += i['rsync_matched_data']
            ret['total_file_list_size'] += i['rsync_file_list_size']
            ret['total_file_list_generation_time'] += i['rsync_file_list_generation_time']
            ret['total_file_list_transfer_time'] += i['rsync_file_list_transfer_time']
            ret['total_bytes_sent'] += i['rsync_total_bytes_sent']
            ret['total_bytes_received'] += i['rsync_total_bytes_received']
            if i['speedlimitkb']:
                ret['total_speed_limit_kb'] += i['speedlimitkb']
        else:
            ret['total_backups_failed'] += 1
    ret['total_backups_success'] = ret['total_host_count'] - ret['total_backups_failed']
    if ret['total_backups_success'] > 0:
        ret['average_backup_duration'] = ret['total_rsync_duration'] / ret['total_backups_success']
        ret['average_speed_limit_kb'] = ret['total_speed_limit_kb'] / ret['total_backups_success']
    return ret
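# Hedged worked example of the average fields computed in getStats(): with
# two successful runs of 100 s and 200 s and one failed run, the numbers are
# invented but the arithmetic mirrors the method above (only successful runs
# contribute to the totals, and averages divide by the success count).
total_rsync_duration = 100 + 200
total_host_count = 3
total_backups_failed = 1
total_backups_success = total_host_count - total_backups_failed   # 2
average_backup_duration = total_rsync_duration / total_backups_success
assert average_backup_duration == 150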
def processBackupStatus(self, job):
    job.backupstatus['hostname'] = job.hostname
    if job.ssh:
        ssh = 'True'
        job.backupstatus['username'] = job.sshusername
    else:
        ssh = 'False'
        job.backupstatus['username'] = job.rsyncusername
    job.backupstatus['ssh'] = ssh
    job.backupstatus['share'] = job.rsyncshare
    job.backupstatus['include'] = ':'.join(job.include)
    job.backupstatus['exclude'] = ':'.join(job.exclude)
    job.backupstatus['backupdir'] = job.backupdir
    job.backupstatus['speedlimitkb'] = job.speedlimitkb
    job.backupstatus['type'] = self.getWorkingDirectory()
    self.parseRsyncOutput(job)
    jrh = jobrunhistory(check=True)
    jrh.insertJob(job.backupstatus, job.hooks)
    jrh.closeDbHandler()
def getBackupsSize(self, job):
    size = 0
    values = []
    latest = os.path.realpath(
        job.backupdir.rstrip('/') + "/" + job.hostname + "/latest")
    daily_path = job.backupdir.rstrip('/') + "/" + job.hostname + "/daily"
    jrh = jobrunhistory(check=True)
    for interval in ['daily', 'weekly', 'monthly']:
        dirlist = self.getBackups(job, interval)
        for directory in dirlist:
            jobRow = jrh.identifyJob(job, directory)
            if not jobRow:
                continue
            if interval == 'daily':
                # Only daily backups contribute to the average
                values.append(jobRow[3] or 0)
            if latest == daily_path + "/" + directory:
                # The backup 'latest' points to counts at its full file size
                size += jobRow[2] or 0
            else:
                # Every other backup only adds its literal (newly written) data
                size += jobRow[3] or 0
    jrh.closeDbHandler()
    avg = sum(values) / len(values) if values else 0
    return size, avg
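# Hedged illustration of the size accounting in getBackupsSize(), with
# invented numbers: identifyJob() rows expose rsync_total_file_size at index
# 2 and rsync_literal_data at index 3 (see test_identifyJob below), so the
# backup that 'latest' points to is counted at its full size while every
# other backup only adds the data rsync actually wrote for it.
full_size_of_latest_daily = 6249777
literal_data_of_other_backups = [42000, 39000]
size = full_size_of_latest_daily + sum(literal_data_of_other_backups)
assert size == 6330777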
def test_deleteHistory(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'hostname': 'localhost',
        'startdatetime': time.time() - (86400 * 600)
    }
    hooks = [
        {
            'local': 1,
            'runtime': 'before',
            'returncode': 0,
            'continueonerror': True,
            'script': 'uptime',
            'stdout': (' 12:11:51 up 2:40, 13 users, '
                       ' load average: 0.84, 0.71, 0.71\n'),
            'stderr': '',
        },
    ]
    jrh.insertJob(backupstatus, hooks)
    jrh.deleteHistory()
    c = jrh.conn.cursor()
    for table in tables:
        count = get_record_count(c, table)
        assert count is not None
        assert count == 0
def test_identifyJob(tmp_path, caplog):
    logger().debuglevel = 3
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'hostname': 'localhost',
        'startdatetime': time.time() - 2,
        'rsync_total_file_size': 1337,
        'rsync_literal_data': 42,
    }
    hooks = []
    jrh.insertJob(backupstatus, hooks)
    path = os.path.join(
        os.path.dirname(__file__),
        'etc/localhost.job',
    )
    j = job(path)
    directory = datetime.datetime.today().strftime(
        "%Y-%m-%d_%H-%M-%S_backup.0")
    i = jrh.identifyJob(j, directory)
    for record in caplog.records:
        assert 'cannot identify job for' not in record.msg
        assert 'large time difference for job' not in record.msg
        assert 'invalid values for job' not in record.msg
    assert i is not None
    assert i[0] >= 1
    assert i[1] == backupstatus['startdatetime']
    assert i[2] == backupstatus['rsync_total_file_size']
    assert i[3] == backupstatus['rsync_literal_data']
def test_insertJob_none_hooks(tmp_path, caplog):
    logger().debuglevel = 3
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'hostname': 'localhost',
        'startdatetime': time.time(),
    }
    jrh.insertJob(backupstatus, None)
    c = jrh.conn.cursor()
    for table in tables:
        count = get_record_count(c, table)
        assert count is not None
        if table == 'jobrunhistory':
            assert count == 1
        elif table == 'jobcommandhistory':
            assert count == 0
def test_identifyJob_invalid_values(tmp_path, caplog):
    logger().debuglevel = 3
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'hostname': 'localhost',
        'startdatetime': time.time() - 86400,
        'rsync_total_file_size': '',
        'rsync_literal_data': None,
    }
    hooks = []
    jrh.insertJob(backupstatus, hooks)
    path = os.path.join(
        os.path.dirname(__file__),
        'etc/localhost.job',
    )
    j = job(path)
    directory = datetime.datetime.today().strftime(
        "%Y-%m-%d_%H-%M-%S_backup.0")
    i = jrh.identifyJob(j, directory)
    # Warnings are not logged by default
    if len(caplog.records) > 0:
        assert 'invalid values for job' in caplog.text
    assert i is not None
    assert i[0] >= 1
    assert i[1] == backupstatus['startdatetime']
    assert i[2] == backupstatus['rsync_total_file_size']
    assert i[3] == backupstatus['rsync_literal_data']
print("Starting AutoRsyncBackup")

# Only check if host is reachable, set appropriate settings
if options.job and options.dryrun:
    checkSingleHost = True
    options.verbose = True
    config().debuglevel = 2

# Set logpath
logger(config().logfile)
logger().setDebuglevel(config().debuglevel)
logger().setVerbose(options.verbose)
for msg in config().debugmessages:
    logger().debug(msg)

# Make sure the database structure is created
jobrunhistory(check=True)

# Determine next step based on CLI options
if options.version:
    print(getVersion())
    exit(0)
elif options.hostname:
    exit(getLastBackupStatus(options.hostname))
elif options.sort:
    exit(listJobs(options.sort))
elif checkSingleHost:
    exit(checkRemoteHost(options.job))
else:
    runBackup(options.job, options.dryrun)
def __init__(self):
    self.jobrunhistory = jobrunhistory()
def test_sendStatusEmail(test_config, tmp_path, monkeypatch):
    email_path = os.path.join(str(tmp_path), 'status.eml')

    def mock_send(self, message):
        with open(email_path, 'w') as f:
            f.write(message.as_string())
        return True

    monkeypatch.setattr(mailer.Mailer, 'send', mock_send)
    config().jobspooldirectory = str(tmp_path)
    config().backupmailrecipients = ['*****@*****.**']
    path = os.path.join(
        os.path.dirname(__file__),
        'etc/localhost.job',
    )
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'integrity_id': str(uuid.uuid1()),
        'hostname': 'localhost',
        'startdatetime': time.time(),
        'enddatetime': time.time(),
        'username': '******',
        'ssh': 'False',
        'share': 'backup',
        'include': '/etc',
        'exclude': '*.bak:.cache/*',
        'backupdir': '/tmp',
        'speedlimitkb': 1600,
        'filesrotate': None,
        'type': 'daily',
        'rsync_backup_status': 1,
        'rsync_return_code': 0,
        'rsync_pre_stdout': None,
        'rsync_stdout': 'foo\nbar\n',
        'rsync_number_of_files': 3278,
        'rsync_number_of_files_transferred': 1790,
        'rsync_total_file_size': 6249777,
        'rsync_total_transferred_file_size': 6213437,
        'rsync_literal_data': 6213437,
        'rsync_matched_data': 0,
        'rsync_file_list_size': 80871,
        'rsync_file_list_generation_time': 0.001,
        'rsync_file_list_transfer_time': 0,
        'rsync_total_bytes_sent': 39317,
        'rsync_total_bytes_received': 6430608,
        'sanity_check': 1,
    }
    hooks = [
        {
            'local': 1,
            'runtime': 'before',
            'returncode': 0,
            'continueonerror': True,
            'script': 'uptime',
            'stdout': (
                ' 12:11:51 up 2:40, 13 users, '
                ' load average: 0.84, 0.71, 0.71\n'
            ),
            'stderr': '',
        },
    ]
    jrh.insertJob(backupstatus, hooks)
    j = job(path)
    jobs = [
        j,
    ]
    durationstats = {
        'backupstartdatetime': int(time.time()) - 40,
        'backupenddatetime': int(time.time()) - 30,
        'housekeepingstartdatetime': int(time.time()) - 20,
        'housekeepingenddatetime': int(time.time()) - 10,
    }
    se = statusemail()
    se.sendStatusEmail(jobs, durationstats)
    assert se.history is not None
    assert se.history != []
    assert os.path.exists(email_path)
    text_body = None
    html_body = None
    with open(email_path) as f:
        msg = email.message_from_file(f)
    assert msg is not None
    assert msg.is_multipart() is True
    for part in msg.walk():
        content_type = part.get_content_type()
        assert content_type in [
            'multipart/alternative',
            'text/plain',
            'text/html',
        ]
        if content_type == 'multipart/alternative':
            continue
        body = part.get_payload()
        assert body is not None
        encoding = part.get('Content-Transfer-Encoding')
        if encoding is not None and encoding == 'base64':
            body = base64.b64decode(body)
        if content_type == 'text/plain':
            text_body = body.decode()
        elif content_type == 'text/html':
            html_body = body.decode()
    assert text_body is not None
    assert html_body is not None
    assert 'Integrity:' in text_body
    assert '>Integrity</td>' in html_body
def test_openDbHandler(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=True)
    assert jrh.conn.in_transaction is False
def runBackup(jobpath, dryrun):
    """ Start backup run """
    exitFlag = threading.Event()
    queueLock = threading.Lock()
    workQueue = queue.Queue(0)
    try:
        with Pidfile(config().lockfile, logger().debug, logger().error):
            # Run director
            directorInstance = director()
            jobs = directorInstance.getJobArray(jobpath)

            # Start threads
            threads = []
            if not dryrun:
                for i in range(0, config().jobworkers):
                    thread = jobThread(i, exitFlag, queueLock,
                                       directorInstance, workQueue)
                    thread.start()
                    threads.append(thread)

            # Execute jobs
            queueLock.acquire()
            durationstats = {}
            durationstats['backupstartdatetime'] = int(time.time())
            for job in jobs:
                if job.enabled:
                    if directorInstance.checkRemoteHost(job):
                        if not dryrun:
                            # Add to queue
                            workQueue.put(job)
                    else:
                        jobrunhistory().insertJob(job.backupstatus, None)
            queueLock.release()

            # Wait for queue to empty
            while not workQueue.empty():
                time.sleep(0.1)

            # Notify threads it's time to exit
            exitFlag.set()

            # Wait for all threads to complete
            for t in threads:
                t.join()
            durationstats['backupenddatetime'] = int(time.time())

            if not dryrun:
                # Do housekeeping
                durationstats['housekeepingstartdatetime'] = int(time.time())
                for job in jobs:
                    if job.enabled:
                        if job.backupstatus['rsync_backup_status'] == 1:
                            directorInstance.backupRotate(job)
                jobrunhistory().deleteHistory()
                durationstats['housekeepingenddatetime'] = int(time.time())

                # Send status report
                statusemail().sendStatusEmail(jobs, durationstats)
            # else:
            #     for job in jobs:
            #         job.showjob()
    except ProcessRunningException as m:
        statusemail().sendSuddenDeath(m)
        logger().error(m)
def test_insertJob(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'integrity_id': str(uuid.uuid1()),
        'hostname': 'localhost',
        'startdatetime': time.time(),
        'enddatetime': time.time(),
        'username': '******',
        'ssh': 'False',
        'share': 'backup',
        'include': '/etc',
        'exclude': '*.bak:.cache/*',
        'backupdir': '/tmp',
        'speedlimitkb': 1600,
        'filesrotate': None,
        'type': 'daily',
        'rsync_backup_status': 1,
        'rsync_return_code': 0,
        'rsync_pre_stdout': None,
        'rsync_stdout': 'foo\nbar\n',
        'rsync_number_of_files': 3278,
        'rsync_number_of_files_transferred': 1790,
        'rsync_total_file_size': 6249777,
        'rsync_total_transferred_file_size': 6213437,
        'rsync_literal_data': 6213437,
        'rsync_matched_data': 0,
        'rsync_file_list_size': 80871,
        'rsync_file_list_generation_time': 0.001,
        'rsync_file_list_transfer_time': 0,
        'rsync_total_bytes_sent': 39317,
        'rsync_total_bytes_received': 6430608,
        'sanity_check': 1,
    }
    hooks = [
        {
            'local': 1,
            'runtime': 'before',
            'returncode': 0,
            'continueonerror': True,
            'script': 'uptime',
            'stdout': (' 12:11:51 up 2:40, 13 users, '
                       ' load average: 0.84, 0.71, 0.71\n'),
            'stderr': '',
        },
    ]
    jrh.insertJob(backupstatus, hooks)
    c = jrh.conn.cursor()
    query = """
            SELECT id
            FROM jobrunhistory
            WHERE integrity_id = :integrity_id
            """
    param = {
        'integrity_id': backupstatus['integrity_id'],
    }
    c.execute(query, param)
    row = c.fetchone()
    assert row is not None
    assert row[0] >= 1
    query = """
            SELECT c.id
            FROM jobcommandhistory AS c,
                 jobrunhistory AS h
            WHERE c.jobrunid = h.id
              AND h.integrity_id = :integrity_id
            """
    c.execute(query, param)
    row = c.fetchone()
    assert row is not None
    assert row[0] >= 1
def test_getJobHistory(tmp_path):
    jrh = jobrunhistory(str(tmp_path), check=True)
    backupstatus = {
        'integrity_id': str(uuid.uuid1()),
        'hostname': 'localhost',
        'startdatetime': time.time(),
        'enddatetime': time.time(),
        'username': '******',
        'ssh': 'False',
        'share': 'backup',
        'include': '/etc',
        'exclude': '*.bak:.cache/*',
        'backupdir': '/tmp',
        'speedlimitkb': 1600,
        'filesrotate': None,
        'type': 'daily',
        'rsync_backup_status': 1,
        'rsync_return_code': 0,
        'rsync_pre_stdout': None,
        'rsync_stdout': 'foo\nbar\n',
        'rsync_number_of_files': 3278,
        'rsync_number_of_files_transferred': 1790,
        'rsync_total_file_size': 6249777,
        'rsync_total_transferred_file_size': 6213437,
        'rsync_literal_data': 6213437,
        'rsync_matched_data': 0,
        'rsync_file_list_size': 80871,
        'rsync_file_list_generation_time': 0.001,
        'rsync_file_list_transfer_time': 0,
        'rsync_total_bytes_sent': 39317,
        'rsync_total_bytes_received': 6430608,
        'sanity_check': 1,
    }
    hooks = [
        {
            'local': 1,
            'runtime': 'before',
            'returncode': 0,
            'continueonerror': True,
            'script': 'uptime',
            'stdout': (' 12:11:51 up 2:40, 13 users, '
                       ' load average: 0.84, 0.71, 0.71\n'),
            'stderr': '',
        },
    ]
    jrh.insertJob(backupstatus, hooks)
    history = jrh.getJobHistory([backupstatus['hostname']])
    assert history is not None
    assert len(history) == 1
    assert history[0]['hostname'] == backupstatus['hostname']
    assert history[0]['commands'] is not None
    assert len(history[0]['commands']) == 1
    assert history[0]['commands'][0]['script'] == 'uptime'