def test_check(tmp_path):
    """_check() reports the PID of the process that owns the pidfile."""
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    pidfile.__enter__()
    assert pidfile._check() == int(os.getpid())
def test_enter_stale(tmp_path, monkeypatch):
    """A stale pidfile (its owner cannot be signalled) must not block
    a second Pidfile from being entered."""
    def failing_kill(pid, sig):
        raise IOError('Mock kill failure')

    # Make the liveness probe fail, so the first pidfile looks stale.
    monkeypatch.setattr(os, 'kill', failing_kill)

    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')

    first = Pidfile(pidfile_path)
    first.__enter__()
    assert first.pidfd is not None
    assert os.path.exists(pidfile_path) is True

    # The second instance should take over the stale pidfile.
    second = Pidfile(pidfile_path)
    second.__enter__()
    assert second.pidfd is not None
    assert os.path.exists(pidfile_path) is True
def test_check_ValueError(tmp_path):
    """_check() returns False when the pidfile holds a non-numeric value."""
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    with open(pidfile.pidfile, 'w') as handle:
        handle.write('Test')
    assert pidfile._check() is False
def listJobs(sort):
    """Print a table of per-host backup sizes.

    Rows are ordered by estimated total backup size (descending), or by
    average backup size increase when ``sort == 'average'``.  A final
    'Total' row sums both columns.
    """
    with Pidfile(config().lockfile, logger().debug, logger().error):
        # Run director
        directorInstance = director()
        jobs = directorInstance.getJobArray()

        sizes = {}
        averages = {}
        tot_size = 0
        tot_avg = 0
        for job in jobs:
            # NOTE: a fresh director() is instantiated per job here,
            # mirroring the original behaviour.
            sizes[job.hostname], averages[job.hostname] = \
                director().getBackupsSize(job)

        ordered = sorted(sizes.items(), key=lambda item: item[1],
                         reverse=True)
        if sort == 'average':
            ordered = sorted(averages.items(), key=lambda item: item[1],
                             reverse=True)

        table = PrettyTable([
            'Hostname',
            'Estimated total backup size',
            'Average backup size increase'
        ])
        for hostname, _ in ordered:
            tot_size += sizes[hostname]
            tot_avg += averages[hostname]
            table.add_row([
                hostname,
                jinjafilters()._bytesToReadableStr(sizes[hostname]),
                jinjafilters()._bytesToReadableStr(averages[hostname]),
            ])

        table.add_row([
            'Total',
            jinjafilters()._bytesToReadableStr(tot_size),
            jinjafilters()._bytesToReadableStr(tot_avg),
        ])
        table.align = "l"
        table.padding_width = 1
        print(table)
def test_enter_exception(tmp_path, monkeypatch):
    """__enter__ must propagate unexpected OSErrors raised by os.open."""
    # Signature matches os.open(path, flags[, mode]) so the mock cannot
    # fail with a TypeError before raising the intended error.
    def mock_open(path, flags, *args, **kwargs):
        raise OSError(errno.ENOSPC, 'Mock open failure')

    monkeypatch.setattr(os, 'open', mock_open)
    path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pf = Pidfile(path)
    with pytest.raises(OSError) as excinfo:
        pf.__enter__()
    # Bug fix: pytest's ExceptionInfo has no .errno attribute; the raised
    # exception is available as excinfo.value.
    assert excinfo.value.errno == errno.ENOSPC
    assert 'Mock open failure' in str(excinfo.value)
def test_enter(tmp_path):
    """__enter__ creates the pidfile and writes the current PID into it."""
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    pidfile.__enter__()
    assert pidfile.pidfd is not None
    assert os.path.exists(pidfile_path) is True
    with open(pidfile_path) as handle:
        content = handle.read()
    assert content == str(os.getpid())
def test_check_OSError(tmp_path, monkeypatch):
    """_check() returns False when os.kill cannot signal the stored PID."""
    def failing_kill(pid, sig):
        raise IOError('Mock kill failure')

    monkeypatch.setattr(os, 'kill', failing_kill)
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    pidfile.__enter__()
    assert pidfile._check() is False
def test_exit_OSError(tmp_path):
    """__exit__ returns False (propagate) on OSError but still removes
    the pidfile."""
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    pidfile.__enter__()
    ret = pidfile.__exit__(OSError, 'Mock value', 'Mock traceback')
    assert ret is False
    assert os.path.exists(pidfile_path) is False
def test_exit_PidfileProcessRunningException(tmp_path):
    """__exit__ must leave the pidfile in place when exiting because
    another process already owns it."""
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    pidfile.__enter__()
    ret = pidfile.__exit__(PidfileProcessRunningException,
                           'Mock value',
                           'Mock traceback')
    assert ret is False
    assert os.path.exists(pidfile_path) is True
def test_exit(tmp_path):
    """A clean __exit__ (no exception) removes the pidfile and
    returns True."""
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    pidfile.__enter__()
    ret = pidfile.__exit__(None, None, None)
    assert ret is True
    assert os.path.exists(pidfile_path) is False
def test_init(tmp_path):
    """The constructor stores the path and defaults the log/warn
    callables to stdout/stderr writers."""
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    assert pidfile.pidfile == pidfile_path
    assert pidfile.log == sys.stdout.write
    assert pidfile.warn == sys.stderr.write
def test_enter_exists(tmp_path, ):
    """__enter__ raises ProcessRunningException when a live process
    already holds the pidfile."""
    path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pf1 = Pidfile(path)
    pf1.__enter__()
    assert pf1.pidfd is not None
    assert os.path.exists(path) is True
    pf2 = Pidfile(path)
    with pytest.raises(ProcessRunningException) as excinfo:
        pf2.__enter__()
    assert pf2.pidfd is None
    # Fix: assert against the raised exception itself; str(ExceptionInfo)
    # is only a repr and its format is not a stable pytest contract.
    assert 'process already running' in str(excinfo.value)
def test_remove(tmp_path):
    """_remove() deletes the pidfile created by __enter__."""
    pidfile_path = os.path.join(str(tmp_path), 'autorsyncbackup.pid')
    pidfile = Pidfile(pidfile_path)
    pidfile.__enter__()
    assert os.path.exists(pidfile_path) is True
    pidfile._remove()
    assert os.path.exists(pidfile_path) is False
def runBackup(jobpath, dryrun):
    """ Start backup run

    Runs all enabled jobs under an exclusive pidfile lock: spawns
    config().jobworkers jobThread workers, feeds reachable jobs through
    workQueue, then (unless dryrun) rotates backups, prunes history and
    emails a status report.  If another run holds the lock, a
    "sudden death" notification is sent instead.
    """
    # Shared signalling/synchronisation objects handed to every worker.
    exitFlag = threading.Event()
    queueLock = threading.Lock()
    workQueue = queue.Queue(0)  # 0 => unbounded queue
    try:
        with Pidfile(config().lockfile, logger().debug, logger().error):
            # Run director
            directorInstance = director()
            jobs = directorInstance.getJobArray(jobpath)
            # Start threads (workers are only needed for a real run)
            threads = []
            if not dryrun:
                for i in range(0, config().jobworkers):
                    thread = jobThread(i, exitFlag, queueLock,
                                       directorInstance, workQueue)
                    thread.start()
                    threads.append(thread)
            # Execute jobs.  The lock is held while the queue is filled so
            # workers do not start consuming a partially-built queue.
            queueLock.acquire()
            durationstats = {}
            durationstats['backupstartdatetime'] = int(time.time())
            for job in jobs:
                if (job.enabled):
                    if directorInstance.checkRemoteHost(job):
                        if not dryrun:
                            # Add to queue
                            workQueue.put(job)
                    else:
                        # NOTE(review): branch nesting reconstructed from a
                        # collapsed source line — this else is taken to pair
                        # with checkRemoteHost (record unreachable hosts);
                        # confirm against upstream.
                        jobrunhistory().insertJob(job.backupstatus, None)
            queueLock.release()
            # Wait for queue to empty
            while not workQueue.empty():
                time.sleep(0.1)
            # Notify threads it's time to exit
            exitFlag.set()
            # Wait for all threads to complete
            for t in threads:
                t.join()
            durationstats['backupenddatetime'] = int(time.time())
            if not dryrun:
                # Do housekeeping: rotate successful backups, prune history
                durationstats['housekeepingstartdatetime'] = int(time.time())
                for job in jobs:
                    if (job.enabled):
                        if job.backupstatus['rsync_backup_status'] == 1:
                            directorInstance.backupRotate(job)
                jobrunhistory().deleteHistory()
                durationstats['housekeepingenddatetime'] = int(time.time())
                # Send status report
                statusemail().sendStatusEmail(jobs, durationstats)
            # else:
            #     for job in jobs:
            #         job.showjob()
    except ProcessRunningException as m:
        # Another autorsyncbackup run holds the lock: notify and log.
        statusemail().sendSuddenDeath(m)
        logger().error(m)