class SchedulerDaemon():
    """Daemon application object driving the metadata generation scheduler.

    Exposes the attributes a python-daemon style runner expects
    (``stdout``, ``stderr``, ``pidfile_path``, ``pidfile_timeout``) plus
    ``run``/``stop`` entry points.
    """

    # NOTE: class attribute — one scheduler instance is created at import
    # time and shared by all SchedulerDaemon instances.
    metaDataGenerationScheduler = MetaDataGenerationScheduler()

    def __init__(self):
        pidFile = REPO_CONFIG.get('SCHEDULER_DAEMON_PIDFILE')
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        self.pidfile_path = pidFile
        self.pidfile_timeout = 5

    def ensure_parent_path_exists(self, path):
        """Create the parent directory of *path* if it does not exist yet."""
        directory = os.path.dirname(path)
        # Create unconditionally and tolerate "already exists": the previous
        # isdir()-then-makedirs() pair raced against concurrent creation.
        try:
            os.makedirs(directory)
        except OSError:
            if not os.path.isdir(directory):
                raise

    def run(self):
        """Configure logging, start the scheduler, then idle forever."""
        logging.config.fileConfig(
            REPO_CONFIG['SCHEDULER_DAEMON_LOGGING_CONF'],
            disable_existing_loggers=False)
        self.metaDataGenerationScheduler.start()
        # Do not end this thread, because the SIGTERM signal can only be
        # caught from the main thread.
        # See: http://docs.python.org/library/signal.html
        while True:
            time.sleep(20)

    def stop(self, signum, frame):
        """Signal handler: shut the scheduler down and terminate the process."""
        self.metaDataGenerationScheduler.shutdown()
        # sys.exit, not the builtin exit(): the latter is injected by the
        # site module for interactive use and may be absent in a daemon.
        sys.exit(0)
def test_cleanup_cache_dir(self):
    """Cache dirs survive cleanup only while a lock file protects the repo."""
    deleted_repo = self.createStaticRepoWithContent()
    locked_repo = self.createStaticRepoWithContent()
    idle_repo = self.createStaticRepoWithContent()

    # wipe the first repo from disk entirely
    shutil.rmtree(self.config.getStaticRepoDir(deleted_repo))
    # place a lock file on the second repo
    open(self.config.getRepoLockFile(locked_repo), 'w').close()

    MetaDataGenerationScheduler().cleanupCacheDir()

    cache_dir_of = self.config.getRepoCacheDir
    self.assertFalse(os.path.exists(cache_dir_of(deleted_repo)))
    self.assertTrue(os.path.exists(cache_dir_of(locked_repo)))
    self.assertFalse(os.path.exists(cache_dir_of(idle_repo)))
def count_add_job(reponame):
    """Counting wrapper: record the call, then delegate to the real method."""
    self.add_job_counter = self.add_job_counter + 1
    MetaDataGenerationScheduler.add_job_for_repo(
        self.mockedscheduler, reponame)
def count_unschedule(reponame):
    """Counting wrapper: record the call, then delegate to the real method."""
    self.unscheduled_counter = self.unscheduled_counter + 1
    MetaDataGenerationScheduler.unschedule_by_reponame(
        self.mockedscheduler, reponame)
def get_mocked_scheduler(self):
    """Build a scheduler (5s interval) whose backend is a MockedScheduler."""
    scheduler = MetaDataGenerationScheduler(5)
    scheduler.sched = MockedScheduler()
    return scheduler