def test_simple_scheduler(self):
    """Schedule a repeating task and verify it ran the expected number of times.

    Schedules ``bg_jobs.test_incr`` to increment a counter file 3 times at
    2-second intervals, then checks the final count in the file.
    """
    d = Dispatcher()
    path = "/tmp/labmanager-unit-test-{}".format(os.getpid())
    # Start from a clean slate so a stale file from a prior run can't skew the count.
    if os.path.exists(path):
        os.remove(path)
    d.schedule_task(bg_jobs.test_incr, args=(path, ), repeat=3, interval=2)
    # 3 runs at a 2s interval take ~6s; 8s leaves headroom for scheduler jitter.
    time.sleep(8)
    # Fix: the original wrapped this in `try: ... except Exception as e: raise e`,
    # a no-op that only obscures the traceback. Let failures propagate directly.
    with open(path) as fp:
        assert json.load(fp)['amt'] == 3
def test_unschedule_task(self):
    """A task scheduled for the future can be cancelled before it ever runs."""
    dispatcher = Dispatcher()
    counter_file = "/tmp/labmanager-unit-test-{}".format(os.getpid())
    if os.path.exists(counter_file):
        os.remove(counter_file)
    # Schedule the first run 5 seconds out so there is time to cancel it.
    start_at = datetime.datetime.utcnow() + datetime.timedelta(seconds=5)
    job_ref = dispatcher.schedule_task(bg_jobs.test_incr,
                                       scheduled_time=start_at,
                                       args=(counter_file, ),
                                       repeat=4,
                                       interval=1)
    time.sleep(2)
    cancelled = dispatcher.unschedule_task(job_ref)
    assert cancelled, "Task should have been cancelled, instead it was not found."
    time.sleep(5)
    # The task never fired, so the counter file must never have been created.
    assert not os.path.exists(counter_file)
def test_run_only_once(self):
    """A task scheduled with repeat=0 executes exactly once."""
    d = Dispatcher()
    path = "/tmp/labmanager-unit-test-{}".format(os.getpid())
    # Remove any stale counter file left over from a previous run.
    if os.path.exists(path):
        os.remove(path)
    future_t = datetime.datetime.utcnow() + datetime.timedelta(seconds=1)
    # Fix: the job reference was bound to an unused local `jr`; nothing is
    # unscheduled in this test, so the return value is simply discarded.
    d.schedule_task(bg_jobs.test_incr, scheduled_time=future_t,
                    args=(path, ), repeat=0)
    time.sleep(4)
    with open(path) as fp:
        assert json.load(fp)['amt'] == 1
def test_unschedule_midway_through(self):
    """Cancelling a repeating task partway through stops any further runs."""
    dispatcher = Dispatcher()
    counter_file = "/tmp/labmanager-unit-test-{}".format(os.getpid())
    if os.path.exists(counter_file):
        os.remove(counter_file)
    # A scheduled_time of None means "start right now".
    job_ref = dispatcher.schedule_task(bg_jobs.test_incr,
                                       scheduled_time=None,
                                       args=(counter_file, ),
                                       repeat=6,
                                       interval=5)
    time.sleep(8)
    found = dispatcher.unschedule_task(job_ref)
    assert found, "Task should have been cancelled, instead it was not found."
    time.sleep(5)
    # Two runs (t=0 and t=5) should have completed before cancellation.
    with open(counter_file) as fp:
        assert json.load(fp)['amt'] == 2
def test_schedule_with_repeat_is_zero(self):
    """When repeat is zero, the task should run only once despite the interval."""
    d = Dispatcher()
    path = "/tmp/labmanager-unit-test-{}".format(os.getpid())
    # Start from a clean slate so a stale file can't skew the count.
    if os.path.exists(path):
        os.remove(path)
    jr = d.schedule_task(bg_jobs.test_incr, args=(path, ), repeat=0, interval=4)
    time.sleep(6)
    # Fix: the original bound this result to an unused local `n`. The
    # unschedule is defensive cleanup; its return value is intentionally
    # ignored since the single run has already completed by now.
    d.unschedule_task(jr)
    time.sleep(5)
    with open(path) as fp:
        assert json.load(fp)['amt'] == 1, \
            "When repeat=0, the task should run only once."
def start_labbook_monitor(labbook: LabBook, username: str, dev_tool: str,
                          url: str, database: int = 1,
                          author: Optional[GitAuthor] = None) -> None:
    """Method to start Development Environment Monitors for a given Lab Book if available

    Args:
        labbook(LabBook): A populated LabBook instance to start monitoring
        username(str): The username of the logged in user
        dev_tool(str): The name of the development tool to monitor
        url(str): URL (from LabManager) at which this dev tool can be reached.
        database(int): The redis database ID to use for key storage. Default should be 1
        author(GitAuthor): A GitAuthor instance for the current logged in user starting the monitor

    Raises:
        ValueError: If `dev_tool` has no registered Dev Env Monitor.

    Returns:
        None
    """
    # Connect to redis
    redis_conn = redis.Redis(db=database)

    # Get all dev env monitors currently running
    dev_env_monitors = redis_conn.keys("dev_env_monitor:*")

    # Clean up after Lab Books that have "closed" by checking if the container is running
    docker_client = get_docker_client()
    for key in dev_env_monitors:
        if "activity_monitor" in key.decode():
            # Ignore all associated activity monitors, as they'll get cleaned up with the dev env monitor
            continue

        container_name = redis_conn.hget(key, 'container_name')
        try:
            docker_client.containers.get(container_name.decode())
        except NotFound:
            # Container isn't running, clean up.
            # Fix: logger.warn() is a deprecated alias — use logger.warning().
            logger.warning(
                "Shutting down zombie Activity Monitoring for {}.".format(
                    key.decode()))
            stop_dev_env_monitors(key.decode(), redis_conn, labbook.name)

    # Check if Dev Env is supported and then start Dev Env Monitor
    dev_env_mgr = DevEnvMonitorManager(database=database)
    if dev_env_mgr.is_available(dev_tool):
        # Add record to redis for Dev Env Monitor
        owner = InventoryManager().query_owner(labbook)
        dev_env_monitor_key = "dev_env_monitor:{}:{}:{}:{}".format(
            username, owner, labbook.name, dev_tool)

        if redis_conn.exists(dev_env_monitor_key):
            # Assume already set up properly (it wasn't cleaned up above)
            logger.info(
                f'Found existing entry for {dev_env_monitor_key}, skipping setup'
            )
            return

        # Fix: the original re-queried the owner here with a second
        # InventoryManager().query_owner(labbook) call; reuse the value
        # computed above instead of doing the lookup twice.
        redis_conn.hset(dev_env_monitor_key, "container_name",
                        infer_docker_image_name(labbook.name, owner, username))
        redis_conn.hset(dev_env_monitor_key, "labbook_root", labbook.root_dir)
        redis_conn.hset(dev_env_monitor_key, "url", url)

        # Set author information so activity records can be committed on behalf of the user
        if author:
            redis_conn.hset(dev_env_monitor_key, "author_name", author.name)
            redis_conn.hset(dev_env_monitor_key, "author_email", author.email)

        # Schedule the dev env monitor to run every 3 seconds, indefinitely (repeat=None)
        d = Dispatcher()
        kwargs = {'dev_env_name': dev_tool, 'key': dev_env_monitor_key}
        job_key = d.schedule_task(run_dev_env_monitor, kwargs=kwargs,
                                  repeat=None, interval=3)

        # Persist the dispatcher job key so the monitor can be unscheduled later.
        redis_conn.hset(dev_env_monitor_key, "process_id", job_key.key_str)

        logger.info("Started `{}` dev env monitor for lab book `{}`".format(
            dev_tool, labbook.name))
    else:
        raise ValueError(
            f"{dev_tool} Developer Tool does not support monitoring")