async def show_progress(self):
    # Wait until the subprocess handle exists before trying to read from it.
    while self.process is None:
        await trio.sleep(0.001)
    logger.info(
        log_helper.generate(
            message=self._get_executable(),
            task_id=self.identifier,
            pid=self.process.pid,
            tags=["command", "execution"],
            status="STARTED",
        )
    )
    # Drain stdout/stderr while the process is running, then flush once more
    # so trailing output emitted just before exit is not lost.
    while self.process.poll() is None:
        await self.flush_streams()
    await self.flush_streams(final=True)
    successful = self.process.returncode == 0
    logger.info(
        log_helper.generate(
            message=self._get_executable(),
            task_id=self.identifier,
            pid=self.process.pid,
            returncode=self.process.returncode,
            status="SUCCESS" if successful else "FAILURE",
        )
    )

def stop(self, warm=False):
    self.change_state_to_stopping()
    if not warm:
        # Cold shutdown: terminate everything that is still running.
        logger.info(
            log_helper.generate(
                message="Terminating all active subprocesses in pool",
                shutdown="cold",
            )
        )
        trio.run(self._pool.terminate)
        logger.info(
            log_helper.generate(
                message="Running processes terminated", shutdown="cold"
            )
        )
    else:
        # Warm shutdown: poll until the pool drains on its own.
        waiter = PollingLoop(
            interval=1,
            status_reporter=lambda elapsed_time: logger.info(
                log_helper.generate(
                    message="Waiting for all active subprocesses in pool to complete",
                    elapsed_time=elapsed_time,
                    shutdown="warm",
                )
            ),
        )
        with waiter:
            pass
        logger.info(
            log_helper.generate(
                message="All subprocesses completed", shutdown="warm"
            )
        )
    self.change_state_to_stopped()

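# PollingLoop is referenced above but not defined in this listing. The class
# below is a hypothetical stand-in, sketched only to illustrate the
# interval/status_reporter contract the warm-shutdown branch relies on; the
# wait_for predicate is an assumption, not part of the original code.
import time

class PollingLoopSketch:
    def __init__(self, interval, status_reporter, wait_for=lambda: True):
        self.interval = interval
        self.status_reporter = status_reporter
        self.wait_for = wait_for
        self._started = None

    def __enter__(self):
        self._started = time.monotonic()
        # Report elapsed time on every pass until the condition holds; stop()
        # keeps the `with` body empty because all the waiting happens here.
        while not self.wait_for():
            self.status_reporter(time.monotonic() - self._started)
            time.sleep(self.interval)
        return self

    def __exit__(self, *exc):
        return False

# Example: reports twice, then the condition becomes true and the wait ends.
remaining = iter([False, False, True])
with PollingLoopSketch(interval=0.1, status_reporter=print,
                       wait_for=lambda: next(remaining)):
    pass
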
async def flush_streams(self, final=False):
    streams_active = True
    while streams_active:
        streams_active = False
        stdout = await self.process.stdout.receive_some()
        if stdout:
            streams_active = True
            logger.info(
                log_helper.generate(
                    stream="STDOUT",
                    message=stdout.decode(),
                    task_id=self.identifier,
                    pid=self.process.pid,
                )
            )
        stderr = await self.process.stderr.receive_some()
        if stderr:
            streams_active = True
            logger.info(
                log_helper.generate(
                    stream="STDERR",
                    message=stderr.decode(),
                    task_id=self.identifier,
                    pid=self.process.pid,
                )
            )
        if not final:
            # A single pass is enough while the process is still running;
            # only the final flush keeps looping until both streams are empty.
            return

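# Self-contained sketch of the stream-draining pattern used above, relying
# only on trio.open_process and ReceiveStream.receive_some(); the echo command
# and the print() destination are illustrative, not part of the original code.
import subprocess
import trio

async def drain_stdout_demo():
    proc = await trio.open_process(["echo", "hello"], stdout=subprocess.PIPE)
    # receive_some() returns b"" once the pipe is closed, so looping until an
    # empty chunk arrives drains everything the process wrote.
    while True:
        chunk = await proc.stdout.receive_some()
        if not chunk:
            break
        print(chunk.decode(), end="")
    await proc.wait()

trio.run(drain_stdout_demo)
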
async def _mark_as_completed(self, commands):
    for command in commands:
        try:
            self.processlist.remove(command)
        except KeyError:
            # The command is no longer tracked; log the current pool contents
            # instead of failing the cleanup.
            logger.warning(
                log_helper.generate(
                    self.processlist.commands,
                    tags=["pool", "removal", "failure"],
                )
            )

async def poll(self):
    async with self.clock as current_time:
        if self._stopped:
            logger.info(
                log_helper.generate(
                    message="paused",
                    tags=["scheduler", "queue", "freeze"],
                    status="PAUSED",
                    schedule_time=current_time,
                )
            )
            return
        for job in self._jobs:
            logger.info(
                log_helper.generate(
                    task_id=job.identifier,
                    tags=["scheduler", "queue", "check"],
                    schedule_time=current_time,
                    remaining_time=job.time_left_for_next_run(self.clock.now),
                )
            )
            if job.is_pending(current_time):
                logger.info(
                    log_helper.generate(
                        task_id=job.identifier,
                        tags=["scheduler", "queue", "put"],
                        schedule_time=current_time,
                    )
                )
                self.queue.put((job.priority, job))
        return self.queue.qsize()

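# The scheduler above only needs put()/qsize()/empty(), which matches the
# stdlib queue.PriorityQueue used in this sketch; the Job dataclass is
# hypothetical and stands in for whatever job type the scheduler manages.
import queue
from dataclasses import dataclass, field

@dataclass(order=True)
class Job:
    priority: int
    identifier: str = field(compare=False)

jobs = queue.PriorityQueue()
jobs.put((2, Job(2, "report")))
jobs.put((1, Job(1, "backup")))
# Entries come out lowest priority number first; order=True keeps jobs with
# equal priority comparable, so put() never raises on ties.
priority, job = jobs.get()
assert job.identifier == "backup"
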
async def _schedule(self, initial_time, single_cycle, dry_run):
    self._scheduler = Scheduler(jobs=self.get_jobs(), initial_time=initial_time)
    self._set_job_base_time()
    while True:
        # One nursery per scheduling cycle; leaving the block waits for all
        # commands started in this cycle to finish.
        async with trio.open_nursery() as executor_pool:
            while True:
                await self.check_for_pause()
                self._handle_pause()
                if not await self._poll():
                    continue
                await self._execute_commands(executor_pool)
                if (
                    self._check_parallel_executions_count(executor_pool)
                    or self.is_stopping()
                ):
                    break
        if single_cycle or self.is_stopping():
            logger.info(
                log_helper.generate(message="Single cycle selected - exiting")
            )
            break

async def _poll(self):
    queued_jobs_count = await self.scheduler.poll()
    logger.info(
        log_helper.generate(
            message=queued_jobs_count, tags=["scheduler", "queue", "size"]
        )
    )
    return not self.scheduler.queue.empty()

def process(self, command):
    if self.already_running(command):
        logger.warning(
            log_helper.generate(
                "skipped",
                str(command),
                tags=["duplicate_command", "job", "filtering"],
            )
        )
        return False
    return True

async def __aenter__(self):
    time_remaining = self.current_time - self.now
    if time_remaining >= 1:
        logger.info(
            log_helper.generate(
                message=time_remaining, tags=["scheduler", "clock", "wait"]
            )
        )
    wait_until = self.now + time_remaining
    # Sleep in small increments so cancellation stays responsive while we
    # wait for the scheduled instant.
    while self.now < wait_until:
        await trio.sleep(0.01)
    return self.current_time

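# The clock class is only partially shown, so this is a minimal,
# self-contained sketch of the wait-until-the-scheduled-instant idea behind
# __aenter__, built on trio.sleep_until; the Tick name and the 0.1 s interval
# are illustrative assumptions.
import trio

class Tick:
    def __init__(self, interval=1.0):
        self.interval = interval
        self._next = None

    async def __aenter__(self):
        if self._next is None:
            self._next = trio.current_time() + self.interval
        # Sleep until the scheduled instant, then hand that instant back.
        await trio.sleep_until(self._next)
        now = self._next
        self._next += self.interval
        return now

    async def __aexit__(self, *exc):
        return False

async def demo_tick():
    tick = Tick(0.1)
    async with tick as scheduled_time:
        print("tick at", scheduled_time)

trio.run(demo_tick)
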
def process(self, command):
    if self.already_running(command):
        running_instance = self._find_running_instance(command)
        running_instance.process.terminate()
        logger.warning(
            log_helper.generate(
                "terminated",
                str(command),
                tags=["duplicate_command", "job", "filtering"],
            )
        )
    return True

def _check_parallel_executions_count(self, nursery):
    max_parallel_executions_reached = (
        len(nursery.child_tasks) == self.max_parallel_executions
    )
    logger.info(
        log_helper.generate(
            message=len(nursery.child_tasks),
            tags=["executor", "pool", "size"],
            max_parallel_executions=self.max_parallel_executions,
            max_parallel_executions_reached=max_parallel_executions_reached,
        )
    )
    return max_parallel_executions_reached

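# Self-contained sketch of how a nursery exposes its running tasks, which is
# what the check above relies on. Only trio's documented Nursery.child_tasks
# attribute is used; the worker coroutine and the limit of 2 are illustrative.
import trio

async def worker(seconds):
    await trio.sleep(seconds)

async def demo_pool_size():
    max_parallel_executions = 2
    async with trio.open_nursery() as nursery:
        nursery.start_soon(worker, 0.1)
        nursery.start_soon(worker, 0.1)
        # child_tasks is a frozenset of the tasks still running in the nursery.
        print(len(nursery.child_tasks) == max_parallel_executions)  # True

trio.run(demo_pool_size)
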
async def run(self, additional_environment=None):
    additional_environment = additional_environment or {}
    environment = os.environ.copy()
    if self.environment is self.ENV_INHERIT_WHITELISTED_ONLY:
        environment = {
            variable: value
            for variable, value in environment.items()
            if variable in self.WHITELISTED_ENVIRONMENT_VARS
        }
    # Resolve callable environment values lazily against this command instance.
    for name, value in additional_environment.items():
        if callable(value):
            additional_environment[name] = value(self)
    environment.update(additional_environment)
    if self._dry_run:
        logger.info(
            log_helper.generate(
                message=self._get_executable(),
                task_id=self.identifier,
                stream=None,
                tags=["command", "execution"],
                environment=environment,
                status="DRY_RUN",
            )
        )
        return
    with trio.move_on_after(self.timeout) as cancel_scope:
        self._process = await trio.open_process(
            self._get_executable(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=self._shell,
            env=environment,
        )
        logger.info(
            log_helper.generate(
                message=self._get_executable(),
                task_id=self.identifier,
                pid=self.process.pid,
                stream=None,
                tags=["command", "execution"],
                status="PENDING",
            )
        )
        self._started_at = Clock.get_current_timestamp()
        await self.show_progress()
    if cancel_scope.cancelled_caught:
        logger.error(
            log_helper.generate(
                f"timeout since {self._started_at}",
                task_id=self.identifier,
                pid=self.process.pid,
                tags=["command", "execution"],
                status="CANCELLED",
            )
        )
        with trio.move_on_after(self.FLUSH_STREAM_TIMEOUT):
            await self.flush_streams(final=True)

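# Self-contained sketch of the move_on_after timeout pattern used in run(),
# assuming only trio and the stdlib; the "sleep 10" command, the 0.5 s budget
# and the explicit terminate()/wait() cleanup are illustrative additions, not
# behaviour taken from the original code.
import subprocess
import trio

async def run_with_timeout():
    with trio.move_on_after(0.5) as cancel_scope:
        proc = await trio.open_process(
            ["sleep", "10"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        # wait() is cancelled when the scope's deadline expires; the child
        # keeps running, so it has to be cleaned up afterwards.
        await proc.wait()
    if cancel_scope.cancelled_caught:
        proc.terminate()
        await proc.wait()
        print("timed out, returncode:", proc.returncode)

trio.run(run_with_timeout)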