def faas_idler(env: Environment, inactivity_duration=300, reconcile_interval=30):
    """
    Background process that suspends functions which have been idle for too long.

    Modeled after the OpenFaaS idler:
    https://github.com/openfaas-incubator/faas-idler
    https://github.com/openfaas-incubator/faas-idler/blob/master/main.go
    default values: https://github.com/openfaas-incubator/faas-idler/blob/668991c532156275993399ee79a297a4c2d651ec/docker-compose.yml

    :param env: the faas environment
    :param inactivity_duration: i.e. 15m (Golang duration)
    :param reconcile_interval: i.e. 1m (default value)
    :return: an event generator
    """
    faas: FaasSystem = env.faas

    while True:
        # wake up once per reconciliation round
        yield env.timeout(reconcile_interval)

        for deployment in faas.get_deployments():
            # only deployments opted into scale-to-zero are eligible
            if not deployment.scale_zero:
                continue

            for function in deployment.function_definitions.values():
                running = faas.get_replicas(function.name, FunctionState.RUNNING)
                # nothing running -> nothing to suspend
                if not running:
                    continue

                idle_time = env.now - env.metrics.last_invocation[function.name]
                if idle_time >= inactivity_duration:
                    env.process(faas.suspend(function.name))
                    logger.debug('%.2f function %s has been idle for %.2fs', env.now, function.name, idle_time)
def run(self, env: Environment):
    """Deploy both functions, wait until replicas are reachable, then fire 10 parallel requests at each."""
    # deploy functions
    for deployment in self.prepare_deployments():
        yield from env.faas.deploy(deployment)

    # block until replicas become available (scheduling has finished and replicas have been deployed on the node)
    logger.info('waiting for replica')
    for function_name in ('python-pi', 'resnet50-inference'):
        yield env.process(env.faas.poll_available_replica(function_name))

    # run workload: 10 invocations per function, all in flight at once
    processes = []

    logger.info('executing 10 python-pi requests')
    processes.extend(env.process(env.faas.invoke(FunctionRequest('python-pi'))) for _ in range(10))

    logger.info('executing 10 resnet50-inference requests')
    processes.extend(env.process(env.faas.invoke(FunctionRequest('resnet50-inference'))) for _ in range(10))

    # wait for invocation processes to finish
    for p in processes:
        yield p
def function_trigger(env: Environment, deployment: FunctionDeployment, ia_generator, max_requests=None):
    """
    Generates invocations of the deployment's function according to an inter-arrival profile.

    Waits the next inter-arrival time drawn from ia_generator, then schedules an
    asynchronous invocation, until either max_requests invocations were issued,
    the generator is exhausted, or the process is interrupted.

    :param env: the faas environment
    :param deployment: the function deployment whose function is invoked
    :param ia_generator: iterator yielding inter-arrival times between requests
    :param max_requests: stop after this many requests; None runs until interrupted
    :return: an event generator
    """
    # a single loop with a counter replaces the previously duplicated
    # bounded/unbounded branches (the loop body was copied verbatim in both)
    sent = 0
    try:
        while max_requests is None or sent < max_requests:
            ia = next(ia_generator)
            yield env.timeout(ia)
            # invoke asynchronously; we do not wait for the request to complete
            env.process(env.faas.invoke(FunctionRequest(deployment.name)))
            sent += 1
    except simpy.Interrupt:
        # external cancellation (e.g. an experiment duration elapsed) is expected
        pass
    except StopIteration:
        # inter-arrival generator ran dry before max_requests was reached
        logging.error(f'{deployment.name} gen has finished')
def run(self, env: Environment):
    """Deploy all configured functions, wait for their replicas, then replay the arrival profiles."""
    # roll out every deployment, then block until each one has an available replica
    for deployment in self.deployments:
        yield from env.faas.deploy(deployment)
    for deployment in self.deployments:
        yield env.process(env.faas.poll_available_replica(deployment.name))

    logging.info('executing requests')

    ps = []
    for deployment in self.deployments:
        try:
            ia_generator = self.arrival_profiles[deployment.name]
        except KeyError:
            logging.warning('no arrival profile for deployment %s', deployment.name)
            continue
        # without a fixed experiment duration, cap each trigger at 1000 requests;
        # otherwise run unbounded and let wait() interrupt the triggers later
        limit = 1000 if self.duration is None else None
        ps.append(env.process(function_trigger(env, deployment, ia_generator, max_requests=limit)))

    if self.duration is not None:
        env.process(self.wait(env, ps))

    # wait for every trigger process to finish
    yield from ps
def run(self, env: Environment):
    """Deploy the functions, wait for python-pi, then fire 100 requests at a constant 20 rps."""
    # deploy functions
    deployments = self.prepare_deployments()
    for deployment in deployments:
        yield from env.faas.deploy(deployment)

    # block until replicas become available (scheduling has finished and replicas have been deployed on the node)
    logger.info('waiting for replica')
    yield env.process(env.faas.poll_available_replica('python-pi'))

    # build an arrival profile: exponential inter-arrival times at a constant 20 requests/second
    rps_profile = constant_rps_profile(rps=20)
    arrivals = expovariate_arrival_profile(rps_profile)

    # replay the profile against the first deployment, capped at 100 requests
    yield from function_trigger(env, deployments[0], arrivals, max_requests=100)