def handle(self):
    """ Try to promote this server's singleton to cluster-wide scope.

    Does nothing (beyond logging) if the server has no singleton attached or
    if the singleton is already cluster-wide. On a successful promotion the
    ensure-cluster-wide-singleton job is deleted, connectors are initialized
    and scheduler jobs are added.
    """
    singleton = self.server.singleton_server

    # No singleton attached at all - nothing for us to do.
    if not singleton:
        logger.debug(
            'Ignoring event, cid:[{}], server id:[{}], name:[{}] has no singleton attached'.format(
                self.cid, self.server.id, self.server.name))
        return

    # Already promoted - nothing to do either.
    if singleton.is_cluster_wide:
        logger.debug(
            'Ignoring event, cid:[{}], server id:[{}], name:[{}], singleton is already cluster-wide'.format(
                self.cid, self.server.id, self.server.name))
        return

    became_cluster_wide = singleton.become_cluster_wide(
        self.server.connector_server_keep_alive_job_time,
        self.server.connector_server_grace_time,
        self.server.id,
        self.server.cluster_id,
        False)

    if became_cluster_wide:
        # The watchdog job is no longer needed once we are cluster-wide.
        singleton.scheduler.delete(Bunch(name='zato.server.ensure-cluster-wide-singleton'))
        self.server.init_connectors()
        add_scheduler_jobs(self.server)
    else:
        logger.info(
            'Not becoming a cluster-wide singleton, cid:[{}], server id:[{}], name:[{}]'.format(
                self.cid, self.server.id, self.server.name))
def handle(self):
    """ Attempt to make this server's singleton cluster-wide.

    Logs and returns if there is no singleton or if it is already
    cluster-wide. After a successful promotion, deletes the
    ensure-cluster-wide-singleton job, initializes connectors,
    adds scheduler jobs and initializes notifiers.
    """
    server = self.server
    singleton = server.singleton_server

    if singleton:
        if singleton.is_cluster_wide:
            # Nothing to do - the promotion already happened earlier.
            logger.debug(
                "Ignoring event, cid:[{}], server id:[{}], name:[{}], singleton is already cluster-wide".format(
                    self.cid, server.id, server.name))
        elif singleton.become_cluster_wide(
                server.connector_server_keep_alive_job_time,
                server.connector_server_grace_time,
                server.id,
                server.cluster_id,
                False):
            # Promotion succeeded - drop the watchdog job and bring up
            # everything that a cluster-wide singleton is responsible for.
            singleton.scheduler.delete(Bunch(name="zato.server.ensure-cluster-wide-singleton"))
            server.init_connectors()
            add_scheduler_jobs(server)
            singleton.init_notifiers()
        else:
            logger.info(
                "Not becoming a cluster-wide singleton, cid:[{}], server id:[{}], name:[{}]".format(
                    self.cid, server.id, server.name))
    else:
        logger.debug(
            "Ignoring event, cid:[{}], server id:[{}], name:[{}] has no singleton attached".format(
                self.cid, server.id, server.name))
def run(self):
    """ The scheduler's main loop - seeds startup and ODB-defined jobs,
    spawns a greenlet per job, then sleeps until told to stop, invoking
    an optional per-iteration callback each time around.
    """
    try:
        # Add the statistics-related scheduler jobs to the ODB
        if self._add_startup_jobs:
            spawn_greenlet(self.add_startup_jobs)

        # All other jobs
        if self._add_scheduler_jobs:
            add_scheduler_jobs(self.api, self.odb, self.config.main.cluster.id, spawn=False)

        # Bind to locals so the hot loop below avoids repeated attribute lookups.
        _sleep = self.sleep
        _sleep_time = self.sleep_time

        # Spawn each known job under the lock so the job set cannot change
        # while we iterate over it; jobs that already hit their repeat limit
        # are only reported, not spawned again.
        with self.lock:
            for job in sorted(self.jobs.itervalues()):
                if job.max_repeats_reached:
                    logger.info('Job `%s` already reached max runs count (%s UTC)', job.name, job.max_repeats_reached_at)
                else:
                    self.spawn_job(job)

        # Ok, we're good now.
        self.ready = True
        logger.info('Scheduler started')

        # Idle loop - keeps this greenlet/thread alive until shutdown,
        # optionally calling iter_cb on every tick (e.g. for tests/monitoring).
        while self.keep_running:
            _sleep(_sleep_time)
            if self.iter_cb:
                self.iter_cb(*self.iter_cb_args)

    # NOTE(review): Python 2 syntax; format_exc takes a traceback limit, not an
    # exception - passing `e` here is harmless under Python 2 but worth confirming.
    except Exception, e:
        logger.warn(format_exc(e))
def init_jobs(self):
    """ Seed the ODB with startup jobs and then schedule all jobs,
    including the ones just added.
    """
    # Sleep first to make sure that at least one server is running if the
    # environment was started from quickstart scripts.
    sleep(initial_sleep)

    cluster = self.config.main.cluster
    add_startup_jobs(cluster.id, self.odb, self.startup_jobs, asbool(cluster.stats_enabled))

    # Actually start jobs now, including any added above
    if self._add_scheduler_jobs:
        add_scheduler_jobs(self.api, self.odb, cluster.id, spawn=False)
def init_scheduler(self):
    """ Load scheduler jobs for the parallel server and start the scheduler. """
    server = self.parallel_server
    add_scheduler_jobs(server, spawn=False)
    self.scheduler.init()