def update_nodes_status(event):
    # mark the node that sent the heartbeat as online
    node_id = event.get('hostname')
    db_manager.update_one('nodes', id=node_id, values={
        'status': NodeStatus.ONLINE
    })


def update_nodes_status_online(event):
    # log worker-online events
    other.info(f"{event}")


def monitor_nodes_status(celery_app):
    # listen for Celery worker events and update node status accordingly
    with celery_app.connection() as connection:
        recv = celery_app.events.Receiver(connection, handlers={
            'worker-heartbeat': update_nodes_status,
            # 'worker-online': update_nodes_status_online,
        })
        recv.capture(limit=None, timeout=None, wakeup=True)


# run scheduler as a separate process
scheduler.run()

# monitor node status
p_monitor = Process(target=monitor_nodes_status, args=(celery_app,))
p_monitor.start()

# create folder if it does not exist
if not os.path.exists(PROJECT_LOGS_FOLDER):
    os.makedirs(PROJECT_LOGS_FOLDER)

if __name__ == '__main__':
    # run app instance
    app.run(host=FLASK_HOST, port=FLASK_PORT, threaded=True)
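The receiver above only reacts to heartbeats; the commented-out 'worker-online' entry shows that other Celery worker events can be wired into the same handlers dict. As a sketch only, a 'worker-offline' handler could mark a node as offline when its worker shuts down. The name update_nodes_status_offline and the NodeStatus.OFFLINE constant are assumptions here, not identifiers taken from the project.

def update_nodes_status_offline(event):
    # sketch: NodeStatus.OFFLINE is assumed to exist alongside NodeStatus.ONLINE
    node_id = event.get('hostname')
    db_manager.update_one('nodes', id=node_id, values={
        'status': NodeStatus.OFFLINE
    })

# it would then be registered next to the heartbeat handler:
#   'worker-heartbeat': update_nodes_status,
#   'worker-offline': update_nodes_status_offline,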
def run_scheduler():
    scheduler.run()
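Tying this back to the "run scheduler as a separate process" comment above, here is a minimal sketch of launching run_scheduler in its own child process, mirroring how monitor_nodes_status is started. The variable name p_scheduler is illustrative; this is not necessarily the project's actual wiring.

from multiprocessing import Process

# sketch: start the scheduler in a child process so it cannot block the Flask app
p_scheduler = Process(target=run_scheduler)
p_scheduler.start()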