def build_server_list():
    """Return the description of every node of the configured IoT-Lab site.

    The list is cached on disk in 'node-iot.json' so the IoT-Lab API is
    only queried once; delete the file to force a refresh.
    """
    # The file to build the server list without querying the grid5000 API
    server_file = "node-iot.json"
    if os.path.isfile(server_file):
        # Cache hit: no API call needed
        with open(server_file, "r") as cache:
            return json.load(cache)
    # List all nodes of the site
    logging.info("Query the API to build the server list")
    process = subprocess.run("iotlab-status --nodes --site %s" % get_config()["iot_site"],
                             shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.DEVNULL, universal_newlines=True)
    servers = {}
    for node in json.loads(process.stdout)["items"]:
        # Only keep usable nodes
        if node["state"] in ("Alive", "Busy"):
            short_name = node["network_address"].split(".")[0]
            servers[short_name] = {
                "name": short_name,
                "archi": node["archi"],
                "site": node["site"],
                "coords (x,y,z)": "%s, %s, %s" % (node["x"], node["y"], node["z"])
            }
    # Write the cache for subsequent calls
    with open(server_file, "w") as cache:
        cache.write(json.dumps(servers, indent=4))
    return servers
def decorated_function(*args, **kwargs):
    """Validate the 'token' field of the JSON request body before calling f.

    Aborts with a 503 error when the body is missing, or when the token is
    absent, shorter than 10 characters, or not one of the configured
    authorization tokens.
    """
    if flask.request.json is None:
        flask.abort(503)
    else:
        token = flask.request.json.get("token")
        # 'token' may be absent from the body (get() returns None): treat it
        # like a bad token instead of crashing on len(None)
        if token is None or len(token) < 10 or token not in get_config()["auth_token"]:
            flask.abort(503)
        return f(*args, **kwargs)
def g5k_connect(action, db):
    """Open a Grid'5000 API session with the credentials stored for the node.

    The 'g5k' ActionProperty stores 'user/encrypted_password'. Returns a
    (site, username) tuple where 'site' is the configured Grid5000 site.
    """
    credential = db.query(ActionProperty).filter(
        ActionProperty.node_name == action.node_name).filter(
        ActionProperty.prop_name == "g5k").first()
    # Property value format: 'user/encrypted_password' (single split)
    user, pwd = credential.prop_value.split("/", 1)
    site = Grid5000(username=user,
                    password=decrypt_password(pwd)).sites[get_config()["g5k_site"]]
    return (site, user)
def node_extend(arg_dict): result = {} # Check POST data if "nodes" not in arg_dict or "user" not in arg_dict: return json.dumps({ "parameters": { "user": "******", "nodes": ["name1", "name2"] } }) wanted = arg_dict["nodes"] user = arg_dict["user"] if len(user) == 0 or '@' not in user: for n in wanted: result[n] = "no_email" return json.dumps(result) # Get information about the requested nodes db = open_session() nodes = db.query(Schedule).filter( Schedule.node_name.in_(wanted)).filter(Schedule.owner == user).all() for n in nodes: # Allow users to extend their reservation 4 hours before the end_date if n.end_date - int(time.time()) < 4 * 3600: hours_added = int((n.end_date - n.start_date) / 3600) api_url = "https://api.grid5000.fr/stable/sites/%s/internal/oarapi/jobs/%s.json" % ( get_config()["g5k_site"], n.node_name) g5k_login = (arg_dict["g5k_user"], decrypt_password(arg_dict["g5k_password"])) json_data = { "method": "walltime-change", "walltime": "+%d:00" % hours_added } r = requests.post(url=api_url, auth=g5k_login, json=json_data) if r.status_code == 202: if r.json()["status"] == "Accepted": n.end_date += hours_added * 3600 result[n.node_name] = "success" else: error_msg = "failure: walltime modification rejected" logging.error(error_msg) logging.error(r.json()) result[n.node_name] = error_msg else: error_msg = "failure: wrong API return code %d" % r.status_code logging.error(error_msg) result[n.node_name] = error_msg else: result[ n. node_name] = "failure: it is too early to extend the reservation" close_session(db) # Build the result for n in wanted: if n not in result: result[n] = "failure" return json.dumps(result)
def node_mine(arg_dict):
    """Return the Raspberry nodes currently reserved by the user.

    Expected JSON parameter: 'user' (email address). The result contains
    the deploy state list ('states') and, per node, the schedule, the
    hardware properties and the deployed environment properties.
    """
    if "user" not in arg_dict or "@" not in arg_dict["user"]:
        return json.dumps({
            "parameters": "user: '******'"
        })
    result = {
        "states": [],
        "nodes": {}
    }
    # Get the list of the states for the 'deploy' process
    py_module = import_module("%s.states" % get_config()["node_type"])
    PROCESS = getattr(py_module, "PROCESS")
    # Keep the longest state list among the deploy process variants
    for p in PROCESS["deploy"]:
        if len(p["states"]) > len(result["states"]):
            result["states"] = p["states"]
    db = open_session()
    # Get my nodes
    node_names = []
    nodes = db.query(Schedule
        ).filter(Schedule.owner == arg_dict["user"]
        ).filter(Schedule.state != "configuring"
        ).all()
    for n in nodes:
        result["nodes"][n.node_name] = {
            "node_name": n.node_name,
            "bin": n.bin,
            "start_date": n.start_date,
            "end_date": n.end_date,
            "state": n.state,
            "action_state": n.action_state
        }
    # Add the hardware properties of the reserved nodes
    nodes = db.query(RaspNode).filter(RaspNode.name.in_(result["nodes"].keys())).all()
    for n in nodes:
        result["nodes"][n.name]["ip"] = n.ip
        result["nodes"][n.name]["switch"] = n.switch
        result["nodes"][n.name]["port_number"] = n.port_number
        result["nodes"][n.name]["model"] = n.model
        result["nodes"][n.name]["serial"] = n.serial
    # Add the environment (and os_password) deployed on every node
    envs = db.query(ActionProperty
        ).filter(ActionProperty.node_name.in_(result["nodes"].keys())
        ).filter(ActionProperty.prop_name.in_(["environment", "os_password"])
        ).all()
    # Cache env-name -> has-web-interface lookups across nodes
    env_web = {}
    for e in envs:
        if e.prop_name == "environment":
            # Check if the environment provides a web interface
            if e.prop_value not in env_web:
                has_web = db.query(RaspEnvironment).filter(RaspEnvironment.name == e.prop_value
                    ).first().web
                env_web[e.prop_value] = has_web
            if env_web[e.prop_value]:
                #result["nodes"][e.node_name]["url"] = "http://%s:8181" % result["nodes"][e.node_name]["ip"]
                # Hack for the PiSeduce cluster
                result["nodes"][e.node_name]["url"] = "https://pi%02d.seduce.fr" % (
                    int(result["nodes"][e.node_name]["port_number"]))
        result["nodes"][e.node_name][e.prop_name] = e.prop_value
    close_session(db)
    return json.dumps(result)
def img_upload_exec(action, db):
    """Retrieve the image file from the node over scp into 'env_path'.

    Always returns True; the scp output and exit status are discarded.
    """
    # NOTE(review): ssh_user is queried but never used — the copy runs as root
    ssh_user = db.query(RaspEnvironment).filter(
        RaspEnvironment.name == action.environment).first().ssh_user
    destination = get_config()["env_path"]
    source = db.query(ActionProperty).filter(
        ActionProperty.node_name == action.node_name).filter(
        ActionProperty.prop_name == "img_path").first().prop_value
    copy_cmd = "scp -o 'StrictHostKeyChecking no' root@%s:%s %s" % (
        action.node_ip, source, destination)
    subprocess.run(copy_cmd, shell=True,
                   stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return True
def register_environment(arg_dict):
    """Register a new environment from an image file available on a node.

    Expected parameters: 'node_name', 'user', 'img_path' (path of the image
    on the node) and 'env_name'. Creates a 'reg_env' action that will pull
    the image and register the environment. Returns a JSON status.
    """
    db = open_session()
    node = db.query(Schedule
        ).filter(Schedule.node_name == arg_dict["node_name"]
        ).filter(Schedule.owner == arg_dict["user"]
        ).first()
    if node is None:
        close_session(db)
        msg = "No reservation for the node '%s'" % arg_dict["node_name"]
        logging.error("[%s] %s" % (arg_dict["node_name"], msg))
        return json.dumps({ "error": msg })
    # Check the image file does not exist yet
    file_name = os.path.basename(arg_dict["img_path"])
    env_path = get_config()["env_path"]
    if os.path.exists("%s%s" % (env_path, file_name)):
        # BUGFIX: this early return used to leak the DB session
        close_session(db)
        msg = "The image file '%s' already exists in the server. Please, rename this file." % file_name
        logging.error("[%s] %s" % (arg_dict["node_name"], msg))
        return json.dumps({ "error": msg })
    # Replace any existing action for this node
    node_action = db.query(Action).filter(Action.node_name == node.node_name).first()
    if node_action is not None:
        db.delete(node_action)
    # The deployment is completed, add a new action
    node_action = new_action(node, db)
    init_action_process(node_action, "reg_env")
    db.add(node_action)
    # Delete old values
    old_props = db.query(ActionProperty
        ).filter(ActionProperty.node_name == node.node_name
        ).filter(ActionProperty.prop_name.in_(["img_path", "env_name"])
        ).all()
    for p in old_props:
        db.delete(p)
    # Record the image path and the environment name for the action process
    act_prop = ActionProperty()
    act_prop.node_name = node.node_name
    act_prop.prop_name = "img_path"
    act_prop.prop_value = arg_dict["img_path"]
    act_prop.owner = node.owner
    db.add(act_prop)
    act_prop = ActionProperty()
    act_prop.node_name = node.node_name
    act_prop.prop_name = "env_name"
    act_prop.prop_value = arg_dict["env_name"]
    act_prop.owner = node.owner
    db.add(act_prop)
    close_session(db)
    return json.dumps({ "success": "environment is registering" })
def env_copy_exec(action, db):
    """Start writing the environment image onto the SD card of the node.

    The copy is backgrounded on the node (note the trailing '&' in the
    command): the image is streamed from the pimaster over rsh, gunzipped
    by tar, and dd'ed to /dev/mmcblk0 while 'pv' logs progress to a file.
    A 'percent' ActionProperty is created/reset to track the progress.
    Always returns True, even when the SSH connection fails.
    """
    env_path = get_config()["env_path"]
    node_ip = db.query(RaspNode).filter(
        RaspNode.name == action.node_name).first().ip
    # WARN: the pimaster SSH user is in pimaster.switch (sorry)
    pimaster = db.query(RaspNode).filter(RaspNode.name == "pimaster").first()
    env = db.query(RaspEnvironment).filter(
        RaspEnvironment.name == action.environment).first()
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(node_ip, username="******", timeout=SSH_TIMEOUT)
        # Get the path to the IMG file
        img_path = env_path + env.img_name
        logging.info("[%s] copy %s to the SDCARD" % (action.node_name, img_path))
        # Write the image of the environment on SD card
        deploy_cmd = "rsh -o StrictHostKeyChecking=no %s@%s 'cat %s' | tar xzOf - | \
pv -n -p -s %s 2> progress-%s.txt | dd of=/dev/mmcblk0 bs=4M conv=fsync &" % (
            pimaster.switch, pimaster.ip, img_path, env.img_size, action.node_name)
        (stdin, stdout, stderr) = ssh.exec_command(deploy_cmd)
        # The remote command is backgrounded, so this returns immediately
        return_code = stdout.channel.recv_exit_status()
        ssh.close()
        # Create (or reset) the 'percent' property used to follow the copy
        act_prop = db.query(ActionProperty).filter(
            ActionProperty.node_name == action.node_name).filter(
            ActionProperty.prop_name == "percent").first()
        if act_prop is None:
            owner_email = db.query(ActionProperty).filter(
                ActionProperty.node_name == action.node_name).first().owner
            act_prop = ActionProperty()
            act_prop.node_name = action.node_name
            act_prop.prop_name = "percent"
            act_prop.prop_value = 0
            act_prop.owner = owner_email
            db.add(act_prop)
        else:
            act_prop.prop_value = 0
    except (BadHostKeyException, AuthenticationException, SSHException,
            socket.error) as e:
        # Best effort: log and fall through to the True return
        logging.warning("[%s] SSH connection failed" % action.node_name)
    return True
def experiment_to_reservation():
    """Aggregate node reservations from IoT-Lab and from the local database.

    Returns a dict mapping short node names to lists of
    {'start_date', 'end_date', 'owner'} records (timestamps in seconds).
    """
    reservations = {}
    # Get the reservations from the IoT-Lab plateform
    process = subprocess.run("iotlab-experiment" and "iotlab-status -er",
                             shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.DEVNULL, universal_newlines=True)
    iot_site = get_config()["iot_site"]
    for resa in json.loads(process.stdout)["items"]:
        for node_name in resa["nodes"]:
            # Skip nodes belonging to other sites
            if iot_site not in node_name:
                continue
            name_short = node_name.split(".")[0]
            # Experiment dates are UTC ISO strings; convert to timestamps
            begin = datetime.strptime(resa["start_date"], "%Y-%m-%dT%H:%M:%SZ")
            begin = begin.replace(tzinfo=pytz.UTC)
            reservations.setdefault(name_short, []).append({
                "start_date": begin.timestamp(),
                "end_date": begin.timestamp() + resa["submitted_duration"] * 60,
                "owner": resa["user"]
            })
    # Get the reservations from the database
    db = open_session()
    # Check the IoT existing selections
    for selection in db.query(IotSelection).filter(
            IotSelection.node_ids != "").all():
        for node_id in selection.node_ids.split("+"):
            name = "%s-%s" % (selection.archi.split(":")[0], node_id)
            reservations.setdefault(name, []).append({
                "start_date": selection.start_date,
                "end_date": selection.end_date,
                "owner": selection.owner
            })
    close_session(db)
    return reservations
def new_action(db_node, db):
    """Create a fresh Action for the node, replacing any existing ones.

    Also switches the node state to 'in_progress'. The caller is in
    charge of adding the returned Action to the session.
    """
    # Delete existing actions
    for stale in db.query(Action).filter(
            Action.node_name == db_node.node_name).all():
        db.delete(stale)
    # Get the node IP (only for Raspberry agents)
    node_ip = None
    if get_config()["node_type"] == "raspberry":
        node_ip = db.query(RaspNode).filter(
            RaspNode.name == db_node.node_name).first().ip
    # Add a new action
    act = Action()
    env_prop = db.query(ActionProperty).filter(
        ActionProperty.node_name == db_node.node_name).filter(
        ActionProperty.prop_name == "environment").first()
    if env_prop is not None:
        act.environment = env_prop.prop_value
    act.node_name = db_node.node_name
    if node_ip is not None:
        act.node_ip = node_ip
    db_node.state = "in_progress"
    return act
def node_mine(arg_dict):
    """Return the IoT-Lab experiments of the user together with their state.

    Synchronizes the local Schedule table with the experiments returned by
    'iotlab-experiment get -l': jobs finished for more than 24 hours are
    purged, recent unknown jobs are registered, and the assigned nodes of
    every remaining job are cached in the IotNodes table.
    """
    if "user" not in arg_dict or "@" not in arg_dict["user"] or \
        "iot_user" not in arg_dict or "iot_password" not in arg_dict:
        return json.dumps({
            "parameters": {
                "user": "******",
                "iot_user": "******",
                "iot_password": "******"
            }
        })
    result = {"states": [], "nodes": {}}
    # Get the list of the states for the 'deploy' process
    py_module = import_module("%s.states" % get_config()["node_type"])
    PROCESS = getattr(py_module, "PROCESS")
    # Keep the longest state list among the deploy process variants
    for p in PROCESS["deploy"]:
        if len(p["states"]) > len(result["states"]):
            result["states"] = p["states"]
    # Get the existing job for this user
    db = open_session()
    schedule = db.query(Schedule).filter(
        Schedule.owner == arg_dict["user"]).all()
    db_jobs = {sch.node_name: sch for sch in schedule}
    cmd = "iotlab-experiment -u %s -p %s get -l" % (
        arg_dict["iot_user"], decrypt_password(arg_dict["iot_password"]))
    process = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.DEVNULL, universal_newlines=True)
    json_data = json.loads(process.stdout)["items"]
    now = time.time()
    for resa in json_data:
        id_str = str(resa["id"])
        # Manage the jobs registrered in the DB
        if id_str in db_jobs:
            # Delete from the DB the jobs that ends more than 24 hours ago
            if db_jobs[id_str].end_date < now - 24 * 3600:
                db.delete(db_jobs[id_str])
                # BUGFIX: the filter used to be 'filter(IotNodes.job_id)',
                # which never compared the column to the job id
                nodes = db.query(IotNodes).filter(
                    IotNodes.job_id == resa["id"]).first()
                if nodes is not None:
                    db.delete(nodes)
                # Do not analyze this job
                my_sch = None
            else:
                my_sch = db_jobs[id_str]
        else:
            # Experiment dates are UTC ISO strings; convert once to timestamps
            start_date = datetime.strptime(resa["start_date"],
                                           "%Y-%m-%dT%H:%M:%SZ")
            start_date = start_date.replace(tzinfo=pytz.UTC)
            start_time = start_date.timestamp()
            end_time = start_time + resa["submitted_duration"] * 60
            # Check if the job is terminated within the last 24 hours
            if end_time > now - 24 * 3600:
                # Register the job to the DB
                schedule = Schedule()
                schedule.node_name = resa["id"]
                schedule.owner = arg_dict["user"]
                schedule.bin = "autodetected-jobs"
                schedule.state = "ready"
                schedule.action_state = ""
                # Reuse the dates computed above instead of parsing twice
                schedule.start_date = start_time
                schedule.end_date = end_time
                db.add(schedule)
                my_sch = schedule
            else:
                # Do not analyze this job
                my_sch = None
        if my_sch is not None:
            # Get the list of the assigned nodes
            nodes = db.query(IotNodes).filter(
                IotNodes.job_id == resa["id"]).first()
            if nodes is None:
                cmd = "iotlab-experiment -u %s -p %s get -i %s -n" % (
                    arg_dict["iot_user"],
                    decrypt_password(arg_dict["iot_password"]), id_str)
                process = subprocess.run(cmd, shell=True,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.DEVNULL,
                                         universal_newlines=True)
                node_data = json.loads(process.stdout)["items"]
                assigned_nodes = []
                for n in node_data:
                    name = n["network_address"]
                    assigned_nodes.append(
                        name.split(".")[0] + "@" + name.split(".")[1])
                nodes_str = ",".join(assigned_nodes)
                # Cache the assigned nodes to avoid calling the CLI again
                nodes_db = IotNodes()
                nodes_db.job_id = resa["id"]
                nodes_db.assigned_nodes = nodes_str
                db.add(nodes_db)
            else:
                nodes_str = nodes.assigned_nodes
            # Send job information
            result["nodes"][my_sch.node_name] = {
                "node_name": my_sch.node_name,
                "bin": my_sch.bin,
                "start_date": my_sch.start_date,
                "end_date": my_sch.end_date,
                "state": resa["state"].lower(),
                "assigned_nodes": nodes_str
            }
            # Experiment data can be downloaded once the job is over
            result["nodes"][my_sch.node_name]["data_link"] = (
                resa["state"] == "Terminated" or resa["state"] == "Stopped")
    close_session(db)
    return json.dumps(result)
def node_reserve(arg_dict):
    """Select available IoT-Lab nodes matching a filter and record the selection.

    Expected parameters: 'user', 'filter' (e.g. {'nb_nodes': '2', ...} or
    {'name': ..., 'nb_nodes': '1'}), 'start_date' (timestamp), 'duration'
    (hours), 'iot_user' and 'iot_password'. Returns the selected node names.
    """
    # Check arguments
    if "filter" not in arg_dict or "user" not in arg_dict or \
        "start_date" not in arg_dict or "duration" not in arg_dict or \
        "iot_user" not in arg_dict or "iot_password" not in arg_dict:
        logging.error("Missing parameters: '%s'" % arg_dict)
        return json.dumps({
            "parameters": {
                "user": "******",
                "filter": "{...}",
                "start_date": 1623395254,
                "duration": 3,
                "iot_password": "******",
                "iot_user": "******"
            }
        })
    result = {"nodes": []}
    user = arg_dict["user"]
    f = arg_dict["filter"]
    # f = {'nb_nodes': '3', 'model': 'RPI4B8G', 'switch': 'main_switch'}
    nb_nodes = int(f["nb_nodes"])
    del f["nb_nodes"]
    start_date = arg_dict["start_date"]
    end_date = start_date + arg_dict["duration"] * 3600
    # Get the node list
    servers = build_server_list()
    filtered_nodes = []
    if "name" in f:
        # Selection of one specific node by its name
        if f["name"] in servers:
            filtered_nodes.append(f["name"])
    else:
        if len(f) == 0:
            # No filter: every node is a candidate
            filtered_nodes += servers.keys()
        else:
            # Keep the nodes whose properties match every filter entry
            for node in servers.values():
                ok_filtered = True
                for prop in f:
                    if node[prop] != f[prop]:
                        ok_filtered = False
                if ok_filtered:
                    filtered_nodes.append(node["name"])
    # Check the availability of the filtered nodes
    logging.info("Filtered nodes: %s" % filtered_nodes)
    selected_nodes = []
    for node_name in filtered_nodes:
        ok_selected = True
        # Move the start date back 15 minutes to give the time for
        # destroying the previous reservation
        back_date = start_date - 15 * 60
        # Check the running experiments of the IoT-Lab plateform
        for name, reservations in experiment_to_reservation().items():
            if name == node_name:
                for resa in reservations:
                    # Only one reservation for a specific node per user
                    if resa["owner"] == user:
                        ok_selected = False
                    # BUGFIX: standard interval-overlap test; the previous
                    # version missed the back_date == resa["start_date"] case
                    if back_date < resa["end_date"] and end_date > resa["start_date"]:
                        ok_selected = False
        if ok_selected:
            # Add the node to the reservation
            selected_nodes.append(node_name)
            if len(selected_nodes) == nb_nodes:
                # Exit when the required number of nodes is reached
                break
    logging.info("Selected nodes: %s" % selected_nodes)
    if len(selected_nodes) > 0:
        archi = servers[selected_nodes[0]]["archi"]
        db = open_session()
        if "name" in f:
            # One specific node: append it to an existing selection at the
            # same date when possible, otherwise create a new one
            node_id = selected_nodes[0].split("-")[1]
            selection = db.query(IotSelection).filter(
                IotSelection.owner == user).filter(
                IotSelection.archi == archi).filter(
                IotSelection.start_date == start_date).filter(
                IotSelection.node_ids != "").first()
            if selection is None:
                iot_filter = "%s,%s,%s" % (get_config()["iot_site"],
                                           archi.split(":")[0], node_id)
                iot_selection = IotSelection()
                iot_selection.owner = user
                iot_selection.filter_str = iot_filter
                iot_selection.archi = archi
                iot_selection.node_ids = node_id
                iot_selection.node_nb = ""
                iot_selection.start_date = start_date
                iot_selection.end_date = end_date
                db.add(iot_selection)
            else:
                selection.filter_str += "+%s" % node_id
                selection.node_ids += "+%s" % node_id
        else:
            iot_filter = "%d,archi=%s+site=%s" % (len(selected_nodes), archi,
                                                  get_config()["iot_site"])
            iot_selection = IotSelection()
            iot_selection.owner = user
            iot_selection.filter_str = iot_filter
            iot_selection.archi = archi
            iot_selection.node_ids = ""
            iot_selection.node_nb = len(selected_nodes)
            iot_selection.start_date = start_date
            iot_selection.end_date = end_date
            db.add(iot_selection)
        # Store the iot-lab login/password to the DB in order to use it with agent_exec.py
        result["nodes"] = selected_nodes
        close_session(db)
    return json.dumps(result)
def g5k_connect(args):
    """Return the configured Grid'5000 site object.

    Authenticates with the 'g5k_user' / encrypted 'g5k_password' entries
    of 'args'.
    """
    api = Grid5000(username=args["g5k_user"],
                   password=decrypt_password(args["g5k_password"]))
    return api.sites[get_config()["g5k_site"]]
from api.auth import auth
from flask import Blueprint
from lib.config_loader import get_config
from importlib import import_module
import flask

# Blueprint holding the user-facing API routes (mounted under /v1/user/)
user_v1 = Blueprint("user_v1", __name__)
# Get the python module from the type of the nodes managed by this agent
node_type = get_config()["node_type"]
api_exec_mod = import_module("%s.api" % node_type)


# List the DHCP clients (only used by administrators but the URL must be in /user/)
@user_v1.route("/client/list", methods=["POST"])
@auth
def client_list():
    """
    Return the list of the DHCP clients read from the '/etc/dnsmasq.conf' file.
    JSON parameters: none. Example of return value:
    {
        'switch1': {'mac_address': 'c4:41:1e:11:11:11', 'ip': '4.4.0.8'},
        'node-1': {'mac_address': 'dc:a6:32:12:12:12', 'ip': '4.4.0.11'},
        'node-2': {'mac_address': 'dc:a6:32:12:12:12', 'ip': '4.4.0.12'}
    }
    """
    # Delegate to the node-type specific implementation
    return getattr(api_exec_mod, "client_list")(flask.request.json)


# List the switches (only used by administrators but the URL must be in /user/)
@user_v1.route("/switch/list", methods=["POST"])
from importlib import import_module
import logging, os, sys

# Create the application
agent_api = Flask(__name__)
# Add routes from blueprints
agent_api.register_blueprint(user_v1, url_prefix='/v1/user/')
agent_api.register_blueprint(admin_v1, url_prefix='/v1/admin/')
agent_api.register_blueprint(debug_v1, url_prefix='/v1/debug/')

if __name__ == '__main__':
    logging.basicConfig(filename='info_api.log', level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    # g5k and iot-lab agents need the shared secret key file to decrypt
    # the passwords sent by the webui
    if (get_config()["node_type"] == "g5k" or get_config()["node_type"] == "iot-lab") and \
        not os.path.isfile(get_config()["key_file"]):
        msg = "Please, ask the secret key to the webui in order to securely share passwords!"
        print(msg)
        logging.error(msg)
        sys.exit(13)
    # Check the token syntax
    if len(get_config()["auth_token"][0]) < 10:
        msg = "Please, increase the length of the authorization token (at least 10 characters)"
        print(msg)
        logging.error(msg)
        sys.exit(13)
    # Get the python module from the type of the nodes managed by this agent
    node_type = get_config()["node_type"]
    api_exec_mod = import_module("%s.api" % node_type)
    # Start the application
def decrypt_password(pwd):
    """Decrypt a Fernet-encrypted password with the configured key file.

    'pwd' is the encrypted token as a str; returns the clear-text password.
    Raises cryptography's InvalidToken when the token does not match the key.
    """
    # Close the key file before decrypting (the dead 'return ""' that used
    # to follow the return statement has been removed)
    with open(get_config()["key_file"], "r") as keyfile:
        key = keyfile.read()
    f = Fernet(key)
    return f.decrypt(pwd.encode()).decode()
def node_mine(arg_dict):
    """Return the Grid'5000 jobs of the user together with their state.

    Expected parameters: 'user' (email), 'g5k_user' and the encrypted
    'g5k_password'. Running and waiting OAR jobs are matched against the
    local Schedule table and their assigned nodes are cached as an
    ActionProperty. Jobs deleted on the Grid'5000 side are handled by
    check_deleted_jobs().
    """
    if "user" not in arg_dict or "@" not in arg_dict["user"] or \
        "g5k_user" not in arg_dict or "g5k_password" not in arg_dict:
        return json.dumps({
            "parameters": {
                "user": "******",
                "g5k_user": "******",
                "g5k_password": "******"
            }
        })
    result = {"states": [], "nodes": {}}
    # Get the list of the states for the 'deploy' process
    py_module = import_module("%s.states" % get_config()["node_type"])
    PROCESS = getattr(py_module, "PROCESS")
    # Keep the longest state list among the deploy process variants
    for p in PROCESS["deploy"]:
        if len(p["states"]) > len(result["states"]):
            result["states"] = p["states"]
    # Get the existing job for this user
    db = open_session()
    schedule = db.query(Schedule).filter(
        Schedule.owner == arg_dict["user"]).filter(
        Schedule.state != "configuring").all()
    db_jobs = {sch.node_name: sch for sch in schedule}
    if len(db_jobs) == 0:
        close_session(db)
        return json.dumps(result)
    # Connect to the grid5000 API
    g5k_site = g5k_connect(arg_dict)
    user_jobs = g5k_site.jobs.list(state="running", user=arg_dict["g5k_user"])
    user_jobs += g5k_site.jobs.list(state="waiting", user=arg_dict["g5k_user"])
    check_deleted_jobs(db_jobs, user_jobs, db)
    for j in user_jobs:
        j.refresh()
        uid_str = str(j.uid)
        if uid_str in db_jobs:
            my_conf = db_jobs[uid_str]
            result["nodes"][uid_str] = {
                "node_name": uid_str,
                "bin": my_conf.bin,
                "start_date": my_conf.start_date,
                "end_date": my_conf.end_date,
                "state": my_conf.state,
                "job_state": j.state
            }
            # Cache the assigned nodes on the first call after the job starts
            assigned_nodes = db.query(ActionProperty).filter(
                ActionProperty.node_name == my_conf.node_name).filter(
                ActionProperty.prop_name == "assigned_nodes").first()
            if assigned_nodes is None:
                if len(j.assigned_nodes) > 0:
                    assigned_nodes = ActionProperty()
                    assigned_nodes.owner = arg_dict["user"]
                    assigned_nodes.node_name = my_conf.node_name
                    assigned_nodes.prop_name = "assigned_nodes"
                    assigned_nodes.prop_value = ",".join(j.assigned_nodes)
                    db.add(assigned_nodes)
                    # NOTE(review): reconstructed nesting — this assignment is
                    # assumed to be inside the 'len(j.assigned_nodes) > 0'
                    # branch, otherwise assigned_nodes could still be None
                    # here; confirm against the original indentation
                    result["nodes"][uid_str][
                        "assigned_nodes"] = assigned_nodes.prop_value
            else:
                result["nodes"][uid_str][
                    "assigned_nodes"] = assigned_nodes.prop_value
    close_session(db)
    return json.dumps(result)
# The agent executor expects the configuration file path as its only argument
if len(sys.argv) != 2:
    print("The configuration file is required in parameter.")
    print("For example, 'python3 %s config.json'" % sys.argv[0])
    sys.exit(2)
# NOTE(review): load_config is called before the visible 'from
# lib.config_loader import load_config' below — presumably it is already
# imported earlier in the file; confirm against the full source
load_config(sys.argv[1])

from database.connector import open_session, close_session
from database.tables import Action, ActionProperty, RaspNode, Schedule
from datetime import datetime
from importlib import import_module
from lib.config_loader import load_config
from sqlalchemy import or_
import logging, os, subprocess, sys, time

# Import the action driver from config_agent.json
node_type = get_config()["node_type"]
# Import the action executor module
exec_action_mod = import_module("%s.exec" % node_type)
# Import the PROCESS and STATE_DESC variables
py_module = import_module("%s.states" % node_type)
PROCESS = getattr(py_module, "PROCESS")
STATE_DESC = getattr(py_module, "STATE_DESC")


# Move the action to the next state of the process
def next_state_move(db_action):
    # Select the state list from the environment
    state_list = []
    # A process entry with an empty 'environments' list applies to every
    # environment (definition continues beyond this excerpt)
    for process in PROCESS[db_action.process]:
        if len(process["environments"]
               ) == 0 or db_action.environment in process["environments"]:
from lib.config_loader import get_config
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

# Connection string read from the agent configuration
DB_URL = get_config()["db_url"]
# NOTE(review): 'check_same_thread' is a SQLite-specific argument —
# presumably db_url always points to a SQLite database; confirm before
# switching to another backend
engine = create_engine(DB_URL, connect_args={"check_same_thread": False})
# Session factory used by open_session()/close_session()
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Declarative base class for all table definitions
Base = declarative_base()
def state():
    """Report that the agent is running and the node type it manages."""
    agent_type = get_config()["node_type"]
    return json.dumps({"state": "running", "type": agent_type})