def users():
    cluster_desc = get_cluster_desc()
    config = load_config()
    session = open_session()
    db_user = session.query(User).filter(User.email == current_user.id).first()
    authorized = []
    admin = []
    pending = []
    if db_user.is_admin:
        for user in session.query(User).all():
            if user.is_admin:
                category = admin
            elif user.user_authorized:
                category = authorized
            else:
                category = pending
            category.append({
                'id': user.id,
                'email': user.email,
                'firstname': user.firstname,
                'lastname': user.lastname,
                'email_confirmed': user.email_confirmed
            })
    close_session(session)
    return json.dumps({
        "status": "ok",
        "user_info": {
            'filters': cluster_desc['email_filters'],
            'admin': admin,
            'authorized': authorized,
            'pending': pending,
            'smtp_config': config['mail'],
            'email_signup': cluster_desc['email_signup']
        }
    })
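# Illustrative shape of the JSON returned by users() above; the keys come
# from the code, but the values are made up:
#
#   {
#     "status": "ok",
#     "user_info": {
#       "filters": ["@univ.fr"],
#       "admin": [{"id": 1, "email": "root@univ.fr", "firstname": "...",
#                  "lastname": "...", "email_confirmed": true}],
#       "authorized": [],
#       "pending": [{"id": 2, "email": "new@univ.fr", "firstname": "...",
#                    "lastname": "...", "email_confirmed": false}],
#       "smtp_config": {"smtp_address": "...", "smtp_port": "...",
#                       "account": "...", "password": "..."},
#       "email_signup": false
#     }
#   }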
def nginx_stream():
    cluster_desc = get_cluster_desc()
    nginx_stream_config = """
    ## <stream config for pi.seduce.fr>
    stream {
    """
    for idx, node in enumerate(cluster_desc["nodes"].values(), start=1):
        ssh_port_number = 22000 + idx
        server_ip = node.get("ip")
        nginx_stream_config += f"""
        upstream ssh_pi{idx} {{
            server {server_ip}:22;
        }}

        server {{
            listen {ssh_port_number};
            proxy_pass ssh_pi{idx};
            ssl_preread on;
        }}
        """
    nginx_stream_config += """
    }
    ## </stream config for pi.seduce.fr>
    """
    return '<pre>' + nginx_stream_config + '</pre>'
def build_env():
    """ Execute the 'save environment' action => create a file image
        from the internal storage of the server """
    cluster_desc = get_cluster_desc()
    db_session = open_session()
    db_user = db_session.query(User).filter_by(email=current_user.id).first()
    deployment = db_session.query(Deployment).filter_by(
        user_id=db_user.id,
        node_name=flask.request.form.get('node_name')).filter(
            Deployment.state != "destroyed").first()
    env = cluster_desc['environments'][deployment.environment]
    env['name'] = flask.request.form.get("user_env_name")
    env['img_name'] = env['name'] + '.img.gz'
    user_ssh_user = flask.request.form.get("user_ssh_user")
    if len(user_ssh_user) > 0:
        env['ssh_user'] = user_ssh_user
    env['type'] = 'user'
    env_file_path = cluster_desc['env_cfg_dir'] + env['name'].replace(
        ' ', '_') + '.json'
    with open(env_file_path, 'w') as jsonfile:
        json.dump(env, jsonfile)
    deployment.process = 'save_env'
    deployment.state = 'img_part'
    deployment.temp_info = env['name']
    close_session(db_session)
    return flask.redirect(flask.url_for("app.home"))
def take(server_info):
    """ Create deployments in the 'initialized' state """
    cluster_desc = get_cluster_desc()
    db_session = open_session()
    db_user = db_session.query(User).filter_by(email=current_user.id).first()
    # Delete previous deployments still in the 'initialized' state
    db_session.query(Deployment).filter_by(
        user_id=db_user.id, state="initialized").delete()
    # Reserve the nodes to prevent other users from taking them
    info = server_info.split(";")
    server_ids = info[0].split(",")
    server_names = info[1].replace(' ', '').split(",")
    for n_name in server_names:
        new_deployment = Deployment()
        new_deployment.process = "deploy"
        new_deployment.state = "initialized"
        new_deployment.node_name = n_name
        new_deployment.user_id = db_user.id
        new_deployment.name = "initialized"
        db_session.add(new_deployment)
    close_session(db_session)
    # Reload the cluster description to add new environments
    load_cluster_desc()
    return flask.render_template(
        "form_take.html.jinja2",
        server_ids=server_ids,
        server_names=server_names,
        environments=[
            env for env in cluster_desc["environments"].values()
            if env['type'] != 'hidden'
        ])
def email_config():
    # Modify the 'email_signup' parameter
    cluster_desc = get_cluster_desc()
    new_value = flask.request.form.get("esup_value") == 'true'
    if new_value != cluster_desc['email_signup']:
        set_email_signup(new_value)
    # Modify the SMTP configuration
    config = load_config()
    new_config = config['mail']
    new_server = flask.request.form.get('smtp_server')
    new_port = flask.request.form.get('smtp_port')
    new_user = flask.request.form.get('smtp_user')
    new_password = flask.request.form.get('smtp_pwd')
    smtp_change = False
    if len(new_server) > 0 and config['mail']['smtp_address'] != new_server:
        smtp_change = True
        new_config['smtp_address'] = new_server
    if len(new_port) > 0 and config['mail']['smtp_port'] != new_port:
        smtp_change = True
        new_config['smtp_port'] = new_port
    if len(new_user) > 0 and config['mail']['account'] != new_user:
        smtp_change = True
        new_config['account'] = new_user
    if len(new_password) > 0 and config['mail']['password'] != new_password:
        smtp_change = True
        new_config['password'] = new_password
    if smtp_change:
        save_mail_config(new_config)
    return flask.redirect(flask.url_for("app.admin"))
def set_power_port(switch_name, port, value):
    switch_desc = get_cluster_desc()['switches'][switch_name]
    snmp_address = "%s.%d" % (switch_desc['oid'],
                              switch_desc['oid_offset'] + int(port))
    cmd = "snmpset -v2c -c %s %s %s i %s" % (
        switch_desc['community'], switch_desc['ip'], snmp_address, value)
    subprocess.run(cmd.split(),
                   check=True,
                   stdout=subprocess.DEVNULL,
                   stderr=subprocess.DEVNULL)
    return True
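# turn_on_port() and turn_off_port() are defined elsewhere in the project.
# A minimal sketch of what they could look like, assuming they simply wrap
# set_power_port() with the integer PoE states that get_poe_status() reads
# back ('1' for ON, '2' for OFF); the real helpers may differ:
#
#   def turn_on_port(switch_name, port):
#       return set_power_port(switch_name, port, 1)
#
#   def turn_off_port(switch_name, port):
#       return set_power_port(switch_name, port, 2)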
def get_poe_status(switch_name):
    switch_desc = get_cluster_desc()['switches'][switch_name]
    oid = switch_desc['oid']
    cmd = "snmpwalk -v2c -c %s %s %s" % (
        switch_desc['community'], switch_desc['ip'], oid[:oid.rindex('.')])
    process = subprocess.run(cmd,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.DEVNULL,
                             universal_newlines=True)
    power_state = process.stdout.split('\n')
    # Keep the last character of every non-empty line (the port state digit)
    return [p[-1] for p in power_state if len(p) > 0]
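# Illustrative snmpwalk output for the parsing above (the OIDs are made up);
# each line ends with the state digit that poe_status() maps to ON/OFF:
#
#   iso.3.6.1.2.1.105.1.1.1.3.1.1 = INTEGER: 1   ->  '1' (ON)
#   iso.3.6.1.2.1.105.1.1.1.3.1.2 = INTEGER: 2   ->  '2' (OFF)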
def nginx_http():
    cluster_desc = get_cluster_desc()
    nginx_http_config = """
    ## <http config for pi.seduce.fr>
    """
    for idx, node in enumerate(cluster_desc["nodes"].values(), start=1):
        server_ip = node.get("ip")
        nginx_http_config += f"""
        server {{
            listen 80;
            listen 443 ssl;
            server_name pi{idx}.seduce.fr;

            location / {{
                proxy_http_version 1.1;
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection "upgrade";
                client_max_body_size 5m;
                proxy_pass http://{server_ip}:8181;
            }}

            error_page 502 https://seduce.fr/maintenance;
        }}
        """
    nginx_http_config += """
    server {
        listen 80;
        listen 443 ssl;
        server_name ~^(?<target_port>.+)\.pi4\.seduce\.fr$;

        location / {
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            client_max_body_size 5m;
            proxy_pass http://192.168.1.54:$target_port;
        }

        #error_page 502 https://seduce.fr/maintenance;
    }
    """
    nginx_http_config += """
    ## </http config for pi.seduce.fr>
    """
    return '<pre>' + nginx_http_config + '</pre>'
def poe_status(switch_name):
    cluster_desc = get_cluster_desc()
    switch = cluster_desc['switches'][switch_name]
    poe = get_poe_status(switch['name'])
    status = []
    for port in range(switch['port_nb']):
        if poe[port] == '1':
            status.append('ON')
        elif poe[port] == '2':
            status.append('OFF')
        else:
            status.append('UNKNOWN')
    return {'status': status}
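# Illustrative response for an 8-port switch (values are made up):
#
#   {'status': ['ON', 'ON', 'OFF', 'UNKNOWN', 'ON', 'OFF', 'OFF', 'ON']}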
def user_deployments():
    cluster_desc = copy.deepcopy(get_cluster_desc())
    user = current_user
    session = open_session()
    db_user = session.query(User).filter_by(email=user.id).first()
    deployments = session.query(Deployment).filter(
        Deployment.state != "destroyed").filter_by(
            user_id=db_user.id).order_by(Deployment.node_name).all()
    deployment_info = {}
    for d in deployments:
        if d.name not in deployment_info:
            # The deployment state is used to show/hide both the 'destroy'
            # and the 'More info' buttons
            deployment_info[d.name] = {
                "name": d.name,
                "state": d.state,
                "user_id": d.user_id,
                "ids": [],
                "server_names": [],
                "server_infos": []
            }
        deployment_info[d.name]["ids"].append(d.id)
        node_desc = cluster_desc["nodes"][d.node_name]
        deployment_info[d.name]["server_names"].append(node_desc["name"])
        if d.environment is not None:
            if d.start_date is not None:
                s_date = datetime.datetime.strptime(str(d.start_date),
                                                    '%Y-%m-%d %H:%M:%S')
                node_desc['starts_at'] = s_date.strftime("%d %b. at %H:%M")
            if d.state == 'lost':
                node_desc['last_state'] = d.temp_info
            env_desc = cluster_desc["environments"][d.environment]
            web_interface = False
            if 'web' in env_desc:
                web_interface = env_desc['web']
            node_desc['number'] = int(node_desc["name"].split('-')[1])
            node_desc['env'] = d.environment
            node_desc['state'] = d.state
            if d.state.endswith('_check'):
                node_desc["progress"] = d.temp_info
            else:
                node_desc["progress"] = 100
            node_desc['password'] = d.system_pwd
            node_desc['web'] = web_interface
            node_desc['desc'] = env_desc['desc']
            deployment_info[d.name]["server_infos"].append(node_desc)
    close_session(session)
    return {"deployments": list(deployment_info.values())}
def confirm_email(token):
    logger = logging.getLogger("LOGIN")
    logger.info("Received the token '%s' to confirm the email address" % token)
    db_session = open_session()
    user_candidate = db_session.query(User).filter(
        User.email_confirmation_token == token).first()
    url = "Bad request: could not find the given token '%s'" % token
    if user_candidate is not None:
        user_candidate.email_confirmed = True
        if get_cluster_desc()['email_signup']:
            user_candidate.user_authorized = True
            url = flask.redirect(
                flask.url_for("login.confirmation_authorized_account"))
        else:
            url = flask.redirect(flask.url_for("login.confirmation_email"))
    close_session(db_session)
    return url
def resources(res_type):
    cluster_desc = copy.deepcopy(get_cluster_desc())
    session = open_session()
    # Get my user_id
    db_user = session.query(User).filter(User.email == current_user.id).first()
    # Get information about the used resources
    not_destroyed_deployments = session.query(Deployment).filter(
        Deployment.state != "destroyed").all()
    used_nodes = {}
    id2email = {}
    for d in not_destroyed_deployments:
        if d.user_id == db_user.id:
            # This is one of my deployments
            used_nodes[d.node_name] = {
                'user': '******',
                'dep_name': d.name,
                'state': d.state
            }
        else:
            # This is not my deployment: get information about its owner
            if d.user_id not in id2email:
                foreign = session.query(User).filter(
                    User.id == d.user_id).first()
                id2email[foreign.id] = foreign.email
            used_nodes[d.node_name] = {
                'user': id2email[d.user_id],
                'dep_name': d.name
            }
        if d.start_date is not None:
            s_date = datetime.datetime.strptime(str(d.start_date),
                                                '%Y-%m-%d %H:%M:%S')
            used_nodes[d.node_name]['starts_at'] = s_date.strftime(
                "%d %b. at %H:%M")
    close_session(session)
    result = {}
    for node in cluster_desc['nodes'].values():
        if res_type in node:
            if node[res_type] not in result:
                result[node[res_type]] = {'name': node[res_type], 'values': []}
            if node['name'] in used_nodes:
                for key in used_nodes[node['name']]:
                    node[key] = used_nodes[node['name']][key]
            else:
                node['user'] = ''
            result[node[res_type]]['values'].append(node)
    return {'resources': list(result.values())}
def switches():
    cluster_desc = get_cluster_desc()
    # Read the state of the nodes from the deployment table
    db_session = open_session()
    states = db_session.query(Deployment).filter(
        Deployment.state != 'destroyed').all()
    node_states = {}
    for s in states:
        node_states[s.node_name] = s.state
    close_session(db_session)
    # Read the switch information
    all_switches = []
    for switch in cluster_desc['switches'].values():
        switch_desc = {'name': switch['name'], 'ip': switch['ip'], 'ports': []}
        for port in range(switch['port_nb']):
            # Group the ports in rows of four
            if port % 4 == 0:
                switch_desc['ports'].append({})
            if port + 1 == switch['master_port']:
                switch_desc['ports'][-1][str(port + 1)] = {
                    'port': port + 1,
                    'name': 'pimaster',
                    'ip': cluster_desc['pimaster']['ip'],
                    'poe_state': 'ON',
                    'node_state': 'private'
                }
            else:
                switch_desc['ports'][-1][str(port + 1)] = {
                    'port': port + 1,
                    'poe_state': 'UNKNOWN'
                }
        for node in cluster_desc['nodes'].values():
            if 'switch' in node and node['switch'] == switch['name']:
                port_row = (node['port_number'] - 1) // 4
                my_state = 'free'
                if node['name'] in node_states:
                    my_state = node_states[node['name']]
                switch_desc['ports'][port_row][str(node['port_number'])] = {
                    'port': node['port_number'],
                    'name': node['name'],
                    'ip': node['ip'],
                    'poe_state': 'ON',
                    'node_state': my_state
                }
        all_switches.append(switch_desc)
    return {'switches': all_switches}
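# Illustrative shape of the structure returned above for a single switch,
# with the ports grouped in rows of four (names and addresses are made up):
#
#   {'switches': [{
#       'name': 'main', 'ip': '192.168.1.2',
#       'ports': [
#           {'1': {'port': 1, 'name': 'pimaster', 'ip': '192.168.1.1',
#                  'poe_state': 'ON', 'node_state': 'private'},
#            '2': {'port': 2, 'name': 'node-1', 'ip': '192.168.1.51',
#                  'poe_state': 'ON', 'node_state': 'free'},
#            '3': {'port': 3, 'poe_state': 'UNKNOWN'},
#            '4': {'port': 4, 'poe_state': 'UNKNOWN'}},
#           ...
#       ]}]}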
def turnMe(switch_ports, onOff):
    cluster_desc = get_cluster_desc()
    switch_ports = switch_ports.split(',')
    switch = cluster_desc['switches'].get(switch_ports[0].split('-')[0])
    result = []
    if switch is not None:
        for port in switch_ports:
            port_number = int(port.split('-')[1])
            if switch['master_port'] == port_number:
                # Never power off the pimaster
                result.append('error')
            else:
                if onOff == 'on':
                    turn_on_port(switch['name'], port_number)
                else:
                    turn_off_port(switch['name'], port_number)
                result.append('done')
    return {'status': result}
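# Example call, assuming a switch named 'main' in the cluster description;
# the expected 'switch_ports' format is '<switch>-<port>[,<switch>-<port>...]':
#
#   turnMe('main-3,main-4', 'on')   # -> {'status': ['done', 'done']}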
def signup():
    cluster_desc = get_cluster_desc()
    if flask.request.method == 'GET':
        next_url = flask.request.args.get("next")
        return render_template("login/signup.html", next_url=next_url)
    email = flask.request.form['email']
    firstname = flask.request.form['firstname']
    lastname = flask.request.form['lastname']
    password = flask.request.form['password']
    confirm_password = flask.request.form['confirm_password']
    db_session = open_session()
    existing_email = db_session.query(User).filter(User.email == email).all()
    if password != confirm_password:
        return 'The two passwords are not identical! <a href="/signup">Try again</a>'
    if len(existing_email) > 0:
        return "Your email address '%s' already exists. <a href='/'>Try to log in</a>" % email
    email_filters = cluster_desc['email_filters']
    filter_ok = len(email_filters) == 0
    for f in email_filters:
        if f in email:
            filter_ok = True
    if not filter_ok:
        return 'Wrong email address. Your domain name is not in the ' \
               'authorized domains. Please contact the administrator or ' \
               '<a href="/signup">try with another email address</a>'
    user = User()
    user.email = email
    user.firstname = firstname
    user.lastname = lastname
    user._set_password = password
    db_session.add(user)
    close_session(db_session)
    email_conf = get_email_configuration()
    if len(email_conf['smtp_server_url']) > 0 and \
            email_conf['smtp_server_url'] != 'no_config':
        # Send an email to confirm the user's email address
        send_confirmation_request(email, firstname)
    if cluster_desc['email_signup']:
        redirect_url = flask.url_for("login.confirmation_created_account")
    else:
        redirect_url = flask.url_for("login.wait_admin_approval")
    return flask.redirect(redirect_url)
def available_servers():
    cluster_desc = get_cluster_desc()
    session = open_session()
    db_user = session.query(User).filter(User.email == current_user.id).first()
    if db_user is None:
        close_session(session)
        return json.dumps({"status": "ko"})
    not_destroyed_deployments = session.query(Deployment).filter(
        Deployment.state != "destroyed").all()
    server_info = {}
    for s in cluster_desc["nodes"].values():
        server_info[s["name"]] = {
            "id": s["id"],
            "name": s["name"],
            "ip": s["ip"],
            "state": "free"
        }
    id2email = {}
    for d in not_destroyed_deployments:
        if d.user_id == db_user.id:
            server_info[d.node_name]["state"] = d.state
            server_info[d.node_name]["dname"] = d.name
            server_info[d.node_name]["env"] = d.environment
            if d.state.endswith('_check'):
                server_info[d.node_name]["progress"] = d.temp_info
            else:
                server_info[d.node_name]["progress"] = 100
        else:
            server_info[d.node_name]["state"] = "in_use"
            server_info[d.node_name]["progress"] = 100
            if d.user_id not in id2email:
                foreign = session.query(User).filter(
                    User.id == d.user_id).first()
                id2email[foreign.id] = foreign.email
            server_info[d.node_name]["dname"] = d.name
            server_info[d.node_name]["email"] = id2email[d.user_id]
    close_session(session)
    return json.dumps({
        "status": "ok",
        "server_info": list(server_info.values())
    })
def reserve_free_nodes(test_user_id,
                       stats,
                       nb_nodes,
                       random_select=True,
                       test_env="tiny_core"):
    db_session = open_session()
    deployments = db_session.query(Deployment).filter(
        Deployment.state != "destroyed").all()
    used_nodes = [d.node_name for d in deployments]
    cluster_desc = get_cluster_desc()
    logger.info("Unavailable nodes: %s" % used_nodes)
    free_nodes = []
    ssh_user_env = None
    shell_env = None
    script_env = None
    if test_env != boot_test_environment:
        for env in cluster_desc["environments"].values():
            if env['name'] == test_env:
                ssh_user_env = env['ssh_user']
                shell_env = env['shell']
                script_env = env['script_test']
        if ssh_user_env is None or shell_env is None:
            logger.error("Environment '%s' not found!" % test_env)
            sys.exit(13)
    for server in cluster_desc["nodes"].values():
        if server.get("name") not in used_nodes:
            free_nodes.append({
                'name': server.get("name"),
                'ip': server.get('ip'),
                'ssh_user': ssh_user_env,
                'shell': shell_env,
                'script': script_env,
                'env': test_env
            })
    if len(free_nodes) > nb_nodes:
        if random_select:
            selected_nodes = random.sample(free_nodes, nb_nodes)
        else:
            # Sort the nodes using the node number
            selected_nodes = sorted(
                free_nodes,
                key=lambda node: int(node['name'].split('-')[1]))[:nb_nodes]
    else:
        selected_nodes = free_nodes
    process = subprocess.run("cat %s" % pubkey_file,
                             shell=True,
                             check=True,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
    for node in selected_nodes:
        new_deployment = Deployment()
        if test_env == boot_test_environment:
            new_deployment.process = "boot_test"
            new_deployment.state = "boot_conf"
        else:
            new_deployment.process = "deploy"
            new_deployment.state = "boot_conf"
            new_deployment.environment = test_env
        # Update the operating system
        new_deployment.os_update = 1
        new_deployment.node_name = node['name']
        new_deployment.name = test_deployment_name
        new_deployment.system_size = 8
        new_deployment.system_pwd = "superC9PWD"
        new_deployment.public_key = process.stdout
        new_deployment.init_script = node['script']
        new_deployment.start_date = datetime.now()
        new_deployment.user_id = test_user_id
        db_session.add(new_deployment)
        # Write statistics about the deployments
        if test_env in stats:
            if node['name'] not in stats[test_env]:
                stats[test_env][node['name']] = []
        else:
            stats[test_env] = {node['name']: []}
        stats[test_env][node['name']].append({
            'ip': node['ip'],
            'start_date': str(datetime.now()),
            'states': {
                'last_state': '',
                'last_date': None
            },
            'total': 0,
            'ping': False,
            'ssh': False,
            'init_script': False
        })
    close_session(db_session)
    return selected_nodes
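# Hypothetical usage, reserving four free nodes picked at random for a
# deployment test (the user id 1 and the environment name are made up):
#
#   stats = {}
#   nodes = reserve_free_nodes(1, stats, 4, random_select=True,
#                              test_env="tiny_core")
#   logger.info("Reserved nodes: %s" % [n['name'] for n in nodes])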
    else:
        logger.info("Writing the statistics to '%s'" % file_summary)
        if not os.path.isfile(file_summary):
            with open(file_summary, 'w') as fsum:
                fsum.write(
                    'date_environment failed_deployments complete_deployments'
                    ' active_nodes environment_name\n')
        with open(file_summary, 'a') as fsum:
            fsum.write(' %s %2d %2d %2d %s\n' %
                       (file_id, failed_nodes, success_nodes, active_nodes,
                        dep_env))


if __name__ == "__main__":
    cluster_desc = get_cluster_desc()
    random_select = True
    for nb_nodes in [4]:
        stats_data = {}
        file_id = datetime.now().strftime("%y_%m_%d_%H_%M")
        file_stats = 'json_test/%d_nodes_%s.json' % (nb_nodes, file_id)
        if '/' in file_stats:
            dirs = file_stats.replace(os.path.basename(file_stats), '')
            if not os.path.exists(dirs):
                print("Path '%s' does not exist" % dirs)
                sys.exit(12)
        #for env in [{'name': boot_test_environment}]:
        for env in [{"name": "ubuntu_20.04_32bit"}]:
        #for env in cluster_desc["environments"].values():
            logger.info("Deploying the '%s' environment" % env['name'])
            testing_environment(env['name'], file_id, stats_data, nb_nodes,
                                random_select)
def check_port(switch_port):
    cluster_desc = get_cluster_desc()
    switch = cluster_desc['switches'][switch_port.split('-')[0]]
    port_number = int(switch_port.split('-')[1])
    # Do not turn off the pimaster
    if switch['master_port'] == port_number:
        return {'status': 'failed'}
    # Look for the node attached to the port
    my_node = None
    for node in cluster_desc['nodes'].values():
        if node['switch'] == switch['name'] and \
                node['port_number'] == port_number:
            my_node = node
    # No node linked to the port
    if my_node is None:
        return {'status': 'failed'}
    # Check that the node is not currently used
    db_session = open_session()
    states = db_session.query(Deployment).filter(
        Deployment.state != 'destroyed').filter(
            Deployment.node_name == my_node['name']).all()
    close_session(db_session)
    if len(states) > 0:
        return {'status': 'failed'}
    # Create the TFTP boot folder
    tftpboot_template_folder = "/tftpboot/rpiboot_uboot"
    tftpboot_node_folder = "/tftpboot/%s" % my_node["id"]
    if os.path.isdir(tftpboot_node_folder):
        shutil.rmtree(tftpboot_node_folder)
    os.mkdir(tftpboot_node_folder)
    for tftpfile in glob('%s/*' % tftpboot_template_folder):
        if tftpfile.endswith('cmdline.txt'):
            shutil.copyfile(
                tftpfile,
                tftpfile.replace(tftpboot_template_folder,
                                 tftpboot_node_folder))
        else:
            os.symlink(
                tftpfile,
                tftpfile.replace(tftpboot_template_folder,
                                 tftpboot_node_folder))
    # Turn off the PoE port
    turn_off_port(switch['name'], port_number)
    # Ping the node IP address (the port is off, so a reply means the IP
    # is not bound to this port)
    cmd = 'ping -c 1 -W 1 %s' % my_node['ip']
    process = subprocess.run(cmd,
                             shell=True,
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL)
    ret_code = process.returncode
    if ret_code == 0:
        return {'status': 'failed'}
    # Turn on the PoE port
    turn_on_port(switch['name'], port_number)
    # Try to ping the node
    time.sleep(30)
    ret_code = 1
    nb = 0
    while ret_code != 0 and nb < 6:
        cmd = 'ping -c 1 -W 1 %s' % my_node['ip']
        process = subprocess.run(cmd,
                                 shell=True,
                                 stdout=subprocess.DEVNULL,
                                 stderr=subprocess.DEVNULL)
        ret_code = process.returncode
        nb += 1
        if ret_code != 0:
            time.sleep(10)
    # Delete the TFTP directory
    shutil.rmtree(tftpboot_node_folder)
    # Turn off the PoE port
    turn_off_port(switch['name'], port_number)
    if ret_code == 0:
        return {'status': 'succeed'}
    return {'status': 'failed'}
def analyze_port(switch_ports):
    logger = logging.getLogger("STATE_EXEC")
    cluster_desc = get_cluster_desc()
    switch_ports = switch_ports.split(',')
    switch = cluster_desc['switches'].get(switch_ports[0].split('-')[0])
    if switch is None:
        return {'status': 'ko'}
    existing_nodes = list(cluster_desc['nodes'].keys())
    # Node index used to compute the new node names
    if len(existing_nodes) == 0:
        node_name_idx = 0
    else:
        node_name_idx = int(existing_nodes[-1].split('-')[1])
    new_nodes = []
    last_dot_idx = cluster_desc['first_node_ip'].rindex('.')
    network_ip = cluster_desc['first_node_ip'][:last_dot_idx]
    ip_offset = int(cluster_desc['first_node_ip'].split('.')[-1]) - 1
    # Expose the TFTP files to all nodes (boot from the NFS server)
    logger.info('Copy TFTP files to the tftpboot directory')
    tftp_files = glob('/tftpboot/rpiboot_uboot/*')
    for f in tftp_files:
        if os.path.isdir(f):
            new_f = '/tftpboot/%s' % os.path.basename(f)
            if not os.path.isdir(new_f):
                shutil.copytree(f, new_f)
        else:
            shutil.copy(f, '/tftpboot/%s' % os.path.basename(f))
    # TODO Check there is no deployment using the node attached to this port
    for port in switch_ports:
        port_number = int(port.split('-')[1])
        if switch['master_port'] == port_number:
            logger.info('Can not analyze the pimaster port. Aborting!')
        else:
            logger.info('Analyzing the node on the port %d' % port_number)
            # Turn off the node
            turn_off_port(switch['name'], port_number)
            time.sleep(1)
            # Turn on the node
            turn_on_port(switch['name'], port_number)
            time.sleep(5)
            logger.info('Capturing DHCP requests')
            # Listen to the DHCP requests for 20 seconds
            cmd = "rm -f /tmp/port.pcap; tshark -nni eth0 -w /tmp/port.pcap" \
                  " -a duration:20 port 67 and 68"
            subprocess.run(cmd,
                           shell=True,
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)
            # Analyze the captured requests to detect the MAC address
            cmd = "tshark -r /tmp/port.pcap -Y 'bootp.option.type == 53" \
                  " and bootp.ip.client == 0.0.0.0' -T fields" \
                  " -e frame.time -e bootp.ip.client -e bootp.hw.mac_addr" \
                  " | awk '{ print $7 }' | uniq"
            process = subprocess.run(cmd,
                                     shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.DEVNULL,
                                     universal_newlines=True)
            # Cast to a set to remove duplicate strings
            mac = set(process.stdout.split('\n'))
            # Keep only the strings that look like Raspberry Pi MAC addresses
            mac = [
                m for m in mac if len(m) == 17 and (
                    m.startswith("dc:a6:32") or m.startswith("b8:27:eb"))
            ]
            if len(mac) == 1:
                # The MAC address is detected
                node_name_idx += 1
                node_ip = '%s.%d' % (network_ip, node_name_idx + ip_offset)
                node_name = 'node-%d' % node_name_idx
                mac = mac[0]
                logger.info('The MAC address is %s' % mac)
                # Add the IP to the DHCP server
                cmd = "sed -i '/%s/d' /etc/dnsmasq.conf;" \
                      " echo 'dhcp-host=%s,%s,%s' >> /etc/dnsmasq.conf" % (
                          mac, mac, node_name, node_ip)
                subprocess.run(cmd,
                               shell=True,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
                # Restart the DHCP server
                cmd = 'service dnsmasq restart'
                subprocess.run(cmd,
                               shell=True,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
                new_nodes.append({
                    'name': node_name,
                    'port_number': port_number,
                    'ip': node_ip,
                    'switch': switch['name']
                })
            else:
                logger.info("Wrong MAC addresses detected: %s" % mac)
                # Turn off the node
                turn_off_port(switch['name'], port_number)
    # Start all the new nodes
    for node in new_nodes:
        logger.info("%s: Starting..." % node['name'])
        time.sleep(1)
        turn_on_port(switch['name'], node['port_number'])
    # Let the nodes boot
    time.sleep(30)
    # Get the model and the identifier of the nodes from SSH connections
    for node in new_nodes:
        again = 0
        while again < 9:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            try:
                ssh.connect(node['ip'], username='******', timeout=1.0)
                again = 42
                (stdin, stdout, stderr) = ssh.exec_command("cat /proc/cpuinfo")
                return_code = stdout.channel.recv_exit_status()
                for line in stdout.readlines():
                    output = line.strip()
                    if 'Revision' in output:
                        rev = output.split()[-1]
                        if rev == 'c03111':
                            node['model'] = 'RPI4B'
                        elif rev == 'a020d3':
                            node['model'] = 'RPI3Bplus'
                        else:
                            node['model'] = 'RPI4B'
                    if 'Serial' in output:
                        node['id'] = output.split()[-1][-8:]
                ssh.close()
                # Write the configuration file of the node
                logger.info('%s: Writing the configuration file' %
                            node['name'])
                with open('cluster_desc/nodes/%s.json' % node['name'],
                          'w+') as conf:
                    json.dump(node, conf, indent=4)
                load_cluster_desc()
            except (AuthenticationException, SSHException, socket.error):
                logger.info('%s: Can not connect via SSH' % node['name'])
                again += 1
                time.sleep(10)
        # Turn off the node
        turn_off_port(switch['name'], node['port_number'])
    # Clean the TFTP folder
    logger.info("Cleaning the TFTP folder")
    for f in tftp_files:
        new_f = f.replace('/rpiboot_uboot', '')
        if os.path.isdir(new_f):
            shutil.rmtree(new_f)
        elif 'bootcode.bin' not in new_f:
            os.remove(new_f)
    return {'nodes': new_nodes}
def dump_cluster_desc():
    return '<pre>' + json.dumps(get_cluster_desc(), indent=2) + '</pre>'