def test_nested_cycles(self):
    watch = Watch()

    def job(i):
        return Job(co_print_sleep(watch, .2, f"job {i}"), label=f"job{i}")

    js1, js2, js3 = [job(i) for i in range(11, 14)]
    s2 = Scheduler(Sequence(js1, js2, js3))
    j1, j3 = job(1), job(3)
    s1 = Scheduler(Sequence(j1, s2, j3))
    self.assertTrue(s1.check_cycles())
    # create a cycle in the subgraph
    js1.requires(js3)
    self.assertFalse(s1.check_cycles())
    # restore the OK state
    js1.requires(js3, remove=True)
    self.assertTrue(s1.check_cycles())
    # add a cycle at the top level
    j1.requires(j3)
    self.assertFalse(s1.check_cycles())
    # restore the OK state
    j1.requires(j3, remove=True)
    self.assertTrue(s1.check_cycles())
    # add one level down
    s3 = Scheduler()
    jss1, jss2, jss3 = [job(i) for i in range(111, 114)]
    Sequence(jss1, jss2, jss3, scheduler=s3)
    # surgery in s2; still no cycles
    s2.remove(js2)
    s2.sanitize()
    s2.add(s3)
    s3.requires(js1)
    js3.requires(s3)
    self.assertTrue(s1.check_cycles())
    # add a cycle in s3
    js1.requires(js3)
    self.assertFalse(s1.check_cycles())
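# For reference, here is a minimal, self-contained sketch of what a check
# such as check_cycles() has to decide: whether the "requires" edges admit
# a topological order (Kahn's algorithm). This is illustrative only, not
# asynciojobs' actual implementation; the 'requires' dict is a stand-in
# for the jobs' requirement sets.
from collections import deque

def has_no_cycle(requires: dict) -> bool:
    """requires maps a node to the set of nodes it depends on."""
    # count outstanding dependencies per node
    pending = {node: len(deps) for node, deps in requires.items()}
    # reverse the edges: dep -> nodes that wait on it
    waiters = {node: [] for node in requires}
    for node, deps in requires.items():
        for dep in deps:
            waiters[dep].append(node)
    ready = deque(node for node, count in pending.items() if count == 0)
    seen = 0
    while ready:
        node = ready.popleft()
        seen += 1
        for waiter in waiters[node]:
            pending[waiter] -= 1
            if pending[waiter] == 0:
                ready.append(waiter)
    # nodes that never became ready sit on a cycle
    return seen == len(requires)

# mirrors the test above: a -> b -> c is fine, closing the loop breaks it
assert has_no_cycle({'a': set(), 'b': {'a'}, 'c': {'b'}})
assert not has_no_cycle({'a': {'c'}, 'b': {'a'}, 'c': {'b'}})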
def run(*, gateway, slicename,
        disaggregated_cn, operator_version,
        nodes, node_master, node_enb, quectel_nodes, phones,
        flexran, drone, verbose, dry_run,
        load_images, master_image, worker_image, quectel_image):
    """
    Install K8S on R2lab

    Arguments:
        slicename: the Unix login name (slice name) to enter the gateway
        quectel_nodes: list of indices of Quectel UE nodes to use
        phones: list of indices of phones to use
        nodes: a list of node ids to run the scenario on; strings or ints are OK
        node_master: the master node id, which must be part of the selected nodes
        node_enb: the node id for the eNB, which is connected to the USRP/duplexer
        disaggregated_cn: boolean; True for the disaggregated CN scenario,
            False for the all-in-one CN scenario
        operator_version: str, one of "none", "v1" or "v2"
    """

    only_kube5g = operator_version == "none"

    if node_master not in nodes:
        print(f"master node {node_master} must be part of selected fit nodes {nodes}")
        exit(1)
    if node_enb not in nodes:
        print(f"eNB worker node {node_enb} must be part of selected fit nodes {nodes}")
        exit(1)

    # check whether a browser can be run automatically to display the drone app
    if drone:
        run_browser = True
        if platform == "linux":
            cmd_open = "xdg-open"
        elif platform == "darwin":
            cmd_open = "open"
        else:
            run_browser = False
        if run_browser:
            print(f"**** Will run the browser with command {cmd_open}")
        else:
            print(f"**** Will not be able to run the browser as platform is {platform}")

    worker_ids = nodes[:]
    worker_ids.remove(node_master)

    quectel_ids = quectel_nodes[:]
    quectel = len(quectel_ids) > 0

    faraday = SshNode(hostname=default_gateway, username=slicename,
                      verbose=verbose, formatter=TimeColonFormatter())

    master = SshNode(gateway=faraday, hostname=fitname(node_master),
                     username="******",
                     verbose=verbose, formatter=TimeColonFormatter())

    node_index = {
        id: SshNode(gateway=faraday, hostname=fitname(id), username="******",
                    formatter=TimeColonFormatter(), verbose=verbose)
        for id in nodes
    }
    nodes_quectel_index = {
        id: SshNode(gateway=faraday, hostname=fitname(id), username="******",
                    formatter=TimeColonFormatter(), verbose=verbose)
        for id in quectel_nodes
    }
    worker_index = dict(node_index)
    del worker_index[node_master]
    fit_master = fitname(node_master)
    fit_enb = fitname(node_enb)

    # the global scheduler
    scheduler = Scheduler(verbose=verbose)

    ##########
    check_lease = SshJob(
        scheduler=scheduler,
        node=faraday,
        critical=True,
        verbose=verbose,
        command=Run("rhubarbe leases --check"),
    )

    green_light = check_lease

    if load_images:
        green_light = [
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=True,
                verbose=verbose,
                label=f"Load image {master_image} on master {fit_master}",
                commands=[
                    Run(f"rhubarbe load {node_master} -i {master_image}"),
                    Run(f"rhubarbe wait {node_master}"),
                ]
            ),
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=True,
                verbose=verbose,
                label=f"Load image {worker_image} on worker nodes",
                commands=[
                    # if the USRP is on, loading could be problematic...
                    Run(f"rhubarbe usrpoff {node_enb}"),
                    Run("rhubarbe", "load", *worker_ids, "-i", worker_image),
                    Run("rhubarbe", "wait", *worker_ids),
                    # ensure a reset of the USRP on the eNB node
                    Run(f"rhubarbe usrpon {node_enb}"),
                ],
            ),
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=False,
                verbose=verbose,
                label="turning off unused nodes",
                commands=[
                    Run("rhubarbe bye --all " + "".join(f"~{x} " for x in nodes))
                ]
            )
        ]

    if quectel:
        prepare_quectel = SshJob(
            scheduler=scheduler,
            required=green_light,
            node=faraday,
            critical=True,
            verbose=verbose,
            label=f"Load image {quectel_image} on Quectel UE nodes",
            commands=[
                Run("rhubarbe", "usrpoff", *quectel_ids),
                Run("rhubarbe", "load", *quectel_ids, "-i", quectel_image),
                Run("rhubarbe", "wait", *quectel_ids),
                Run("rhubarbe", "usrpon", *quectel_ids),
            ],
        )

    ##########
    # prepare the Quectel UE nodes
    if quectel:
        # wait 30s for the Quectel modules to show up
        wait_quectel_ready = PrintJob(
            "Let Quectel modules show up",
            scheduler=scheduler,
            required=prepare_quectel,
            sleep=30,
            label="sleep 30s for the Quectel modules to show up"
        )
        # run the Quectel connection manager as a service on each Quectel UE node
        quectelCM_service = Service(
            command="quectel-CM -s oai.ipv4 -4",
            service_id="QuectelCM",
            verbose=verbose,
        )
        init_quectel_nodes = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectel_ready,
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Init Quectel UE on fit node {id}",
                commands=[
                    RunScript(find_local_embedded_script("nodes.sh"),
                              "check-quectel-on", includes=INCLUDES),
                    quectelCM_service.start_command(),
                ],
            ) for id, node in nodes_quectel_index.items()
        ]
        # wait 20s for the Quectel connection manager to start up
        wait_quectelCM_ready = PrintJob(
            "Let QuectelCM start up",
            scheduler=scheduler,
            required=init_quectel_nodes,
            sleep=20,
            label="sleep 20s for the Quectel connection manager(s) to start up"
        )
        detach_quectel_nodes = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectelCM_ready,
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Detach Quectel UE on fit node {id}",
                command=RunScript(find_local_embedded_script("nodes.sh"),
                                  "quectel-detach", includes=INCLUDES),
            ) for id, node in nodes_quectel_index.items()
        ]

    ##########
    # initialize k8s on the master node
    init_master = SshJob(
        scheduler=scheduler,
        required=green_light,
        node=master,
        critical=True,
        verbose=verbose,
        label=f"Install and launch k8s on the master {node_master}",
        commands=[
            Run("swapoff -a"),
            Run("hostnamectl set-hostname master-node"),
            Run("kubeadm init --pod-network-cidr=10.244.0.0/16 > /tmp/join_msg.txt"),
            Run("tail -2 /tmp/join_msg.txt > /tmp/join_msg"),
            Run("mkdir -p $HOME/.kube"),
            Run("cp -i /etc/kubernetes/admin.conf $HOME/.kube/config"),
            Run("chown $(id -u):$(id -g) $HOME/.kube/config"),
            Run("kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml"),
            Run("kubectl get pods --all-namespaces"),
        ],
    )
    init_workers = [
        SshJob(
            scheduler=scheduler,
            required=init_master,
            node=node,
            critical=True,
            verbose=verbose,
            label=f"Init k8s on fit node {id} and join the cluster",
            commands=[
                Run("swapoff -a"),
                Run("increase-control-mtu"),
                Run(f"scp -o 'StrictHostKeyChecking no' {fit_master}:/tmp/join_msg /tmp/join_msg"),
                Run("chmod a+x /tmp/join_msg"),
                Run("/tmp/join_msg"),
            ],
        ) for id, node in worker_index.items()
    ]

    # wait 10s for the k8s nodes to set up
    wait_k8nodes_ready = PrintJob(
        "Let k8s set up",
        scheduler=scheduler,
        required=init_workers,
        sleep=10,
        label="sleep 10s for the k8s nodes to settle"
    )

    init_kube5g = SshJob(
        scheduler=scheduler,
        required=wait_k8nodes_ready,
        node=master,
        verbose=verbose,
        label=f"Add oai:ran label to the oai-ran pod on {node_enb} and start the 5G operator pod",
        commands=[
            Run("kubectl get nodes"),
            # label the eNB node to help the k8s scheduler select the right fit node
            Run(f"kubectl label nodes fit{node_enb} oai=ran"),
            Run("kubectl get nodes -Loai"),
            ## retrieve the kube5g operator
            #Run("git clone -b develop [email protected]:mosaic5g/kube5g.git"),
            # install a few dependencies
            Run("apt install -y python3-pip"),
            Run("pip3 install --upgrade pip"),
            Run("pip3 install ruamel.yaml==0.16.12 colorlog==4.6.2"),
            Run("sed -i 's/oairan:v1-1.0-1/oairan:v1-1.0-3/g' /root/kube5g/common/config-manager/conf_global_default.yaml"),
            # specify the R2lab-specific configuration
            Run("cd /root/kube5g/common/config-manager; ./conf-manager.py -s conf_short_r2lab.yaml"),
            # apply the R2lab CRD
            Run("cd /root/kube5g/openshift/kube5g-operator; ./k5goperator.sh -n"),
            # start the kube5g operator pod
            Run("cd /root/kube5g/openshift/kube5g-operator; ./k5goperator.sh container start"),
            Run("kubectl get pods"),
        ],
    )

    # wait 30s for the k8s 5G operator to set up
    wait_k8_5GOp_ready = PrintJob(
        "Let the 5G operator set up",
        scheduler=scheduler,
        required=init_kube5g,
        sleep=30,
        label="wait 30s for the 5G operator pod to settle"
    )

    if only_kube5g:
        finish = SshJob(
            scheduler=scheduler,
            required=wait_k8_5GOp_ready,
            node=master,
            verbose=verbose,
            label="show nodes and pods before leaving",
            commands=[
                Run("kubectl get nodes -Loai"),
                Run("kubectl get pods"),
            ],
        )
    else:
        if disaggregated_cn:
            cn_type = "disaggregated-cn"
            # setup_time = 120
            setup_time = 200
        else:
            cn_type = "all-in-one"
            # setup_time = 60
            setup_time = 140
        flexran_opt = "flexran" if flexran else ""

        run_kube5g = SshJob(
            scheduler=scheduler,
            required=wait_k8_5GOp_ready,
            node=master,
            verbose=verbose,
            label=f"deploy {operator_version} {cn_type} {flexran_opt} pods",
            commands=[
                Run("kubectl get nodes -Loai"),
                Run(f"cd /root/kube5g/openshift/kube5g-operator; ./k5goperator.sh deploy {operator_version} {cn_type} {flexran_opt}"),
                Run("kubectl get pods"),
            ],
        )

        # coffee break -- wait one or two minutes for the k8s 5G pods to set up
        wait_k8_5Gpods_ready = PrintJob(
            "Let all 5G pods set up",
            scheduler=scheduler,
            required=run_kube5g,
            sleep=setup_time,
            label=f"wait {setup_time}s for all 5G pods to settle"
        )

        check_kube5g = SshJob(
            scheduler=scheduler,
            required=wait_k8_5Gpods_ready,
            node=master,
            verbose=verbose,
            label="check which pods are deployed",
            commands=[
                Run("kubectl get nodes -Loai"),
                Run("kubectl get pods"),
            ],
        )

        if drone:
            # the place where runtime variables get stored
            env = Variables()
            # define and run all the services needed to launch the drone app
            # locally in a firefox browser
            drone_service = Service(
                command=f"python /root/mosaic5g/store/sdk/frontend/drone/drone.py"
                        f" --port=8088 --tasks --address=192.168.3.{node_enb}",
                service_id="drone_app",
                verbose=verbose,
            )
            k8s_port9999_fwd_service = Service(
                command=Deferred("kubectl port-forward {{flexran_pod}} 9999:9999 --address 0.0.0.0", env),
                service_id="k8s-port9999-fwd",
                verbose=verbose,
                # somehow this is required for kubectl to run properly
                environ={'KUBECONFIG': '/etc/kubernetes/admin.conf'}
            )
            # we can't use a Service instance on the local box if it's not
            # a Linux box, and we have macs...
            local_port_fwd = (f"ssh -f -N -4"
                              f" -L9999:192.168.3.{node_master}:9999"
                              f" -L8088:192.168.3.{node_enb}:8088"
                              f" -o ExitOnForwardFailure=yes"
                              f" {slicename}@faraday.inria.fr")
            browser_service = Service(
                command=f"sleep 10; {cmd_open} http://127.0.0.1:8088/",
                service_id="drone_browser",
                verbose=verbose,
            )

            run_drone = SshJob(
                scheduler=scheduler,
                required=check_kube5g,
                node=worker_index[node_enb],
                verbose=verbose,
                label=f"Run the drone app on worker node {node_enb} as a service",
                commands=[
                    drone_service.start_command(),
                ],
            )
            get_flexran_podname = SshJob(
                scheduler=scheduler,
                required=check_kube5g,
                node=master,
                verbose=verbose,
                label="Retrieve the name of the FlexRAN pod",
                commands=[
                    # xxx here
                    Run("kubectl get --no-headers=true pods -l app=flexran -o custom-columns=:metadata.name",
                        capture=Capture('flexran_pod', env)),
                ],
            )
            run_k8s_port9999_fwd = SshJob(
                scheduler=scheduler,
                required=get_flexran_podname,
                node=master,
                verbose=verbose,
                label="Run port forwarding on the master node as a service",
                commands=[
                    k8s_port9999_fwd_service.start_command(),
                ],
            )
            # on the local machine we cannot use a Service, as Service relies
            # on systemd-run, which is only available on Linux
            run_local_ports_fwd = SshJob(
                scheduler=scheduler,
                required=check_kube5g,
                node=LocalNode(),
                verbose=verbose,
                label="Forward local ports 8088 and 9999",
                command=Run(local_port_fwd + "&", ignore_outputs=True),
            )
            if run_browser:
                run_local_browser = SshJob(
                    scheduler=scheduler,
                    required=(run_drone, run_k8s_port9999_fwd, run_local_ports_fwd),
                    node=LocalNode(),
                    verbose=verbose,
                    label="Run the browser on the local node in the background",
                    command=browser_service.command + "&",
                )
                phones_requirements = run_local_browser
            else:
                phones_requirements = run_k8s_port9999_fwd
        else:
            phones_requirements = check_kube5g

        ########## test phone(s) connectivity

        sleeps_ran = (20, 25)
        phone_msgs = [f"wait for {sleep}s for eNB to start up before waking up phone{id}"
                      for sleep, id in zip(sleeps_ran, phones)]
        wait_commands = [f"echo {msg}; sleep {sleep}"
                         for msg, sleep in zip(phone_msgs, sleeps_ran)]
        sleeps_phone = (15, 20)
        phone2_msgs = [f"wait for {sleep}s for phone{id} before starting tests"
                       for sleep, id in zip(sleeps_phone, phones)]
        wait2_commands = [f"echo {msg}; sleep {sleep}"
                          for msg, sleep in zip(phone2_msgs, sleeps_phone)]

        job_start_phones = [
            SshJob(
                node=faraday,
                commands=[
                    Run(wait_command),
                    RunScript(find_local_embedded_script("faraday.sh"),
                              f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                              "phone-on", includes=INCLUDES),
                    Run(wait2_command),
                    RunScript(find_local_embedded_script("faraday.sh"),
                              f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                              "phone-check-cx", includes=INCLUDES),
                    RunScript(find_local_embedded_script("faraday.sh"),
                              f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                              "phone-start-app", includes=INCLUDES),
                ],
                label=f"turn off airplane mode on phone {id}",
                required=phones_requirements,
                scheduler=scheduler)
            for id, wait_command, wait2_command
            in zip(phones, wait_commands, wait2_commands)
        ]

        if quectel:
            # wait 30s more before attaching the Quectel device(s)
            wait_before_attach_quectel = PrintJob(
                "Wait again 30s before attaching the Quectel device(s)",
                scheduler=scheduler,
                required=(job_start_phones, check_kube5g, detach_quectel_nodes),
                sleep=30,
                label="sleep 30s before attaching the Quectel device(s)"
            )
            job_attach_quectel = [
                SshJob(
                    scheduler=scheduler,
                    required=wait_before_attach_quectel,
                    node=node,
                    critical=True,
                    verbose=verbose,
                    label=f"Attach Quectel UE on fit node {id}",
                    command=RunScript(find_local_embedded_script("nodes.sh"),
                                      "quectel-attach", includes=INCLUDES),
                ) for id, node in nodes_quectel_index.items()
            ]
            # wait 30s for the Quectel connection(s) to set up
            wait_quectel_cx_ready = PrintJob(
                "Let the Quectel connection(s) set up",
                scheduler=scheduler,
                required=job_attach_quectel,
                sleep=30,
                label="sleep 30s for the Quectel connection(s) to set up"
            )
            test_quectel_cx = [
                SshJob(
                    scheduler=scheduler,
                    required=wait_quectel_cx_ready,
                    node=node,
                    critical=False,
                    verbose=verbose,
                    label=f"Check the Quectel cx on fit node {id}",
                    command=RunScript(find_local_embedded_script("nodes.sh"),
                                      "check-quectel-cx", includes=INCLUDES),
                ) for id, node in nodes_quectel_index.items()
            ]

    ##########
    # update the .dot and .png files for illustration purposes
    scheduler.check_cycles()
    name = "deploy-kube5g"
    print(10*'*', 'See main scheduler in', scheduler.export_as_pngfile(name))

    # orchestrate the scheduler jobs
    if verbose:
        scheduler.list()
    if dry_run:
        return True
    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print(f"RUN OK, you can now log on master node {fit_master} to manually change the scenario")
    print(80*'*')
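# The 'green_light' idiom above deserves a note: 'required' accepts a single
# job as well as a collection, so the downstream jobs work unchanged whether
# or not the image-loading jobs were created. A minimal sketch with plain
# asynciojobs jobs (illustrative only; co_sleep stands in for real work):
import asyncio
from asynciojobs import Scheduler, Job

async def co_sleep(duration, message):
    await asyncio.sleep(duration)
    print(message)

sketch_scheduler = Scheduler()
check = Job(co_sleep(.1, "lease checked"), label="check",
            scheduler=sketch_scheduler)

load_images = True
green_light = check
if load_images:
    # fan-out: both load jobs wait on the lease check...
    green_light = [
        Job(co_sleep(.2, f"load {i}"), label=f"load{i}",
            required=check, scheduler=sketch_scheduler)
        for i in (1, 2)
    ]
# ...and the next stage fans in on whatever green_light ended up being,
# a single job or a list of jobs
init = Job(co_sleep(.1, "init"), label="init",
           required=green_light, scheduler=sketch_scheduler)
sketch_scheduler.orchestrate()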
def run(*,
        # the pieces to use
        slice, hss, epc, enb, phones,
        e3372_ues, oai_ues, gnuradios,
        e3372_ue_xterms, oai_ue_xterms, gnuradio_xterms,
        # boolean flags
        load_nodes, skip_reset_usb, oscillo,
        # the images to load
        image_gw, image_enb, image_oai_ue, image_e3372_ue, image_gnuradio,
        # miscell
        n_rb, verbose, dry_run):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slice : something like [email protected]
    * hss : 04
    * epc : 03
    * enb : 23
    * phones : list of indices of phones to use
    * e3372_ues : list of nodes to use as UEs using the e3372
    * oai_ues : list of nodes to use as UEs using OAI
    * gnuradios : list of nodes to load with a gnuradio image
    * image_* : the names of the images to load on the various nodes

    Plus
    * load_nodes : whether to load images or not - in which case
      image_gw, image_enb and image_* are used to tell the image names
    * skip_reset_usb : the USRP board will be reset as well unless this is set
    """

    # what argparse knows as a slice actually is a gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slice)
    gwnode = SshNode(hostname=gwhost, username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = hssname, epcname, enbname = [
        r2lab_hostname(x) for x in (hss, epc, enb)
    ]

    optional_ids = e3372_ues + oai_ues + gnuradios + \
                   e3372_ue_xterms + oai_ue_xterms + gnuradio_xterms

    hssnode, epcnode, enbnode = [
        SshNode(gateway=gwnode, hostname=hostname, username='******',
                formatter=TimeColonFormatter(verbose=verbose),
                debug=verbose)
        for hostname in hostnames
    ]

    sched = Scheduler(verbose=verbose)

    # preparation
    job_check_for_lease = SshJob(
        node=gwnode,
        command=["rhubarbe", "leases", "--check"],
        label="check we have a current lease",
        scheduler=sched,
    )

    # turn off all nodes ...
    turn_off_command = ["rhubarbe", "off", "-a"]
    # ... except our 3 nodes and the optional ones
    turn_off_command += [f"~{x}" for x in [hss, epc, enb] + optional_ids]

    # only do the turn-off thing when load_nodes is set
    if load_nodes:
        job_off_nodes = SshJob(  # pylint: disable=w0612
            node=gwnode,
            # switch off all nodes but the ones we use
            command=turn_off_command,
            label="turn off unused nodes",
            required=job_check_for_lease,
            scheduler=sched,
        )

    # actually run this on the gateway, not on the macphone;
    # the ssh keys are stored on the gateway, and we do not yet have
    # the tools to leverage such remote keys
    job_stop_phones = [
        SshJob(  # pylint: disable=w0612
            node=gwnode,
            command=RunScript(
                # script
                find_local_embedded_script("faraday.sh"),
                # arguments
                f"macphone{id}", "r2lab-embedded/shell/macphone.sh", "phone-off",
                # options
                includes=INCLUDES),
            label=f"put phone{id} in airplane mode",
            required=job_check_for_lease,
            scheduler=sched,
        ) for id in phones
    ]

    # prepare the image-loading phase
    # this will be a dict of items imagename -> ids
    to_load = defaultdict(list)
    to_load[image_gw] += [hss, epc]
    to_load[image_enb] += [enb]
    if e3372_ues:
        to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        to_load[image_oai_ue] += oai_ues
    if oai_ue_xterms:
        to_load[image_oai_ue] += oai_ue_xterms
    if gnuradios:
        to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        to_load[image_gnuradio] += gnuradio_xterms

    prep_job_by_node = {}
    for image, nodes in to_load.items():
        commands = []
        if load_nodes:
            commands.append(Run("rhubarbe", "usrpoff", *nodes))
            commands.append(Run("rhubarbe", "load", "-i", image, *nodes))
            commands.append(Run("rhubarbe", "usrpon", *nodes))
        # always do this
        commands.append(Run("rhubarbe", "wait", "-t", 120, *nodes))
        job = SshJob(
            node=gwnode,
            commands=commands,
            label=f"Prepare node(s) {nodes}",
            required=job_check_for_lease,
            scheduler=sched,
        )
        for node in nodes:
            prep_job_by_node[node] = job

    # start services
    job_service_hss = SshJob(
        node=hssnode,
        command=RunScript(find_local_embedded_script("oai-hss.sh"),
                          "run-hss", epc, includes=INCLUDES),
        label="start HSS service",
        required=prep_job_by_node[hss],
        scheduler=sched,
    )

    delay = 15
    job_service_epc = SshJob(
        node=epcnode,
        commands=[
            Run(f"echo giving HSS a headstart of {delay}s to warm up; sleep {delay}"),
            RunScript(find_local_embedded_script("oai-epc.sh"),
                      "run-epc", hss, includes=INCLUDES),
        ],
        label="start EPC services",
        required=prep_job_by_node[epc],
        scheduler=sched,
    )

    # eNodeB
    job_warm_enb = SshJob(
        node=enbnode,
        commands=[
            RunScript(find_local_embedded_script("oai-enb.sh"),
                      "warm-enb", epc, n_rb, not skip_reset_usb,
                      includes=INCLUDES),
        ],
        label="Warm eNB",
        required=prep_job_by_node[enb],
        scheduler=sched,
    )

    enb_requirements = (job_warm_enb, job_service_hss, job_service_epc)

    # wait for everything to be ready, and add an extra grace delay
    grace = 30 if load_nodes else 10
    grace_delay = SshJob(
        node=LocalNode(formatter=TimeColonFormatter()),
        command=f"echo Allowing grace of {grace} seconds; sleep {grace}",
        required=enb_requirements,
        scheduler=sched,
    )

    # start the softmodem
    job_service_enb = SshJob(  # pylint: disable=w0612
        node=enbnode,
        # run-enb expects the id of the epc as a parameter
        # n_rb is the number of resource blocks for DL, either 25 or 50
        commands=[
            RunScript(find_local_embedded_script("oai-enb.sh"),
                      "run-enb", oscillo,
                      includes=INCLUDES, x11=oscillo),
        ],
        label="start softmodem on eNB",
        required=grace_delay,
        scheduler=sched,
    )

    # run the experiment per se

    # manage phone(s): this starts at the same time as the eNB, but a
    # headstart is needed so that the eNB actually is ready to serve
    delay = 12
    msg = f"wait for {delay}s for the eNodeB to start up"
    wait_command = f"echo {msg}; sleep {delay}"

    job_start_phones = [
        SshJob(  # pylint: disable=w0612
            node=gwnode,
            commands=[
                Run(wait_command),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-on", includes=INCLUDES),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-start-app", includes=INCLUDES),
            ],
            label="start Nexus phone and speedtest app",
            required=grace_delay,
            scheduler=sched,
        ) for id in phones
    ]

    job_ping_phones_from_epc = [
        SshJob(  # pylint: disable=w0612
            node=epcnode,
            commands=[
                Run("sleep 10"),
                Run(f"ping -c 100 -s 100 -i .05 172.16.0.{id+1} &> /root/ping-phone"),
            ],
            label="ping Nexus phone from EPC",
            critical=False,
            required=job_start_phones,
        ) for id in phones
    ]

    # xterm nodes
    colors = ["wheat", "gray", "white", "darkolivegreen"]
    xterms = e3372_ue_xterms + oai_ue_xterms + gnuradio_xterms

    for xterm, color in zip(xterms, itertools.cycle(colors)):
        xterm_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(xterm),
                             username='******',
                             formatter=TimeColonFormatter(verbose=verbose),
                             debug=verbose)
        SshJob(
            node=xterm_node,
            command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*"
                        f" -bg {color} -geometry 90x10",
                        x11=True),
            label=f"xterm on node {xterm_node.hostname}",
            required=prep_job_by_node[xterm],
            scheduler=sched,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
        )

    # remove dangling requirements - if any -
    # should not be needed but won't hurt either
    sched.sanitize()

    print(20 * "*", "nodes usage summary")
    if load_nodes:
        for image, nodes in to_load.items():
            for node in nodes:
                print(f"node {node} : {image}")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10 * "*", "phones usage summary")
    if phones:
        for phone in phones:
            print(f"Using phone{phone}")
    else:
        print("No phone involved")

    sched.check_cycles()
    # update the .dot and .png files for illustration purposes
    if verbose or dry_run:
        sched.list()
    name = "scenario-load" if load_nodes else "scenario"
    sched.export_as_dotfile(f"{name}.dot")
    os.system(f"dot -Tpng {name}.dot -o {name}.png")
    print(f"(Over)wrote {name}.png")

    if dry_run:
        return False

    if verbose:
        input('OK ? - press control C to abort ? ')

    if not sched.orchestrate():
        print(f"RUN KO : {sched.why()}")
        sched.debrief()
        return False
    else:
        print("RUN OK")
        return True
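# The to_load / prep_job_by_node dance above is a small but handy pattern:
# group nodes by image, create one job per image, then invert the mapping so
# that later jobs can simply say required=prep_job_by_node[node]. A
# stripped-down sketch, with strings standing in for SshJob objects and
# hypothetical image names and node ids:
from collections import defaultdict

to_load = defaultdict(list)
to_load["gw-image"] += [4, 3]      # hss and epc share one image
to_load["enb-image"] += [23]

prep_job_by_node = {}
for image, nodes in to_load.items():
    job = f"<job loading {image} on {nodes}>"   # one job per image
    for node in nodes:
        prep_job_by_node[node] = job            # invert: node -> its prep job

assert prep_job_by_node[4] is prep_job_by_node[3]   # same underlying job
print(prep_job_by_node)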
def run(*, gateway, slicename,
        nodes, node_epc, node_enb, quectel_nodes, phones,
        verbose, dry_run,
        load_images, epc_image, enb_image, quectel_image):
    """
    Launch the latest OAICI EPC and eNB docker images on R2lab

    Arguments:
        slicename: the Unix login name (slice name) to enter the gateway
        quectel_nodes: list of indices of Quectel UE nodes to use
        phones: list of indices of phones to use
        nodes: a list of node ids to run the scenario on; strings or ints are OK
        node_epc: the node id on which to run the EPC
        node_enb: the node id for the eNB, which is connected to the B210/eNB-duplexer
    """

    quectel_ids = quectel_nodes[:]
    quectel = len(quectel_ids) > 0

    faraday = SshNode(hostname=default_gateway, username=slicename,
                      verbose=verbose, formatter=TimeColonFormatter())

    epc = SshNode(gateway=faraday, hostname=fitname(node_epc),
                  username="******",
                  verbose=verbose, formatter=TimeColonFormatter())

    node_index = {
        id: SshNode(gateway=faraday, hostname=fitname(id), username="******",
                    formatter=TimeColonFormatter(), verbose=verbose)
        for id in nodes
    }
    nodes_quectel_index = {
        id: SshNode(gateway=faraday, hostname=fitname(id), username="******",
                    formatter=TimeColonFormatter(), verbose=verbose)
        for id in quectel_nodes
    }
    allnodes = nodes + quectel_nodes

    fit_epc = fitname(node_epc)
    fit_enb = fitname(node_enb)

    # the global scheduler
    scheduler = Scheduler(verbose=verbose)

    ##########
    check_lease = SshJob(
        scheduler=scheduler,
        node=faraday,
        critical=True,
        verbose=verbose,
        command=Run("rhubarbe leases --check"),
    )

    green_light = check_lease

    if load_images:
        green_light = [
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=True,
                verbose=verbose,
                label=f"Load image {epc_image} on {fit_epc}",
                commands=[
                    Run(f"rhubarbe load {node_epc} -i {epc_image}"),
                    Run(f"rhubarbe wait {node_epc}"),
                    RunScript("oaici.sh", "init-epc", node_epc, node_enb),
                ]),
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=True,
                verbose=verbose,
                label=f"Load image {enb_image} on {fit_enb}",
                commands=[
                    # if the USRP is on, loading could be problematic...
                    Run(f"rhubarbe usrpoff {node_enb}"),
                    Run(f"rhubarbe load {node_enb} -i {enb_image}"),
                    Run(f"rhubarbe wait {node_enb}"),
                    # ensure a reset of the USRP on the eNB node
                    Run(f"rhubarbe usrpon {node_enb}"),
                    RunScript("oaici.sh", "init-enb", node_enb, node_epc),
                ],
            ),
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=False,
                verbose=verbose,
                label="turning off unused nodes",
                commands=[
                    Run("rhubarbe bye --all " + "".join(f"~{x} " for x in allnodes))
                ])
        ]

    if quectel:
        prepare_quectel = SshJob(
            scheduler=scheduler,
            required=check_lease,
            node=faraday,
            critical=True,
            verbose=verbose,
            label=f"Load image {quectel_image} on Quectel UE nodes",
            commands=[
                Run("rhubarbe", "usrpoff", *quectel_ids),
                Run("rhubarbe", "load", *quectel_ids, "-i", quectel_image),
                Run("rhubarbe", "wait", *quectel_ids),
                Run("rhubarbe", "usrpon", *quectel_ids),
            ],
        )

    ##########
    # prepare the Quectel UE nodes
    if quectel:
        # wait 30s for the Quectel modules to show up
        wait_quectel_ready = PrintJob(
            "Let Quectel modules show up",
            scheduler=scheduler,
            required=prepare_quectel,
            sleep=30,
            label="sleep 30s for the Quectel modules to show up")
        # run the Quectel connection manager as a service on each Quectel UE node
        quectelCM_service = Service(
            command="quectel-CM -s oai.ipv4 -4",
            service_id="QuectelCM",
            verbose=verbose,
        )
        init_quectel_nodes = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectel_ready,
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Init Quectel UE on fit node {id}",
                commands=[
                    RunScript(find_local_embedded_script("nodes.sh"),
                              "check-quectel-on", includes=INCLUDES),
                    quectelCM_service.start_command(),
                ],
            ) for id, node in nodes_quectel_index.items()
        ]
        # wait 20s for the Quectel connection manager to start up
        wait_quectelCM_ready = PrintJob(
            "Let QuectelCM start up",
            scheduler=scheduler,
            required=init_quectel_nodes,
            sleep=20,
            label="sleep 20s for the Quectel connection manager(s) to start up")
        detach_quectel_nodes = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectelCM_ready,
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Detach Quectel UE on fit node {id}",
                command=RunScript(find_local_embedded_script("nodes.sh"),
                                  "quectel-detach", includes=INCLUDES),
            ) for id, node in nodes_quectel_index.items()
        ]

    ##########
    # launch the EPC
    start_epc = SshJob(
        scheduler=scheduler,
        required=green_light,
        node=faraday,
        critical=True,
        verbose=verbose,
        label=f"Launch EPC on {fit_epc}",
        commands=[
            RunScript("oaici.sh", "start-epc", node_epc),
        ],
    )
    # launch the eNB
    if quectel:
        req = (start_epc, detach_quectel_nodes)
    else:
        req = start_epc
    start_enb = SshJob(
        scheduler=scheduler,
        required=req,
        node=faraday,
        critical=True,
        verbose=verbose,
        label=f"Launch eNB on {fit_enb}",
        commands=[
            RunScript("oaici.sh", "start-enb", node_enb),
        ],
    )
    wait_ran_ready = PrintJob(
        "Let the eNB start up",
        scheduler=scheduler,
        required=start_enb,
        sleep=50,
        label="sleep 50s for the eNB to start up")

    ########## test phone(s) connectivity

    sleeps_ran = (0, 10)
    phone_msgs = [f"wait again for {sleep}s before waking up phone{id}"
                  for sleep, id in zip(sleeps_ran, phones)]
    wait_commands = [f"echo {msg}; sleep {sleep}"
                     for msg, sleep in zip(phone_msgs, sleeps_ran)]
    sleeps_phone = (10, 10)
    phone2_msgs = [f"wait for {sleep}s for phone{id} before starting tests"
                   for sleep, id in zip(sleeps_phone, phones)]
    wait2_commands = [f"echo {msg}; sleep {sleep}"
                      for msg, sleep in zip(phone2_msgs, sleeps_phone)]

    job_start_phones = [
        SshJob(
            node=faraday,
            commands=[
                Run(wait_command),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-on", includes=INCLUDES),
                Run(wait2_command),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-check-cx", includes=INCLUDES),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-start-app", includes=INCLUDES),
            ],
            label=f"turn off airplane mode on phone {id}",
            required=wait_ran_ready,
            scheduler=scheduler)
        for id, wait_command, wait2_command
        in zip(phones, wait_commands, wait2_commands)
    ]

    if quectel:
        job_attach_quectel = [
            SshJob(
                scheduler=scheduler,
                required=(job_start_phones, wait_ran_ready, detach_quectel_nodes),
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Attach Quectel UE on fit node {id}",
                command=RunScript(find_local_embedded_script("nodes.sh"),
                                  "quectel-attach", includes=INCLUDES),
            ) for id, node in nodes_quectel_index.items()
        ]
        # wait 30s for the Quectel connection(s) to set up
        wait_quectel_cx_ready = PrintJob(
            "Let the Quectel connection(s) set up",
            scheduler=scheduler,
            required=job_attach_quectel,
            sleep=30,
            label="sleep 30s for the Quectel connection(s) to set up")
        test_quectel_cx = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectel_cx_ready,
                node=node,
                verbose=verbose,
                label=f"Check the Quectel cx on fit node {id}",
                command=RunScript(find_local_embedded_script("nodes.sh"),
                                  "check-quectel-cx", includes=INCLUDES),
            ) for id, node in nodes_quectel_index.items()
        ]

    ##########
    # update the .dot and .png files for illustration purposes
    scheduler.check_cycles()
    name = "deploy-oaici"
    print(10 * '*', 'See main scheduler in', scheduler.export_as_pngfile(name))

    # orchestrate the scheduler jobs
    if verbose:
        scheduler.list()
    if dry_run:
        return True
    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print(f"RUN OK, you can now log on the EPC node {fit_epc}"
          f" and the eNB node {fit_enb} to check the logs")
    print(80 * '*')
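# The phone wake-up jobs above stagger their delays by zipping per-phone
# sleep durations with phone ids. Since zip() stops at the shorter input,
# the two-element tuples silently cap the scenario at two phones. A small
# pure-Python sketch of the mechanism:
phones = [1, 2]
sleeps_ran = (0, 10)
wait_commands = [
    f"echo wait {sleep}s before waking up phone{id}; sleep {sleep}"
    for sleep, id in zip(sleeps_ran, phones)
]
for command in wait_commands:
    print(command)
# echo wait 0s before waking up phone1; sleep 0
# echo wait 10s before waking up phone2; sleep 10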
def run(*,                      # pylint: disable=r0912, r0914, r0915
        # the pieces to use
        slicename, cn, ran, phones,
        e3372_ues, oai_ues, gnuradios,
        e3372_ue_xterms, gnuradio_xterms, ns3,
        # boolean flags
        load_nodes, reset_usb, oscillo, tcp_streaming,
        # the images to load
        image_cn, image_ran, image_oai_ue, image_e3372_ue,
        image_gnuradio, image_T_tracer, image_ns3,
        # miscell
        n_rb, nodes_left_alone, T_tracer, publisher_ip, verbose, dry_run):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slicename : something like [email protected]
    * cn : 7
    * ran : 23
    * ns3 : 32
    * phones : list of indices of phones to use
    * e3372_ues : list of nodes to use as UEs using the e3372
    * oai_ues : list of nodes to use as UEs using OAI
    * gnuradios : list of nodes to load with a gnuradio image
    * T_tracer : list of nodes to load with a tracer image
    * image_* : the names of the images to load on the various nodes

    Plus
    * load_nodes : whether to load images or not - in which case
      image_cn, image_ran and image_* are used to tell the image names
    * reset_usb : the USRP board will be reset when this is set
    * tcp_streaming : set up the TCP streaming scenario
    * publisher_ip : IP address of the publisher
    """

    # what argparse knows as a slice actually is about the gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slicename)
    gwnode = SshNode(hostname=gwhost, username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = [r2lab_hostname(x) for x in (cn, ran)]

    cnnode, rannode = [
        SshNode(gateway=gwnode, hostname=hostname, username='******',
                formatter=TimeColonFormatter(verbose=verbose),
                debug=verbose)
        for hostname in hostnames
    ]

    scheduler = Scheduler(verbose=verbose, label="CORE EXP")

    ########## prepare the image-loading phase
    # focus on the experiment, and use prepare_testbed_scheduler later on
    # to prepare the testbed; all we need to do at this point is compute
    # a mapping dict: image -> list-of-nodes
    images_to_load = defaultdict(list)
    images_to_load[image_cn] += [cn]
    images_to_load[image_ran] += [ran]
    if e3372_ues:
        images_to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        images_to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        images_to_load[image_oai_ue] += oai_ues
    if gnuradios:
        images_to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        images_to_load[image_gnuradio] += gnuradio_xterms
    if T_tracer:
        images_to_load[image_T_tracer] += T_tracer
    if ns3:
        images_to_load[image_ns3] += [ns3]

    # start the core network
    job_start_cn = SshJob(
        node=cnnode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "journal --vacuum-time=1s", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "configure", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "start", includes=INCLUDES),
            tcpdump_cn_service.start_command(),
        ],
        label="start CN service",
        scheduler=scheduler,
    )

    # prepare the eNodeB
    reset_option = "-u" if reset_usb else ""
    job_warm_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "journal --vacuum-time=1s", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "warm-up", reset_option, includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "configure -b", n_rb, cn, includes=INCLUDES),
        ],
        label="Configure eNB",
        scheduler=scheduler,
    )

    ran_requirements = [job_start_cn, job_warm_ran]

    ###
    if oai_ues:
        # prepare the OAI UEs
        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            job_warm_ues = [
                SshJob(
                    node=ue_node,
                    commands=[
                        RunScript(find_local_embedded_script("nodes.sh"),
                                  "git-pull-r2lab", includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "journal --vacuum-time=1s", includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "warm-up", reset_option, includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "configure -b", n_rb, includes=INCLUDES),
                    ],
                    label=f"Configure OAI UE on fit{ue:02d}",
                    scheduler=scheduler)
            ]
            ran_requirements.append(job_warm_ues)

    ###
    if not load_nodes and phones:
        job_turn_off_phones = SshJob(
            node=gwnode,
            commands=[
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{phone} phone-off")
                for phone in phones
            ],
            scheduler=scheduler,
        )
        ran_requirements.append(job_turn_off_phones)

    # wait for everything to be ready, and add an extra grace delay
    grace = 5
    grace_delay = PrintJob(
        f"Allowing grace of {grace} seconds",
        sleep=grace,
        required=ran_requirements,
        scheduler=scheduler,
        label=f"settle for {grace}s",
    )

    # optionally start the T_tracer
    if T_tracer:
        job_start_T_tracer = SshJob(  # pylint: disable=w0612
            node=SshNode(gateway=gwnode, hostname=r2lab_hostname(T_tracer[0]),
                         username='******',
                         formatter=TimeColonFormatter(verbose=verbose),
                         debug=verbose),
            commands=[
                Run(f"/root/trace {ran}", x11=True),
            ],
            label="start T_tracer service",
            required=ran_requirements,
            scheduler=scheduler,
        )
        # ran_requirements.append(job_start_T_tracer)

    # start services
    graphical_option = "-x" if oscillo else ""
    graphical_message = "graphical" if oscillo else "regular"
    tracer_option = " -T" if T_tracer else ""

    # we use a Python variable for consistency,
    # even though it is not used down the road
    _job_service_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "start", graphical_option, tracer_option,
                      includes=INCLUDES, x11=oscillo),
        ],
        label=f"start {graphical_message} softmodem on eNB",
        required=grace_delay,
        scheduler=scheduler,
    )

    ########## run the experiment per se

    # manage phone(s) and OAI UE(s): this starts at the same time as the
    # eNB, but a headstart is needed so that the eNB actually is ready to serve
    sleeps = [20, 30]
    phone_msgs = [f"wait for {sleep}s for eNB to start up before waking up phone{id}"
                  for sleep, id in zip(sleeps, phones)]
    wait_commands = [f"echo {msg}; sleep {sleep}"
                     for msg, sleep in zip(phone_msgs, sleeps)]

    job_start_phones = [
        SshJob(
            node=gwnode,
            commands=[
                Run(wait_command),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-on", includes=INCLUDES),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-start-app", includes=INCLUDES),
            ],
            label=f"turn off airplane mode on phone {id}",
            required=grace_delay,
            scheduler=scheduler)
        for id, wait_command in zip(phones, wait_commands)
    ]

    if oai_ues:
        delay = 25
        for ue in oai_ues:
            msg = f"wait for {delay}s for eNB to start up before running UE on node fit{ue:02d}"
            wait_command = f"echo {msg}; sleep {delay}"
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            job_start_ues = [
                SshJob(
                    node=ue_node,
                    commands=[
                        Run(wait_command),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "start", includes=INCLUDES),
                    ],
                    label=f"Start OAI UE on fit{ue:02d}",
                    required=grace_delay,
                    scheduler=scheduler)
            ]
            delay += 20

        for ue in oai_ues:
            environ = {'USER': '******'}
            cefnet_ue_service = Service("cefnetd", service_id="cefnet",
                                        environ=environ)
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            msg = f"Wait 60s and then ping faraday gateway from UE on fit{ue:02d}"
            # note the trailing "; " so that further commands can be appended
            ue_commands = f"echo {msg}; sleep 60; ping -c 5 -I oip1 faraday.inria.fr; "
            if tcp_streaming:
                # TCP streaming scenario
                if load_nodes:
                    ue_commands += "sysctl -w net.ipv4.ip_forward=1;"
                    ue_commands += "ip route add 10.1.1.0/24 via 192.168.2.32 dev data"
                job_setup_ue = [
                    SshJob(
                        node=ue_node,
                        commands=[
                            Run(ue_commands,
                                label="test UE link and set up routing for TCP streaming"),
                        ],
                        label=(f"ping faraday gateway from UE on fit{ue:02d}"
                               f" and set up routing for the TCP streaming scenario"),
                        critical=True,
                        required=job_start_ues,
                        scheduler=scheduler)
                ]
            else:
                # Cefore streaming scenario
                if load_nodes:
                    ue_commands += "sysctl -w net.ipv4.ip_forward=1;"
                    ue_commands += f"ip route add {publisher_ip}/32 dev oip1;"
                    ue_commands += "ip route add 10.1.1.0/24 via 192.168.2.32 dev data;"
                    ue_commands += "iptables -t nat -A POSTROUTING -s 10.1.1.2/32 -j SNAT --to-source 172.16.0.2;"
                    ue_commands += "iptables -t nat -A PREROUTING -d 172.16.0.2 -j DNAT --to-destination 10.1.1.2;"
                    ue_commands += "iptables -A FORWARD -d 10.1.1.2/32 -i oip1 -j ACCEPT;"
                    ue_commands += f"iptables -A FORWARD -d {publisher_ip}/32 -i data -j ACCEPT;"
                    ue_commands += "ip rule del from all to 172.16.0.2 lookup 201;"
                    ue_commands += "ip rule del from 172.16.0.2 lookup 201;"
                    ue_commands += "ip rule add from 10.1.1.2 lookup lte prio 32760;"
                    ue_commands += "ip rule add from all to 172.16.0.2 lookup lte prio 32761;"
                    ue_commands += "ip rule add from all fwmark 0x1 lookup lte prio 32762;"
                    ue_commands += "ip route add table lte 10.1.1.0/24 via 192.168.2.32 dev data;"
                # ue_commands += "killall cefnetd || true"
                job_setup_ue = [
                    SshJob(
                        node=ue_node,
                        commands=[
                            Run(ue_commands,
                                label="test UE link and set up routing for Cefore streaming"),
                            cefnet_ue_service.start_command(),
                        ],
                        label=(f"ping faraday gateway from fit{ue:02d} UE and set up"
                               f" routing for the Cefore streaming scenario"),
                        # the old cefnetd is not killed when running the new one...
                        critical=True,
                        required=job_start_ues,
                        scheduler=scheduler)
                ]

    if ns3:
        ns3_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ns3),
                           username='******',
                           formatter=TimeColonFormatter(verbose=verbose),
                           debug=verbose)
        msg = (f"Wait for the UE node to be ready before running"
               f" the streaming scenario with ns-3 on fit{ns3:02d}")
        if load_nodes:
            job_prepare_ns3_node = [
                SshJob(
                    node=ns3_node,
                    commands=[
                        Run("turn-on-data"),
                        Run("ifconfig data promisc up"),
                        # the route may already be there, so let the command fail
                        Run("ip route add default via 192.168.2.6 dev data || true"),
                        Run("sysctl -w net.ipv4.ip_forward=1"),
                    ],
                    label=f"setup routing on ns-3 fit{ns3:02d} node",
                    critical=True,
                    required=job_setup_ue,
                    scheduler=scheduler)
            ]
            ns3_requirements = job_prepare_ns3_node
        else:
            ns3_requirements = job_setup_ue

        if not tcp_streaming:
            environ = {'USER': '******'}
            cefnet_ns3_service = Service("cefnetd", service_id="cefnet",
                                         environ=environ)
            job_start_cefnet_on_cn = [
                SshJob(
                    node=cnnode,
                    commands=[
                        Run(f"echo 'ccn:/streaming tcp {publisher_ip}:80'"
                            f" > /usr/local/cefore/cefnetd.conf"),
                        # Run("killall cefnetd || true"),
                        # not done by default with service.start_command()
                        cefnet_ns3_service.start_command(),
                    ],
                    label=f"Start Cefnet on EPC running at fit{cn:02d}",
                    # the old cefnetd is not killed when running the new one...
                    critical=True,
                    required=ns3_requirements,
                    scheduler=scheduler,
                )
            ]

    # ditto
    _job_ping_phones_from_cn = [
        SshJob(
            node=cnnode,
            commands=[
                Run("sleep 20"),
                Run(f"ping -c 100 -s 100 -i .05 172.16.0.{id+1} &> /root/ping-phone{id}"),
            ],
            label=f"ping phone {id} from core network",
            critical=False,
            required=job_start_phones,
            scheduler=scheduler)
        for id in phones
    ]

    ########## xterm nodes
    colors = ("wheat", "gray", "white", "darkolivegreen")
    xterms = e3372_ue_xterms + gnuradio_xterms

    for xterm, color in zip(xterms, cycle(colors)):
        xterm_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(xterm),
                             username='******',
                             formatter=TimeColonFormatter(verbose=verbose),
                             debug=verbose)
        SshJob(
            node=xterm_node,
            command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*",
                        f" -bg {color} -geometry 90x10",
                        x11=True),
            label=f"xterm on node {xterm_node.hostname}",
            scheduler=scheduler,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
        )

    # remove dangling requirements - if any -
    # should not be needed but won't hurt either
    scheduler.sanitize()

    ##########
    print(10*"*", "nodes usage summary")
    if load_nodes:
        for image, nodes in images_to_load.items():
            for node in nodes:
                print(f"node fit{node:02d} : {image}")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10*"*", "phones usage summary")
    if phones:
        for phone in phones:
            print(f"Using phone{phone}")
    else:
        print("No phone involved")
    if nodes_left_alone:
        print(f"Ignore following fit nodes: {nodes_left_alone}")
    print(f"Publisher IP is {publisher_ip}")
    if tcp_streaming:
        print("Run streaming scenario with TCP")
    else:
        print("Run streaming scenario with Cefore")

    # wrap the scheduler into a global scheduler that prepares the testbed
    scheduler = prepare_testbed_scheduler(
        gwnode, load_nodes, scheduler, images_to_load, nodes_left_alone)

    scheduler.check_cycles()
    # update the .dot and .png files for illustration purposes
    name = "cefore-load" if load_nodes else "cefore"
    print(10*'*', 'See main scheduler in', scheduler.export_as_pngfile(name))

    if verbose:
        scheduler.list()
    if dry_run:
        return True
    if verbose:
        input('OK ? - press control C to abort ? ')

    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print("RUN OK")
    print(80*'*')

    if tcp_streaming:
        # TCP streaming scenario
        print(f"Now it's time to run the ns-3 script on node fit{ns3:02d}")
        print(f"root@fit{ns3:02d}:~# /root/NS3/source/ns-3-dce/waf --run dce-tcp-test")
        print("Then, run iperf on the publisher host:")
        print("yourlogin@publisher:~# iperf -s -P 1 -p 80")
        print(f"Log file will be available on fit{ns3:02d} at:")
        print("    /root/NS3/source/ns-3-dce/files-4/var/log/56884/stdout")
    else:
        # Cefore streaming scenario
        print("Now, if not already done, copy the cefnetd and cefputfile binaries to your publisher host")
        print("login@your_host:r2lab-demos/cefore# scp bin/cefnetd yourlogin@publisher_node:/usr/local/sbin")
        print("login@your_host:r2lab-demos/cefore# scp bin/cefputfile yourlogin@publisher_node:/usr/local/bin")
        print(f"After that, run the following command on the ns-3 fit{ns3:02d} node:")
        print(f"root@fit{ns3:02d}:~# /root/NS3/source/ns-3-dce/waf --run dce-cefore-test ")
        print("Then, run on the publisher host:")
        print("yourlogin@publisher:~# cefnetd")
        print("yourlogin@publisher:~# cefputfile ccn:/streaming/test -f ./[file-name] -r [1 <= streaming_rate <= 32 (Mbps)]")
        print(f"Log file will be available on fit{ns3:02d} at:")
        print("    /root/NS3/source/ns-3-dce/files-3/tmp/cefgetstream-thuputLog-126230400110000")
    print(80*'*')
    return True
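# Building ue_commands above by concatenating ';'-terminated fragments is
# easy to get wrong: forget one separator and two shell commands silently
# fuse into garbage. Joining a list is a more robust way to compose such a
# one-liner; a small sketch reusing commands from the scenario above:
fragments = [
    "echo testing UE link",
    "sleep 60",
    "ping -c 5 -I oip1 faraday.inria.fr",
    "sysctl -w net.ipv4.ip_forward=1",
    "ip route add 10.1.1.0/24 via 192.168.2.32 dev data",
]
ue_commands = "; ".join(fragments)
print(ue_commands)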
def run(*,                      # pylint: disable=r0912, r0914, r0915
        # the pieces to use
        slicename, cn, ran, phones,
        e3372_ues, oai_ues, gnuradios,
        e3372_ue_xterms, gnuradio_xterms,
        # boolean flags
        load_nodes, reset_usb, oscillo,
        # the images to load
        image_cn, image_ran, image_oai_ue, image_e3372_ue,
        image_gnuradio, image_T_tracer,
        # miscell
        n_rb, nodes_left_alone, T_tracer, verbose, dry_run):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slicename : something like [email protected]
    * cn : 7
    * ran : 23
    * phones : list of indices of phones to use
    * e3372_ues : list of nodes to use as UEs using the e3372
    * oai_ues : list of nodes to use as UEs using OAI
    * gnuradios : list of nodes to load with a gnuradio image
    * T_tracer : list of nodes to load with a tracer image
    * image_* : the names of the images to load on the various nodes

    Plus
    * load_nodes : whether to load images or not - in which case
      image_cn, image_ran and image_* are used to tell the image names
    * reset_usb : the USRP board will be reset when this is set
    """

    # what argparse knows as a slice actually is about the gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slicename)
    gwnode = SshNode(hostname=gwhost, username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = [r2lab_hostname(x) for x in (cn, ran)]

    cnnode, rannode = [
        SshNode(gateway=gwnode, hostname=hostname, username='******',
                formatter=TimeColonFormatter(verbose=verbose),
                debug=verbose)
        for hostname in hostnames
    ]

    scheduler = Scheduler(verbose=verbose, label="CORE EXP")

    ########## prepare the image-loading phase
    # focus on the experiment, and use prepare_testbed_scheduler later on
    # to prepare the testbed; all we need to do at this point is compute
    # a mapping dict: image -> list-of-nodes
    images_to_load = defaultdict(list)
    images_to_load[image_cn] += [cn]
    images_to_load[image_ran] += [ran]
    if e3372_ues:
        images_to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        images_to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        images_to_load[image_oai_ue] += oai_ues
    if gnuradios:
        images_to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        images_to_load[image_gnuradio] += gnuradio_xterms
    if T_tracer:
        images_to_load[image_T_tracer] += T_tracer

    # start the core network
    job_start_cn = SshJob(
        node=cnnode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "journal --vacuum-time=1s", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "configure", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "start", includes=INCLUDES),
            tcpdump_cn_service.start_command(),
        ],
        label="start CN service",
        scheduler=scheduler,
    )

    # prepare the eNodeB
    reset_option = "-u" if reset_usb else ""
    job_warm_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "journal --vacuum-time=1s", includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "warm-up", reset_option, includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "configure -b", n_rb, cn, includes=INCLUDES),
        ],
        label="Configure eNB",
        scheduler=scheduler,
    )

    ran_requirements = [job_start_cn, job_warm_ran]

    ###
    if oai_ues:
        # prepare the OAI UEs
        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            job_warm_ues = [
                SshJob(
                    node=ue_node,
                    commands=[
                        RunScript(find_local_embedded_script("nodes.sh"),
                                  "git-pull-r2lab", includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "journal --vacuum-time=1s", includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "warm-up", reset_option, includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "configure -b", n_rb, includes=INCLUDES),
                    ],
                    label=f"Configure OAI UE on fit{ue}",
                    scheduler=scheduler)
            ]
            ran_requirements.append(job_warm_ues)

    ###
    if not load_nodes and phones:
        job_turn_off_phones = SshJob(
            node=gwnode,
            commands=[
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{phone} phone-off")
                for phone in phones
            ],
            scheduler=scheduler,
        )
        ran_requirements.append(job_turn_off_phones)

    # wait for everything to be ready, and add an extra grace delay
    grace = 5
    grace_delay = PrintJob(
        f"Allowing grace of {grace} seconds",
        sleep=grace,
        required=ran_requirements,
        scheduler=scheduler,
        label=f"settle for {grace}s",
    )

    # optionally start the T_tracer
    if T_tracer:
        job_start_T_tracer = SshJob(  # pylint: disable=w0612
            node=SshNode(gateway=gwnode, hostname=r2lab_hostname(T_tracer[0]),
                         username='******',
                         formatter=TimeColonFormatter(verbose=verbose),
                         debug=verbose),
            commands=[
                Run(f"/root/trace {ran}", x11=True),
            ],
            label="start T_tracer service",
            required=ran_requirements,
            scheduler=scheduler,
        )
        # ran_requirements.append(job_start_T_tracer)

    # start services
    graphical_option = "-x" if oscillo else ""
    graphical_message = "graphical" if oscillo else "regular"
    tracer_option = " -T" if T_tracer else ""

    # we use a Python variable for consistency,
    # even though it is not used down the road
    _job_service_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "start", graphical_option, tracer_option,
                      includes=INCLUDES, x11=oscillo),
        ],
        label=f"start {graphical_message} softmodem on eNB",
        required=grace_delay,
        scheduler=scheduler,
    )

    ########## run the experiment per se

    # manage phone(s) and OAI UE(s): this starts at the same time as the
    # eNB, but a headstart is needed so that the eNB actually is ready to serve
    sleeps = [20, 30]
    phone_msgs = [f"wait for {sleep}s for eNB to start up before waking up phone{id}"
                  for sleep, id in zip(sleeps, phones)]
    wait_commands = [f"echo {msg}; sleep {sleep}"
                     for msg, sleep in zip(phone_msgs, sleeps)]

    job_start_phones = [
        SshJob(
            node=gwnode,
            commands=[
                Run(wait_command),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-on", includes=INCLUDES),
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{id}", "r2lab-embedded/shell/macphone.sh",
                          "phone-start-app", includes=INCLUDES),
            ],
            label=f"turn off airplane mode on phone {id}",
            required=grace_delay,
            scheduler=scheduler)
        for id, wait_command in zip(phones, wait_commands)
    ]

    if oai_ues:
        delay = 25
        for ue in oai_ues:
            msg = f"wait for {delay}s for eNB to start up before running UE on node fit{ue}"
            wait_command = f"echo {msg}; sleep {delay}"
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            job_start_ues = [
                SshJob(
                    node=ue_node,
                    commands=[
                        Run(wait_command),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "start", includes=INCLUDES),
                    ],
                    label=f"Start OAI UE on fit{ue}",
                    required=grace_delay,
                    scheduler=scheduler)
            ]
            delay += 20

        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            msg = f"Wait 60s and then ping faraday gateway from UE on fit{ue}"
            _job_ping_gw_from_ue = [
                SshJob(
                    node=ue_node,
                    commands=[
                        Run(f"echo {msg}; sleep 60"),
                        Run("ping -c 5 -I oip1 faraday.inria.fr"),
                    ],
                    label=f"ping faraday gateway from UE on fit{ue}",
                    critical=False,
                    required=job_start_ues,
                    scheduler=scheduler)
            ]

    # ditto
    _job_ping_phones_from_cn = [
        SshJob(
            node=cnnode,
            commands=[
                Run("sleep 20"),
                Run(f"ping -c 100 -s 100 -i .05 172.16.0.{id+1} &> /root/ping-phone{id}"),
            ],
            label=f"ping phone {id} from core network",
            critical=False,
            required=job_start_phones,
            scheduler=scheduler)
        for id in phones
    ]

    ########## xterm nodes
    colors = ("wheat", "gray", "white", "darkolivegreen")
    xterms = e3372_ue_xterms + gnuradio_xterms

    for xterm, color in zip(xterms, cycle(colors)):
        xterm_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(xterm),
                             username='******',
                             formatter=TimeColonFormatter(verbose=verbose),
                             debug=verbose)
        SshJob(
            node=xterm_node,
            command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*",
                        f" -bg {color} -geometry 90x10",
                        x11=True),
            label=f"xterm on node {xterm_node.hostname}",
            scheduler=scheduler,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
        )

    # remove dangling requirements - if any -
    # should not be needed but won't hurt either
    scheduler.sanitize()

    ##########
    print(10 * "*", "nodes usage summary")
    if load_nodes:
        for image, nodes in images_to_load.items():
            for node in nodes:
                print(f"node {node} : {image}")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10 * "*", "phones usage summary")
    if phones:
        for phone in phones:
            print(f"Using phone{phone}")
    else:
        print("No phone involved")
    if nodes_left_alone:
        print(f"Ignore following fit nodes: {nodes_left_alone}")

    # wrap the scheduler into a global scheduler that prepares the testbed
    scheduler = prepare_testbed_scheduler(gwnode, load_nodes, scheduler,
                                          images_to_load, nodes_left_alone)

    scheduler.check_cycles()
    # update the .dot and .png files for illustration purposes
    name = "mosaic-load" if load_nodes else "mosaic"
    print(10 * '*', 'See main scheduler in', scheduler.export_as_pngfile(name))

    if verbose:
        scheduler.list()
    if dry_run:
        return True
    if verbose:
        input('OK ? - press control C to abort ? ')

    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print("RUN OK")
    return True
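# Every scenario in this collection ends with the same epilogue: check for
# cycles, export a diagram, optionally list the jobs, then orchestrate and
# debrief on failure. A small helper capturing that shared tail (a sketch;
# 'finalize' is a hypothetical name, and 'scheduler' is any asynciojobs
# Scheduler exposing the methods used throughout these scripts):
def finalize(scheduler, name, *, verbose=False, dry_run=False):
    scheduler.check_cycles()
    # update the .dot and .png files for illustration purposes
    print(10 * '*', 'See main scheduler in', scheduler.export_as_pngfile(name))
    if verbose:
        scheduler.list()
    if dry_run:
        return True
    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print("RUN OK")
    return True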