示例#1
0
    def test_nesting_sequence(self):
        """
        A sequence of three short jobs nested inside a critical, forever
        sub-scheduler, itself sandwiched between two plain jobs; the whole
        run is expected to last about one second overall.
        """
        expected_duration = 1.
        watch = Watch('test_nesting_sequence')

        # the three inner jobs, chained in a Sequence
        inner_sequence = Sequence(
            *(Job(co_print_sleep(watch, .2, text))
              for text in ("one", "two", "three")))

        subjob = Scheduler(
            inner_sequence,
            watch=watch,
            label="sub-scheduler\non several lines",
            critical=True,
            forever=True,
        )

        # the sub-scheduler runs between a BEGIN and an END job
        main = Scheduler(
            Sequence(
                Job(co_print_sleep(watch, .2, "BEGIN"), label="job-label"),
                subjob,
                Job(co_print_sleep(watch, .2, "END")),
            ),
            watch=watch)

        print("===== test_nesting_sequence", "LIST with details")
        main.list(details=True)

        self.assertTrue(main.run())
        self.assertAlmostEqual(watch.seconds(), expected_duration, delta=.05)

        produce_png(main, "test_nesting_sequence")
示例#2
0
    def _allowed_retcod(self, allowed_exits,
                       host="localhost", username=None):
        """
        Helper: run a remote command that sleeps briefly and then exits
        with a non-zero code, and check that the scheduler outcome matches
        whether that code is listed in *allowed_exits*.
        """
        print(f"Testing allowed retcod allowed_exits={allowed_exits}")

        # global timeout for the whole scheduler
        total = 4
        # scheduled duration of the remote sleep
        long = 1
        # the remote command always exits with this code
        # NOTE(review): the original comment said 100 but the code uses 1000;
        # also a shell truncates exit statuses to 8 bits (1000 -> 232) —
        # confirm how apssh reports and matches this value
        retcod = 1000

        if username is None:
            username = util.localuser()
        node = SshNode(host, username=username)

        # non-critical so a disallowed exit code fails the run
        # instead of raising
        scheduler = Scheduler(timeout = total, critical=False)
        SshJob(node=node, scheduler=scheduler,
               command=Run(f"sleep {long}; exit {retcod}",
                           allowed_exits=allowed_exits))

        # the run should succeed exactly when the exit code is allowed
        expected = retcod in allowed_exits

        run = scheduler.run()
        scheduler.list()
        self.assertEqual(run, expected)
示例#3
0
 def test_forever(self):
     """
     Two chained sleep jobs run alongside a forever tick job; the
     orchestration must complete successfully once the sleeps finish.
     """
     job_first = SLJ(1)
     job_second = SLJ(1.5)
     tick = TJ(.6)
     job_second.requires(job_first)
     scheduler = Scheduler(job_first, job_second, tick)
     scheduler.list()
     self.assertTrue(scheduler.orchestrate())
     scheduler.list()
示例#4
0
    def _allowed_signal(self, allowed_exits,
                       host="localhost", username=None):
        """
        Helper: start a long-lived remote sleep, kill it with a TERM signal
        from a second job, and check that the overall outcome matches
        whether that signal name appears in *allowed_exits*.
        """
        print(f"Testing allowed signal allowed_exits={allowed_exits}")

        # overall scheduler timeout / sleep length / when to send the signal
        total, long, short = 4, 2, 1
        # the signal used to kill the sleeping process
        signal = "TERM"

        username = util.localuser() if username is None else username
        node = SshNode(host, username=username)

        # non-critical so a disallowed signal fails the run instead of raising
        scheduler = Scheduler(timeout=total, critical=False)
        # the victim: a sleep that will be killed before it completes
        SshJob(node=node, scheduler=scheduler,
               command=Run(f"sleep {long}",
                           allowed_exits=allowed_exits))
        # the killer: waits a little, then TERMs the sleep
        SshJob(node=node, scheduler=scheduler,
               command=f"sleep {short}; pkill -{signal} sleep")

        # success is expected exactly when the signal is allowed
        expected = signal in allowed_exits

        outcome = scheduler.run()
        scheduler.list()
        self.assertEqual(outcome, expected)
示例#5
0
 def test_timeout(self):
     """
     Three sleep jobs chained in sequence; a 1-second global timeout
     must fire during the second stage, so orchestrate() returns False.
     """
     quick, medium, slow = (SLJ(duration) for duration in (0.5, 0.6, 0.7))
     medium.requires(quick)
     slow.requires(medium)
     scheduler = Scheduler(quick, medium, slow)
     # should timeout in the middle of stage 2
     self.assertFalse(scheduler.orchestrate(timeout=1))
     scheduler.list()
示例#6
0
    def test_display(self):
        """
        Exercise Scheduler.list() rendering across every combination of
        job state (idle/scheduled/running/done) x exception x critical x
        forever, by faking the asyncio task attached to each job.
        """

        class FakeTask:
            # minimal stand-in for an asyncio Task: only the attributes
            # that the display code is expected to read

            def __init__(self):
                self._result = 0
                self._exception = None

        def annotate_job_with_fake_task(job, state, boom):
            # attach a FakeTask to *job* so it displays as being in
            # *state*; returns the job, or None when the combination
            # is not representable (an exception on a non-done job)
            task = FakeTask()
            if state == "done":
                # NOTE(review): relies on the private constant
                # asyncio.futures._FINISHED — may break across
                # Python versions; confirm it still exists
                task._state = asyncio.futures._FINISHED
                job._task = task
                job._running = True
            elif state == "running":
                task._state = "NONE"
                job._task = task
                job._running = True
            elif state == "scheduled":
                task._state = "NONE"
                job._task = task
                job._running = False
            else:
                # "idle": leave the job without any task attached
                pass

            # here we assume that a job that has raised an exception is
            # necessarily done
            if boom:
                if state in ("idle", "scheduled", "running"):
                    print("incompatible combination boom x idle - ignored")
                    return
                else:
                    job._task._exception = True
            return job

        class J(AbstractJob):
            # concrete subclass just to instantiate AbstractJob
            pass

        sched = Scheduler()
        previous = None
        # one job per (state, boom, critical, forever) combination,
        # each requiring the previously added one, forming a chain
        for state in "idle", "scheduled", "running", "done":
            for boom in True, False:
                for critical in True, False:
                    for forever in True, False:
                        j = J(critical=critical,
                              forever=forever,
                              label="forever={} crit.={} status={} boom={}"
                              .format(forever, critical, state, boom),
                              required=previous
                              )
                        if annotate_job_with_fake_task(j, state, boom):
                            sched.add(j)
                            previous = j
        sched.list()
示例#7
0
 def run_one_job(self, job, *, details=False, expected=True):
     """
     Run a single job in its own verbose scheduler; assert that the
     orchestration succeeds and that the job result is zero (or
     non-zero when *expected* is False).
     """
     print(job)
     scheduler = Scheduler(job, verbose=True)
     outcome = scheduler.run()
     scheduler.list(details=details)
     if not outcome:
         scheduler.debrief()
     self.assertTrue(outcome)
     # pick the right assertion depending on the expected result
     check = self.assertEqual if expected else self.assertNotEqual
     check(job.result(), 0)
示例#8
0
def run(*,                                # pylint: disable=r0912, r0914, r0915
        # the pieces to use
        slicename, cn, ran, phones,
        e3372_ues, oai_ues, gnuradios,
        e3372_ue_xterms, gnuradio_xterms, ns3,
        # boolean flags
        load_nodes, reset_usb, oscillo, tcp_streaming,
        # the images to load
        image_cn, image_ran, image_oai_ue, image_e3372_ue, image_gnuradio, image_T_tracer, image_ns3,
        # miscell
        n_rb, nodes_left_alone, T_tracer, publisher_ip, verbose, dry_run):
    """
    Set up and orchestrate the whole cefore/TCP streaming experiment.

    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slicename : s.t like [email protected]
    * cn : 7
    * ran : 23
    * ns3 : 32
    * phones: list of indices of phones to use

    * e3372_ues : list of nodes to use as a UE using e3372
    * oai_ues   : list of nodes to use as a UE using OAI
    * gnuradios : list of nodes to load with a gnuradio image
    * T_tracer  : list of nodes to load with a tracer image

    * image_* : the name of the images to load on the various nodes

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_cn, image_ran and image_*
                  are used to tell the image names
    * reset_usb : the USRP board will be reset when this is set
    * tcp_streaming : set up TCP streaming scenario
    * publisher_ip : IP address of the publisher

    Returns True on success (or on a dry run), False otherwise.
    """

    # what argparse knows as a slice actually is about the gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slicename)
    gwnode = SshNode(hostname=gwhost, username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose), debug=verbose)

    hostnames = [r2lab_hostname(x) for x in (cn, ran)]

    cnnode, rannode = [
        SshNode(gateway=gwnode, hostname=hostname, username='******',
                formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
        for hostname in hostnames
    ]

    scheduler = Scheduler(verbose=verbose, label="CORE EXP")

    ########## prepare the image-loading phase
    # focus on the experiment, and use
    # prepare_testbed_scheduler later on to prepare testbed
    # all we need to do at this point is compute a mapping dict
    # image -> list-of-nodes

    images_to_load = defaultdict(list)
    images_to_load[image_cn] += [cn]
    images_to_load[image_ran] += [ran]
    if e3372_ues:
        images_to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        images_to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        images_to_load[image_oai_ue] += oai_ues
    if gnuradios:
        images_to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        images_to_load[image_gnuradio] += gnuradio_xterms
    if T_tracer:
        images_to_load[image_T_tracer] += T_tracer
    if ns3:
        images_to_load[image_ns3] += [ns3]

    # start core network
    job_start_cn = SshJob(
        node=cnnode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "journal --vacuum-time=1s",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"), "configure",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"), "start",
                      includes=INCLUDES),
            tcpdump_cn_service.start_command(),
        ],
        label="start CN service",
        scheduler=scheduler,
    )

    # prepare enodeb
    reset_option = "-u" if reset_usb else ""
    job_warm_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "journal --vacuum-time=1s",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "warm-up", reset_option,
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "configure -b", n_rb, cn,
                      includes=INCLUDES),
        ],
        label="Configure eNB",
        scheduler=scheduler,
    )

    ran_requirements = [job_start_cn, job_warm_ran]
###
    if oai_ues:
        # prepare OAI UEs
        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue), username='******',
                              formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            # NOTE(review): job_warm_ues is rebound on every loop
            # iteration (each time a one-element list), and that list —
            # not the SshJob — is appended to ran_requirements below;
            # confirm that the scheduler accepts nested lists in
            # `required`, and that only the last binding being kept
            # is intended
            job_warm_ues = [
                SshJob(
                    node=ue_node,
                    commands=[
                        RunScript(find_local_embedded_script("nodes.sh"),
                                  "git-pull-r2lab",
                                  includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "journal --vacuum-time=1s",
                                  includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "warm-up", reset_option,
                                  includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "configure -b", n_rb,
                                  includes=INCLUDES),
                        ],
                    label=f"Configure OAI UE on fit{ue:02d}",
                    scheduler=scheduler)
                ]
            ran_requirements.append(job_warm_ues)

###
    # phones are only switched off when we are NOT loading images
    if not load_nodes and phones:
        job_turn_off_phones = SshJob(
            node=gwnode,
            commands=[
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{phone} phone-off")
                for phone in phones],
            scheduler=scheduler,
        )
        ran_requirements.append(job_turn_off_phones)

    # wait for everything to be ready, and add an extra grace delay

    grace = 5
    grace_delay = PrintJob(
        f"Allowing grace of {grace} seconds",
        sleep=grace,
        required=ran_requirements,
        scheduler=scheduler,
        label=f"settle for {grace}s",
    )

    # optionally start T_tracer
    if T_tracer:
        job_start_T_tracer = SshJob(                    # pylint: disable=w0612
            node=SshNode(
                gateway=gwnode, hostname=r2lab_hostname(T_tracer[0]), username='******',
                formatter=TimeColonFormatter(verbose=verbose), debug=verbose),
            commands=[
                Run(f"/root/trace {ran}",
                    x11=True),
            ],
            label="start T_tracer service",
            required=ran_requirements,
            scheduler=scheduler,
        )
#        ran_requirements.append(job_start_T_tracer)


# start services

    graphical_option = "-x" if oscillo else ""
    graphical_message = "graphical" if oscillo else "regular"
    tracer_option = " -T" if T_tracer else ""

    # we use a Python variable for consistency
    # although it not used down the road
    _job_service_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "start", graphical_option, tracer_option,
                      includes=INCLUDES,
                      x11=oscillo,
                      ),
        ],
        label=f"start {graphical_message} softmodem on eNB",
        required=grace_delay,
        scheduler=scheduler,
    )

    ########## run experiment per se
    # Manage phone(s) and OAI UE(s)
    # this starts at the same time as the eNB, but some
    # headstart is needed so that eNB actually is ready to serve
    sleeps = [20, 30]
    phone_msgs = [f"wait for {sleep}s for eNB to start up before waking up phone{id}"
                  for sleep, id in zip(sleeps, phones)]
    wait_commands = [f"echo {msg}; sleep {sleep}"
                     for msg, sleep in zip(phone_msgs, sleeps)]

    # NOTE(review): `id` shadows the builtin throughout this section
    job_start_phones = [
        SshJob(
            node=gwnode,
            commands=[
                Run(wait_command),
                RunScript(find_local_embedded_script("faraday.sh"), f"macphone{id}",
                          "r2lab-embedded/shell/macphone.sh", "phone-on",
                          includes=INCLUDES),
                RunScript(find_local_embedded_script("faraday.sh"), f"macphone{id}",
                          "r2lab-embedded/shell/macphone.sh", "phone-start-app",
                          includes=INCLUDES),
            ],
            # NOTE(review): "airplace" looks like a typo for "airplane"
            # in this (cosmetic) label
            label=f"turn off airplace mode on phone {id}",
            required=grace_delay,
            scheduler=scheduler)
        for id, wait_command in zip(phones, wait_commands)]

    if oai_ues:
        delay = 25
        for ue in oai_ues:
            msg = f"wait for {delay}s for eNB to start up before running UE on node fit{ue:02d}"
            wait_command = f"echo {msg}; sleep {delay}"
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue), username='******',
                              formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            # NOTE(review): like job_warm_ues above, job_start_ues is
            # rebound on each iteration; only the binding from the last
            # UE survives for the `required=job_start_ues` clauses below
            job_start_ues = [
                SshJob(
                    node=ue_node,
                    commands=[
                        Run(wait_command),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "start",
                                  includes=INCLUDES),
                        ],
                    label=f"Start OAI UE on fit{ue:02d}",
                    required=grace_delay,
                    scheduler=scheduler)
                ]
            delay += 20

        for ue in oai_ues:
            environ = {'USER': '******'}
            cefnet_ue_service = Service("cefnetd", service_id="cefnet", environ=environ)
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue), username='******',
                              formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            msg = f"Wait 60s and then ping faraday gateway from UE on fit{ue:02d}"
            ue_commands = f"echo {msg}; sleep 60; ping -c 5 -I oip1 faraday.inria.fr"

            if tcp_streaming:
                # TCP streaming scenario
                if load_nodes:
                    # NOTE(review): no separator between the ping command
                    # above and this appended "sysctl" — the shell will see
                    # "...faraday.inria.frsysctl ..."; likely a missing ';'
                    ue_commands += "sysctl -w net.ipv4.ip_forward=1;"
                    ue_commands += "ip route add 10.1.1.0/24 via 192.168.2.32 dev data"
                job_setup_ue = [
                    SshJob(
                        node=ue_node,
                        commands=[
                            Run(ue_commands,label="test UE link and set up routing for TCP streaming"),
                            ],
                        label=f"ping faraday gateway from UE on fit{ue:02d} and set up routing for the TCP streaming scenario",
                        critical=True,
                        required=job_start_ues,
                        scheduler=scheduler)
                    ]
            else:
                # Cefore streaming scenario
                if load_nodes:
                    # same missing-separator concern as in the TCP branch above
                    ue_commands += "sysctl -w net.ipv4.ip_forward=1;"
                    ue_commands += f"ip route add {publisher_ip}/32 dev oip1;"
                    ue_commands += "ip route add 10.1.1.0/24 via 192.168.2.32 dev data;"
                    ue_commands += "iptables -t nat -A POSTROUTING -s 10.1.1.2/32  -j SNAT --to-source 172.16.0.2;"
                    ue_commands += "iptables -t nat -A PREROUTING -d 172.16.0.2   -j DNAT --to-destination 10.1.1.2;"
                    ue_commands += "iptables -A FORWARD -d 10.1.1.2/32 -i oip1 -j ACCEPT;"
                    ue_commands += f"iptables -A FORWARD -d {publisher_ip}/32 -i data -j ACCEPT;"
                    ue_commands += "ip rule del from all to 172.16.0.2 lookup 201;"
                    ue_commands += "ip rule del from 172.16.0.2 lookup 201;"
                    ue_commands += "ip rule add from 10.1.1.2 lookup lte prio 32760;"
                    ue_commands += "ip rule add from all to 172.16.0.2 lookup lte prio 32761;"
                    ue_commands += "ip rule add from all fwmark 0x1 lookup lte prio 32762;"
                    ue_commands += "ip route add table lte 10.1.1.0/24 via 192.168.2.32 dev data;"
#                    ue_commands += "killall cefnetd || true"
                job_setup_ue = [
                    SshJob(
                        node=ue_node,
                        commands=[
                            Run(ue_commands,label="test UE link and set up routing for Cefore streaming"),
                            cefnet_ue_service.start_command(),
                            ],
                        label=f"ping faraday gateway from fit{ue:02d} UE and set up routing for the Cefore streaming scenario",
                         critical=True,#old cefnetd not killed when running new one...
                        required=job_start_ues,
                        scheduler=scheduler)
                    ]

        # note: this whole ns3 section only runs when oai_ues is non-empty,
        # and it depends on job_setup_ue as bound by the LAST loop iteration
        if ns3:
            ns3_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ns3), username='******',
                               formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            msg = f"Wait for the UE node to be ready before running the streaming scenario with ns-3 on fit{ns3}"

            if load_nodes:
                job_prepare_ns3_node = [
                    SshJob(
                        node=ns3_node,
                        commands=[
                            Run("turn-on-data"),
                            Run("ifconfig data promisc up"),
                            Run("ip route add default via 192.168.2.6 dev data || true"),
                            Run("sysctl -w net.ipv4.ip_forward=1"),
                            ],
                        label=f"setup routing on ns-3 fit{ns3:02d} node",
                        # ip route may already be there so the ip route command may fail
                        critical=True,
                        required=job_setup_ue,
                        scheduler=scheduler)
                    ]
                ns3_requirements = job_prepare_ns3_node
            else:
                ns3_requirements = job_setup_ue

            if not tcp_streaming:
                environ = {'USER': '******'}
                cefnet_ns3_service = Service("cefnetd", service_id="cefnet", environ=environ)
                job_start_cefnet_on_cn = [
                    SshJob(
                        node=cnnode,
                        commands=[
                            Run(f"echo 'ccn:/streaming tcp {publisher_ip}:80' > /usr/local/cefore/cefnetd.conf"),
#                            Run("killall cefnetd || true"),# not done by default with service.start_command()
                            cefnet_ns3_service.start_command(),
                            ],
                        label=f"Start Cefnet on EPC running at fit{cn:02d}",
                        critical=True,#old cefnetd not killed when running new one...
                        required=ns3_requirements,
                        scheduler=scheduler,
                        )
                    ]

    # ditto
    _job_ping_phones_from_cn = [
        SshJob(
            node=cnnode,
            commands=[
                Run("sleep 20"),
                Run(f"ping -c 100 -s 100 -i .05 172.16.0.{id+1} &> /root/ping-phone{id}"),
                ],
            label=f"ping phone {id} from core network",
            critical=False,
            required=job_start_phones,
            scheduler=scheduler)
        for id in phones]

    ########## xterm nodes

    colors = ("wheat", "gray", "white", "darkolivegreen")

    xterms = e3372_ue_xterms + gnuradio_xterms

    for xterm, color in zip(xterms, cycle(colors)):
        xterm_node = SshNode(
            gateway=gwnode, hostname=r2lab_hostname(xterm), username='******',
            formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
        SshJob(
            node=xterm_node,
            # NOTE(review): the first f-string has no placeholder (f prefix
            # is superfluous); Run receives the command as two pieces here
            command=Run(f"xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*",
                        f" -bg {color} -geometry 90x10",
                        x11=True),
            label=f"xterm on node {xterm_node.hostname}",
            scheduler=scheduler,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
            )

    # remove dangling requirements - if any
    # should not be needed but won't hurt either
    scheduler.sanitize()

    ##########
    print(10*"*", "nodes usage summary")
    if load_nodes:
        for image, nodes in images_to_load.items():
            for node in nodes:
                print(f"node fit{node:02d} : {image}")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10*"*", "phones usage summary")
    if phones:
        for phone in phones:
            print(f"Using phone{phone}")
    else:
        print("No phone involved")
    if nodes_left_alone:
        print(f"Ignore following fit nodes: {nodes_left_alone}")

    print(f"Publisher IP is {publisher_ip}")
    if tcp_streaming:
        print("Run streaming scenario with TCP")
    else:
        print("Run streaming scenario with Cefore")
    # wrap scheduler into global scheduler that prepares the testbed
    scheduler = prepare_testbed_scheduler(
        gwnode, load_nodes, scheduler, images_to_load, nodes_left_alone)

    scheduler.check_cycles()
    # Update the .dot and .png file for illustration purposes
    name = "cefore-load" if load_nodes else "cefore"
    print(10*'*', 'See main scheduler in',
          scheduler.export_as_pngfile(name))

    if verbose:
        scheduler.list()

    if dry_run:
        return True

    if verbose:
        input('OK ? - press control C to abort ? ')

    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print("RUN OK")
    print(80*'*')
    if tcp_streaming:
        # TCP streaming scenario
        print(f"Now it's time to run the ns-3 script on node fit{ns3:02d}")
        print(f"root@fit{ns3:02d}:~# /root/NS3/source/ns-3-dce/waf  --run dce-tcp-test")
        print("Then, run iperf on the publisher host:")
        print("yourlogin@publisher:~# iperf -s -P 1 -p 80")
        print(f"Log file will be available on fit{ns3:02d} at:")
        print("  /root/NS3/source/ns-3-dce/files-4/var/log/56884/stdout")
    else:
        # Cefore streaming scenario
        print("Now, if not already done, copy cefnetd and cefputfile binaries on your publisher host")
        print("login@your_host:r2lab-demos/cefore# scp bin/cefnetd yourlogin@publisher_node:/usr/local/sbin")
        print("login@your_host:r2lab-demos/cefore# scp bin/cefputfile yourlogin@publisher_node:/user/local/bin")
        print(f"After that, run on the ns-3 fit{ns3:02d} node the following command:")
        print(f"root@fit{ns3:02d}:~# /root/NS3/source/ns-3-dce/waf  --run dce-cefore-test ")
        print("Then, run on the publisher host:")
        print("yourlogin@publisher:~# cefnetd")
        print("yourlogin@publisher:~# cefputfile ccn:/streaming/test -f ./[file-name] -r [1 <= streaming_rate <= 32 (Mbps)]")
        print(f"Log file will be available on fit{ns3:02d} at:")
        print("  /root/NS3/source/ns-3-dce/files-3/tmp/cefgetstream-thuputLog-126230400110000")
    print(80*'*')

    return True
示例#9
0
def run(slice, hss, epc, enb, extras, load_nodes, image_gw, image_enb,
        image_extra, reset_nodes, reset_usrp, spawn_xterms, verbose):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slice : s.t like [email protected]
    * hss : 04
    * epc : 03
    * enb : 23
    * extras : a list of ids that will be loaded with the gnuradio image

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_gw, image_enb and image_extra
                  are used to tell the image names
    * reset_nodes: if load_nodes is false and reset_nodes is true, the nodes are reset - i.e. rebooted
    * otherwise (both False): do nothing
    * reset_usrp : if not False, the USRP board won't be reset
    * spawn_xterms : if set, starts xterm on all extra nodes
    * image_* : the name of the images to load on the various nodes
    """

    # what argparse knows as a slice actually is a gateway (user + host)
    gwuser, gwhost = parse_slice(slice)
    gwnode = SshNode(hostname=gwhost,
                     username=gwuser,
                     formatter=ColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = hssname, epcname, enbname = [
        r2lab_hostname(x) for x in (hss, epc, enb)
    ]
    extra_hostnames = [r2lab_hostname(x) for x in extras]

    hssnode, epcnode, enbnode = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=ColonFormatter(verbose=verbose),
                debug=verbose) for hostname in hostnames
    ]

    extra_nodes = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=ColonFormatter(verbose=verbose),
                debug=verbose) for hostname in extra_hostnames
    ]

    ########## preparation
    job_check_for_lease = SshJob(
        node=gwnode,
        command=["rhubarbe", "leases", "--check"],
        label="check we have a current lease",
    )

    # turn off all nodes
    turn_off_command = ["rhubarbe", "off", "-a"]
    # except our 3 nodes and the optional extras
    turn_off_command += [
        "~{}".format(x) for x in [hss, epc, enb] + extras + [20]
    ]

    job_off_nodes = SshJob(
        node=gwnode,
        # switch off all nodes but the ones we use
        command=turn_off_command,
        label="turn off unused nodes",
        required=job_check_for_lease,
    )

    # actually run this in the gateway, not on the mac
    # the ssh keys are stored in the gateway and we do not yet have
    # the tools to leverage such remote keys
    job_stop_phone = SshJob(
        node=gwnode,
        command=RunScript(locate_local_script("faraday.sh"),
                          "macphone",
                          "r2lab/infra/user-env/macphone.sh",
                          "phone-off",
                          includes=includes),
        label="stop phone",
        required=job_check_for_lease,
    )

    jobs_prepare = [job_check_for_lease, job_stop_phone]
    # turn off nodes only when --load or --reset is set
    if load_nodes or reset_nodes:
        jobs_prepare.append(job_off_nodes)

    ########## infra nodes hss + epc

    # prepare nodes

    commands = []
    if load_nodes:
        commands.append(
            Run("rhubarbe", "load", "-i", image_gw, hssname, epcname))
    elif reset_nodes:
        commands.append(Run("rhubarbe", "reset", hssname, epcname))
    # always do this
    commands.append(Run("rhubarbe", "wait", "-t", 120, hssname, epcname))
    job_load_infra = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait HSS and EPC nodes",
        required=jobs_prepare,
    )

    # start services

    job_service_hss = SshJob(
        node=hssnode,
        command=RunScript(locate_local_script("oai-hss.sh"),
                          "run-hss",
                          epc,
                          includes=includes),
        label="start HSS service",
        required=job_load_infra,
    )

    msg = "wait for HSS to warm up"
    job_service_epc = Sequence(
        # let 15 seconds to HSS
        Job(
            verbose_delay(15, msg),
            label=msg,
        ),
        SshJob(
            node=epcnode,
            command=RunScript(locate_local_script("oai-epc.sh"),
                              "run-epc",
                              hss,
                              includes=includes),
            label="start EPC services",
        ),
        required=job_load_infra,
    )

    jobs_infra = job_load_infra, job_service_hss, job_service_epc

    ########## enodeb

    # prepare node

    commands = []
    if load_nodes:
        commands.append(Run("rhubarbe", "usrpoff", enb))
        commands.append(Run("rhubarbe", "load", "-i", image_enb, enb))
    elif reset_nodes:
        commands.append(Run("rhubarbe", "reset", enb))
    commands.append(Run("rhubarbe", "wait", "-t", "120", enb))

    job_load_enb = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait ENB",
        required=jobs_prepare,
    )

    # start service

    msg = "wait for EPC to warm up"
    job_service_enb = Sequence(
        Job(verbose_delay(15, msg), label=msg),
        SshJob(
            node=enbnode,
            # run-enb expects the id of the epc as a parameter
            command=RunScript(locate_local_script("oai-enb.sh"),
                              "run-enb",
                              epc,
                              reset_usrp,
                              includes=includes),
            label="start softmodem on ENB",
        ),
        required=(job_load_enb, job_service_hss, job_service_epc),
    )

    jobs_enb = job_load_enb, job_service_enb

    ########## run experiment per se

    # the phone
    # we need to wait for the USB firmware to be loaded
    duration = 30 if reset_usrp is not False else 8
    msg = "wait for enodeb firmware to load on USRP".format(duration)
    job_wait_enb = Job(verbose_delay(duration, msg),
                       label=msg,
                       required=job_service_enb)

    job_start_phone = SshJob(
        node=gwnode,
        commands=[
            RunScript(locate_local_script("faraday.sh"),
                      "macphone",
                      "r2lab/infra/user-env/macphone.sh",
                      "phone-on",
                      includes=includes),
            RunScript(locate_local_script("faraday.sh"),
                      "macphone",
                      "r2lab/infra/user-env/macphone.sh",
                      "phone-start-app",
                      includes=includes),
        ],
        label="start phone 4g and speedtest app",
        required=job_wait_enb,
    )

    job_ping_phone_from_epc = SshJob(
        node=epcnode,
        commands=[
            Run("sleep 10"),
            Run("ping -c 100 -s 100 -i .05 172.16.0.2 &> /root/ping-phone"),
        ],
        label="ping phone from EPC",
        critical=False,
        required=job_wait_enb,
    )

    jobs_exp = job_wait_enb, job_start_phone, job_ping_phone_from_epc

    ########## extra nodes
    # ssh -X not yet supported in apssh, so one option is to start them using
    # a local process
    # xxx to update: The following code kind of works, but it needs to be
    # turned off, because the process in question would be killed
    # at the end of the Scheduler orchestration (at the end of the run function)
    # which is the exact time where it would be useful :)
    # however the code for LocalJob appears to work fine, it would be nice to
    # move it around - maybe in apssh ?

    commands = []
    if not extras:
        commands.append(Run("echo no extra nodes specified - ignored"))
    else:
        if load_nodes:
            commands.append(Run("rhubarbe", "usrpoff", *extra_hostnames))
            commands.append(
                Run("rhubarbe", "load", "-i", image_extra, *extra_hostnames))
            commands.append(
                Run("rhubarbe", "wait", "-t", 120, *extra_hostnames))
            commands.append(Run("rhubarbe", "usrpon", *extra_hostnames))
        elif reset_nodes:
            commands.append(Run("rhubarbe", "reset", extra_hostnames))
        commands.append(Run("rhubarbe", "wait", "-t", "120", *extra_hostnames))
    job_load_extras = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait extra nodes",
        required=job_check_for_lease,
    )

    jobs_extras = [job_load_extras]

    colors = ["wheat", "gray", "white"]

    if spawn_xterms:
        jobs_xterms_extras = [
            SshJob(
                node=extra_node,
                command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*"
                            " -bg {} -geometry 90x10".format(color),
                            x11=True),
                label="xterm on node {}".format(extra_node.hostname),
                required=job_load_extras,
                # don't set forever; if we do, then these xterms get killed
                # when all other tasks have completed
                # forever = True,
            )
            for extra_node, color in zip(extra_nodes, itertools.cycle(colors))
        ]
        jobs_extras += jobs_xterms_extras

    # schedule the load phases only if required
    sched = Scheduler(verbose=verbose)
    # this is just a way to add a collection of jobs to the scheduler
    sched.update(jobs_prepare)
    sched.update(jobs_infra)
    sched.update(jobs_enb)
    sched.update(jobs_exp)
    sched.update(jobs_extras)
    # remove dangling requirements - if any - should not be needed but won't hurt either
    sched.sanitize()

    print(40 * "*")
    if load_nodes:
        print("LOADING IMAGES: (gw->{}, enb->{}, extras->{})".format(
            load_nodes, image_gw, image_enb, image_extra))
    elif reset_nodes:
        print("RESETTING NODES")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")

    sched.rain_check()
    # Update the .dot and .png file for illustration purposes
    if verbose:
        sched.list()
        name = "scenario-load" if load_nodes else \
               "scenario-reset" if reset_nodes else \
               "scenario"
        sched.export_as_dotfile("{}.dot".format(name))
        os.system("dot -Tpng {}.dot -o {}.png".format(name, name))

    sched.list()

    if not sched.orchestrate():
        print("RUN KO : {}".format(sched.why()))
        sched.debrief()
        return False
    else:
        print("RUN OK")
        return True
示例#10
0
    def _simple(self, forever):
        """
        Start a tcpdump Service on localhost, query its status, stop it,
        then check that both output files exist and that no stray process
        is left behind.

        forever: passed to the tcpdump SshJob; also used to name the
        output files so both variants of the test can coexist.
        """
        storage = f"/root/TCPDUMP-{forever}.pcap"
        status = f"/root/TCPDUMP-{forever}.status"

        tcpdump = Service(f"tcpdump -i lo -w {storage}",
                          service_id='tcpdump',
                          verbose=True)
        # snapshot the process space so we can detect leftovers at the end
        monitor = ProcessMonitor()

        scheduler = Scheduler()
        node = SshNode("localhost")

        SshJob(node,
               scheduler=scheduler,
               command=tcpdump.start_command(),
               forever=forever)

        Sequence(
            SshJob(node, command="sleep 1"),
            SshJob(node, command=tcpdump.status_command(output=status)),
            SshJob(node, command="sleep 1"),
            SshJob(node, command=tcpdump.stop_command()),
            # could use a pull to retrive both files but that's not required
            # since we run on localhost, so keep tests simple
            scheduler=scheduler,
        )

        # cleanup before we run
        # NOTE: must be a tuple, not a generator expression; the original
        # generator was exhausted by this first loop, so the existence
        # checks after the run silently iterated over nothing
        paths = tuple(Path(x) for x in (storage, status))
        for path in paths:
            if path.exists():
                path.unlink()
            self.assertFalse(path.exists())
        produce_png(scheduler, f"service-{forever}")

        self.assertTrue(scheduler.run())
        scheduler.list()
        for path in paths:
            self.assertTrue(path.exists())

        with Path(status).open() as feed:
            contents = feed.read()
            for needle in ('Loaded: loaded', 'Active: active'):
                self.assertTrue(contents.find(needle) >= 0)

        close_ssh_in_scheduler(scheduler)

        # let it settle for a short while, and check the process space
        import time
        time.sleep(0.5)
        monitor.difference()

        news = monitor.news
        if news:
            print(f"we have {len(news)} new processes, {news}")
            # join with spaces - otherwise the pids get glued together
            # and ps receives one meaningless argument
            ps_command = "ps " + " ".join(str(pid) for pid in news)
            import os
            os.system(ps_command)

        self.assertEqual(len(news), 0)
示例#11
0
def run(*, gateway, slicename,
        disaggregated_cn, operator_version, nodes, node_master, node_enb, quectel_nodes, phones, flexran,
        drone, verbose, dry_run,
        load_images, master_image, worker_image, quectel_image):
    """
    Install K8S on R2lab

    Arguments:
        slicename: the Unix login name (slice name) to enter the gateway
        quectel_nodes: list of indices of quectel UE nodes to use
        phones: list of indices of phones to use
        nodes: a list of node ids to run the scenario on; strings or ints
                  are OK;
        node_master: the master node id, must be part of selected nodes
        node_enb: the node id for the enb, which is connected to usrp/duplexer
        disaggregated_cn: Boolean; True for the disaggregated CN scenario. False for all-in-one CN.
        operator_version: str, either "none" or "v1" or "v2".

    Returns:
        True when the scheduler orchestration succeeds (or in dry_run mode),
        False otherwise.
    """

    # "none" means we stop right after the kube5g operator pod is up
    only_kube5g = operator_version == "none"

    if node_master not in nodes:
        print(f"master node {node_master} must be part of selected fit nodes {nodes}")
        exit(1)
    if node_enb not in nodes:
        print(f"eNB worker node {node_enb} must be part of selected fit nodes {nodes}")
        exit(1)

    # Check if the browser can be automatically run to display the Drone app
    # initialize both variables unconditionally so that no later code can
    # hit a NameError when drone is set on an unsupported platform
    run_browser = False
    cmd_open = None
    if drone:
        run_browser = True
        if platform == "linux":
            cmd_open = "xdg-open"
        elif platform == "darwin":
            cmd_open = "open"
        else:
            run_browser = False
        if run_browser:
            print(f"**** Will run the browser with command {cmd_open}")
        else:
            print(f"**** Will not be able to run the browser as platform is {platform}")

    worker_ids = nodes[:]
    worker_ids.remove(node_master)

    quectel_ids = quectel_nodes[:]
    quectel = len(quectel_ids) > 0

    faraday = SshNode(hostname=default_gateway, username=slicename,
                      verbose=verbose,
                      formatter=TimeColonFormatter())

    master = SshNode(gateway=faraday, hostname=fitname(node_master),
                     username="******",
                     verbose=verbose,
                     formatter=TimeColonFormatter())

    node_index = {
        id: SshNode(gateway=faraday, hostname=fitname(id),
                    username="******", formatter=TimeColonFormatter(),
                    verbose=verbose)
        for id in nodes
    }

    nodes_quectel_index = {
        id: SshNode(gateway=faraday, hostname=fitname(id),
                    username="******", formatter=TimeColonFormatter(),
                    verbose=verbose)
        for id in quectel_nodes
    }

    worker_index = dict(node_index)
    del worker_index[node_master]
    fit_master = fitname(node_master)
    fit_enb = fitname(node_enb)

    # the global scheduler
    scheduler = Scheduler(verbose=verbose)


    ##########
    check_lease = SshJob(
        scheduler=scheduler,
        node=faraday,
        critical=True,
        verbose=verbose,
        command=Run("rhubarbe leases --check"),
    )

    green_light = check_lease

    if load_images:
        green_light = [
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=True,
                verbose=verbose,
                label=f"Load image {master_image} on master {fit_master}",
                commands=[
                    Run(f"rhubarbe load {node_master} -i {master_image}"),
                    Run(f"rhubarbe wait {node_master}"),
                ],
            ),
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=True,
                verbose=verbose,
                label=f"Load image {worker_image} on worker nodes",
                commands=[
                    # if usrp is on, load could be problematic...
                    Run(f"rhubarbe usrpoff {node_enb}"),
                    Run("rhubarbe", "load", *worker_ids, "-i", worker_image),
                    Run("rhubarbe", "wait", *worker_ids),
                    # ensure a reset of the USRP on the enB node
                    Run(f"rhubarbe usrpon {node_enb}"),
                ],
            ),
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=False,
                verbose=verbose,
                label="turning off unused nodes",
                commands=[
                    Run("rhubarbe bye --all "
                        + "".join(f"~{x} " for x in nodes))
                ],
            ),
        ]

    # quectel nodes are re-imaged only when load_images is set; in every
    # other case preparation boils down to the green light, so that
    # prepare_quectel is always bound (the original code raised a
    # NameError when quectel nodes were selected without load_images)
    prepare_quectel = green_light
    if load_images and quectel:
        # careful NOT to end this statement with a comma - the original
        # trailing comma silently turned prepare_quectel into a 1-tuple
        prepare_quectel = SshJob(
            scheduler=scheduler,
            required=green_light,
            node=faraday,
            critical=True,
            verbose=verbose,
            label=f"Load image {quectel_image} on quectel UE nodes",
            commands=[
                Run("rhubarbe", "usrpoff", *quectel_ids),
                Run("rhubarbe", "load", *quectel_ids, "-i", quectel_image),
                Run("rhubarbe", "wait", *quectel_ids),
                Run("rhubarbe", "usrpon", *quectel_ids),
            ],
        )

    ##########
    if quectel:
        # wait 30s for Quectel modules show up
        wait_quectel_ready = PrintJob(
            "Let Quectel modules show up",
            scheduler=scheduler,
            required=prepare_quectel,
            sleep=30,
            label="sleep 30s for the Quectel modules to show up"
        )
        # run the Quectel Connection Manager as a service on each Quectel UE node
        quectelCM_service = Service(
            command="quectel-CM -s oai.ipv4 -4",
            service_id="QuectelCM",
            verbose=verbose,
        )

        init_quectel_nodes = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectel_ready,
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Init Quectel UE on fit node {id}",
                commands=[
                    RunScript(find_local_embedded_script("nodes.sh"), "check-quectel-on", includes=INCLUDES),
                    quectelCM_service.start_command(),
                ],
            ) for id, node in nodes_quectel_index.items()
        ]
        # wait 20s for Quectel Connection Manager to start up
        wait_quectelCM_ready = PrintJob(
            "Let QuectelCM start up",
            scheduler=scheduler,
            required=init_quectel_nodes,
            sleep=20,
            label="Sleep 20s for the Quectel Connection Manager(s) to start up"
        )
        detach_quectel_nodes = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectelCM_ready,
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Detach Quectel UE on fit node {id}",
                command=RunScript(find_local_embedded_script("nodes.sh"), "quectel-detach", includes=INCLUDES),
            ) for id, node in nodes_quectel_index.items()
        ]

    ##########
    # Initialize k8s on the master node
    init_master = SshJob(
        scheduler=scheduler,
        required=green_light,
        node=master,
        critical=True,
        verbose=verbose,
        label=f"Install and launch k8s on the master {node_master}",
        commands=[
            Run("swapoff -a"),
            Run("hostnamectl set-hostname master-node"),
            Run("kubeadm init --pod-network-cidr=10.244.0.0/16 > /tmp/join_msg.txt"),
            Run("tail -2 /tmp/join_msg.txt > /tmp/join_msg"),
            Run("mkdir -p $HOME/.kube"),
            Run("cp -i /etc/kubernetes/admin.conf $HOME/.kube/config"),
            Run("chown $(id -u):$(id -g) $HOME/.kube/config"),
            Run("kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml"),
            Run("kubectl get pods --all-namespaces"),
        ],
    )
    init_workers = [
        SshJob(
            scheduler=scheduler,
            required=init_master,
            node=node,
            critical=True,
            verbose=verbose,
            label=f"Init k8s on fit node {id} and join the cluster",
            commands=[
                Run("swapoff -a"),
                Run("increase-control-mtu"),
                Run(f"scp -o 'StrictHostKeyChecking no' {fit_master}:/tmp/join_msg /tmp/join_msg"),
                Run("chmod a+x /tmp/join_msg"),
                Run("/tmp/join_msg"),
            ],
        ) for id, node in worker_index.items()
    ]

    # wait 10s for K8S nodes setup
    wait_k8nodes_ready = PrintJob(
        "Let k8s set up",
        scheduler=scheduler,
        required=init_workers,
        sleep=10,
        label="sleep 10s for the k8s nodes to settle"
    )


    init_kube5g = SshJob(
        scheduler=scheduler,
        required=wait_k8nodes_ready,
        node=master,
        verbose=verbose,
        label=f"Add oai:ran label to oai-ran pod on {node_enb} and start 5GOperator pod",
        commands=[
            Run("kubectl get nodes"),
            # add label to the eNB node to help k8s scheduler selects the right fit node
            Run(f"kubectl label nodes fit{node_enb} oai=ran"),
            Run("kubectl get nodes -Loai"),
            ## retrieve the kube5g operator
            #Run("git clone -b develop [email protected]:mosaic5g/kube5g.git"),
            # install a few dependencies
            Run("apt install -y python3-pip"),
            Run("pip3 install --upgrade pip"),
            Run("pip3 install ruamel.yaml==0.16.12 colorlog==4.6.2"),
            Run("sed -i 's/oairan:v1-1.0-1/oairan:v1-1.0-3/g' /root/kube5g/common/config-manager/conf_global_default.yaml"),
            # specify the R2lab specific configuration
            Run("cd /root/kube5g/common/config-manager; ./conf-manager.py -s conf_short_r2lab.yaml"),
            # apply the R2lab CRD
            Run("cd /root/kube5g/openshift/kube5g-operator; ./k5goperator.sh -n"),
            # start the kube5g operator pod
            Run("cd /root/kube5g/openshift/kube5g-operator; ./k5goperator.sh container start"),
            Run("kubectl get pods"),
        ],
    )

    # wait 30s for K8S 5G Operator setup
    wait_k8_5GOp_ready = PrintJob(
        "Let 5G Operator set up",
        scheduler=scheduler,
        required=init_kube5g,
        sleep=30,
        label="wait 30s for the 5G Operator pod to settle"
    )

    if only_kube5g:
        finish = SshJob(
            scheduler=scheduler,
            required=wait_k8_5GOp_ready,
            node=master,
            verbose=verbose,
            label="showing nodes and pods before leaving",
            commands=[
                Run("kubectl get nodes -Loai"),
                Run("kubectl get pods"),
            ],
        )
    else:
        if disaggregated_cn:
            cn_type = "disaggregated-cn"
            setup_time = 200
        else:
            cn_type = "all-in-one"
            setup_time = 140
        flexran_opt = "flexran" if flexran else ""

        run_kube5g = SshJob(
            scheduler=scheduler,
            required=wait_k8_5GOp_ready,
            node=master,
            verbose=verbose,
            label=f"deploy {operator_version} {cn_type} {flexran_opt} pods",
            commands=[
                Run("kubectl get nodes -Loai"),
                Run(f"cd /root/kube5g/openshift/kube5g-operator; ./k5goperator.sh deploy {operator_version} {cn_type} {flexran_opt}"),
                Run("kubectl get pods"),
            ],
        )

        # Coffee Break -- wait 1 or 2mn for K8S 5G pods setup
        wait_k8_5Gpods_ready = PrintJob(
            "Let all 5G pods set up",
            scheduler=scheduler,
            required=run_kube5g,
            sleep=setup_time,
            label=f"waiting {setup_time}s for all 5G pods to settle"
        )

        check_kube5g = SshJob(
            scheduler=scheduler,
            required=wait_k8_5Gpods_ready,
            node=master,
            verbose=verbose,
            label="Check which pods are deployed",
            commands=[
                Run("kubectl get nodes -Loai"),
                Run("kubectl get pods"),
            ],
        )

        if drone:
            # the place where runtime variables get stored
            env = Variables()
            #
            # Define and run all the services to launch the Drone app locally on a firefox browser
            #
            drone_service = Service(
                command=f"python /root/mosaic5g/store/sdk/frontend/drone/drone.py --port=8088 --tasks --address=192.168.3.{node_enb}",
                service_id="drone_app",
                verbose=verbose,
            )
            k8s_port9999_fwd_service = Service(
                command=Deferred("kubectl port-forward {{flexran_pod}} 9999:9999 --address 0.0.0.0", env),
                service_id="k8s-port9999-fwd",
                verbose=verbose,
                # somehow this is required for kubectl to run properly
                environ={'KUBECONFIG': '/etc/kubernetes/admin.conf'}
            )
            # can't use a Service instance on the local box if it's not a Linux
            # and we have macs...
            local_port_fwd = (f"ssh -f -N -4"
                              f" -L9999:192.168.3.{node_master}:9999"
                              f" -L8088:192.168.3.{node_enb}:8088"
                              f" -o ExitOnForwardFailure=yes"
                              f" {slicename}@faraday.inria.fr")

            run_drone = SshJob(
                scheduler=scheduler,
                required=check_kube5g,
                node=worker_index[node_enb],
                verbose=verbose,
                label=f"Run the drone app on worker node {node_enb} as a service",
                commands=[
                    drone_service.start_command(),
                ],
            )
            get_flexran_podname = SshJob(
                scheduler=scheduler,
                required=check_kube5g,
                node=master,
                verbose=verbose,
                label="Retrieve the name of the FlexRAN pod",
                commands=[
                    # xxx here
                    Run("kubectl get --no-headers=true pods -l app=flexran -o custom-columns=:metadata.name",
                        capture=Capture('flexran_pod', env)),
                ],
            )
            run_k8s_port9999_fwd = SshJob(
                scheduler=scheduler,
                required=get_flexran_podname,
                node=master,
                verbose=verbose,
                label="Run port forwarding on the master node as a service",
                commands=[
                    k8s_port9999_fwd_service.start_command(),
                ],
            )
            # On the local machine, impossible to use Services as the latter uses systemd-run, only available on Linux
            run_local_ports_fwd = SshJob(
                scheduler=scheduler,
                required=check_kube5g,
                node=LocalNode(),
                verbose=verbose,
                label="Forward local ports 8088 and 9999",
                command=Run(local_port_fwd + "&", ignore_outputs=True),
            )
            if run_browser:
                # only build the browser service when we actually have a
                # usable cmd_open (the original code built it unconditionally
                # and crashed on unsupported platforms)
                browser_service = Service(
                    command=f"sleep 10; {cmd_open} http://127.0.0.1:8088/",
                    service_id="drone_browser",
                    verbose=verbose,
                )
                run_local_browser = SshJob(
                    scheduler=scheduler,
                    required=(run_drone, run_k8s_port9999_fwd, run_local_ports_fwd),
                    node=LocalNode(),
                    verbose=verbose,
                    label="Run the browser on the local node in background",
                    command=browser_service.command + "&",
                )
                phones_requirements = run_local_browser
            else:
                phones_requirements = run_k8s_port9999_fwd
        else:
            phones_requirements = check_kube5g


        ########## Test phone(s) connectivity

        sleeps_ran = (20, 25)
        phone_msgs = [f"wait for {sleep}s for eNB to start up before waking up phone{id}"
                      for sleep, id in zip(sleeps_ran, phones)]
        wait_commands = [f"echo {msg}; sleep {sleep}"
                         for msg, sleep in zip(phone_msgs, sleeps_ran)]
        sleeps_phone = (15, 20)
        phone2_msgs = [f"wait for {sleep}s for phone{id} before starting tests"
                       for sleep, id in zip(sleeps_phone, phones)]
        wait2_commands = [f"echo {msg}; sleep {sleep}"
                          for msg, sleep in zip(phone2_msgs, sleeps_phone)]

        job_start_phones = [
            SshJob(
                node=faraday,
                commands=[
                    Run(wait_command),
                    RunScript(find_local_embedded_script("faraday.sh"), f"macphone{id}",
                              "r2lab-embedded/shell/macphone.sh", "phone-on",
                              includes=INCLUDES),
                    Run(wait2_command),
                    RunScript(find_local_embedded_script("faraday.sh"), f"macphone{id}",
                              "r2lab-embedded/shell/macphone.sh", "phone-check-cx",
                              includes=INCLUDES),
                    RunScript(find_local_embedded_script("faraday.sh"), f"macphone{id}",
                              "r2lab-embedded/shell/macphone.sh", "phone-start-app",
                              includes=INCLUDES),
                ],
                label=f"turn off airplane mode on phone {id}",
                required=phones_requirements,
                scheduler=scheduler)
            for id, wait_command, wait2_command in zip(phones, wait_commands, wait2_commands)
        ]

        if quectel:
            # wait 30s more before attaching the Quectel device(s)
            wait_before_attach_quectel = PrintJob(
                "Wait again 30s before attaching Quectel device(s)",
                scheduler=scheduler,
                required=(job_start_phones, check_kube5g, detach_quectel_nodes),
                sleep=30,
                label="Sleep 30s before attaching Quectel device(s)"
            )
            job_attach_quectel = [
                SshJob(
                    scheduler=scheduler,
                    required=wait_before_attach_quectel,
                    node=node,
                    critical=True,
                    verbose=verbose,
                    label=f"Attach Quectel UE on fit node {id}",
                    command=RunScript(find_local_embedded_script("nodes.sh"),
                                      "quectel-attach", includes=INCLUDES),
                ) for id, node in nodes_quectel_index.items()
            ]
            # wait 30s for Quectel connection(s) to set up
            wait_quectel_cx_ready = PrintJob(
                "Let the Quectel connection(s) set up",
                scheduler=scheduler,
                required=job_attach_quectel,
                sleep=30,
                label="Sleep 30s for the Quectel connection(s) to set up"
            )
            test_quectel_cx = [
                SshJob(
                    scheduler=scheduler,
                    required=wait_quectel_cx_ready,
                    node=node,
                    critical=False,
                    verbose=verbose,
                    label=f"Check the Quectel cx on fit node {id}",
                    command=RunScript(find_local_embedded_script("nodes.sh"),
                                      "check-quectel-cx", includes=INCLUDES),
                ) for id, node in nodes_quectel_index.items()
            ]

    ##########
    # Update the .dot and .png file for illustration purposes
    scheduler.check_cycles()
    name = "deploy-kube5g"
    print(10*'*', 'See main scheduler in',
          scheduler.export_as_pngfile(name))

    # orchestration scheduler jobs
    if verbose:
        scheduler.list()

    if dry_run:
        return True

    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print(f"RUN OK, you can log now on master node {fit_master} to manually change the scenario")
    print(80*'*')
    # the original fell off the end here and returned None on success
    return True
示例#12
0
def one_run(*, protocol, interference,
            run_name=default_run_name, slicename=default_slicename,
            tx_power, phy_rate, antenna_mask, channel,
            load_images=False,
            node_ids=DEFAULT_NODE_IDS,
            src_ids=DEFAULT_SRC_IDS, dest_ids=DEFAULT_DEST_IDS,
            scrambler_id=DEFAULT_SCRAMBLER_ID,
            tshark=False, map=False, warmup=False,
            route_sampling=False, iperf=False,
            verbose_ssh=False, verbose_jobs=False, dry_run=False,
            run_number=None):
    """
    Performs data acquisition on all nodes with the following settings

    Arguments:
        tx_power: in dBm, a string like 5, 10 or 14.
          Corresponds to the transmission power.
        phy_rate: a string among 1, 54. Correspond to the wifi rate.
        antenna_mask: a string among 1, 3, 7.
        channel: a string like e.g. 1 or 40. Correspond to the channel.
        protocol: a string among batman , olsr. Correspond to the protocol
        interference : in amplitude percentage, a string like 15 or 20.
          Correspond to the power of the noise generated in the spectrum.
          Can be either None or "None" to mean no interference.
        run_name: the name for a subdirectory where all data will be kept
          successive runs should use the same name for further visualization
        slicename: the Unix login name (slice name) to enter the gateway
        load_images: a boolean specifying whether nodes should be re-imaged first
        node_ids: a list of node ids to run the scenario against;
          strings or ints are OK;
        tshark: a boolean specifying wether we should format/parse the .pcap.
        map: a boolean specifying wether we should fetch/parse
          the route tables of the nodes.
        warmup: a boolean specifying whether we should run a ping before
          the experiment to be certain of the stabilisation on the network.
        src_ids: a list of nodes from which we will launch the ping from.
          strings or ints are OK.
        dest_ids: a list of nodes to be used as ping/iperf destinations.
          strings or ints are OK.
        scrambler_id: the node that runs uhd_siggen to generate interference.
        route_sampling: a boolean; when set, periodically snapshot route tables.
        iperf: a boolean; when set, run an iperf server/client campaign too.
        run_number: an optional int, only used to tag console output.
        ping_messages : the number of ping packets that will be generated

    Returns:
        True on success, False otherwise.
    """
    # set default for the nodes parameter
    node_ids = ([int(id) for id in node_ids]
                if node_ids is not None else DEFAULT_NODE_IDS)
    src_ids = ([int(id) for id in src_ids]
               if src_ids is not None else DEFAULT_SRC_IDS)
    # fixed: the fallback here used to be DEFAULT_NODE_IDS (copy-paste bug)
    dest_ids = ([int(id) for id in dest_ids]
                if dest_ids is not None else DEFAULT_DEST_IDS)

    # all nodes - i.e. including sources and destinations -
    # need to run the protocol
    node_ids = list(set(node_ids).union(set(src_ids).union(set(dest_ids))))

    if interference == "None":
        interference = None

    # open result dir no matter what
    run_root = naming_scheme(
        run_name=run_name, protocol=protocol,
        interference=interference, autocreate=True)

    # the trace file is named after the current timestamp
    ref_time = apssh_time()
    trace = run_root / f"trace-{ref_time}"

    try:
        with trace.open('w') as feed:
            def log_line(line):
                time_line(line, file=feed)
            load_msg = f"{'WITH' if load_images else 'NO'} image loading"
            interference_msg = (f"interference={interference} "
                                f"from scrambler={scrambler_id}")
            nodes = " ".join(str(n) for n in node_ids)
            srcs = " ".join(str(n) for n in src_ids)
            dests = " ".join(str(n) for n in dest_ids)
            ping_labels = [
                f"PING {s} ➡︎ {d}"
                for s in src_ids
                # and on the destination
                for d in dest_ids
                if d != s
            ]

            log_line(f"output in {run_root}")
            log_line(f"trace in {trace}")
            log_line(f"protocol={protocol}")
            log_line(f"{load_msg}")
            log_line(f"{interference_msg}")
            log_line("----")
            log_line(f"Selected nodes : {nodes}")
            log_line(f"Sources : {srcs}")
            log_line(f"Destinations : {dests}")
            for label in ping_labels:
                log_line(f"{label}")
            log_line("----")
            # these names are all parameters of this function,
            # hence reachable through locals()
            for feature in ('warmup', 'tshark', 'map',
                            'route_sampling', 'iperf'):
                log_line(f"Feature {feature}: {locals()[feature]}")

    except Exception as exc:
        print(f"Cannot write into {trace} - aborting this run")
        print(f"Found exception {type(exc)} - {exc}")
        return False
    #
    # dry-run mode
    # just display a one-liner with parameters
    #
    prelude = "" if not dry_run else "dry_run:"
    with trace.open() as feed:
        print(f"**************** {ref_time} one_run #{run_number}:")
        for line in feed:
            print(prelude, line, sep='', end='')
    if dry_run:
        return True

    # the nodes involved
    faraday = SshNode(hostname=default_gateway, username=slicename,
                      formatter=TimeColonFormatter(), verbose=verbose_ssh)

    # this is a python dictionary that allows to retrieve a node object
    # from an id
    node_index = {
        id: SshNode(gateway=faraday, hostname=fitname(id), username="******",
                    formatter=TimeColonFormatter(), verbose=verbose_ssh)
        for id in node_ids
    }
    # extracts for sources and destinations
    src_index = {id: node for (id, node) in node_index.items()
                 if id in src_ids}
    dest_index = {id: node for (id, node) in node_index.items()
                  if id in dest_ids}

    if interference:
        node_scrambler = SshNode(
            gateway=faraday, hostname=fitname(scrambler_id), username="******",
            formatter=TimeColonFormatter(), verbose=verbose_ssh)
    # the global scheduler
    scheduler = Scheduler(verbose=verbose_jobs)

    ##########
    check_lease = SshJob(
        scheduler=scheduler,
        node=faraday,
        verbose=verbose_jobs,
        label="rhubarbe check lease",
        command=Run("rhubarbe leases --check", label="rlease"),
    )

    # load images if requested

    green_light = check_lease

    # at some point we did not load the scrambler if interference was None
    # and that was a way to run faster loads with no interference
    # but now we always load the scrambler node with gnuradio
    # this is because when we do runs.py -i None 15 30 ...
    # then the first call to one_run is with interference being None
    # but it is still important to load the scrambler
    if load_images:
        # copy node_ids
        load_ids = node_ids[:]
        load_ids.append(scrambler_id)
        # the nodes that we **do not** use should be turned off
        # so if we have selected e.g. nodes 10 12 and 15, we will do
        # rhubarbe off -a ~10 ~12 ~15, meaning all nodes except 10, 12 and 15
        negated_node_ids = [f"~{id}" for id in load_ids]

        # we can do these three things in parallel
        ready_jobs = [
            SshJob(node=faraday, required=green_light,
                   scheduler=scheduler, verbose=verbose_jobs,
                   command=Run("rhubarbe", "off", "-a", *negated_node_ids,
                               label="turn off unused nodes")),
            SshJob(node=faraday, required=green_light,
                   scheduler=scheduler, verbose=verbose_jobs,
                   label="load batman image",
                   command=Run("rhubarbe", "load", "-i",
                               "batman-olsr",
                               *node_ids,
                               label=f"load ubuntu on {node_ids}")),
            SshJob(
                node=faraday, required=green_light,
                scheduler=scheduler, verbose=verbose_jobs,
                label="load gnuradio image",
                command=Run("rhubarbe", "load", "-i",
                            "batman-olsr-gnuradio",
                            scrambler_id,
                            label=f"load gnuradio on {scrambler_id}")),
        ]

        # replace green_light in this case
        green_light = SshJob(
            node=faraday, required=ready_jobs,
            scheduler=scheduler, verbose=verbose_jobs,
            label="wait for nodes to come up",
            command=Run("rhubarbe", "wait", *load_ids))

    ##########
    # setting up the wireless interface on all nodes
    #
    # provide node-utilities with the ranges/units it expects
    frequency = channel_frequency[int(channel)]
    # tx_power_in_mBm not in dBm
    tx_power_driver = tx_power * 100

    # just in case some services failed in the previous experiment
    reset_failed_services_job = [
        SshJob(
            node=node,
            verbose=verbose_jobs,
            label="reset failed services",
            command=Run("systemctl reset-failed",
                        label="reset-failed services"))
        for id, node in node_index.items()
    ]
    reset_failed_services = Scheduler(
        *reset_failed_services_job,
        scheduler=scheduler,
        required=green_light,
        verbose=verbose_jobs,
        label="Reset failed services")
    init_wireless_sshjobs = [
        SshJob(
            node=node,
            verbose=verbose_jobs,
            label=f"init {id}",
            command=RunScript(
                "node-utilities.sh",
                f"init-ad-hoc-network-{WIRELESS_DRIVER}",
                WIRELESS_DRIVER, "foobar", frequency, phy_rate,
                antenna_mask, tx_power_driver,
                label="init add-hoc network"),
        )
        for id, node in node_index.items()]
    init_wireless_jobs = Scheduler(
        *init_wireless_sshjobs,
        scheduler=scheduler,
        required=green_light,
        verbose=verbose_jobs,
        label="Initialisation of wireless chips")

    if interference:
        # Run uhd_siggen with the chosen power
        init_scrambler_job = SshJob(
            scheduler=scheduler,
            required=green_light,
            forever=True,
            node=node_scrambler,
            verbose=verbose_jobs,
            #TODO : If exit-signal patch is done add exit-signal=["TERM"]
            #       to this run object and call uhd_siggen directly
            commands=[RunScript("node-utilities.sh",
                                "init-scrambler",
                                label="init scrambler"),
                      Run("systemd-run --unit=uhd_siggen -t ",
                          f"uhd_siggen -a usrp -f {frequency}M",
                          f"--sine --amplitude 0.{interference}",
                          label="systemctl start uhd_siggen")
                      ]
        )

    green_light = [init_wireless_jobs, reset_failed_services]
    # then install and run batman on fit nodes
    run_protocol_job = [
        SshJob(
            # scheduler=scheduler,
            node=node,
            label=f"init and run {protocol} on fit node {id}",
            verbose=verbose_jobs,
            # CAREFUL : These ones use sytemd-run
            #            with the ----service-type=forking option!
            command=RunScript("node-utilities.sh",
                              f"run-{protocol}",
                              label=f"run {protocol}"),
        )
        for id, node in node_index.items()]

    run_protocol = Scheduler(
        *run_protocol_job,
        scheduler=scheduler,
        required=green_light,
        verbose=verbose_jobs,
        label="init and run routing protocols")

    green_light = run_protocol

    # after that, run tcpdump on fit nodes, this job never ends...
    if tshark:

        run_tcpdump_job = [
            SshJob(
                # scheduler=scheduler_monitoring,
                node=node,
                forever=True,
                label=f"run tcpdump on fit node {id}",
                verbose=verbose_jobs,
                command=[
                    Run("systemd-run -t  --unit=tcpdump",
                        f"tcpdump -U -i moni-{WIRELESS_DRIVER}",
                        f"-y ieee802_11_radio -w /tmp/fit{id}.pcap",
                        label=f"tcpdump {id}")
                    ]
            )
            for id, node in node_index.items()
        ]

        run_tcpdump = Scheduler(
            *run_tcpdump_job,
            scheduler=scheduler,
            required=green_light,
            forever=True,
            verbose=verbose_jobs,
            label="Monitoring - tcpdumps")

    # let the wireless network settle
    settle_scheduler = Scheduler(
        scheduler=scheduler,
        required=green_light,
    )

    if warmup:
        # warmup pings don't need to be sequential, so let's
        # do all the nodes at the same time
        # on a given node though, we'll ping the other ends sequentially
        # see the graph for more
        warmup_jobs = [
            SshJob(
                node=node_s,
                verbose=verbose_jobs,
                commands=[
                    RunScript("node-utilities.sh",
                              "my-ping", f"10.0.0.{d}",
                              warmup_ping_timeout,
                              warmup_ping_interval,
                              warmup_ping_size,
                              warmup_ping_messages,
                              f"warmup {s} ➡︎ {d}",
                              label=f"warmup {s} ➡︎ {d}")
                    for d in dest_index.keys()
                    if s != d
                ]
            )
            # for each selected experiment nodes
            for s, node_s in src_index.items()
        ]
        warmup_scheduler = Scheduler(
            *warmup_jobs,
            scheduler=settle_scheduler,
            verbose=verbose_jobs,
            label="Warmup pings")
        settle_wireless_job2 = PrintJob(
            "Let the wireless network settle after warmup",
            sleep=settle_delay_shorter,
            scheduler=settle_scheduler,
            required=warmup_scheduler,
            label=f"settling-warmup for {settle_delay_shorter} sec")

    # this is a little cheating; could have gone before the bloc above
    # but produces a nicer graphical output
    # we might want to help asynciojobs if it offered a means
    # to specify entry and exit jobs in a scheduler
    settle_wireless_job = PrintJob(
        "Let the wireless network settle",
        sleep=settle_delay_long,
        scheduler=settle_scheduler,
        label=f"settling for {settle_delay_long} sec")

    green_light = settle_scheduler

    if iperf:
        iperf_service_jobs = [
            SshJob(
                node=node_d,
                verbose=verbose_jobs,
                forever=True,
                commands=[
                    Run("systemd-run -t --unit=iperf",
                        "iperf -s -p 1234 -u",
                        label=f"iperf serv on {d}"),
                ],
            )
            for d, node_d in dest_index.items()
        ]
        iperf_serv_sched = Scheduler(
            *iperf_service_jobs,
            verbose=verbose_jobs,
            label="Iperf Servers",
            # for a nicer graphical output
            # otherwise the exit arrow
            # from scheduler 'iperf mode'
            # to job 'settling for 60s'
            # gets to start from this box
            forever=True,
            )

        iperf_cli = [
            SshJob(
                node=node_s,
                verbose=verbose_jobs,
                commands=[
                    Run("sleep 7", label=""),
                    Run("iperf",
                        f"-c 10.0.0.{d} -p 1234",
                        f"-u -b {phy_rate}M -t 60",
                        f"-l 1024 > IPERF-{s:02d}-{d:02d}",
                        label=f"run iperf {s} ➡︎ {d}")
                ]
            )

            for s, node_s in src_index.items()
            for d, node_d in dest_index.items()
            if s != d
        ]
        iperf_cli_sched = Scheduler(
            Sequence(*iperf_cli),
            verbose=verbose_jobs,
            label="Iperf Clients")

        iperf_stop = [
            SshJob(node=node_d,
                   verbose=verbose_jobs,
                   label=f"Stop iperf on {d}",
                   command=Run("systemctl stop iperf"))
            for d, node_d in dest_index.items()
        ]
        iperf_stop_sched = Scheduler(
            *iperf_stop,
            required=iperf_cli_sched,
            verbose=verbose_jobs,
            label="Iperf server stop")
        iperf_fetch = [
            SshJob(node=node_s,
                   verbose=verbose_jobs,
                   command=Pull(
                       remotepaths=[f"IPERF-{s:02d}-{d:02d}"],
                       localpath=str(run_root),
                       label="fetch iperf {s} ➡︎ {d}")
                   )
            for s, node_s in src_index.items()
            for d, node_d in dest_index.items()
            if s != d
        ]
        iperf_fetch_sched = Scheduler(
            *iperf_fetch,
            required=iperf_stop_sched,
            verbose=verbose_jobs,
            label="Iperf fetch report")
        iperf_jobs = [iperf_serv_sched, iperf_cli_sched,
                      iperf_stop_sched, iperf_fetch_sched]
        iperf_sched = Scheduler(
            *iperf_jobs,
            scheduler=scheduler,
            required=green_light,
            verbose=verbose_jobs,
            label="Iperf Module")
        settle_wireless_job_iperf = PrintJob(
            "Let the wireless network settle",
            sleep=settle_delay_shorter,
            scheduler=scheduler,
            required=iperf_sched,
            label=f"settling-iperf for {settle_delay_shorter} sec")

        green_light = settle_wireless_job_iperf


    # create all the tracepath jobs from the first node in the list
    if map:
        map_jobs = [
            SshJob(
                node=node,
                label=f"Generating ROUTE file for proto {protocol} on node {id}",
                verbose=verbose_jobs,
                commands=[
                    RunScript("node-utilities.sh",
                              f"route-{protocol}",
                              f"> ROUTE-TABLE-{id:02d}",
                              label="get route table"),
                    Pull(remotepaths=[f"ROUTE-TABLE-{id:02d}"],
                         localpath=str(run_root),
                         label="")
                ],
            )
            for id, node in node_index.items()
        ]
        map_scheduler = Scheduler(
            *map_jobs,
            scheduler=scheduler,
            required=green_light,
            verbose=verbose_jobs,
            label="Snapshoting route files")
        green_light = map_scheduler

    if route_sampling:
        route_sampling_jobs = [
            SshJob(
                node=node,
                label=f"Route sampling service for proto {protocol} on node {id}",
                verbose=False,
                forever=True,
                commands=[
                    Push(localpaths=["route-sample-service.sh"],
                         remotepath=".", label=""),
                    Run("chmod +x route-sample-service.sh", label=""),
                    Run("systemd-run -t --unit=route-sample",
                        "/root/route-sample-service.sh",
                        "route-sample",
                        f"ROUTE-TABLE-{id:02d}-SAMPLED",
                        protocol,
                        label="start route-sampling"),
                ],
            )
            for id, node in node_index.items()
        ]
        route_sampling_scheduler = Scheduler(
            *route_sampling_jobs,
            scheduler=scheduler,
            verbose=False,
            forever=True,
            label="Route Sampling services launch",
            required=green_light)

    ##########
    # create all the ping jobs, i.e. max*(max-1)/2
    # this again is a python list comprehension
    # see the 2 for instructions at the bottom
    #
    # notice that these SshJob instances are not yet added
    # to the scheduler, we will add them later on
    # depending on the sequential/parallel strategy

    pings_job = [
        SshJob(
            node=node_s,
            verbose=verbose_jobs,
            commands=[
                Run(f"echo actual ping {s} ➡︎ {d} using {protocol}",
                    label=f"ping {s} ➡︎ {d}"),
                RunScript("node-utilities.sh", "my-ping",
                          f"10.0.0.{d}",
                          ping_timeout, ping_interval,
                          ping_size, ping_messages,
                          f"actual {s} ➡︎ {d}",
                          ">", f"PING-{s:02d}-{d:02d}",
                          label=""),
                Pull(remotepaths=[f"PING-{s:02d}-{d:02d}"],
                     localpath=str(run_root),
                     label=""),
            ],
        )
        # for each selected experiment nodes
        for s, node_s in src_index.items()
        for d, node_d in dest_index.items()
        if s != d
    ]
    pings = Scheduler(
        scheduler=scheduler,
        label="PINGS",
        verbose=verbose_jobs,
        required=green_light)

    # retrieve all pcap files from fit nodes
    stop_protocol_job = [
        SshJob(
            # scheduler=scheduler,
            node=node,
            # required=pings,
            label=f"kill routing protocol on {id}",
            verbose=verbose_jobs,
            command=RunScript("node-utilities.sh",
                              f"kill-{protocol}",
                              label=f"kill-{protocol}"),
        )
        for id, node in node_index.items()
    ]
    stop_protocol = Scheduler(
        *stop_protocol_job,
        scheduler=scheduler,
        required=pings,
        label="Stop routing protocols",
    )

    if tshark:
        retrieve_tcpdump_job = [
            SshJob(
                # scheduler=scheduler,
                node=nodei,
                # required=pings,
                label=f"retrieve pcap trace from fit{i:02d}",
                verbose=verbose_jobs,
                commands=[
                    Run("systemctl stop tcpdump",
                        label="stop tcpdump"),
                    #Run("systemctl reset-failed tcpdump"),
                    #RunScript("node-utilities.sh", "kill-tcpdump",
                    #          label="kill-tcpdump"),
                    Run(
                        f"echo retrieving pcap trace and result-{i}.txt from fit{i:02d}",
                        label=""),
                    Pull(remotepaths=[f"/tmp/fit{i}.pcap"],
                         localpath=str(run_root), label=""),
                ],
            )
            for i, nodei in node_index.items()
        ]
        retrieve_tcpdump = Scheduler(
            *retrieve_tcpdump_job,
            scheduler=scheduler,
            required=pings,
            label="Retrieve tcpdump",
        )
    if route_sampling:
        retrieve_sampling_job = [
            SshJob(
                # scheduler=scheduler,
                node=nodei,
                # required=pings,
                label=f"retrieve sampling trace from fit{i:02d}",
                verbose=verbose_jobs,
                commands=[
                    # RunScript("node-utilities.sh", "kill-route-sample", protocol,
                    #          label = "kill route sample"),
                    #RunScript("route-sample-service.sh", "kill-route-sample",
                    #          label="kill route sample"),
                    Run("systemctl stop route-sample",
                        label="stop route-sample"),
                    Run(
                        f"echo retrieving sampling trace from fit{i:02d}",
                        label=""),
                    Pull(remotepaths=[f"ROUTE-TABLE-{i:02d}-SAMPLED"],
                         localpath=str(run_root), label=""),
                ],
            )
            for i, nodei in node_index.items()
        ]
        retrieve_sampling = Scheduler(
            *retrieve_sampling_job,
            scheduler=scheduler,
            required=pings,
            verbose=verbose_jobs,
            label="Stop & retrieve route sampling",
            )
    if tshark:
        parse_pcaps_job = [
            SshJob(
                # scheduler=scheduler,
                node=LocalNode(),
                # required=retrieve_tcpdump,
                label=f"parse pcap trace {run_root}/fit{i}.pcap",
                verbose=verbose_jobs,
                #commands = [RunScript("parsepcap.sh", run_root, i)]
                # fixed: "-e" and "'ip.dst'" used to be adjacent literals
                # (missing comma), which concatenated them into the single
                # argument -e'ip.dst' and silently dropped that field
                command=Run("tshark", "-2", "-r",
                            f"{run_root}/fit{i}.pcap",
                            "-R",
                            f"'(ip.dst==10.0.0.{i} && icmp) && radiotap.dbm_antsignal'",
                            "-Tfields",
                            "-e", "'ip.src'",
                            "-e", "'ip.dst'",
                            "-e", "'radiotap.dbm_antsignal'",
                            ">", f"{run_root}/result-{i}.txt",
                            label=f"parsing pcap from {i}"),
            )
            for i in node_ids
        ]
        parse_pcaps = Scheduler(
            *parse_pcaps_job,
            scheduler=scheduler,
            required=retrieve_tcpdump,
            label="Parse pcap",
        )

    if interference:
        kill_uhd_siggen = SshJob(
            scheduler=scheduler,
            node=node_scrambler,
            required=pings,
            label=f"killing uhd_siggen on the scrambler node {scrambler_id}",
            verbose=verbose_jobs,
            commands=[Run("systemctl", "stop", "uhd_siggen"),
                      #Run("systemctl reset-failed tcpdump"),
                      ],
        )
        kill_2_uhd_siggen = SshJob(
            scheduler=scheduler,
            node=faraday,
            required=kill_uhd_siggen,
            label=f"turning off usrp on the scrambler node {scrambler_id}",
            verbose=verbose_jobs,
            command=Run("rhubarbe", "usrpoff", scrambler_id),
        )

    pings.add(Sequence(*pings_job))
    # for running sequentially we impose no limit on the scheduler
    # that will be limitied anyways by the very structure
    # of the required graph

    # safety check

    scheduler.export_as_pngfile(run_root / "experiment-graph")
    if dry_run:
        scheduler.list()
        return True

    # if not in dry-run mode, let's proceed to the actual experiment
    ok = scheduler.run()  # jobs_window=jobs_window)

    # close all ssh connections
    close_ssh_in_scheduler(scheduler)


    # give details if it failed
    if not ok:
        scheduler.debrief()
        scheduler.export_as_pngfile("debug")
    if ok and map:
        time_line("Creation of MAP files")
        post_processor = ProcessRoutes(run_root, src_ids, node_ids)
        post_processor.run()
    if ok and route_sampling:
        time_line("Creation of ROUTE SAMPLING files")
        post_processor = ProcessRoutes(run_root, src_ids, node_ids)
        post_processor.run_sampled()
    # data acquisition is done, let's aggregate results
    # i.e. compute averages
    #if ok and tshark:
        #post_processor = Aggregator(run_root, node_ids, antenna_mask)
        #post_processor.run()

    time_line("one_run done")
    return ok
示例#13
0
def run(
        *,
        # the pieces to use
        slice,
        hss,
        epc,
        enb,
        phones,
        e3372_ues,
        oai_ues,
        gnuradios,
        e3372_ue_xterms,
        oai_ue_xterms,
        gnuradio_xterms,
        # boolean flags
        load_nodes,
        skip_reset_usb,
        oscillo,
        # the images to load
        image_gw,
        image_enb,
        image_oai_ue,
        image_e3372_ue,
        image_gnuradio,
        # miscell
        n_rb,
        verbose,
        dry_run):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slice : s.t like [email protected]
    * hss : 04
    * epc : 03
    * enb : 23
    * phones: list of indices of phones to use

    * e3372_ues : list of nodes to use as a UE using e3372
    * oai_ues   : list of nodes to use as a UE using OAI
    * gnuradios : list of nodes to load with a gnuradio image

    * image_* : the name of the images to load on the various nodes

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_gw, image_enb and image_*
                  are used to tell the image names
    * skip_reset_usb : the USRP board will be reset as well unless this is set

    Returns True when the scheduler orchestrates successfully
    (or when dry_run short-circuits), False otherwise.
    """

    # what argparse knows as a slice actually is a gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slice)
    gwnode = SshNode(hostname=gwhost,
                     username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = hssname, epcname, enbname = [
        r2lab_hostname(x) for x in (hss, epc, enb)
    ]

    optional_ids = e3372_ues       + oai_ues       + gnuradios + \
                   e3372_ue_xterms + oai_ue_xterms + gnuradio_xterms

    hssnode, epcnode, enbnode = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=TimeColonFormatter(verbose=verbose),
                debug=verbose) for hostname in hostnames
    ]

    sched = Scheduler(verbose=verbose)

    ########## preparation
    job_check_for_lease = SshJob(
        node=gwnode,
        command=["rhubarbe", "leases", "--check"],
        label="check we have a current lease",
        scheduler=sched,
    )

    # turn off all nodes
    turn_off_command = ["rhubarbe", "off", "-a"]

    # except our 3 nodes and the optional ones
    turn_off_command += [
        "~{}".format(x) for x in [hss, epc, enb] + optional_ids
    ]

    # only do the turn-off thing if load_nodes
    if load_nodes:
        job_off_nodes = SshJob(
            node=gwnode,
            # switch off all nodes but the ones we use
            command=turn_off_command,
            label="turn off unused nodes",
            required=job_check_for_lease,
            scheduler=sched,
        )

    # actually run this in the gateway, not on the macphone
    # the ssh keys are stored in the gateway and we do not yet have
    # the tools to leverage such remote keys
    job_stop_phones = [
        SshJob(
            node=gwnode,
            command=RunScript(
                # script
                find_local_embedded_script("faraday.sh"),
                # arguments
                "macphone{}".format(id),
                "r2lab-embedded/shell/macphone.sh",
                "phone-off",
                # options
                includes=includes),
            label="put phone{} in airplane mode".format(id),
            required=job_check_for_lease,
            scheduler=sched,
        ) for id in phones
    ]

    ########## prepare the image-loading phase
    # this will be a dict of items imagename -> ids
    to_load = defaultdict(list)
    to_load[image_gw] += [hss, epc]
    to_load[image_enb] += [enb]
    if e3372_ues:
        to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        to_load[image_oai_ue] += oai_ues
    if oai_ue_xterms:
        to_load[image_oai_ue] += oai_ue_xterms
    if gnuradios:
        to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        to_load[image_gnuradio] += gnuradio_xterms

    # one load job per image; remember which job prepares which node
    # so that later jobs can depend on "their" node being ready
    prep_job_by_node = {}
    for image, nodes in to_load.items():
        commands = []
        if load_nodes:
            commands.append(Run("rhubarbe", "usrpoff", *nodes))
            commands.append(Run("rhubarbe", "load", "-i", image, *nodes))
            commands.append(Run("rhubarbe", "usrpon", *nodes))
        # always do this
        commands.append(Run("rhubarbe", "wait", "-t", 120, *nodes))
        job = SshJob(
            node=gwnode,
            commands=commands,
            label="Prepare node(s) {}".format(nodes),
            required=job_check_for_lease,
            scheduler=sched,
        )
        for node in nodes:
            prep_job_by_node[node] = job

    # start services
    job_service_hss = SshJob(
        node=hssnode,
        command=RunScript(find_local_embedded_script("oai-hss.sh"),
                          "run-hss",
                          epc,
                          includes=includes),
        label="start HSS service",
        required=prep_job_by_node[hss],
        scheduler=sched,
    )

    # give the HSS a headstart before EPC tries to reach it
    delay = 15
    job_service_epc = SshJob(
        node=epcnode,
        commands=[
            Run("echo giving HSS a headstart {delay}s to warm up; sleep {delay}"
                .format(delay=delay)),
            RunScript(find_local_embedded_script("oai-epc.sh"),
                      "run-epc",
                      hss,
                      includes=includes),
        ],
        label="start EPC services",
        required=prep_job_by_node[epc],
        scheduler=sched,
    )

    ########## enodeb

    job_warm_enb = SshJob(
        node=enbnode,
        commands=[
            RunScript(find_local_embedded_script("oai-enb.sh"),
                      "warm-enb",
                      epc,
                      n_rb,
                      not skip_reset_usb,
                      includes=includes),
        ],
        label="Warm eNB",
        required=prep_job_by_node[enb],
        scheduler=sched,
    )

    enb_requirements = (job_warm_enb, job_service_hss, job_service_epc)

    # wait for everything to be ready, and add an extra grace delay

    grace = 30 if load_nodes else 10
    grace_delay = SshJob(
        node=LocalNode(formatter=TimeColonFormatter()),
        command="echo Allowing grace of {grace} seconds; sleep {grace}"\
            .format(grace=grace),
        required=enb_requirements,
        scheduler=sched,
    )

    # start services

    job_service_enb = SshJob(
        node=enbnode,
        # run-enb expects the id of the epc as a parameter
        # n_rb means number of resource blocks for DL, set to either 25 or 50.
        commands=[
            RunScript(find_local_embedded_script("oai-enb.sh"),
                      "run-enb",
                      oscillo,
                      includes=includes,
                      x11=oscillo),
        ],
        label="start softmodem on eNB",
        required=grace_delay,
        scheduler=sched,
    )

    ########## run experiment per se
    # Manage phone(s)
    # this starts at the same time as the eNB, but some
    # headstart is needed so that eNB actually is ready to serve
    delay = 12
    msg = "wait for {delay}s for enodeb to start up"\
          .format(delay=delay)
    wait_command = "echo {msg}; sleep {delay}".format(msg=msg, delay=delay)

    job_start_phones = [
        SshJob(
            node=gwnode,
            commands=[
                Run(wait_command),
                RunScript(find_local_embedded_script("faraday.sh"),
                          "macphone{}".format(id),
                          "r2lab-embedded/shell/macphone.sh",
                          "phone-on",
                          includes=includes),
                RunScript(find_local_embedded_script("faraday.sh"),
                          "macphone{}".format(id),
                          "r2lab-embedded/shell/macphone.sh",
                          "phone-start-app",
                          includes=includes),
            ],
            label="start Nexus phone and speedtest app",
            required=grace_delay,
            scheduler=sched,
        ) for id in phones
    ]

    job_ping_phones_from_epc = [
        SshJob(
            node=epcnode,
            commands=[
                Run("sleep 10"),
                Run("ping -c 100 -s 100 -i .05 172.16.0.{ip} &> /root/ping-phone"
                    .format(ip=id + 1)),
            ],
            label="ping Nexus phone from EPC",
            critical=False,
            required=job_start_phones,
            # FIX: these jobs were created without being attached to any
            # scheduler (and nothing else referenced them), so they would
            # silently never run; attach them like every other job
            scheduler=sched,
        ) for id in phones
    ]

    ########## xterm nodes

    colors = ["wheat", "gray", "white", "darkolivegreen"]

    xterms = e3372_ue_xterms + oai_ue_xterms + gnuradio_xterms

    for xterm, color in zip(xterms, itertools.cycle(colors)):
        xterm_node = SshNode(gateway=gwnode,
                             hostname=r2lab_hostname(xterm),
                             username='******',
                             formatter=TimeColonFormatter(verbose=verbose),
                             debug=verbose)
        SshJob(
            node=xterm_node,
            command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*"
                        " -bg {} -geometry 90x10".format(color),
                        x11=True),
            label="xterm on node {}".format(xterm_node.hostname),
            required=prep_job_by_node[xterm],
            scheduler=sched,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
        )
    # remove dangling requirements - if any - should not be needed but won't hurt either
    sched.sanitize()

    print(20 * "*", "nodes usage summary")
    if load_nodes:
        for image, nodes in to_load.items():
            for node in nodes:
                print("node {node} : {image}".format(node=node, image=image))
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10 * "*", "phones usage summary")
    if phones:
        for phone in phones:
            print("Using phone{phone}".format(phone=phone))
    else:
        print("No phone involved")

    sched.rain_check()
    # Update the .dot and .png file for illustration purposes
    if verbose or dry_run:
        sched.list()
        name = "scenario-load" if load_nodes else \
               "scenario"
        sched.export_as_dotfile("{name}.dot".format(name=name))
        os.system("dot -Tpng {name}.dot -o {name}.png".format(name=name))
        print("(Over)wrote {name}.png".format(name=name))

    if dry_run:
        return False

    if verbose:
        input('OK ? - press control C to abort ? ')

    if not sched.orchestrate():
        print("RUN KO : {}".format(sched.why()))
        sched.debrief()
        return False
    else:
        print("RUN OK")
        return True
示例#14
0
def collect(run_name, slice, hss, epc, enb, verbose):
    """
    Gather every relevant log from the hss, epc and enb boxes
    under a common name; the signature mirrors run() on purpose.

    The outcome is 3 compressed tars named
    <run_name>-(hss|epc|enb).tar.gz

    xxx - todo - it would make sense to also unwrap them all
    in a single place locally, like what "logs.sh unwrap" does
    """

    gwuser, gwhost = r2lab_parse_slice(slice)
    gateway = SshNode(hostname=gwhost,
                      username=gwuser,
                      formatter=TimeColonFormatter(verbose=verbose),
                      debug=verbose)

    functions = "hss", "epc", "enb"
    hostnames = [r2lab_hostname(x) for x in (hss, epc, enb)]

    # one ssh proxy per box, all reached through the gateway
    nodes = []
    for hostname in hostnames:
        nodes.append(
            SshNode(gateway=gateway,
                    hostname=hostname,
                    username='******',
                    formatter=TimeColonFormatter(verbose=verbose),
                    debug=verbose))

    # phase 1: run a 'capture' function remotely on each box so that
    # all the relevant info ends up in a single tar named <run_name>.tgz
    capturers = []
    for node, function in zip(nodes, functions):
        # capture-enb will run oai-as-enb and thus requires oai-enb.sh
        capturers.append(
            SshJob(
                node=node,
                command=RunScript(find_local_embedded_script("oai-common.sh"),
                                  f"capture-{function}",
                                  run_name,
                                  includes=[
                                      find_local_embedded_script(
                                          f"oai-{function}.sh")
                                  ]),
                label=f"capturer on {function}",
            ))

    # phase 2: pull the tars back; each collector waits for ALL capturers
    collectors = []
    for node, function in zip(nodes, functions):
        collectors.append(
            SshJob(
                node=node,
                command=Pull(remotepaths=[f"{run_name}-{function}.tgz"],
                             localpath="."),
                label=f"collector on {function}",
                required=capturers,
            ))

    sched = Scheduler(verbose=verbose)
    sched.update(capturers)
    sched.update(collectors)

    if verbose:
        sched.list()

    if not sched.orchestrate():
        print("KO")
        sched.debrief()
        return
    print("OK")
    # refuse to clobber a previous unwrap
    if os.path.exists(run_name):
        print(f"local directory {run_name} already exists = NOT UNWRAPPED !")
        return
    os.mkdir(run_name)
    for ext in ('hss', 'epc', 'enb'):
        tar = f"{run_name}-{ext}.tgz"
        print(f"Untaring {tar} in {run_name}")
        os.system(f"tar -C {run_name} -xzf {tar}")
示例#15
0
    commands = [
        Run("ls", "-l", "PING*")
    ]
)

#
# dry-run mode
# show the scheduler using list(details=True)
# also generate a .dot file, and attempt to
# transform it into a .png - should work if graphviz is installed
# but don't run anything of course
#
# dry-run mode: show the scheduler, dump a .dot file and try to render it
# as a .png with graphviz; never run anything
if args.dry_run:
    print("==================== COMPLETE SCHEDULER")
    # -n + -v = max details
    scheduler.list(details=verbose_jobs)
    suffix = "par" if args.parallel else "seq"
    if args.load_images:
        suffix += "-load"
    filename = "multi-ping-{}-{}".format(suffix, args.max)
    # FIX: the format strings below had lost their {filename} placeholder,
    # so the messages - and worse, the dot command line - referenced a
    # literal bogus name instead of the computed one
    print("Creating dot file: {filename}.dot".format(filename=filename))
    scheduler.export_as_dotfile(filename + ".dot")
    # try to run dot
    command = "dot -Tpng -o {filename}.png {filename}.dot".format(filename=filename)
    print("Trying to run dot to create {filename}.png".format(filename=filename))
    retcod = os.system(command)
    if retcod == 0:
        print("{filename}.png OK".format(filename=filename))
    else:
        print("Could not create {filename}.png - do you have graphviz installed ?"
              .format(filename=filename))
示例#16
0
def collect(run_name, slicename, cn, ran, oai_ues, verbose, dry_run):
    """
    retrieves all relevant logs under a common name
    otherwise, same signature as run() for convenience

    retrieved stuff will be made of
    * one pcap file for the CN
    * compressed tgz files, one per node, gathering logs and configs and datas
    * for convenience the tgz files are unwrapped in run_name/id0
    """

    # the local dir to store incoming raw files. mostly tar files
    local_path = Path(f"{run_name}")
    if not local_path.exists():
        print(f"Creating directory {local_path}")
        local_path.mkdir()

    gwuser, gwhost = r2lab_parse_slice(slicename)
    gwnode = SshNode(hostname=gwhost, username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose),
                     debug=verbose)

    functions = ["cn", "ran"]
    hostnames = [r2lab_hostname(x) for x in (cn, ran)]
    node_cn, node_ran = nodes = [
        SshNode(gateway=gwnode, hostname=hostname, username='******',
                formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
        for hostname in hostnames
    ]
    if oai_ues:
        hostnames_ue = [r2lab_hostname(x) for x in oai_ues]
        nodes_ue = [
            SshNode(gateway=gwnode, hostname=hostname, username='******',
                    formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            for hostname in hostnames_ue]
    else:
        # BUG FIX: nodes_ue used to be left undefined when no OAI UE was
        # requested, which made the chain() calls below crash with a
        # NameError (and a None oai_ues would break chain() too);
        # normalize both to empty lists so the no-UE case works
        oai_ues, nodes_ue = [], []

    # all nodes involved are  managed in the same way
    # node: a SshNode instance
    # id: the fit number
    # function, a string like 'cn' or 'ran' or 'oai-ue'

    local_nodedirs_tars = []

    scheduler = Scheduler(verbose=verbose)
    for (node, id, function) in zip(
            chain(nodes, nodes_ue),
            chain([cn, ran], oai_ues),
            chain(functions, cycle(["oai-ue"]))):
        # nodes on 2 digits
        id0 = f"{id:02d}"
        # node-dep collect dir
        node_dir = local_path / id0
        node_dir.exists() or node_dir.mkdir()
        local_tar = f"{local_path}/{function}-{id0}.tgz"
        SshJob(
            node=node,
            commands=[
                # first run a 'capture-all' function remotely
                # to gather all the relevant files and commands remotely
                RunScript(
                    find_local_embedded_script(f"mosaic-{function}.sh"),
                    "capture-all", f"{run_name}-{function}",
                    includes=INCLUDES),
                # and retrieve it locally
                Pull(
                    remotepaths=f"{run_name}-{function}.tgz",
                    localpath=local_tar),
                ],
            scheduler=scheduler)
        local_nodedirs_tars.append((node_dir, local_tar))


    # retrieve tcpdump on CN
    SshJob(
        node=node_cn,
        commands=[
            tcpdump_cn_service.stop_command(),
            Pull(remotepaths=[tcpdump_cn_pcap],
                 localpath=local_path),
            ],
        scheduler=scheduler
        )

    print(10*'*', 'See collect scheduler in',
          scheduler.export_as_pngfile("cefore-collect"))

    if verbose:
        scheduler.list()

    if dry_run:
        return

    if not scheduler.run():
        print("KO")
        scheduler.debrief()
        return

    # unwrap each tar in its own node directory
    for node_dir, tar in local_nodedirs_tars:
        print(f"Untaring {tar} in {node_dir}/")
        os.system(f"tar -C {node_dir} -xzf {tar}")
示例#17
0
    def run(self, verbose, no_load, no_save):
        """
        Build an image: load self.from_image on self.node (through
        self.gateway), run self.scripts there, then save the result
        as self.to_image.

        The load phase is skipped when no_load is set, and the save
        phase when no_save is set (fast-track modes).

        Returns True on success, False otherwise.
        """

        # announce what we are about to do
        print("Using node {} through gateway {}".format(
            self.node, self.gateway))
        print("In order to produce {} from {}".format(self.to_image,
                                                      self.from_image))
        print("The following scripts will be run:")
        for i, script in enumerate(self.scripts, 1):
            print("{:03d}:{}".format(i, " ".join(script)))

        # warn about any fast-track option in effect
        items = []
        if no_load: items.append("skip load")
        if no_save: items.append("skip save")
        if items:
            print("WARNING: using fast-track mode {}".format(
                ' & '.join(items)))

        self.locate_companion_shell()
        if verbose:
            print("Located companion in {}".format(self.companion))

        # bundle the input shell scripts into one tar to push on the node
        if verbose:
            print("Preparing tar of input shell scripts .. ", end="")
        tarfile = self.prepare_tar(self.to_image)
        if verbose:
            print("Done in {}".format(tarfile))

        keys = load_agent_keys()
        if verbose:
            print("We have found {} keys in the ssh agent".format(len(keys)))

        #################### the 2 nodes we need to talk to
        gateway_proxy = None
        gwuser, gwname = self.user_host(self.gateway)
        # no user part in self.gateway means we are already on the gateway
        gateway_proxy = None if not gwuser else SshNode(
            hostname=gwname,
            username=gwuser,
            keys=keys,
            formatter=ColonFormatter(verbose=verbose),
        )

        # really not sure it makes sense to use username other than root
        username, nodename = self.user_host(self.node)
        node_proxy = SshNode(
            gateway=gateway_proxy,
            hostname=nodename,
            username=username,
            keys=keys,
            formatter=ColonFormatter(verbose=verbose),
        )

        banner = 20 * '='

        # now that node_proxy is initialized, we need to
        # have a valid gateway_proxy for when we run all this from inside
        # the gateway
        if gateway_proxy is None:
            print("WARNING: build-image is designed to be run on your laptop")
            # best-effort, not even tested....
            gateway_proxy = LocalNode()

        #################### the little pieces
        sequence = Sequence(
            PrintJob("Checking for a valid lease"),
            # bail out if we don't have a valid lease
            SshJob(node = gateway_proxy,
                   command = "rhubarbe leases --check",
                   critical = True),
            PrintJob("loading image {}".format(self.from_image)
                     if not no_load else "fast-track: skipping image load",
                     banner = banner,
                     #                     label = "welcome message",
                 ),
            SshJob(
                node = gateway_proxy,
                commands = [
                    # None entries are expected to be weeded out by
                    # sched.sanitize() below - TODO confirm
                    Run("rhubarbe", "load", "-i", self.from_image, nodename) \
                       if not no_load else None,
                    Run("rhubarbe", "wait", "-v", "-t", "240", nodename),
                ],
                #                label = "load and wait image {}".format(self.from_image),
            ),
            # push the scripts tar on the node, run them, and pull the logs
            SshJob(
                node = node_proxy,
                commands = [
                    Run("rm", "-rf", "/etc/rhubarbe-history/{}".format(self.to_image)),
                    Run("mkdir", "-p", "/etc/rhubarbe-history"),
                    Push(localpaths = tarfile,
                         remotepath = "/etc/rhubarbe-history"),
                    RunScript(self.companion, nodename, self.from_image, self.to_image),
                    Pull(localpath = "{}/logs/".format(self.to_image),
                         remotepaths = "/etc/rhubarbe-history/{}/logs/".format(self.to_image),
                         recurse = True),
                ],
                label = "set up and run scripts in /etc/rhubarbe-history/{}".format(self.to_image)),
            )
        # avoid creating an SshJob with void commands
        if self.extra_logs:
            sequence.append(
                SshJob(
                    node=node_proxy,
                    label="collecting extra logs",
                    critical=False,
                    commands=[
                        Pull(localpath="{}/logs/".format(self.to_image),
                             remotepaths=extra_log,
                             recurse=True) for extra_log in self.extra_logs
                    ],
                ))

        # creating these as critical = True means the whole
        # scenario will fail if these are not found
        for binary in self.expected_binaries:
            # absolute paths are checked with ls, bare names with type -p
            check_with = "ls" if os.path.isabs(binary) else ("type -p")
            sequence.append(
                Sequence(
                    PrintJob(
                        "Checking for expected binaries",
                        #                             label = "message about checking"
                    ),
                    SshJob(
                        node=node_proxy,
                        command=[check_with, binary],
                        #                        label = "Checking for {}".format(binary)
                    )))

        # xxx some flag
        if no_save:
            sequence.append(
                PrintJob("fast-track: skipping image save", banner=banner))
        else:
            sequence.append(
                Sequence(
                    PrintJob("saving image {} ...".format(self.to_image),
                             banner=banner),
                    # make sure we capture all the logs and all that
                    # mostly to test RunString
                    SshJob(
                        node=node_proxy,
                        command=RunString("sync ; sleep $1; sync; sleep $1",
                                          1),
                        #                        label = 'sync',
                    ),
                    SshJob(
                        node=gateway_proxy,
                        command=Run("rhubarbe", "save", "-o", self.to_image,
                                    nodename),
                        #                        label = "save image {}".format(self.to_image),
                    ),
                    SshJob(
                        node=gateway_proxy,
                        command="rhubarbe images -d",
                        #                        label = "list current images",
                    ),
                ))

        sched = Scheduler(sequence, verbose=verbose)
        # sanitizing for the cases where some pieces are left out
        sched.sanitize()

        print(20 * '+', "before run")
        sched.list(details=verbose)
        print(20 * 'x')
        if sched.orchestrate():
            if verbose:
                print(20 * '+', "after run")
                sched.list()
                print(20 * 'x')
            print("image {} OK".format(self.to_image))
            return True
        else:
            print("Something went wrong with image {}".format(self.to_image))
            print(20 * '+', "after run - KO")
            sched.debrief()
            print(20 * 'x')
            return False
示例#18
0
def run(
        *,  # pylint: disable=r0912, r0914, r0915
        # the pieces to use
    slicename,
        cn,
        ran,
        phones,
        e3372_ues,
        oai_ues,
        gnuradios,
        e3372_ue_xterms,
        gnuradio_xterms,
        # boolean flags
        load_nodes,
        reset_usb,
        oscillo,
        # the images to load
        image_cn,
        image_ran,
        image_oai_ue,
        image_e3372_ue,
        image_gnuradio,
        image_T_tracer,
        # miscell
        n_rb,
        nodes_left_alone,
        T_tracer,
        verbose,
        dry_run):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slicename : s.t like [email protected]
    * cn : 7
    * ran : 23
    * phones: list of indices of phones to use

    * e3372_ues : list of nodes to use as a UE using e3372
    * oai_ues   : list of nodes to use as a UE using OAI
    * gnuradios : list of nodes to load with a gnuradio image
    * T_tracer  : list of nodes to load with a tracer image

    * image_* : the name of the images to load on the various nodes

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_cn, image_ran and image_*
                  are used to tell the image names
    * reset_usb : the USRP board will be reset when this is set

    Returns True if the scheduler orchestration succeeded (or dry_run
    was requested), False otherwise.
    """

    # what argparse knows as a slice actually is about the gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slicename)
    gwnode = SshNode(hostname=gwhost,
                     username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose),
                     debug=verbose)

    # the core-network and radio-access nodes, reached through the gateway
    hostnames = [r2lab_hostname(x) for x in (cn, ran)]

    cnnode, rannode = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=TimeColonFormatter(verbose=verbose),
                debug=verbose) for hostname in hostnames
    ]

    scheduler = Scheduler(verbose=verbose, label="CORE EXP")

    ########## prepare the image-loading phase
    # focus on the experiment, and use
    # prepare_testbed_scheduler later on to prepare testbed
    # all we need to do at this point is compute a mapping dict
    # image -> list-of-nodes

    images_to_load = defaultdict(list)
    images_to_load[image_cn] += [cn]
    images_to_load[image_ran] += [ran]
    if e3372_ues:
        images_to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        images_to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        images_to_load[image_oai_ue] += oai_ues
    if gnuradios:
        images_to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        images_to_load[image_gnuradio] += gnuradio_xterms
    if T_tracer:
        images_to_load[image_T_tracer] += T_tracer

    # start core network
    job_start_cn = SshJob(
        node=cnnode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "journal --vacuum-time=1s",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "configure",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "start",
                      includes=INCLUDES),
            tcpdump_cn_service.start_command(),
        ],
        label="start CN service",
        scheduler=scheduler,
    )

    # prepare enodeb
    reset_option = "-u" if reset_usb else ""
    job_warm_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "journal --vacuum-time=1s",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "warm-up",
                      reset_option,
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "configure -b",
                      n_rb,
                      cn,
                      includes=INCLUDES),
        ],
        label="Configure eNB",
        scheduler=scheduler,
    )

    # jobs that must complete before the grace delay below starts
    ran_requirements = [job_start_cn, job_warm_ran]
    ###
    if oai_ues:
        # prepare OAI UEs
        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode,
                              hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            job_warm_ues = [
                SshJob(node=ue_node,
                       commands=[
                           RunScript(find_local_embedded_script("nodes.sh"),
                                     "git-pull-r2lab",
                                     includes=INCLUDES),
                           RunScript(
                               find_local_embedded_script("mosaic-oai-ue.sh"),
                               "journal --vacuum-time=1s",
                               includes=INCLUDES),
                           RunScript(
                               find_local_embedded_script("mosaic-oai-ue.sh"),
                               "warm-up",
                               reset_option,
                               includes=INCLUDES),
                           RunScript(
                               find_local_embedded_script("mosaic-oai-ue.sh"),
                               "configure -b",
                               n_rb,
                               includes=INCLUDES),
                       ],
                       label=f"Configure OAI UE on fit{ue}",
                       scheduler=scheduler)
            ]
            # NOTE(review): this appends a one-element *list* of jobs, so
            # ran_requirements ends up nested; presumably the scheduler
            # flattens required= sequences, but appending job_warm_ues[0]
            # would be cleaner - confirm
            ran_requirements.append(job_warm_ues)

###
    if not load_nodes and phones:
        job_turn_off_phones = SshJob(
            node=gwnode,
            commands=[
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{phone} phone-off") for phone in phones
            ],
            scheduler=scheduler,
        )
        ran_requirements.append(job_turn_off_phones)

    # wait for everything to be ready, and add an extra grace delay

    grace = 5
    grace_delay = PrintJob(
        f"Allowing grace of {grace} seconds",
        sleep=grace,
        required=ran_requirements,
        scheduler=scheduler,
        label=f"settle for {grace}s",
    )

    # optionally start T_tracer
    if T_tracer:
        job_start_T_tracer = SshJob(  # pylint: disable=w0612
            node=SshNode(gateway=gwnode,
                         hostname=r2lab_hostname(T_tracer[0]),
                         username='******',
                         formatter=TimeColonFormatter(verbose=verbose),
                         debug=verbose),
            commands=[
                Run(f"/root/trace {ran}", x11=True),
            ],
            label="start T_tracer service",
            required=ran_requirements,
            scheduler=scheduler,
        )
#        ran_requirements.append(job_start_T_tracer)

# start services

    graphical_option = "-x" if oscillo else ""
    graphical_message = "graphical" if oscillo else "regular"
    tracer_option = " -T" if T_tracer else ""

    # we use a Python variable for consistency
    # although it not used down the road
    _job_service_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(
                find_local_embedded_script("mosaic-ran.sh"),
                "start",
                graphical_option,
                tracer_option,
                includes=INCLUDES,
                x11=oscillo,
            ),
        ],
        label=f"start {graphical_message} softmodem on eNB",
        required=grace_delay,
        scheduler=scheduler,
    )

    ########## run experiment per se
    # Manage phone(s) and OAI UE(s)
    # this starts at the same time as the eNB, but some
    # headstart is needed so that eNB actually is ready to serve
    sleeps = [20, 30]
    phone_msgs = [
        f"wait for {sleep}s for eNB to start up before waking up phone{id}"
        for sleep, id in zip(sleeps, phones)
    ]
    wait_commands = [
        f"echo {msg}; sleep {sleep}" for msg, sleep in zip(phone_msgs, sleeps)
    ]

    job_start_phones = [
        SshJob(node=gwnode,
               commands=[
                   Run(wait_command),
                   RunScript(find_local_embedded_script("faraday.sh"),
                             f"macphone{id}",
                             "r2lab-embedded/shell/macphone.sh",
                             "phone-on",
                             includes=INCLUDES),
                   RunScript(find_local_embedded_script("faraday.sh"),
                             f"macphone{id}",
                             "r2lab-embedded/shell/macphone.sh",
                             "phone-start-app",
                             includes=INCLUDES),
               ],
               # NOTE(review): label typo - "airplace" should read "airplane"
               label=f"turn off airplace mode on phone {id}",
               required=grace_delay,
               scheduler=scheduler)
        for id, wait_command in zip(phones, wait_commands)
    ]

    if oai_ues:
        delay = 25
        for ue in oai_ues:
            msg = f"wait for {delay}s for eNB to start up before running UE on node fit{ue}"
            wait_command = f"echo {msg}; sleep {delay}"
            ue_node = SshNode(gateway=gwnode,
                              hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            # NOTE(review): job_start_ues is rebound at each iteration, so
            # after this loop it only refers to the job for the *last* UE;
            # the ping jobs below thus only require that last job - confirm
            job_start_ues = [
                SshJob(node=ue_node,
                       commands=[
                           Run(wait_command),
                           RunScript(
                               find_local_embedded_script("mosaic-oai-ue.sh"),
                               "start",
                               includes=INCLUDES),
                       ],
                       label=f"Start OAI UE on fit{ue}",
                       required=grace_delay,
                       scheduler=scheduler)
            ]
            delay += 20

        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode,
                              hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            msg = f"Wait 60s and then ping faraday gateway from UE on fit{ue}"
            _job_ping_gw_from_ue = [
                SshJob(node=ue_node,
                       commands=[
                           Run(f"echo {msg}; sleep 60"),
                           Run(f"ping -c 5 -I oip1 faraday.inria.fr"),
                       ],
                       label=f"ping faraday gateway from UE on fit{ue}",
                       critical=False,
                       required=job_start_ues,
                       scheduler=scheduler)
            ]

    # ditto
    _job_ping_phones_from_cn = [
        SshJob(
            node=cnnode,
            commands=[
                Run("sleep 20"),
                Run(f"ping -c 100 -s 100 -i .05 172.16.0.{id+1} &> /root/ping-phone{id}"
                    ),
            ],
            label=f"ping phone {id} from core network",
            critical=False,
            required=job_start_phones,
            scheduler=scheduler) for id in phones
    ]

    ########## xterm nodes

    colors = ("wheat", "gray", "white", "darkolivegreen")

    xterms = e3372_ue_xterms + gnuradio_xterms

    # one xterm per node, cycling over the available background colors
    for xterm, color in zip(xterms, cycle(colors)):
        xterm_node = SshNode(gateway=gwnode,
                             hostname=r2lab_hostname(xterm),
                             username='******',
                             formatter=TimeColonFormatter(verbose=verbose),
                             debug=verbose)
        SshJob(
            node=xterm_node,
            command=Run(f"xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*",
                        f" -bg {color} -geometry 90x10",
                        x11=True),
            label=f"xterm on node {xterm_node.hostname}",
            scheduler=scheduler,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
        )

    # remove dangling requirements - if any
    # should not be needed but won't hurt either
    scheduler.sanitize()

    ##########
    print(10 * "*", "nodes usage summary")
    if load_nodes:
        for image, nodes in images_to_load.items():
            for node in nodes:
                print(f"node {node} : {image}")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10 * "*", "phones usage summary")
    if phones:
        for phone in phones:
            print(f"Using phone{phone}")
    else:
        print("No phone involved")
    if nodes_left_alone:
        print(f"Ignore following fit nodes: {nodes_left_alone}")

    # wrap scheduler into global scheduler that prepares the testbed
    scheduler = prepare_testbed_scheduler(gwnode, load_nodes, scheduler,
                                          images_to_load, nodes_left_alone)

    scheduler.check_cycles()
    # Update the .dot and .png file for illustration purposes
    name = "mosaic-load" if load_nodes else "mosaic"
    print(10 * '*', 'See main scheduler in', scheduler.export_as_pngfile(name))

    if verbose:
        scheduler.list()

    if dry_run:
        return True

    if verbose:
        input('OK ? - press control C to abort ? ')

    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print("RUN OK")
    return True
def one_run(gwhost,
            gwuser,
            keys,
            sendername,
            receivername,
            packets,
            size,
            period,
            formatter,
            verbose=False):
    """
    Run one sender/receiver measurement over the R2lab testbed.

    gwhost, gwuser, keys: where to reach the testbed gateway
    sendername, receivername : hostnames for the test nodes
    packets, size, period : details of the traffic to send
    formatter: how to report results

    Both nodes are initialized in parallel; the sender and receiver
    runs start only once both initializations have completed, and the
    receiver's raw data is pulled back locally at the end.
    """

    # we keep all 'environment' data for one run in a dedicated subdir
    # using this name scheme to store results locally
    # xxx inherited from the NEPI version - unused for now
    # (fix: the original wrapped this single string in os.path.join,
    # which is a no-op with one argument)
    dataname = "csi-{}-{}-{}-{}-{}".format(
        receivername, sendername, packets, size, period)

    # we have reused the shell script from the NEPI version as-is
    auxiliary_script = "./angle-measure.sh"

    # the proxy to enter faraday
    r2lab_gateway = SshNode(
        hostname=gwhost,
        username=gwuser,
        keys=keys,
        formatter=formatter,
    )

    # the sender node
    sender = SshNode(
        # specifying the gateway attribute means this node will be reached
        # through the ssh connection to the gateway
        gateway=r2lab_gateway,
        # hostname needs to make sense in the context of the gateway; so e.g. 'fit01' is fine
        hostname=sendername,
        # from the gateway we enter the R2lab nodes as root
        username='******',
        formatter=formatter,
    )

    # the receiver node - ditto
    receiver = SshNode(
        hostname=receivername,
        username='******',
        gateway=r2lab_gateway,
        formatter=formatter,
    )

    # one initialization job per node
    init_sender = SshJob(
        # on what node to run the command
        node=sender,
        # the command to run; being a JobSshScript, the first item in this
        # list is expected to be a **LOCAL** script that gets pushed remotely
        # before being run
        # a simple JobSsh is more suitable to issue standard Unix commands for instance
        command=RunScript(auxiliary_script, "init-sender", 64, "HT20"),
        # for convenience purposes
        label="init-sender")

    init_receiver = SshJob(node=receiver,
                           command=RunScript(auxiliary_script, "init-receiver",
                                             64, "HT20"),
                           label="init-receiver")

    # ditto for actually running the experiment
    run_sender = SshJob(node=sender,
                        command=RunScript(auxiliary_script, "run-sender",
                                          packets, size, period),
                        label="run-sender")

    # run the sender only once both nodes are ready
    run_sender.requires(init_sender, init_receiver)

    run_receiver = SshJob(node=receiver,
                          commands=[
                              RunScript(auxiliary_script, "run-receiver",
                                        packets, size, period),
                              Pull(remotepaths='rawdata', localpath=dataname),
                          ],
                          label="run-receiver")
    # ditto
    run_receiver.requires(init_sender, init_receiver)

    # print a one-liner for that receiver, sender couple
    summary = "{} ==> {} - {} packets of {} bytes, each {}us"\
        .format(sendername, receivername, packets, size, period)
    print(10 * '-', summary)

    # create a Scheduler object that will orchestrate this scenario
    e = Scheduler(init_sender,
                  init_receiver,
                  run_sender,
                  run_receiver,
                  verbose=verbose)

    print(20 * '*', "before run")
    e.list(details=verbose)
    print(20 * '*')

    # global timeout of 3 minutes for the whole scenario
    if e.orchestrate(timeout=3 * 60):
        print("========== experiment OK")
    else:
        print("!!!!!!!!!! orchestration KO")
        e.debrief()
# ---- Example #20 ----
    def test_graphics1(self):
        """
        Build one long Sequence exercising every command flavour
        (Run, RunString, RunScript, Push, Pull) with and without
        arguments, remote_name and label, then list the scheduler
        (with and without details), produce a png, and run it.

        The final run is expected to fail (assertFalse) - presumably
        because some commands reference non-existent local files such
        as "foobar"; confirm against the apssh test suite.
        """

        # critical=False so one failing job does not abort the whole run
        scheduler = Scheduler(critical=False)

        # everything runs on the local host over ssh
        gateway = SshNode(hostname=localhostname(), username=localuser())

        Sequence(
            SshJob(
                node=gateway,
                command='hostname',
            ),
            SshJob(node=gateway,
                   command=[
                       Run('ls /etc/passwd'),
                       Run('wc -l /etc/passwd'),
                   ]),
            SshJob(node=gateway,
                   commands=[
                       RunString(
                           "#!/usr/bin/env bash\n"
                           "echo with RunString on $(hostname) at $(date)"),
                   ]),
            SshJob(node=gateway, commands=[
                RunScript("tests/testbasic.sh"),
            ]),
            SshJob(node=gateway,
                   commands=[
                       Run('wc -l /etc/passwd'),
                       # NOTE(review): "RunsString" in the echoed text is a
                       # typo in the script payload (runtime string, left as-is)
                       RunString(
                           "#!/usr/bin/env bash\n"
                           "echo with RunsString on $(hostname) at $(date)",
                           remote_name="show-host-date"),
                       RunScript("tests/testbasic.sh"),
                   ]),
            # RunString with one positional script argument
            SshJob(node=gateway,
                   commands=[
                       RunString(
                           "#!/usr/bin/env bash\n"
                           "echo first arg is $1\n", 10)
                   ]),
            # same, plus an explicit remote_name
            SshJob(node=gateway,
                   commands=[
                       RunString(
                           "#!/usr/bin/env bash\n"
                           "echo first arg is $1\n",
                           10,
                           remote_name='short-show-args')
                   ]),
            # several positional script arguments
            SshJob(node=gateway,
                   commands=[
                       RunString(
                           "#!/usr/bin/env bash\n"
                           "echo first arg is $1\n"
                           "echo second arg is $2\n"
                           "echo third arg is $3\n"
                           "echo fourth arg is $4\n", 100, 200, 300, 400)
                   ]),
            SshJob(node=gateway,
                   commands=[
                       RunString(
                           "#!/usr/bin/env bash\n"
                           "echo first arg is $1\n"
                           "echo second arg is $2\n"
                           "echo third arg is $3\n"
                           "echo fourth arg is $4\n",
                           1000,
                           2000,
                           3000,
                           4000,
                           remote_name='long-show-args')
                   ]),
            # remote_name + label together
            SshJob(node=gateway,
                   commands=[
                       RunString(
                           "#!/usr/bin/env bash\n"
                           "echo first arg is $1\n"
                           "echo second arg is $2\n"
                           "echo third arg is $3\n"
                           "echo fourth arg is $4\n",
                           1000,
                           2000,
                           3000,
                           4000,
                           remote_name='long-show-args',
                           label='snip')
                   ]),
            # exercise label= on every command class, including
            # deliberately bogus label values (None, [], 0, {}, "")
            SshJob(node=gateway,
                   commands=[
                       Run("hostname", label="Run()"),
                       RunScript("foobar", label="RunScript()"),
                       RunString("foobar", label="RunString()"),
                       Push("foobar", remotepath="remote", label="Push()"),
                       Pull("remote", localpath="foobar", label="Pull()"),
                       Run("hostname", label=None),
                       RunScript("foobar", label=[]),
                       RunString("foobar", label=0),
                       Push("foobar", remotepath="remote", label={}),
                       Pull("remote", localpath="foobar", label=""),
                   ]),
            scheduler=scheduler,
        )

        print("NO DETAILS")
        scheduler.list()
        print("WITH DETAILS")
        scheduler.list(details=True)
        produce_png(scheduler, "test_graphics1")

        ok = scheduler.run()

        self.assertFalse(ok)
# ---- Example #21 ----
def run(*, gateway, slicename, nodes, node_epc, node_enb, quectel_nodes,
        phones, verbose, dry_run, load_images, epc_image, enb_image,
        quectel_image):
    """
    Launch latest OAICI EPC and eNB Docker images on R2lab

    Arguments:
        slicename: the Unix login name (slice name) to enter the gateway
        quectel_nodes: list of indices of quectel UE nodes to use
        phones: list of indices of phones to use
        nodes: a list of node ids to run the scenario on; strings or ints
                  are OK;
        node_epc: the node id on which to run the EPC
        node_enb: the node id for the enb, which is connected to B210/eNB-duplexer

    """

    quectel_ids = quectel_nodes[:]
    quectel = len(quectel_ids) > 0

    faraday = SshNode(hostname=default_gateway,
                      username=slicename,
                      verbose=verbose,
                      formatter=TimeColonFormatter())

    epc = SshNode(gateway=faraday,
                  hostname=fitname(node_epc),
                  username="******",
                  verbose=verbose,
                  formatter=TimeColonFormatter())

    node_index = {
        id: SshNode(gateway=faraday,
                    hostname=fitname(id),
                    username="******",
                    formatter=TimeColonFormatter(),
                    verbose=verbose)
        for id in nodes
    }

    nodes_quectel_index = {
        id: SshNode(gateway=faraday,
                    hostname=fitname(id),
                    username="******",
                    formatter=TimeColonFormatter(),
                    verbose=verbose)
        for id in quectel_nodes
    }
    allnodes = nodes + quectel_nodes

    fit_epc = fitname(node_epc)
    fit_enb = fitname(node_enb)

    # the global scheduler
    scheduler = Scheduler(verbose=verbose)

    ##########
    check_lease = SshJob(
        scheduler=scheduler,
        node=faraday,
        critical=True,
        verbose=verbose,
        command=Run("rhubarbe leases --check"),
    )

    green_light = check_lease

    if load_images:
        green_light = [
            SshJob(scheduler=scheduler,
                   required=check_lease,
                   node=faraday,
                   critical=True,
                   verbose=verbose,
                   label=f"Load image {epc_image} on {fit_epc}",
                   commands=[
                       Run(f"rhubarbe load {node_epc} -i {epc_image}"),
                       Run(f"rhubarbe wait {node_epc}"),
                       RunScript("oaici.sh", "init-epc", node_epc, node_enb),
                   ]),
            SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=True,
                verbose=verbose,
                label=f"Load image {enb_image} on {fit_enb}",
                commands=[
                    Run(f"rhubarbe usrpoff {node_enb}"
                        ),  # if usrp is on, load could be problematic...
                    Run(f"rhubarbe load {node_enb} -i {enb_image}"),
                    Run(f"rhubarbe wait {node_enb}"),
                    Run(f"rhubarbe usrpon {node_enb}"
                        ),  # ensure a reset of the USRP on the enB node
                    RunScript("oaici.sh", "init-enb", node_enb, node_epc),
                ],
            ),
            SshJob(scheduler=scheduler,
                   required=check_lease,
                   node=faraday,
                   critical=False,
                   verbose=verbose,
                   label="turning off unused nodes",
                   command=[
                       Run("rhubarbe bye --all " + "".join(f"~{x} "
                                                           for x in allnodes))
                   ])
        ]
        if quectel:
            prepare_quectel = SshJob(
                scheduler=scheduler,
                required=check_lease,
                node=faraday,
                critical=True,
                verbose=verbose,
                label=f"Load image {quectel_image} on quectel UE nodes",
                commands=[
                    Run("rhubarbe", "usrpoff", *quectel_ids),
                    Run("rhubarbe", "load", *quectel_ids, "-i", quectel_image),
                    Run("rhubarbe", "wait", *quectel_ids),
                    Run("rhubarbe", "usrpon", *quectel_ids),
                ],
            ),

    ##########
    # Prepare the Quectel UE nodes
    if quectel:
        # wait 30s for Quectel modules show up
        wait_quectel_ready = PrintJob(
            "Let Quectel modules show up",
            scheduler=scheduler,
            required=prepare_quectel,
            sleep=30,
            label="sleep 30s for the Quectel modules to show up")
        # run the Quectel Connection Manager as a service on each Quectel UE node
        quectelCM_service = Service(
            command="quectel-CM -s oai.ipv4 -4",
            service_id="QuectelCM",
            verbose=verbose,
        )
        init_quectel_nodes = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectel_ready,
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Init Quectel UE on fit node {id}",
                commands=[
                    RunScript(find_local_embedded_script("nodes.sh"),
                              "check-quectel-on",
                              includes=INCLUDES),
                    quectelCM_service.start_command(),
                ],
            ) for id, node in nodes_quectel_index.items()
        ]
        # wait 20s for Quectel Connection Manager to start up
        wait_quectelCM_ready = PrintJob(
            "Let QuectelCM start up",
            scheduler=scheduler,
            required=init_quectel_nodes,
            sleep=20,
            label="Sleep 20s for the Quectel Connection Manager(s) to start up"
        )
        detach_quectel_nodes = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectelCM_ready,
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Detach Quectel UE on fit node {id}",
                command=RunScript(find_local_embedded_script("nodes.sh"),
                                  "quectel-detach",
                                  includes=INCLUDES),
            ) for id, node in nodes_quectel_index.items()
        ]

    ##########
    # Launch the EPC
    start_epc = SshJob(
        scheduler=scheduler,
        required=green_light,
        node=faraday,
        critical=True,
        verbose=verbose,
        label=f"Launch EPC on {fit_epc}",
        commands=[
            RunScript("oaici.sh", "start-epc", node_epc),
        ],
    )
    # Launch the eNB
    if quectel:
        req = (start_epc, detach_quectel_nodes)
    else:
        req = start_epc
    start_enb = SshJob(
        scheduler=scheduler,
        required=req,
        node=faraday,
        critical=True,
        verbose=verbose,
        label=f"Launch eNB on {fit_enb}",
        commands=[
            RunScript("oaici.sh", "start-enb", node_enb),
        ],
    )
    wait_ran_ready = PrintJob("Let the eNB start up",
                              scheduler=scheduler,
                              required=start_enb,
                              sleep=50,
                              label="sleep 50s for the eNB to start up")

    ########## Test phone(s) connectivity

    sleeps_ran = (0, 10)
    phone_msgs = [
        f"wait again for {sleep}s before waking up phone{id}"
        for sleep, id in zip(sleeps_ran, phones)
    ]
    wait_commands = [
        f"echo {msg}; sleep {sleep}"
        for msg, sleep in zip(phone_msgs, sleeps_ran)
    ]
    sleeps_phone = (10, 10)
    phone2_msgs = [
        f"wait for {sleep}s for phone{id} before starting tests"
        for sleep, id in zip(sleeps_phone, phones)
    ]
    wait2_commands = [
        f"echo {msg}; sleep {sleep}"
        for msg, sleep in zip(phone2_msgs, sleeps_phone)
    ]

    # One SshJob per phone, all running on the faraday gateway and attached
    # to the main scheduler; each job sequentially:
    #   1. waits its staggered delay (wait_command),
    #   2. turns the phone on (macphone.sh phone-on),
    #   3. waits again (wait2_command) for the phone to come up,
    #   4. checks cellular connectivity (phone-check-cx),
    #   5. starts the measurement app (phone-start-app).
    # NOTE(review): `id` shadows the builtin; kept as-is to match the
    # file's existing convention.
    job_start_phones = [
        SshJob(node=faraday,
               commands=[
                   Run(wait_command),
                   RunScript(find_local_embedded_script("faraday.sh"),
                             f"macphone{id}",
                             "r2lab-embedded/shell/macphone.sh",
                             "phone-on",
                             includes=INCLUDES),
                   Run(wait2_command),
                   RunScript(find_local_embedded_script("faraday.sh"),
                             f"macphone{id}",
                             "r2lab-embedded/shell/macphone.sh",
                             "phone-check-cx",
                             includes=INCLUDES),
                   RunScript(find_local_embedded_script("faraday.sh"),
                             f"macphone{id}",
                             "r2lab-embedded/shell/macphone.sh",
                             "phone-start-app",
                             includes=INCLUDES),
               ],
               label=f"turn off airplane mode on phone {id}",
               required=wait_ran_ready,
               scheduler=scheduler) for id, wait_command, wait2_command in zip(
                   phones, wait_commands, wait2_commands)
    ]
    # Optional Quectel UE branch: attach each Quectel-equipped fit node,
    # give the connection 30s to settle, then verify it.
    if quectel:
        # Attach jobs start only after the phones are handled, the RAN is
        # ready, and any previous Quectel attachment has been torn down.
        # critical=True: a failed attach aborts the whole scenario.
        job_attach_quectel = [
            SshJob(
                scheduler=scheduler,
                required=(job_start_phones, wait_ran_ready,
                          detach_quectel_nodes),
                node=node,
                critical=True,
                verbose=verbose,
                label=f"Attach Quectel UE on fit node {id}",
                command=RunScript(find_local_embedded_script("nodes.sh"),
                                  "quectel-attach",
                                  includes=INCLUDES),
            ) for id, node in nodes_quectel_index.items()
        ]
        # wait 30s for Quectel connection to set up
        wait_quectel_cx_ready = PrintJob(
            "Let the Quectel connection(s) set up",
            scheduler=scheduler,
            required=job_attach_quectel,
            sleep=30,
            label="Sleep 30s for the Quectel connection(s) to set up")
        # Connectivity check per node; not critical, so a failed check is
        # reported at debrief time without aborting the run.  The list is
        # not referenced later: constructing the SshJobs is enough, since
        # they register themselves with the scheduler.
        test_quectel_cx = [
            SshJob(
                scheduler=scheduler,
                required=wait_quectel_cx_ready,
                node=node,
                verbose=verbose,
                label=f"Check the Quectel cx on fit node {id}",
                command=RunScript(find_local_embedded_script("nodes.sh"),
                                  "check-quectel-cx",
                                  includes=INCLUDES),
            ) for id, node in nodes_quectel_index.items()
        ]

    ##########
    # Update the .dot and .png file for illustration purposes
    # Fail fast on a mis-wired dependency graph before exporting/running.
    scheduler.check_cycles()
    name = "deploy-oaici"
    print(10 * '*', 'See main scheduler in', scheduler.export_as_pngfile(name))

    # orchestration scheduler jobs
    if verbose:
        scheduler.list()

    # In dry-run mode the graph has been built and exported; report success
    # without actually running any job.
    if dry_run:
        return True

    # Run the whole dependency graph; on failure, explain why and dump the
    # per-job debrief before reporting failure to the caller.
    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print(
        f"RUN OK, you can log now on the EPC node {fit_epc} and the eNB node {fit_enb} to check the logs"
    )
    print(80 * '*')