Code example #1 (score: 0)
File: tests_nesting.py — project: JawaGL/asynciojobs
        def nested():
            # Inner scheduler: a three-job sequence, plus two dangling
            # requirements (j14 and j15 are not inserted in any scheduler).
            j11, j12, j13, j14, j15 = (job(i) for i in range(11, 16))
            s2 = Scheduler(Sequence(j11, j12, j13), label="nested internal")
            j12.requires(j14)
            j13.requires(j15)

            # Top scheduler wraps the inner scheduler between j1 and j3;
            # j4 is another dangling requirement, j11 belongs to s2 only.
            j1, j2, j3, j4, j5 = (job(i) for i in range(1, 6))
            s1 = Scheduler(Sequence(j1, s2, j3), label="nested top")
            j1.requires(j4)
            j1.requires(j11)

            # the nested scheduler itself gets a dangling requirement
            s2.requires(j13)

            # j2 not included in sched, untouched
            j2.requires(j1)

            # before sanitize(): dangling requirements are still counted
            for entity, expected in ((j12, 2), (j13, 2), (j1, 2),
                                     (s2, 2), (j3, 1)):
                self.assertEqual(len(entity.required), expected)
            s1.sanitize()
            # after sanitize(): requirements on jobs unknown to the
            # (nested) schedulers have been pruned
            for entity, expected in ((j12, 1), (j13, 1), (j1, 0),
                                     (s2, 1), (j3, 1)):
                self.assertEqual(len(entity.required), expected)
Code example #2 (score: 0)
File: tests_nesting.py — project: JawaGL/asynciojobs
 def simple():
     # five jobs, but only the first three are attached to the scheduler;
     # j4 and j5 stay outside, leaving j2 and j3 with dangling requirements
     j1, j2, j3, j4, j5 = (job(i) for i in range(1, 6))
     s1 = Scheduler(j1, j2, j3, label='top simple')
     j2.requires(j4)
     j3.requires(j5)
     # before sanitize(): the dangling requirements are still counted
     for dangling in (j2, j3):
         self.assertEqual(len(dangling.required), 1)
     s1.sanitize()
     # sanitize() drops requirements on jobs unknown to the scheduler
     for dangling in (j2, j3):
         self.assertEqual(len(dangling.required), 0)
Code example #3 (score: 0)
File: tests_cycles.py — project: JawaGL/asynciojobs
    def test_nested_cycles(self):

        watch = Watch()

        def job(i):
            # a tiny sleeping job labelled "jobN"
            return Job(co_print_sleep(watch, .2, f"job {i}"),
                       label=f"job{i}")

        js1, js2, js3 = (job(i) for i in range(11, 14))
        s2 = Scheduler(Sequence(js1, js2, js3))

        j1, j3 = job(1), job(3)
        s1 = Scheduler(Sequence(j1, s2, j3))
        # everything is acyclic so far
        self.assertTrue(s1.check_cycles())

        # introduce, then undo, a cycle inside the nested scheduler
        js1.requires(js3)
        self.assertFalse(s1.check_cycles())
        js1.requires(js3, remove=True)
        self.assertTrue(s1.check_cycles())

        # same dance at the top level
        j1.requires(j3)
        self.assertFalse(s1.check_cycles())
        j1.requires(j3, remove=True)
        self.assertTrue(s1.check_cycles())

        # now go one nesting level deeper
        s3 = Scheduler()
        jss1, jss2, jss3 = (job(i) for i in range(111, 114))
        Sequence(jss1, jss2, jss3, scheduler=s3)

        # surgery in s2: swap js2 for the s3 scheduler; still no cycle
        s2.remove(js2)
        s2.sanitize()
        s2.add(s3)
        s3.requires(js1)
        js3.requires(s3)
        self.assertTrue(s1.check_cycles())

        # finally create a cycle that runs through the nested s3
        js1.requires(js3)
        self.assertFalse(s1.check_cycles())
Code example #4 (score: 0)
def run(
        *,
        # the pieces to use
        slice,
        hss,
        epc,
        enb,
        phones,
        e3372_ues,
        oai_ues,
        gnuradios,
        e3372_ue_xterms,
        oai_ue_xterms,
        gnuradio_xterms,
        # boolean flags
        load_nodes,
        skip_reset_usb,
        oscillo,
        # the images to load
        image_gw,
        image_enb,
        image_oai_ue,
        image_e3372_ue,
        image_gnuradio,
        # miscell
        n_rb,
        verbose,
        dry_run):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slice : s.t like [email protected]
    * hss : 04
    * epc : 03
    * enb : 23
    * phones: list of indices of phones to use

    * e3372_ues : list of nodes to use as a UE using e3372
    * oai_ues   : list of nodes to use as a UE using OAI
    * gnuradios : list of nodes to load with a gnuradio image

    * image_* : the name of the images to load on the various nodes

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_gw, image_enb and image_*
                  are used to tell the image names
    * skip_reset_usb : the USRP board will be reset as well unless this is set

    Returns True when the scheduler orchestrated successfully,
    False on failure or when dry_run is set.
    """

    # what argparse knows as a slice actually is a gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slice)
    gwnode = SshNode(hostname=gwhost,
                     username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = hssname, epcname, enbname = [
        r2lab_hostname(x) for x in (hss, epc, enb)
    ]

    # all the nodes that should NOT be turned off
    optional_ids = e3372_ues       + oai_ues       + gnuradios + \
                   e3372_ue_xterms + oai_ue_xterms + gnuradio_xterms

    hssnode, epcnode, enbnode = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=TimeColonFormatter(verbose=verbose),
                debug=verbose) for hostname in hostnames
    ]

    sched = Scheduler(verbose=verbose)

    ########## preparation
    job_check_for_lease = SshJob(
        node=gwnode,
        command=["rhubarbe", "leases", "--check"],
        label="check we have a current lease",
        scheduler=sched,
    )

    # turn off all nodes
    turn_off_command = ["rhubarbe", "off", "-a"]

    # except our 3 nodes and the optional ones
    turn_off_command += [
        "~{}".format(x) for x in [hss, epc, enb] + optional_ids
    ]

    # only do the turn-off thing if load_nodes
    if load_nodes:
        job_off_nodes = SshJob(
            node=gwnode,
            # switch off all nodes but the ones we use
            command=turn_off_command,
            label="turn off unused nodes",
            required=job_check_for_lease,
            scheduler=sched,
        )

    # actually run this in the gateway, not on the macphone
    # the ssh keys are stored in the gateway and we do not yet have
    # the tools to leverage such remote keys
    job_stop_phones = [
        SshJob(
            node=gwnode,
            command=RunScript(
                # script
                find_local_embedded_script("faraday.sh"),
                # arguments
                "macphone{}".format(id),
                "r2lab-embedded/shell/macphone.sh",
                "phone-off",
                # options
                includes=includes),
            label="put phone{} in airplane mode".format(id),
            required=job_check_for_lease,
            scheduler=sched,
        ) for id in phones
    ]

    ########## prepare the image-loading phase
    # this will be a dict of items imagename -> ids
    to_load = defaultdict(list)
    to_load[image_gw] += [hss, epc]
    to_load[image_enb] += [enb]
    if e3372_ues:
        to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        to_load[image_oai_ue] += oai_ues
    if oai_ue_xterms:
        to_load[image_oai_ue] += oai_ue_xterms
    if gnuradios:
        to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        to_load[image_gnuradio] += gnuradio_xterms

    # one load job per image; remember which job prepared which node
    prep_job_by_node = {}
    for image, nodes in to_load.items():
        commands = []
        if load_nodes:
            commands.append(Run("rhubarbe", "usrpoff", *nodes))
            commands.append(Run("rhubarbe", "load", "-i", image, *nodes))
            commands.append(Run("rhubarbe", "usrpon", *nodes))
        # always do this
        commands.append(Run("rhubarbe", "wait", "-t", 120, *nodes))
        job = SshJob(
            node=gwnode,
            commands=commands,
            label="Prepare node(s) {}".format(nodes),
            required=job_check_for_lease,
            scheduler=sched,
        )
        for node in nodes:
            prep_job_by_node[node] = job

    # start services
    job_service_hss = SshJob(
        node=hssnode,
        command=RunScript(find_local_embedded_script("oai-hss.sh"),
                          "run-hss",
                          epc,
                          includes=includes),
        label="start HSS service",
        required=prep_job_by_node[hss],
        scheduler=sched,
    )

    # EPC needs HSS to be up first, hence the headstart delay
    delay = 15
    job_service_epc = SshJob(
        node=epcnode,
        commands=[
            Run("echo giving HSS a headstart {delay}s to warm up; sleep {delay}"
                .format(delay=delay)),
            RunScript(find_local_embedded_script("oai-epc.sh"),
                      "run-epc",
                      hss,
                      includes=includes),
        ],
        label="start EPC services",
        required=prep_job_by_node[epc],
        scheduler=sched,
    )

    ########## enodeb

    job_warm_enb = SshJob(
        node=enbnode,
        commands=[
            RunScript(find_local_embedded_script("oai-enb.sh"),
                      "warm-enb",
                      epc,
                      n_rb,
                      not skip_reset_usb,
                      includes=includes),
        ],
        label="Warm eNB",
        required=prep_job_by_node[enb],
        scheduler=sched,
    )

    enb_requirements = (job_warm_enb, job_service_hss, job_service_epc)

    # wait for everything to be ready, and add an extra grace delay

    grace = 30 if load_nodes else 10
    grace_delay = SshJob(
        node = LocalNode(formatter=TimeColonFormatter()),
        command = "echo Allowing grace of {grace} seconds; sleep {grace}"\
            .format(grace=grace),
        required = enb_requirements,
        scheduler = sched,
    )

    # start services

    job_service_enb = SshJob(
        node=enbnode,
        # run-enb expects the id of the epc as a parameter
        # n_rb means number of resource blocks for DL, set to either 25 or 50.
        commands=[
            RunScript(find_local_embedded_script("oai-enb.sh"),
                      "run-enb",
                      oscillo,
                      includes=includes,
                      x11=oscillo),
        ],
        label="start softmodem on eNB",
        required=grace_delay,
        scheduler=sched,
    )

    ########## run experiment per se
    # Manage phone(s)
    # this starts at the same time as the eNB, but some
    # headstart is needed so that eNB actually is ready to serve
    delay = 12
    msg = "wait for {delay}s for enodeb to start up"\
          .format(delay=delay)
    wait_command = "echo {msg}; sleep {delay}".format(msg=msg, delay=delay)

    job_start_phones = [
        SshJob(
            node=gwnode,
            commands=[
                Run(wait_command),
                RunScript(find_local_embedded_script("faraday.sh"),
                          "macphone{}".format(id),
                          "r2lab-embedded/shell/macphone.sh",
                          "phone-on",
                          includes=includes),
                RunScript(find_local_embedded_script("faraday.sh"),
                          "macphone{}".format(id),
                          "r2lab-embedded/shell/macphone.sh",
                          "phone-start-app",
                          includes=includes),
            ],
            label="start Nexus phone and speedtest app",
            required=grace_delay,
            scheduler=sched,
        ) for id in phones
    ]

    job_ping_phones_from_epc = [
        SshJob(
            node=epcnode,
            commands=[
                Run("sleep 10"),
                Run("ping -c 100 -s 100 -i .05 172.16.0.{ip} &> /root/ping-phone"
                    .format(ip=id + 1)),
            ],
            label="ping Nexus phone from EPC",
            critical=False,
            required=job_start_phones,
            # fix: this job was never attached to the scheduler, so it
            # could not be orchestrated at all - attach it like every
            # other job in this function
            scheduler=sched,
        ) for id in phones
    ]

    ########## xterm nodes

    colors = ["wheat", "gray", "white", "darkolivegreen"]

    xterms = e3372_ue_xterms + oai_ue_xterms + gnuradio_xterms

    for xterm, color in zip(xterms, itertools.cycle(colors)):
        xterm_node = SshNode(gateway=gwnode,
                             hostname=r2lab_hostname(xterm),
                             username='******',
                             formatter=TimeColonFormatter(verbose=verbose),
                             debug=verbose)
        SshJob(
            node=xterm_node,
            command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*"
                        " -bg {} -geometry 90x10".format(color),
                        x11=True),
            label="xterm on node {}".format(xterm_node.hostname),
            required=prep_job_by_node[xterm],
            scheduler=sched,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
        )

    # remove dangling requirements - if any
    # should not be needed but won't hurt either
    sched.sanitize()

    print(20 * "*", "nodes usage summary")
    if load_nodes:
        for image, nodes in to_load.items():
            for node in nodes:
                print("node {node} : {image}".format(node=node, image=image))
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10 * "*", "phones usage summary")
    if phones:
        for phone in phones:
            print("Using phone{phone}".format(phone=phone))
    else:
        print("No phone involved")

    sched.rain_check()
    # Update the .dot and .png file for illustration purposes
    if verbose or dry_run:
        sched.list()
        name = "scenario-load" if load_nodes else \
               "scenario"
        sched.export_as_dotfile("{name}.dot".format(name=name))
        os.system("dot -Tpng {name}.dot -o {name}.png".format(name=name))
        print("(Over)wrote {name}.png".format(name=name))

    if dry_run:
        return False

    if verbose:
        input('OK ? - press control C to abort ? ')

    if not sched.orchestrate():
        print("RUN KO : {}".format(sched.why()))
        sched.debrief()
        return False
    else:
        print("RUN OK")
        return True
Code example #5 (score: 0)
def run(*,                                # pylint: disable=r0912, r0914, r0915
        # the pieces to use
        slicename, cn, ran, phones,
        e3372_ues, oai_ues, gnuradios,
        e3372_ue_xterms, gnuradio_xterms, ns3,
        # boolean flags
        load_nodes, reset_usb, oscillo, tcp_streaming,
        # the images to load
        image_cn, image_ran, image_oai_ue, image_e3372_ue, image_gnuradio, image_T_tracer, image_ns3,
        # miscell
        n_rb, nodes_left_alone, T_tracer, publisher_ip, verbose, dry_run):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slicename : s.t like [email protected]
    * cn : 7
    * ran : 23
    * ns3 : 32
    * phones: list of indices of phones to use

    * e3372_ues : list of nodes to use as a UE using e3372
    * oai_ues   : list of nodes to use as a UE using OAI
    * gnuradios : list of nodes to load with a gnuradio image
    * T_tracer  : list of nodes to load with a tracer image

    * image_* : the name of the images to load on the various nodes

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_cn, image_ran and image_*
                  are used to tell the image names
    * reset_usb : the USRP board will be reset when this is set
    * tcp_streaming : set up TCP streaming scenario
    * publisher_ip : IP address of the publisher

    Returns True when the scheduler orchestrated successfully
    (or when dry_run is set), False otherwise.
    """

    # what argparse knows as a slice actually is about the gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slicename)
    gwnode = SshNode(hostname=gwhost, username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose), debug=verbose)

    hostnames = [r2lab_hostname(x) for x in (cn, ran)]

    cnnode, rannode = [
        SshNode(gateway=gwnode, hostname=hostname, username='******',
                formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
        for hostname in hostnames
    ]

    scheduler = Scheduler(verbose=verbose, label="CORE EXP")

    ########## prepare the image-loading phase
    # focus on the experiment, and use
    # prepare_testbed_scheduler later on to prepare testbed
    # all we need to do at this point is compute a mapping dict
    # image -> list-of-nodes

    images_to_load = defaultdict(list)
    images_to_load[image_cn] += [cn]
    images_to_load[image_ran] += [ran]
    if e3372_ues:
        images_to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        images_to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        images_to_load[image_oai_ue] += oai_ues
    if gnuradios:
        images_to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        images_to_load[image_gnuradio] += gnuradio_xterms
    if T_tracer:
        images_to_load[image_T_tracer] += T_tracer
    if ns3:
        images_to_load[image_ns3] += [ns3]

    # start core network
    job_start_cn = SshJob(
        node=cnnode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "journal --vacuum-time=1s",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"), "configure",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"), "start",
                      includes=INCLUDES),
            tcpdump_cn_service.start_command(),
        ],
        label="start CN service",
        scheduler=scheduler,
    )

    # prepare enodeb
    reset_option = "-u" if reset_usb else ""
    job_warm_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "journal --vacuum-time=1s",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "warm-up", reset_option,
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "configure -b", n_rb, cn,
                      includes=INCLUDES),
        ],
        label="Configure eNB",
        scheduler=scheduler,
    )

    ran_requirements = [job_start_cn, job_warm_ran]

    if oai_ues:
        # prepare OAI UEs
        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue), username='******',
                              formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            job_warm_ues = [
                SshJob(
                    node=ue_node,
                    commands=[
                        RunScript(find_local_embedded_script("nodes.sh"),
                                  "git-pull-r2lab",
                                  includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "journal --vacuum-time=1s",
                                  includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "warm-up", reset_option,
                                  includes=INCLUDES),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "configure -b", n_rb,
                                  includes=INCLUDES),
                        ],
                    label=f"Configure OAI UE on fit{ue:02d}",
                    scheduler=scheduler)
                ]
            ran_requirements.append(job_warm_ues)

    if not load_nodes and phones:
        job_turn_off_phones = SshJob(
            node=gwnode,
            commands=[
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{phone} phone-off")
                for phone in phones],
            scheduler=scheduler,
        )
        ran_requirements.append(job_turn_off_phones)

    # wait for everything to be ready, and add an extra grace delay

    grace = 5
    grace_delay = PrintJob(
        f"Allowing grace of {grace} seconds",
        sleep=grace,
        required=ran_requirements,
        scheduler=scheduler,
        label=f"settle for {grace}s",
    )

    # optionally start T_tracer
    if T_tracer:
        job_start_T_tracer = SshJob(                    # pylint: disable=w0612
            node=SshNode(
                gateway=gwnode, hostname=r2lab_hostname(T_tracer[0]), username='******',
                formatter=TimeColonFormatter(verbose=verbose), debug=verbose),
            commands=[
                Run(f"/root/trace {ran}",
                    x11=True),
            ],
            label="start T_tracer service",
            required=ran_requirements,
            scheduler=scheduler,
        )
#        ran_requirements.append(job_start_T_tracer)

    # start services

    graphical_option = "-x" if oscillo else ""
    graphical_message = "graphical" if oscillo else "regular"
    tracer_option = " -T" if T_tracer else ""

    # we use a Python variable for consistency
    # although it not used down the road
    _job_service_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "start", graphical_option, tracer_option,
                      includes=INCLUDES,
                      x11=oscillo,
                      ),
        ],
        label=f"start {graphical_message} softmodem on eNB",
        required=grace_delay,
        scheduler=scheduler,
    )

    ########## run experiment per se
    # Manage phone(s) and OAI UE(s)
    # this starts at the same time as the eNB, but some
    # headstart is needed so that eNB actually is ready to serve
    sleeps = [20, 30]
    phone_msgs = [f"wait for {sleep}s for eNB to start up before waking up phone{id}"
                  for sleep, id in zip(sleeps, phones)]
    wait_commands = [f"echo {msg}; sleep {sleep}"
                     for msg, sleep in zip(phone_msgs, sleeps)]

    job_start_phones = [
        SshJob(
            node=gwnode,
            commands=[
                Run(wait_command),
                RunScript(find_local_embedded_script("faraday.sh"), f"macphone{id}",
                          "r2lab-embedded/shell/macphone.sh", "phone-on",
                          includes=INCLUDES),
                RunScript(find_local_embedded_script("faraday.sh"), f"macphone{id}",
                          "r2lab-embedded/shell/macphone.sh", "phone-start-app",
                          includes=INCLUDES),
            ],
            label=f"turn off airplace mode on phone {id}",
            required=grace_delay,
            scheduler=scheduler)
        for id, wait_command in zip(phones, wait_commands)]

    if oai_ues:
        # stagger the UE start-ups, 20s apart
        delay = 25
        for ue in oai_ues:
            msg = f"wait for {delay}s for eNB to start up before running UE on node fit{ue:02d}"
            wait_command = f"echo {msg}; sleep {delay}"
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue), username='******',
                              formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            job_start_ues = [
                SshJob(
                    node=ue_node,
                    commands=[
                        Run(wait_command),
                        RunScript(find_local_embedded_script("mosaic-oai-ue.sh"),
                                  "start",
                                  includes=INCLUDES),
                        ],
                    label=f"Start OAI UE on fit{ue:02d}",
                    required=grace_delay,
                    scheduler=scheduler)
                ]
            delay += 20

        for ue in oai_ues:
            environ = {'USER': '******'}
            cefnet_ue_service = Service("cefnetd", service_id="cefnet", environ=environ)
            ue_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ue), username='******',
                              formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            msg = f"Wait 60s and then ping faraday gateway from UE on fit{ue:02d}"
            ue_commands = f"echo {msg}; sleep 60; ping -c 5 -I oip1 faraday.inria.fr"

            if tcp_streaming:
                # TCP streaming scenario
                if load_nodes:
                    # fix: ue_commands does not end with ';' at this point,
                    # so a separator is needed before appending more commands
                    ue_commands += "; sysctl -w net.ipv4.ip_forward=1;"
                    ue_commands += "ip route add 10.1.1.0/24 via 192.168.2.32 dev data"
                job_setup_ue = [
                    SshJob(
                        node=ue_node,
                        commands=[
                            Run(ue_commands, label="test UE link and set up routing for TCP streaming"),
                            ],
                        label=f"ping faraday gateway from UE on fit{ue:02d} and set up routing for the TCP streaming scenario",
                        critical=True,
                        required=job_start_ues,
                        scheduler=scheduler)
                    ]
            else:
                # Cefore streaming scenario
                if load_nodes:
                    # fix: same missing separator as in the TCP branch
                    ue_commands += "; sysctl -w net.ipv4.ip_forward=1;"
                    ue_commands += f"ip route add {publisher_ip}/32 dev oip1;"
                    ue_commands += "ip route add 10.1.1.0/24 via 192.168.2.32 dev data;"
                    ue_commands += "iptables -t nat -A POSTROUTING -s 10.1.1.2/32  -j SNAT --to-source 172.16.0.2;"
                    ue_commands += "iptables -t nat -A PREROUTING -d 172.16.0.2   -j DNAT --to-destination 10.1.1.2;"
                    ue_commands += "iptables -A FORWARD -d 10.1.1.2/32 -i oip1 -j ACCEPT;"
                    ue_commands += f"iptables -A FORWARD -d {publisher_ip}/32 -i data -j ACCEPT;"
                    ue_commands += "ip rule del from all to 172.16.0.2 lookup 201;"
                    ue_commands += "ip rule del from 172.16.0.2 lookup 201;"
                    ue_commands += "ip rule add from 10.1.1.2 lookup lte prio 32760;"
                    ue_commands += "ip rule add from all to 172.16.0.2 lookup lte prio 32761;"
                    ue_commands += "ip rule add from all fwmark 0x1 lookup lte prio 32762;"
                    ue_commands += "ip route add table lte 10.1.1.0/24 via 192.168.2.32 dev data;"
#                    ue_commands += "killall cefnetd || true"
                job_setup_ue = [
                    SshJob(
                        node=ue_node,
                        commands=[
                            Run(ue_commands, label="test UE link and set up routing for Cefore streaming"),
                            cefnet_ue_service.start_command(),
                            ],
                        label=f"ping faraday gateway from fit{ue:02d} UE and set up routing for the Cefore streaming scenario",
                        critical=True,  # old cefnetd not killed when running new one...
                        required=job_start_ues,
                        scheduler=scheduler)
                    ]

        if ns3:
            ns3_node = SshNode(gateway=gwnode, hostname=r2lab_hostname(ns3), username='******',
                               formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
            msg = f"Wait for the UE node to be ready before running the streaming scenario with ns-3 on fit{ns3}"

            if load_nodes:
                job_prepare_ns3_node = [
                    SshJob(
                        node=ns3_node,
                        commands=[
                            Run("turn-on-data"),
                            Run("ifconfig data promisc up"),
                            Run("ip route add default via 192.168.2.6 dev data || true"),
                            Run("sysctl -w net.ipv4.ip_forward=1"),
                            ],
                        label=f"setup routing on ns-3 fit{ns3:02d} node",
                        # ip route may already be there so the ip route command may fail
                        critical=True,
                        required=job_setup_ue,
                        scheduler=scheduler)
                    ]
                ns3_requirements = job_prepare_ns3_node
            else:
                ns3_requirements = job_setup_ue

            if not tcp_streaming:
                environ = {'USER': '******'}
                cefnet_ns3_service = Service("cefnetd", service_id="cefnet", environ=environ)
                job_start_cefnet_on_cn = [
                    SshJob(
                        node=cnnode,
                        commands=[
                            Run(f"echo 'ccn:/streaming tcp {publisher_ip}:80' > /usr/local/cefore/cefnetd.conf"),
#                            Run("killall cefnetd || true"),# not done by default with service.start_command()
                            cefnet_ns3_service.start_command(),
                            ],
                        label=f"Start Cefnet on EPC running at fit{cn:02d}",
                        critical=True,  # old cefnetd not killed when running new one...
                        required=ns3_requirements,
                        scheduler=scheduler,
                        )
                    ]

    # ditto
    _job_ping_phones_from_cn = [
        SshJob(
            node=cnnode,
            commands=[
                Run("sleep 20"),
                Run(f"ping -c 100 -s 100 -i .05 172.16.0.{id+1} &> /root/ping-phone{id}"),
                ],
            label=f"ping phone {id} from core network",
            critical=False,
            required=job_start_phones,
            scheduler=scheduler)
        for id in phones]

    ########## xterm nodes

    colors = ("wheat", "gray", "white", "darkolivegreen")

    xterms = e3372_ue_xterms + gnuradio_xterms

    for xterm, color in zip(xterms, cycle(colors)):
        xterm_node = SshNode(
            gateway=gwnode, hostname=r2lab_hostname(xterm), username='******',
            formatter=TimeColonFormatter(verbose=verbose), debug=verbose)
        SshJob(
            node=xterm_node,
            # fix: the first fragment had a redundant f-prefix (no placeholder)
            command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*",
                        f" -bg {color} -geometry 90x10",
                        x11=True),
            label=f"xterm on node {xterm_node.hostname}",
            scheduler=scheduler,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
            )

    # remove dangling requirements - if any
    # should not be needed but won't hurt either
    scheduler.sanitize()

    ##########
    print(10*"*", "nodes usage summary")
    if load_nodes:
        for image, nodes in images_to_load.items():
            for node in nodes:
                print(f"node fit{node:02d} : {image}")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10*"*", "phones usage summary")
    if phones:
        for phone in phones:
            print(f"Using phone{phone}")
    else:
        print("No phone involved")
    if nodes_left_alone:
        print(f"Ignore following fit nodes: {nodes_left_alone}")

    print(f"Publisher IP is {publisher_ip}")
    if tcp_streaming:
        print("Run streaming scenario with TCP")
    else:
        print("Run streaming scenario with Cefore")
    # wrap scheduler into global scheduler that prepares the testbed
    scheduler = prepare_testbed_scheduler(
        gwnode, load_nodes, scheduler, images_to_load, nodes_left_alone)

    scheduler.check_cycles()
    # Update the .dot and .png file for illustration purposes
    name = "cefore-load" if load_nodes else "cefore"
    print(10*'*', 'See main scheduler in',
          scheduler.export_as_pngfile(name))

    if verbose:
        scheduler.list()

    if dry_run:
        return True

    if verbose:
        input('OK ? - press control C to abort ? ')

    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print("RUN OK")
    print(80*'*')
    if tcp_streaming:
        # TCP streaming scenario
        print(f"Now it's time to run the ns-3 script on node fit{ns3:02d}")
        print(f"root@fit{ns3:02d}:~# /root/NS3/source/ns-3-dce/waf  --run dce-tcp-test")
        print("Then, run iperf on the publisher host:")
        print("yourlogin@publisher:~# iperf -s -P 1 -p 80")
        print(f"Log file will be available on fit{ns3:02d} at:")
        print("  /root/NS3/source/ns-3-dce/files-4/var/log/56884/stdout")
    else:
        # Cefore streaming scenario
        print("Now, if not already done, copy cefnetd and cefputfile binaries on your publisher host")
        print("login@your_host:r2lab-demos/cefore# scp bin/cefnetd yourlogin@publisher_node:/usr/local/sbin")
        print("login@your_host:r2lab-demos/cefore# scp bin/cefputfile yourlogin@publisher_node:/user/local/bin")
        print(f"After that, run on the ns-3 fit{ns3:02d} node the following command:")
        print(f"root@fit{ns3:02d}:~# /root/NS3/source/ns-3-dce/waf  --run dce-cefore-test ")
        print("Then, run on the publisher host:")
        print("yourlogin@publisher:~# cefnetd")
        print("yourlogin@publisher:~# cefputfile ccn:/streaming/test -f ./[file-name] -r [1 <= streaming_rate <= 32 (Mbps)]")
        print(f"Log file will be available on fit{ns3:02d} at:")
        print("  /root/NS3/source/ns-3-dce/files-3/tmp/cefgetstream-thuputLog-126230400110000")
    print(80*'*')

    return True
コード例 #6
0
    def run(self, verbose, no_load, no_save):
        """
        Run the complete image-building scenario on one node.

        Roughly: load ``self.from_image`` on the node, push a tar of the
        build scripts, run them through the companion shell, optionally
        collect extra logs and check for expected binaries, then save
        the result as ``self.to_image``.

        Parameters:
          verbose: print extra progress details along the way
          no_load: fast-track mode - skip the initial image-load phase
          no_save: fast-track mode - skip the final image-save phase

        Returns:
          True if the whole scheduler orchestrated fine, False otherwise.
        """

        print("Using node {} through gateway {}".format(
            self.node, self.gateway))
        print("In order to produce {} from {}".format(self.to_image,
                                                      self.from_image))
        print("The following scripts will be run:")
        for i, script in enumerate(self.scripts, 1):
            print("{:03d}:{}".format(i, " ".join(script)))

        # summarize which phases are being skipped, if any
        items = []
        if no_load: items.append("skip load")
        if no_save: items.append("skip save")
        if items:
            print("WARNING: using fast-track mode {}".format(
                ' & '.join(items)))

        self.locate_companion_shell()
        if verbose:
            print("Located companion in {}".format(self.companion))

        if verbose:
            print("Preparing tar of input shell scripts .. ", end="")
        tarfile = self.prepare_tar(self.to_image)
        if verbose:
            print("Done in {}".format(tarfile))

        # ssh credentials are taken from the running ssh agent
        keys = load_agent_keys()
        if verbose:
            print("We have found {} keys in the ssh agent".format(len(keys)))

        #################### the 2 nodes we need to talk to
        gateway_proxy = None
        gwuser, gwname = self.user_host(self.gateway)
        # an empty gateway user means we are running on the gateway itself,
        # in which case no ssh proxy is needed (see LocalNode below)
        gateway_proxy = None if not gwuser else SshNode(
            hostname=gwname,
            username=gwuser,
            keys=keys,
            formatter=ColonFormatter(verbose=verbose),
        )

        # really not sure it makes sense to use username other than root
        username, nodename = self.user_host(self.node)
        node_proxy = SshNode(
            gateway=gateway_proxy,
            hostname=nodename,
            username=username,
            keys=keys,
            formatter=ColonFormatter(verbose=verbose),
        )

        banner = 20 * '='

        # now that node_proxy is initialized, we need to
        # have a valid gateway_proxy for when we run all this from inside
        # the gateway
        if gateway_proxy is None:
            print("WARNING: build-image is designed to be run on your laptop")
            # best-effort, not even tested....
            gateway_proxy = LocalNode()

        #################### the little pieces
        # the scenario is one linear Sequence of jobs:
        # lease check -> image load -> push & run scripts -> pull logs
        sequence = Sequence(
            PrintJob("Checking for a valid lease"),
            # bail out if we don't have a valid lease
            SshJob(node = gateway_proxy,
                   command = "rhubarbe leases --check",
                   critical = True),
            PrintJob("loading image {}".format(self.from_image)
                     if not no_load else "fast-track: skipping image load",
                     banner = banner,
                     #                     label = "welcome message",
                 ),
            SshJob(
                node = gateway_proxy,
                commands = [
                    # NOTE: in fast-track mode this leaves a None entry in
                    # commands - presumably tolerated/filtered by SshJob,
                    # and sched.sanitize() is called before orchestration
                    Run("rhubarbe", "load", "-i", self.from_image, nodename) \
                       if not no_load else None,
                    Run("rhubarbe", "wait", "-v", "-t", "240", nodename),
                ],
                #                label = "load and wait image {}".format(self.from_image),
            ),
            SshJob(
                node = node_proxy,
                commands = [
                    # start from a clean per-image history dir on the node
                    Run("rm", "-rf", "/etc/rhubarbe-history/{}".format(self.to_image)),
                    Run("mkdir", "-p", "/etc/rhubarbe-history"),
                    Push(localpaths = tarfile,
                         remotepath = "/etc/rhubarbe-history"),
                    RunScript(self.companion, nodename, self.from_image, self.to_image),
                    # bring back the logs produced by the scripts
                    Pull(localpath = "{}/logs/".format(self.to_image),
                         remotepaths = "/etc/rhubarbe-history/{}/logs/".format(self.to_image),
                         recurse = True),
                ],
                label = "set up and run scripts in /etc/rhubarbe-history/{}".format(self.to_image)),
            )
        # avoid creating an SshJob with void commands
        if self.extra_logs:
            sequence.append(
                SshJob(
                    node=node_proxy,
                    label="collecting extra logs",
                    critical=False,
                    commands=[
                        Pull(localpath="{}/logs/".format(self.to_image),
                             remotepaths=extra_log,
                             recurse=True) for extra_log in self.extra_logs
                    ],
                ))

        # creating these as critical = True means the whole
        # scenario will fail if these are not found
        for binary in self.expected_binaries:
            # absolute paths are checked with ls, bare names with type -p
            check_with = "ls" if os.path.isabs(binary) else ("type -p")
            sequence.append(
                Sequence(
                    PrintJob(
                        "Checking for expected binaries",
                        #                             label = "message about checking"
                    ),
                    SshJob(
                        node=node_proxy,
                        command=[check_with, binary],
                        #                        label = "Checking for {}".format(binary)
                    )))

        # xxx some flag
        if no_save:
            sequence.append(
                PrintJob("fast-track: skipping image save", banner=banner))
        else:
            sequence.append(
                Sequence(
                    PrintJob("saving image {} ...".format(self.to_image),
                             banner=banner),
                    # make sure we capture all the logs and all that
                    # mostly to test RunString
                    SshJob(
                        node=node_proxy,
                        command=RunString("sync ; sleep $1; sync; sleep $1",
                                          1),
                        #                        label = 'sync',
                    ),
                    SshJob(
                        node=gateway_proxy,
                        command=Run("rhubarbe", "save", "-o", self.to_image,
                                    nodename),
                        #                        label = "save image {}".format(self.to_image),
                    ),
                    SshJob(
                        node=gateway_proxy,
                        command="rhubarbe images -d",
                        #                        label = "list current images",
                    ),
                ))

        sched = Scheduler(sequence, verbose=verbose)
        # sanitizing for the cases where some pieces are left out
        sched.sanitize()

        print(20 * '+', "before run")
        sched.list(details=verbose)
        print(20 * 'x')
        if sched.orchestrate():
            if verbose:
                print(20 * '+', "after run")
                sched.list()
                print(20 * 'x')
            print("image {} OK".format(self.to_image))
            return True
        else:
            print("Something went wrong with image {}".format(self.to_image))
            print(20 * '+', "after run - KO")
            sched.debrief()
            print(20 * 'x')
            return False
コード例 #7
0
ファイル: mosaic-demo.py プロジェクト: fit-r2lab/r2lab-demos
def run(
        *,  # pylint: disable=r0912, r0914, r0915
        # the pieces to use
    slicename,
        cn,
        ran,
        phones,
        e3372_ues,
        oai_ues,
        gnuradios,
        e3372_ue_xterms,
        gnuradio_xterms,
        # boolean flags
        load_nodes,
        reset_usb,
        oscillo,
        # the images to load
        image_cn,
        image_ran,
        image_oai_ue,
        image_e3372_ue,
        image_gnuradio,
        image_T_tracer,
        # miscell
        n_rb,
        nodes_left_alone,
        T_tracer,
        verbose,
        dry_run):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slicename : s.t like [email protected]
    * cn : 7
    * ran : 23
    * phones: list of indices of phones to use

    * e3372_ues : list of nodes to use as a UE using e3372
    * oai_ues   : list of nodes to use as a UE using OAI
    * gnuradios : list of nodes to load with a gnuradio image
    * T_tracer  : list of nodes to load with a tracer image

    * image_* : the name of the images to load on the various nodes

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_cn, image_ran and image_*
                  are used to tell the image names
    * reset_usb : the USRP board will be reset when this is set

    Returns True when the scenario was orchestrated successfully
    (or when dry_run is set), False otherwise.
    """

    # what argparse knows as a slice actually is about the gateway (user + host)
    gwuser, gwhost = r2lab_parse_slice(slicename)
    gwnode = SshNode(hostname=gwhost,
                     username=gwuser,
                     formatter=TimeColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = [r2lab_hostname(x) for x in (cn, ran)]

    # ssh proxies for the core-network and radio-access nodes
    cnnode, rannode = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=TimeColonFormatter(verbose=verbose),
                debug=verbose) for hostname in hostnames
    ]

    scheduler = Scheduler(verbose=verbose, label="CORE EXP")

    ########## prepare the image-loading phase
    # focus on the experiment, and use
    # prepare_testbed_scheduler later on to prepare testbed
    # all we need to do at this point is compute a mapping dict
    # image -> list-of-nodes

    images_to_load = defaultdict(list)
    images_to_load[image_cn] += [cn]
    images_to_load[image_ran] += [ran]
    if e3372_ues:
        images_to_load[image_e3372_ue] += e3372_ues
    if e3372_ue_xterms:
        images_to_load[image_e3372_ue] += e3372_ue_xterms
    if oai_ues:
        images_to_load[image_oai_ue] += oai_ues
    if gnuradios:
        images_to_load[image_gnuradio] += gnuradios
    if gnuradio_xterms:
        images_to_load[image_gnuradio] += gnuradio_xterms
    if T_tracer:
        images_to_load[image_T_tracer] += T_tracer

    # start core network
    job_start_cn = SshJob(
        node=cnnode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab",
                      includes=INCLUDES),
            # clear out old journal entries before starting services
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "journal --vacuum-time=1s",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "configure",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-cn.sh"),
                      "start",
                      includes=INCLUDES),
            tcpdump_cn_service.start_command(),
        ],
        label="start CN service",
        scheduler=scheduler,
    )

    # prepare enodeb
    reset_option = "-u" if reset_usb else ""
    job_warm_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(find_local_embedded_script("nodes.sh"),
                      "git-pull-r2lab",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "journal --vacuum-time=1s",
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "warm-up",
                      reset_option,
                      includes=INCLUDES),
            RunScript(find_local_embedded_script("mosaic-ran.sh"),
                      "configure -b",
                      n_rb,
                      cn,
                      includes=INCLUDES),
        ],
        label="Configure eNB",
        scheduler=scheduler,
    )

    # the jobs that must complete before the grace delay below
    ran_requirements = [job_start_cn, job_warm_ran]
    ###
    if oai_ues:
        # prepare OAI UEs
        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode,
                              hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            # NOTE(review): job_warm_ues is a one-element list rebuilt at each
            # iteration, and the *list* is appended into ran_requirements -
            # presumably the requirements machinery flattens nested lists;
            # confirm against the asynciojobs API
            job_warm_ues = [
                SshJob(node=ue_node,
                       commands=[
                           RunScript(find_local_embedded_script("nodes.sh"),
                                     "git-pull-r2lab",
                                     includes=INCLUDES),
                           RunScript(
                               find_local_embedded_script("mosaic-oai-ue.sh"),
                               "journal --vacuum-time=1s",
                               includes=INCLUDES),
                           RunScript(
                               find_local_embedded_script("mosaic-oai-ue.sh"),
                               "warm-up",
                               reset_option,
                               includes=INCLUDES),
                           RunScript(
                               find_local_embedded_script("mosaic-oai-ue.sh"),
                               "configure -b",
                               n_rb,
                               includes=INCLUDES),
                       ],
                       label=f"Configure OAI UE on fit{ue}",
                       scheduler=scheduler)
            ]
            ran_requirements.append(job_warm_ues)

###
    if not load_nodes and phones:
        # phones are only turned off when images are not being reloaded
        job_turn_off_phones = SshJob(
            node=gwnode,
            commands=[
                RunScript(find_local_embedded_script("faraday.sh"),
                          f"macphone{phone} phone-off") for phone in phones
            ],
            scheduler=scheduler,
        )
        ran_requirements.append(job_turn_off_phones)

    # wait for everything to be ready, and add an extra grace delay

    grace = 5
    grace_delay = PrintJob(
        f"Allowing grace of {grace} seconds",
        sleep=grace,
        required=ran_requirements,
        scheduler=scheduler,
        label=f"settle for {grace}s",
    )

    # optionally start T_tracer
    if T_tracer:
        job_start_T_tracer = SshJob(  # pylint: disable=w0612
            node=SshNode(gateway=gwnode,
                         hostname=r2lab_hostname(T_tracer[0]),
                         username='******',
                         formatter=TimeColonFormatter(verbose=verbose),
                         debug=verbose),
            commands=[
                Run(f"/root/trace {ran}", x11=True),
            ],
            label="start T_tracer service",
            required=ran_requirements,
            scheduler=scheduler,
        )
#        ran_requirements.append(job_start_T_tracer)

# start services

    graphical_option = "-x" if oscillo else ""
    graphical_message = "graphical" if oscillo else "regular"
    tracer_option = " -T" if T_tracer else ""

    # we use a Python variable for consistency
    # although it not used down the road
    _job_service_ran = SshJob(
        node=rannode,
        commands=[
            RunScript(
                find_local_embedded_script("mosaic-ran.sh"),
                "start",
                graphical_option,
                tracer_option,
                includes=INCLUDES,
                x11=oscillo,
            ),
        ],
        label=f"start {graphical_message} softmodem on eNB",
        required=grace_delay,
        scheduler=scheduler,
    )

    ########## run experiment per se
    # Manage phone(s) and OAI UE(s)
    # this starts at the same time as the eNB, but some
    # headstart is needed so that eNB actually is ready to serve
    sleeps = [20, 30]
    phone_msgs = [
        f"wait for {sleep}s for eNB to start up before waking up phone{id}"
        for sleep, id in zip(sleeps, phones)
    ]
    wait_commands = [
        f"echo {msg}; sleep {sleep}" for msg, sleep in zip(phone_msgs, sleeps)
    ]

    # one job per phone: wait, power on, start the measurement app
    job_start_phones = [
        SshJob(node=gwnode,
               commands=[
                   Run(wait_command),
                   RunScript(find_local_embedded_script("faraday.sh"),
                             f"macphone{id}",
                             "r2lab-embedded/shell/macphone.sh",
                             "phone-on",
                             includes=INCLUDES),
                   RunScript(find_local_embedded_script("faraday.sh"),
                             f"macphone{id}",
                             "r2lab-embedded/shell/macphone.sh",
                             "phone-start-app",
                             includes=INCLUDES),
               ],
               # NOTE(review): "airplace" looks like a typo for "airplane"
               # in this label (runtime string, left untouched here)
               label=f"turn off airplace mode on phone {id}",
               required=grace_delay,
               scheduler=scheduler)
        for id, wait_command in zip(phones, wait_commands)
    ]

    if oai_ues:
        # stagger UE start-ups by 20s each
        delay = 25
        for ue in oai_ues:
            msg = f"wait for {delay}s for eNB to start up before running UE on node fit{ue}"
            wait_command = f"echo {msg}; sleep {delay}"
            ue_node = SshNode(gateway=gwnode,
                              hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            # NOTE(review): job_start_ues is overwritten at each iteration,
            # so the required= below only sees the job for the *last* UE -
            # confirm whether this staggering behavior is intentional
            job_start_ues = [
                SshJob(node=ue_node,
                       commands=[
                           Run(wait_command),
                           RunScript(
                               find_local_embedded_script("mosaic-oai-ue.sh"),
                               "start",
                               includes=INCLUDES),
                       ],
                       label=f"Start OAI UE on fit{ue}",
                       required=grace_delay,
                       scheduler=scheduler)
            ]
            delay += 20

        # sanity check: each UE pings the gateway once it is up
        for ue in oai_ues:
            ue_node = SshNode(gateway=gwnode,
                              hostname=r2lab_hostname(ue),
                              username='******',
                              formatter=TimeColonFormatter(verbose=verbose),
                              debug=verbose)
            msg = f"Wait 60s and then ping faraday gateway from UE on fit{ue}"
            _job_ping_gw_from_ue = [
                SshJob(node=ue_node,
                       commands=[
                           Run(f"echo {msg}; sleep 60"),
                           Run(f"ping -c 5 -I oip1 faraday.inria.fr"),
                       ],
                       label=f"ping faraday gateway from UE on fit{ue}",
                       critical=False,
                       required=job_start_ues,
                       scheduler=scheduler)
            ]

    # ditto
    _job_ping_phones_from_cn = [
        SshJob(
            node=cnnode,
            commands=[
                Run("sleep 20"),
                Run(f"ping -c 100 -s 100 -i .05 172.16.0.{id+1} &> /root/ping-phone{id}"
                    ),
            ],
            label=f"ping phone {id} from core network",
            critical=False,
            required=job_start_phones,
            scheduler=scheduler) for id in phones
    ]

    ########## xterm nodes

    colors = ("wheat", "gray", "white", "darkolivegreen")

    xterms = e3372_ue_xterms + gnuradio_xterms

    # open one xterm per extra node, cycling through background colors
    for xterm, color in zip(xterms, cycle(colors)):
        xterm_node = SshNode(gateway=gwnode,
                             hostname=r2lab_hostname(xterm),
                             username='******',
                             formatter=TimeColonFormatter(verbose=verbose),
                             debug=verbose)
        SshJob(
            node=xterm_node,
            command=Run(f"xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*",
                        f" -bg {color} -geometry 90x10",
                        x11=True),
            label=f"xterm on node {xterm_node.hostname}",
            scheduler=scheduler,
            # don't set forever; if we do, then these xterms get killed
            # when all other tasks have completed
            # forever = True,
        )

    # remove dangling requirements - if any
    # should not be needed but won't hurt either
    scheduler.sanitize()

    ##########
    print(10 * "*", "nodes usage summary")
    if load_nodes:
        for image, nodes in images_to_load.items():
            for node in nodes:
                print(f"node {node} : {image}")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")
    print(10 * "*", "phones usage summary")
    if phones:
        for phone in phones:
            print(f"Using phone{phone}")
    else:
        print("No phone involved")
    if nodes_left_alone:
        print(f"Ignore following fit nodes: {nodes_left_alone}")

    # wrap scheduler into global scheduler that prepares the testbed
    scheduler = prepare_testbed_scheduler(gwnode, load_nodes, scheduler,
                                          images_to_load, nodes_left_alone)

    scheduler.check_cycles()
    # Update the .dot and .png file for illustration purposes
    name = "mosaic-load" if load_nodes else "mosaic"
    print(10 * '*', 'See main scheduler in', scheduler.export_as_pngfile(name))

    if verbose:
        scheduler.list()

    if dry_run:
        return True

    if verbose:
        input('OK ? - press control C to abort ? ')

    if not scheduler.orchestrate():
        print(f"RUN KO : {scheduler.why()}")
        scheduler.debrief()
        return False
    print("RUN OK")
    return True
コード例 #8
0
def run(slice, hss, epc, enb, extras, load_nodes, image_gw, image_enb,
        image_extra, reset_nodes, reset_usrp, spawn_xterms, verbose):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slice : s.t like [email protected]
    * hss : 04
    * epc : 03
    * enb : 23
    * extras : a list of ids that will be loaded with the gnuradio image

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_gw, image_enb and image_extra
                  are used to tell the image names
    * reset_nodes: if load_nodes is false and reset_nodes is true, the nodes are reset - i.e. rebooted
    * otherwise (both False): do nothing
    * reset_usrp : if not False, the USRP board won't be reset
    * spawn_xterms : if set, starts xterm on all extra nodes
    * image_* : the name of the images to load on the various nodes
    """

    # what argparse knows as a slice actually is a gateway (user + host)
    gwuser, gwhost = parse_slice(slice)
    gwnode = SshNode(hostname=gwhost,
                     username=gwuser,
                     formatter=ColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = hssname, epcname, enbname = [
        r2lab_hostname(x) for x in (hss, epc, enb)
    ]
    extra_hostnames = [r2lab_hostname(x) for x in extras]

    hssnode, epcnode, enbnode = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=ColonFormatter(verbose=verbose),
                debug=verbose) for hostname in hostnames
    ]

    extra_nodes = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=ColonFormatter(verbose=verbose),
                debug=verbose) for hostname in extra_hostnames
    ]

    ########## preparation
    job_check_for_lease = SshJob(
        node=gwnode,
        command=["rhubarbe", "leases", "--check"],
        label="check we have a current lease",
    )

    # turn off all nodes
    turn_off_command = ["rhubarbe", "off", "-a"]
    # except our 3 nodes and the optional extras
    turn_off_command += [
        "~{}".format(x) for x in [hss, epc, enb] + extras + [20]
    ]

    job_off_nodes = SshJob(
        node=gwnode,
        # switch off all nodes but the ones we use
        command=turn_off_command,
        label="turn off unused nodes",
        required=job_check_for_lease,
    )

    # actually run this in the gateway, not on the mac
    # the ssh keys are stored in the gateway and we do not yet have
    # the tools to leverage such remote keys
    job_stop_phone = SshJob(
        node=gwnode,
        command=RunScript(locate_local_script("faraday.sh"),
                          "macphone",
                          "r2lab/infra/user-env/macphone.sh",
                          "phone-off",
                          includes=includes),
        label="stop phone",
        required=job_check_for_lease,
    )

    jobs_prepare = [job_check_for_lease, job_stop_phone]
    # turn off nodes only when --load or --reset is set
    if load_nodes or reset_nodes:
        jobs_prepare.append(job_off_nodes)

    ########## infra nodes hss + epc

    # prepare nodes

    commands = []
    if load_nodes:
        commands.append(
            Run("rhubarbe", "load", "-i", image_gw, hssname, epcname))
    elif reset_nodes:
        commands.append(Run("rhubarbe", "reset", hssname, epcname))
    # always do this
    commands.append(Run("rhubarbe", "wait", "-t", 120, hssname, epcname))
    job_load_infra = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait HSS and EPC nodes",
        required=jobs_prepare,
    )

    # start services

    job_service_hss = SshJob(
        node=hssnode,
        command=RunScript(locate_local_script("oai-hss.sh"),
                          "run-hss",
                          epc,
                          includes=includes),
        label="start HSS service",
        required=job_load_infra,
    )

    msg = "wait for HSS to warm up"
    job_service_epc = Sequence(
        # let 15 seconds to HSS
        Job(
            verbose_delay(15, msg),
            label=msg,
        ),
        SshJob(
            node=epcnode,
            command=RunScript(locate_local_script("oai-epc.sh"),
                              "run-epc",
                              hss,
                              includes=includes),
            label="start EPC services",
        ),
        required=job_load_infra,
    )

    jobs_infra = job_load_infra, job_service_hss, job_service_epc

    ########## enodeb

    # prepare node

    commands = []
    if load_nodes:
        commands.append(Run("rhubarbe", "usrpoff", enb))
        commands.append(Run("rhubarbe", "load", "-i", image_enb, enb))
    elif reset_nodes:
        commands.append(Run("rhubarbe", "reset", enb))
    commands.append(Run("rhubarbe", "wait", "-t", "120", enb))

    job_load_enb = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait ENB",
        required=jobs_prepare,
    )

    # start service

    msg = "wait for EPC to warm up"
    job_service_enb = Sequence(
        Job(verbose_delay(15, msg), label=msg),
        SshJob(
            node=enbnode,
            # run-enb expects the id of the epc as a parameter
            command=RunScript(locate_local_script("oai-enb.sh"),
                              "run-enb",
                              epc,
                              reset_usrp,
                              includes=includes),
            label="start softmodem on ENB",
        ),
        required=(job_load_enb, job_service_hss, job_service_epc),
    )

    jobs_enb = job_load_enb, job_service_enb

    ########## run experiment per se

    # the phone
    # we need to wait for the USB firmware to be loaded
    duration = 30 if reset_usrp is not False else 8
    msg = "wait for enodeb firmware to load on USRP".format(duration)
    job_wait_enb = Job(verbose_delay(duration, msg),
                       label=msg,
                       required=job_service_enb)

    job_start_phone = SshJob(
        node=gwnode,
        commands=[
            RunScript(locate_local_script("faraday.sh"),
                      "macphone",
                      "r2lab/infra/user-env/macphone.sh",
                      "phone-on",
                      includes=includes),
            RunScript(locate_local_script("faraday.sh"),
                      "macphone",
                      "r2lab/infra/user-env/macphone.sh",
                      "phone-start-app",
                      includes=includes),
        ],
        label="start phone 4g and speedtest app",
        required=job_wait_enb,
    )

    job_ping_phone_from_epc = SshJob(
        node=epcnode,
        commands=[
            Run("sleep 10"),
            Run("ping -c 100 -s 100 -i .05 172.16.0.2 &> /root/ping-phone"),
        ],
        label="ping phone from EPC",
        critical=False,
        required=job_wait_enb,
    )

    jobs_exp = job_wait_enb, job_start_phone, job_ping_phone_from_epc

    ########## extra nodes
    # ssh -X not yet supported in apssh, so one option is to start them using
    # a local process
    # xxx to update: The following code kind of works, but it needs to be
    # turned off, because the process in question would be killed
    # at the end of the Scheduler orchestration (at the end of the run function)
    # which is the exact time where it would be useful :)
    # however the code for LocalJob appears to work fine, it would be nice to
    # move it around - maybe in apssh ?

    # Build the command list that prepares the extra nodes:
    # - no extras requested: a no-op echo so the job still has a command
    # - load_nodes: usrp off, load image, wait, usrp back on
    # - reset_nodes: plain reset
    # in both active cases, finish with a wait so the job returns only
    # once the nodes are ssh-reachable
    commands = []
    if not extras:
        commands.append(Run("echo no extra nodes specified - ignored"))
    else:
        if load_nodes:
            commands.append(Run("rhubarbe", "usrpoff", *extra_hostnames))
            commands.append(
                Run("rhubarbe", "load", "-i", image_extra, *extra_hostnames))
            commands.append(
                # bug fix: timeout was passed as int 120 here but as the
                # string "120" below - normalized to the string form
                Run("rhubarbe", "wait", "-t", "120", *extra_hostnames))
            commands.append(Run("rhubarbe", "usrpon", *extra_hostnames))
        elif reset_nodes:
            # bug fix: extra_hostnames was passed as a single list argument
            # instead of being unpacked like in every other rhubarbe call
            commands.append(Run("rhubarbe", "reset", *extra_hostnames))
        commands.append(Run("rhubarbe", "wait", "-t", "120", *extra_hostnames))
    # single job on the gateway that runs all the above in sequence
    job_load_extras = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait extra nodes",
        required=job_check_for_lease,
    )

    jobs_extras = [job_load_extras]

    # rotating background colors, so the xterms are easy to tell apart
    colors = ["wheat", "gray", "white"]

    if spawn_xterms:
        # one xterm job per extra node, started once that node is loaded;
        # appended directly to jobs_extras
        for xterm_node, bg_color in zip(extra_nodes, itertools.cycle(colors)):
            jobs_extras.append(SshJob(
                node=xterm_node,
                command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*"
                            " -bg {} -geometry 90x10".format(bg_color),
                            x11=True),
                label="xterm on node {}".format(xterm_node.hostname),
                required=job_load_extras,
                # don't set forever; if we do, then these xterms get killed
                # when all other tasks have completed
                # forever = True,
            ))

    # schedule the load phases only if required
    # assemble the global scheduler from the job groups built above
    sched = Scheduler(verbose=verbose)
    # this is just a way to add a collection of jobs to the scheduler
    sched.update(jobs_prepare)
    sched.update(jobs_infra)
    sched.update(jobs_enb)
    sched.update(jobs_exp)
    sched.update(jobs_extras)
    # remove dangling requirements - if any - should not be needed but won't hurt either
    sched.sanitize()

    # Print a one-line summary of what this run is about to do.
    print(40 * "*")
    if load_nodes:
        # bug fix: the format string has three placeholders but the original
        # call passed four arguments starting with load_nodes, so the output
        # showed e.g. "gw->True" and silently dropped the extras image name
        print("LOADING IMAGES: (gw->{}, enb->{}, extras->{})".format(
            image_gw, image_enb, image_extra))
    elif reset_nodes:
        print("RESETTING NODES")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")

    # sanity-check the requirement graph (cycles etc.) before running
    sched.rain_check()
    # Update the .dot and .png file for illustration purposes
    if verbose:
        sched.list()
        # pick a file name that reflects the selected mode
        name = "scenario-load" if load_nodes else \
               "scenario-reset" if reset_nodes else \
               "scenario"
        sched.export_as_dotfile("{}.dot".format(name))
        # requires graphviz's `dot` to be installed locally
        os.system("dot -Tpng {}.dot -o {}.png".format(name, name))

    # NOTE(review): with verbose set, the scheduler gets listed twice
    # (here and in the branch above) - confirm this is intentional
    sched.list()

    # run the scheduler; orchestrate() returns False when any critical
    # job failed
    if not sched.orchestrate():
        print("RUN KO : {}".format(sched.why()))
        # give details about the failing job(s)
        sched.debrief()
        return False
    else:
        print("RUN OK")
        return True