Example 1
    def run(self, message_bus, timeout):
        """
        sends the verb to all nodes, waiting for at most `timeout`;
        returns True if all nodes behaved as expected,
        and False otherwise - including in case of KeyboardInterrupt
        """

        nodes = [
            Node(cmc_name, message_bus)
            for cmc_name in self.selector.cmc_names()
        ]
        jobs = [
            Job(self.get_and_show_verb(node, self.verb), critical=True)
            for node in nodes
        ]
        display = Display(nodes, message_bus)
        scheduler = Scheduler(Job(display.run(), forever=True, critical=True),
                              *jobs,
                              timeout=timeout,
                              critical=False)
        try:
            if scheduler.run():
                return True
            else:
                scheduler.debrief()
                print(f"rhubarbe-{self.verb} failed: {scheduler.why()}")
                return False
        except KeyboardInterrupt:
            print(f"rhubarbe-{self.verb} : keyboard interrupt - exiting")
            return False
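
The snippet above relies on rhubarbe-specific classes (Node, Display, the verb machinery), but the scheduling pattern itself comes from asynciojobs: worker jobs plus a forever job, all bounded by a global timeout. Below is a minimal, self-contained sketch of that same pattern; the `work` and `tick` coroutines are made up for illustration and stand in for the real per-node tasks and the display.

# minimal sketch of the pattern above, using only asynciojobs
# (the work and tick coroutines are made up for illustration)
import asyncio
from asynciojobs import Scheduler, Job

async def work(i):
    # stand-in for a real per-node task
    await asyncio.sleep(0.1 * i)
    print(f"worker {i} done")

async def tick():
    # stand-in for the display job: runs until the scheduler kills it
    while True:
        print("tick")
        await asyncio.sleep(0.5)

jobs = [Job(work(i), critical=True) for i in range(3)]
scheduler = Scheduler(Job(tick(), forever=True, critical=True),
                      *jobs,
                      timeout=5,
                      critical=False)
if not scheduler.run():
    scheduler.debrief()
    print(f"failed: {scheduler.why()}")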
Example 2
    def test_nesting_sequence(self):

        expected_duration = 1.

        watch = Watch('test_nesting_sequence')

        subjob = Scheduler(
            Sequence(
                Job(co_print_sleep(watch, .2, "one")),
                Job(co_print_sleep(watch, .2, "two")),
                Job(co_print_sleep(watch, .2, "three")),
            ),
            watch=watch,
            label="sub-scheduler\non several lines",
            critical=True,
            forever=True,
        )

        main = Scheduler(Sequence(
            Job(co_print_sleep(watch, .2, "BEGIN"), label="job-label"),
            subjob,
            Job(co_print_sleep(watch, .2, "END")),
        ),
                         watch=watch)

        print("===== test_nesting_sequence", "LIST with details")
        main.list(details=True)

        self.assertTrue(main.run())
        self.assertAlmostEqual(watch.seconds(), expected_duration, delta=.05)

        produce_png(main, "test_nesting_sequence")
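
Most of the test snippets in this collection rely on a co_print_sleep helper that lives in the asynciojobs test utilities and is not shown here. A plausible minimal sketch, assuming it just prints a message stamped with the watch's elapsed time and then sleeps for the requested duration:

# assumed sketch of the co_print_sleep test helper (not shown in these examples):
# print a message tagged with the watch's elapsed time, then sleep
import asyncio

async def co_print_sleep(watch, duration, message):
    print(f"{watch.elapsed()} {message}")
    await asyncio.sleep(duration)
    return message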
Example 3
    def test_nesting1(self):
        """
        one main scheduler in sequence
        one job
        one subscheduler that runs 2 jobs in parallel
        one job
        """

        watch = Watch('test_nesting1')
        # sub-scheduler - total approx 1 s
        sub_sched = PureScheduler(watch=watch)
        Job(co_print_sleep(watch, 0.5, "sub short"), scheduler=sub_sched)
        Job(co_print_sleep(watch, 1, "sub long"), scheduler=sub_sched)

        # main scheduler - total approx 2 s
        main_sched = PureScheduler(watch=watch)
        Sequence(
            Job(co_print_sleep(watch, 0.5, "main begin")),
            # this is where the subscheduler is merged
            Job(sub_sched.co_run(), label='subscheduler'),
            Job(co_print_sleep(watch, 0.5, "main end")),
            scheduler=main_sched)

        print("===== test_nesting1", "LIST with details")
        main_sched.list(details=True)
        ok = main_sched.run()
        self.assertTrue(ok)

        # allow for a small variation around 2s of course
        duration = watch.seconds()
        self.assertAlmostEqual(duration, 2, delta=0.05)
Example 4
    def test_nesting3(self):
        """
        same as test_nesting2
        but using a Scheduler instance
        2 sub schedulers run in parallel while
        the third main one controls them both
        """

        # main scheduler - total approx
        # 0.5 + max(0.5, 1) + 0.5 = 2 s
        expected_duration = 2

        watch = Watch('test_nesting3')
        main_sched = PureScheduler(verbose=True, watch=watch)
        main_sched.label = "main3"
        mainj1 = Job(co_print_sleep(watch, 0.5, "mainj1"),
                     label="mainj1",
                     scheduler=main_sched)

        # sub-scheduler 2 - total approx 0.5 s
        sub_sched2 = diamond_scheduler(watch, 0.5, "SUB2")
        main_sched.add(sub_sched2)
        sub_sched2.requires(mainj1)
        sub_sched2.label = "sub_sched2"
        sub_sched2.verbose = True

        # sub-scheduler 3 - total approx 1 s
        sub_sched3 = diamond_scheduler(watch, 1, "SUB3")
        main_sched.add(sub_sched3)
        sub_sched3.requires(mainj1)
        sub_sched3.label = "sub_sched3"
        sub_sched3.verbose = True

        # last job in main scheduler
        Job(co_print_sleep(watch, 0.5, "mainj4"),
            label="mainj4",
            required=(sub_sched2, sub_sched3),
            scheduler=main_sched)

        for s in main_sched, sub_sched2, sub_sched3:
            if not s.sanitize():
                print(f"OOPS, had to sanitize sched {s.label}")

        print("===== test_nesting3", "LIST without details")
        main_sched.list(details=False)
        produce_png(main_sched, "test_nesting3")

        watch.reset()
        print("---run")
        ok = main_sched.run()
        if not ok:
            main_sched.debrief()
        self.assertTrue(ok)

        # allow for a small variation around 2s of course
        duration = watch.seconds()
        self.assertAlmostEqual(duration, expected_duration, delta=0.05)
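
The diamond_scheduler helper used here (and in several other tests below) also comes from the asynciojobs test utilities. A plausible sketch, building on the co_print_sleep sketch above and assuming a 4-job diamond - entry, two parallel branches, exit - whose total duration is roughly the requested one:

# assumed sketch of the diamond_scheduler helper: a 4-job diamond
#   entry -> (left | right) -> exit
# with three sequential levels, so each job sleeps duration / 3
from asynciojobs import Scheduler, Job

def diamond_scheduler(watch, duration, tag, scheduler_class=Scheduler):
    sched = scheduler_class()
    sched.label = tag
    step = duration / 3
    entry = Job(co_print_sleep(watch, step, f"{tag} entry"), scheduler=sched)
    left = Job(co_print_sleep(watch, step, f"{tag} left"),
               required=entry, scheduler=sched)
    right = Job(co_print_sleep(watch, step, f"{tag} right"),
                required=entry, scheduler=sched)
    Job(co_print_sleep(watch, step, f"{tag} exit"),
        required=(left, right), scheduler=sched)
    return sched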
Example 5
 def test_png_easy2(self):
     """
     same but with a multi-entry/multi-exit sub-sched
     """
     watch = Watch()
     sched = Scheduler(
         Sequence(
             Job(co_print_sleep(watch, .2, "beg"),
                 label="test_png_easy2"),
             pipes(watch, .6, "middle"),
             Job(co_print_sleep(watch, .2, "end")),
         ),
         watch=watch
     )
     produce_png(sched, "test_png_easy2")
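
The pipes helper used here is another test utility that produces a multi-entry/multi-exit sub-scheduler. A plausible sketch, assuming two independent two-job pipelines sharing one scheduler:

# assumed sketch of the pipes helper: two independent 2-job pipelines,
# giving the sub-scheduler 2 entry points and 2 exit points
from asynciojobs import Scheduler, Sequence, Job

def pipes(watch, duration, tag, scheduler_class=Scheduler):
    sched = scheduler_class()
    sched.label = tag
    step = duration / 2
    for pipe in ("up", "down"):
        sched.add(Sequence(
            Job(co_print_sleep(watch, step, f"{tag} {pipe} 1")),
            Job(co_print_sleep(watch, step, f"{tag} {pipe} 2")),
        ))
    return sched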
Example 6
 def test_png_easy(self):
     """
     start with an easy one, a sequence that has a diamond inside
     """
     watch = Watch()
     sched = Scheduler(
         Sequence(
             Job(co_print_sleep(watch, .2, "beg"),
                 label="test_easy"),
             diamond_scheduler(watch, .6, "middle"),
             Job(co_print_sleep(watch, .2, "end")),
         ),
         watch=watch
     )
     produce_png(sched, "test_png_easy")
Example 7
 def test_png_simple(self):
     """
     a sequence that has 2 subscheds in a row
     """
     watch = Watch()
     sched = Scheduler(
         Sequence(
             Job(co_print_sleep(watch, .2, "beg"),
                 label="test_png_simple start"),
             diamond_scheduler(watch, .6, "middle-up"),
             pipes(watch, .6, "middle-down"),
             Job(co_print_sleep(watch, .2, "end"),
                 label="test_png_simple end"),
         ),
         watch=watch
     )
     produce_png(sched, "test_png_simple")
Example 8
 def sched_sched_boom(s1_crit, s2_crit, j_crit):
     return Scheduler(Scheduler(Job(boom("ok"),
                                    critical=j_crit,
                                    label=f"boom {j_crit}"),
                                critical=s2_crit,
                                label=f"internal {s2_crit}"),
                      critical=s1_crit,
                      label=f"external {s1_crit}")
Example 9
    def test_nesting2(self):
        """
        2 sub schedulers run in parallel while
        the third main one controls them both
        """
        watch = Watch('test_nesting2')
        # sub-scheduler - total approx 0.5 s
        sub2 = diamond_scheduler(watch,
                                 0.5,
                                 "SUB2",
                                 scheduler_class=PureScheduler)
        sub2.watch = watch
        # sub-scheduler - total approx 1 s
        sub3 = diamond_scheduler(watch,
                                 1,
                                 "SUB3",
                                 scheduler_class=PureScheduler)
        sub3.watch = watch

        # main scheduler - total approx
        # 0.5 + max(0.5, 1) + 0.5 = 2 s
        expected_duration = 2
        main_sched = PureScheduler(watch=watch)
        mainj1 = Job(co_print_sleep(watch, 0.5, "mainj1"),
                     label="mainj1",
                     scheduler=main_sched)
        mainj2 = Job(sub2.co_run(),
                     label="mainj2",
                     required=mainj1,
                     scheduler=main_sched)
        mainj3 = Job(sub3.co_run(),
                     label="mainj3",
                     required=mainj1,
                     scheduler=main_sched)
        Job(co_print_sleep(watch, 0.5, "mainj4"),
            label="mainj4",
            required=(mainj2, mainj3),
            scheduler=main_sched)

        ok = main_sched.run()
        self.assertTrue(ok)

        # allow for a small variation around 2s of course
        duration = watch.seconds()
        self.assertAlmostEqual(duration, expected_duration, delta=0.05)
Example 10
    def main(self, reset, timeout):

        mainjob = Job(self.run(reset))
        displayjob = Job(self.display.run(), forever=True)
        scheduler = Scheduler(mainjob, displayjob)

        try:
            ok = scheduler.orchestrate(timeout=timeout)
            if not ok:
                self.display.set_goodbye("rhubarbe-load failed: {}".format(scheduler.why()))
                return 1
            return 0 if mainjob.result() else 1
        except KeyboardInterrupt:
            self.display.set_goodbye("rhubarbe-load : keyboard interrupt - exiting")
            return 1
        finally:
            self.frisbeed and self.frisbeed.stop_nowait()
            self.nextboot_cleanup()
            self.display.epilogue()
Example 11
    def test_graph1(self):

        watch = Watch()

        s = PureScheduler()
        s.add(
            Sequence(
                Job(co_print_sleep(watch, .25, 'begin')),
                Job(co_print_sleep(watch, 1., 'middle'), label='middle'),
                Job(co_print_sleep(watch, .25, 'end')),
            ))
        print("test_graph1 NO DETAILS")
        s.list()
        print("test_graph1 WITH DETAILS")
        s.list(details=True)
        print("GRAPH")
        self.assertEqual(len(s), 3)
        s.run()
        self.assertAlmostEqual(watch.seconds(), 1.5, delta=0.05)
        produce_png(s, "test_graph1")
Example 12
    def main(self, reset, timeout):
        mainjob = Job(self.run(reset), critical=True)
        displayjob = Job(self.display.run(), forever=True, critical=True)

        scheduler = Scheduler(mainjob,
                              displayjob,
                              timeout=timeout,
                              critical=False)

        try:
            is_ok = scheduler.run()
            if not is_ok:
                scheduler.debrief()
                self.display.set_goodbye(
                    f"rhubarbe-save failed: {scheduler.why()}")
                return 1
            return 0 if mainjob.result() else 1
        except KeyboardInterrupt:
            self.display.set_goodbye("rhubarbe-save : keyboard interrupt, bye")
            return 1
        finally:
            self.cleanup()
Example 13
    def test_png_styles1(self):
        """
        trying the rendering of critical and forever jobs
        """
        watch = Watch()
        sched = Scheduler(
            Sequence(
                Job(co_print_sleep(watch, .1, "regular"),
                    label="regular",
                    critical=False, forever=False),
                Job(co_print_sleep(watch, .1, "critical"),
                    label="critical",
                    critical=True, forever=False),
                Job(co_print_sleep(watch, .1, "forever"),
                    label="forever",
                    critical=False, forever=True),
                Job(co_print_sleep(watch, .1, "both"),
                    label="both",
                    critical=True, forever=True),
            ),
            watch=watch,
        )

        produce_png(sched, "test_png_styles1")
Example 14
    def global_wait_ssh(self):
        # wait for nodes to be ssh-reachable
        self.print(f"waiting for {len(self.nodes)} nodes"
                   f" (timeout={self.wait_timeout})")
        sshs = [SshWaiter(node, verbose=self.verbose) for node in self.nodes]
        jobs = [
            Job(ssh.wait_for(self.backoff), critical=False) for ssh in sshs
        ]

        scheduler = Scheduler(Job(self.display.run(), forever=True),
                              *jobs,
                              critical=False,
                              timeout=self.wait_timeout)
        if not scheduler.run():
            self.verbose and scheduler.debrief()  # pylint: disable=w0106
        # exclude nodes that have not behaved
        for node, job in zip(self.nodes, jobs):
            self.verbose_msg(
                f"node {node.id} wait_ssh_job -> done={job.is_done()}",
                f"exc={job.raised_exception()}")

            if exc := job.raised_exception():
                message = f"OOPS {type(exc)} {exc}"
                self.mark_and_exclude(node, Reason.WONT_SSH, message)
Example 15
    def global_check_image(self, _image, check_strings):
        # on the remaining nodes: check image marker
        self.print(f"checking {len(self.nodes)} nodes"
                   f" against {check_strings} in /etc/rhubarbe-image")

        grep_pattern = "|".join(check_strings)
        check_command = (
            f"tail -1 /etc/rhubarbe-image | egrep -q '{grep_pattern}'")
        jobs = [
            SshJob(node=silent_sshnode(node, verbose=self.verbose),
                   command=check_command,
                   critical=False) for node in self.nodes
        ]

        scheduler = Scheduler(Job(self.display.run(), forever=True),
                              *jobs,
                              critical=False,
                              timeout=self.wait_timeout)
        if not scheduler.run():
            self.verbose and scheduler.debrief()  # pylint: disable=w0106
        # exclude nodes that have not behaved
        for node, job in zip(self.nodes, jobs):
            if not job.is_done() or job.raised_exception():
                self.verbose_msg(
                    f"checking {grep_pattern}: something went badly wrong with {node}"
                )
                message = None
                if exc := job.raised_exception():
                    message = f"OOPS {type(exc)} {exc}"
                self.mark_and_exclude(node, Reason.CANT_CHECK_IMAGE, message)
                continue
            if job.result() != 0:
                explanation = f"wrong image found on {node} - looking for {grep_pattern}"
                self.verbose_msg(explanation)
                self.mark_and_exclude(node, Reason.DID_NOT_LOAD, explanation)
                continue
            self.print(f"node {node} checked out OK")
Example 16
 def sched_boom(s_crit, j_crit):
     return Scheduler(Job(boom(str(j_crit)), critical=j_crit),
                      critical=s_crit)
Example 17
def wait(*argv):  # pylint: disable=r0914
    usage = """
    Wait for selected nodes to be reachable by ssh
    Returns 0 if all nodes indeed are reachable
    """
    # suppress info log messages from asyncssh
    asyncssh_set_log_level(logging.WARNING)

    config = Config()
    parser = ArgumentParser(usage=usage,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("-c",
                        "--curses",
                        action='store_true',
                        default=False,
                        help="Use curses to provide term-based animation")
    parser.add_argument("-t",
                        "--timeout",
                        action='store',
                        default=config.value('nodes', 'wait_default_timeout'),
                        type=float,
                        help="Specify global timeout for the whole process")
    parser.add_argument("-b",
                        "--backoff",
                        action='store',
                        default=config.value('networking', 'ssh_backoff'),
                        type=float,
                        help="Specify backoff average between "
                        "attempts to ssh connect")
    parser.add_argument("-u",
                        "--user",
                        default="root",
                        help="select other username")
    # really don't write anything
    parser.add_argument("-s", "--silent", action='store_true', default=False)
    parser.add_argument("-v", "--verbose", action='store_true', default=False)

    add_selector_arguments(parser)
    args = parser.parse_args(argv)

    # --curses implies --verbose otherwise nothing shows up
    if args.curses:
        args.verbose = True

    selector = selected_selector(args)
    message_bus = asyncio.Queue()

    if args.verbose:
        message_bus.put_nowait({'selected_nodes': selector})
    from rhubarbe.logger import logger
    logger.info(f"wait: backoff is {args.backoff} "
                f"and global timeout is {args.timeout}")

    nodes = [
        Node(cmc_name, message_bus)  # pylint: disable=w0621
        for cmc_name in selector.cmc_names()
    ]
    sshs = [
        SshProxy(node, username=args.user, verbose=args.verbose)
        for node in nodes
    ]
    jobs = [Job(ssh.wait_for(args.backoff), critical=True) for ssh in sshs]

    display_class = Display if not args.curses else DisplayCurses
    display = display_class(nodes, message_bus)

    # have the display class run forever until the other ones are done
    scheduler = Scheduler(Job(display.run(), forever=True, critical=True),
                          *jobs,
                          timeout=args.timeout,
                          critical=False)
    try:
        orchestration = scheduler.run()
        if orchestration:
            return 0
        else:
            if args.verbose:
                scheduler.debrief()
            return 1
    except KeyboardInterrupt:
        print("rhubarbe-wait : keyboard interrupt - exiting")
        # xxx
        return 1
    finally:
        display.epilogue()
        if not args.silent:
            for ssh in sshs:
                print(f"{ssh.node}:ssh {'OK' if ssh.status else 'KO'}")
Example 18
# for the fun of it, let's add a job that runs forever and writes
# current time every second
import time
import asyncio

async def infinite_clock():
    while True:
        print("--- TICK - {}".format(time.strftime("%H:%M:%S")))
        await asyncio.sleep(1)

# a forever job is not expected to end; instead,
# it gets killed when the rest of the flock is done
clock_job = Job(
    infinite_clock(),
    forever=True,
    scheduler=scheduler,
    # for the illustrated graph
    label="infinite clock",
)

##########
# run the scheduler
ok = scheduler.orchestrate()

# give details if it failed
ok or scheduler.debrief()

success = ok and ping.result() == 0

# producing a dot file for illustration
scheduler.export_as_dotfile("B4.dot")
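
The dot file exported above can then be rendered to an image with graphviz, just as the larger scenario at the end of this collection does:

# render the exported dot file with graphviz (requires the dot command)
import os
os.system("dot -Tpng B4.dot -o B4.png")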
Example 19
 def job_in_s(i, s):
     return Job(co_print_sleep(watch, .2, f"job {i}"),
                label=f"job{i}",
                scheduler=s)
Example 20
 def job(i):
     return Job(co_print_sleep(watch, 0.1, f"job{i}"), label=i)
Example 21
def main() -> bool:
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("-U", "--url", default=default_topurl,
                        dest='topurl',
                        help="url to reach nbhosting server")
    parser.add_argument("-c", "--course-gitdir", default=default_course_gitdir,
                        help="""location of a git repo where to fetch notebooks;
                                needed in order to generate relevant URLs""")
    parser.add_argument("-i", "--indices", default=[0], action=IntsRanges,
                        help="(cumulative) ranges of indices in the list of known notebooks"
                        " - run nbhtest with -l to see list")
    parser.add_argument("-u", "--users", default=[1], action=IntsRanges,
                        help="(cumulative) ranges of students indexes; e.g. -u 101-400 -u 501-600")
    parser.add_argument("-m", "--random", action='store_true',
                        help="if set, a random notebook index is used for each student")
    parser.add_argument("-b", "--base", default='student',
                        help="basename for students name")
    parser.add_argument("-p", "--period", default=20, type=float,
                        help="delay between 2 triggers of nbhtest")
    parser.add_argument("-s", "--sleep", default=default_sleep_internal, type=float,
                        help="delay in seconds to sleep between actions inside nbhtest")
    parser.add_argument("-w", "--window", default=default_window, type=int,
                        help="window depth for spawning the nbhtest instances")
    parser.add_argument("-n", "--dry-run", action='store_true')
    args = parser.parse_args()

    course_gitdir = args.course_gitdir
    course, notebooks = list_notebooks(course_gitdir)
    
    # in random mode: what are the choices that we randomize over
    if args.random:
        if len(args.indices) > 1:
            choices = args.indices
        else:
            choices = list(range(len(notebooks)))


    local = LocalNode(
        formatter=TerminalFormatter(
            format="%H-%M-%S:@line@",
            verbose=True
            ))

    scheduler = Scheduler()

    jobs = []
    for user in args.users:
        student_name = "{}-{:04d}".format(args.base, user)
        if args.random:
            indices = [ random.choice(choices) ]
        else:
            indices = args.indices
        for index in indices:
            command = "nbhtest.py -U {} -c {} -i {} -u {} -s {} &"\
                      .format(args.topurl, course_gitdir, index, student_name, args.sleep)
            if args.dry_run:
                print("dry-run:", command)
            else:
                # schedule this command to run
                job = Sequence(
                    SshJob(scheduler=scheduler,
                           node=local,
                           command=command,
                    ),
                    Job(asyncio.sleep(args.period))
                )
                jobs.append(job)

    if args.dry_run:
        return True

    overall = scheduler.orchestrate(
        jobs_window=args.window
    )
    if not overall:
        scheduler.debrief()
    print("nbhtests DONE")
    return overall
Example 22
async def infinite_clock(watch):
    while True:
        print("--- TICK - {}".format(watch.elapsed()))
        await asyncio.sleep(1)


# create a Watch instance for keeping track of elapsed time
watch = Watch()

# a forever job is not expected to end; instead,
# it gets killed when the rest of the flock is done
clock_job = Job(
    infinite_clock(watch),
    forever=True,
    scheduler=scheduler,
    # for the illustrated graph
    label="infinite stopwatch",
)

##########
# run the scheduler
ok = scheduler.orchestrate()

# give details if it failed
ok or scheduler.debrief()

success = ok and ping.result() == 0

# producing a dot file for illustration
scheduler.export_as_dotfile("B5.dot")
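
As in the previous clock example, `scheduler` and `ping` are created in earlier steps of the original walkthrough and are not shown here. Minimal stand-ins - assumptions, just to make the snippet self-contained - would be defined before it, along these lines:

# assumed stand-ins for the objects created earlier in the original example
import asyncio
from asynciojobs import Scheduler, Job

async def fake_ping():
    # mimic a ping-like command that succeeds (exit status 0)
    await asyncio.sleep(2)
    return 0

scheduler = Scheduler()
ping = Job(fake_ping(), label="ping", scheduler=scheduler)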
Example 23
 def job(n):
     return Job(aprint(n), label=n)
Example 24
def run(slice, hss, epc, enb, extras, load_nodes, image_gw, image_enb,
        image_extra, reset_nodes, reset_usrp, spawn_xterms, verbose):
    """
    ##########
    # 3 methods to get nodes ready
    # (*) load images
    # (*) reset nodes that are known to have the right image
    # (*) do nothing, proceed to experiment

    expects e.g.
    * slice : something like [email protected]
    * hss : 04
    * epc : 03
    * enb : 23
    * extras : a list of ids that will be loaded with the gnuradio image

    Plus
    * load_nodes: whether to load images or not - in which case
                  image_gw, image_enb and image_extra
                  are used to tell the image names
    * reset_nodes: if load_nodes is false and reset_nodes is true, the nodes are reset - i.e. rebooted
    * otherwise (both False): do nothing
    * reset_usrp : if False, the USRP board won't be reset
    * spawn_xterms : if set, starts xterm on all extra nodes
    * image_* : the name of the images to load on the various nodes
    """

    # what argparse knows as a slice actually is a gateway (user + host)
    gwuser, gwhost = parse_slice(slice)
    gwnode = SshNode(hostname=gwhost,
                     username=gwuser,
                     formatter=ColonFormatter(verbose=verbose),
                     debug=verbose)

    hostnames = hssname, epcname, enbname = [
        r2lab_hostname(x) for x in (hss, epc, enb)
    ]
    extra_hostnames = [r2lab_hostname(x) for x in extras]

    hssnode, epcnode, enbnode = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=ColonFormatter(verbose=verbose),
                debug=verbose) for hostname in hostnames
    ]

    extra_nodes = [
        SshNode(gateway=gwnode,
                hostname=hostname,
                username='******',
                formatter=ColonFormatter(verbose=verbose),
                debug=verbose) for hostname in extra_hostnames
    ]

    ########## preparation
    job_check_for_lease = SshJob(
        node=gwnode,
        command=["rhubarbe", "leases", "--check"],
        label="check we have a current lease",
    )

    # turn off all nodes
    turn_off_command = ["rhubarbe", "off", "-a"]
    # except our 3 nodes and the optional extras
    turn_off_command += [
        "~{}".format(x) for x in [hss, epc, enb] + extras + [20]
    ]

    job_off_nodes = SshJob(
        node=gwnode,
        # switch off all nodes but the ones we use
        command=turn_off_command,
        label="turn off unused nodes",
        required=job_check_for_lease,
    )

    # actually run this in the gateway, not on the mac
    # the ssh keys are stored in the gateway and we do not yet have
    # the tools to leverage such remote keys
    job_stop_phone = SshJob(
        node=gwnode,
        command=RunScript(locate_local_script("faraday.sh"),
                          "macphone",
                          "r2lab/infra/user-env/macphone.sh",
                          "phone-off",
                          includes=includes),
        label="stop phone",
        required=job_check_for_lease,
    )

    jobs_prepare = [job_check_for_lease, job_stop_phone]
    # turn off nodes only when --load or --reset is set
    if load_nodes or reset_nodes:
        jobs_prepare.append(job_off_nodes)

    ########## infra nodes hss + epc

    # prepare nodes

    commands = []
    if load_nodes:
        commands.append(
            Run("rhubarbe", "load", "-i", image_gw, hssname, epcname))
    elif reset_nodes:
        commands.append(Run("rhubarbe", "reset", hssname, epcname))
    # always do this
    commands.append(Run("rhubarbe", "wait", "-t", 120, hssname, epcname))
    job_load_infra = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait HSS and EPC nodes",
        required=jobs_prepare,
    )

    # start services

    job_service_hss = SshJob(
        node=hssnode,
        command=RunScript(locate_local_script("oai-hss.sh"),
                          "run-hss",
                          epc,
                          includes=includes),
        label="start HSS service",
        required=job_load_infra,
    )

    msg = "wait for HSS to warm up"
    job_service_epc = Sequence(
        # give HSS 15 seconds to warm up
        Job(
            verbose_delay(15, msg),
            label=msg,
        ),
        SshJob(
            node=epcnode,
            command=RunScript(locate_local_script("oai-epc.sh"),
                              "run-epc",
                              hss,
                              includes=includes),
            label="start EPC services",
        ),
        required=job_load_infra,
    )

    jobs_infra = job_load_infra, job_service_hss, job_service_epc

    ########## enodeb

    # prepare node

    commands = []
    if load_nodes:
        commands.append(Run("rhubarbe", "usrpoff", enb))
        commands.append(Run("rhubarbe", "load", "-i", image_enb, enb))
    elif reset_nodes:
        commands.append(Run("rhubarbe", "reset", enb))
    commands.append(Run("rhubarbe", "wait", "-t", "120", enb))

    job_load_enb = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait ENB",
        required=jobs_prepare,
    )

    # start service

    msg = "wait for EPC to warm up"
    job_service_enb = Sequence(
        Job(verbose_delay(15, msg), label=msg),
        SshJob(
            node=enbnode,
            # run-enb expects the id of the epc as a parameter
            command=RunScript(locate_local_script("oai-enb.sh"),
                              "run-enb",
                              epc,
                              reset_usrp,
                              includes=includes),
            label="start softmodem on ENB",
        ),
        required=(job_load_enb, job_service_hss, job_service_epc),
    )

    jobs_enb = job_load_enb, job_service_enb

    ########## run experiment per se

    # the phone
    # we need to wait for the USB firmware to be loaded
    duration = 30 if reset_usrp is not False else 8
    msg = "wait for enodeb firmware to load on USRP".format(duration)
    job_wait_enb = Job(verbose_delay(duration, msg),
                       label=msg,
                       required=job_service_enb)

    job_start_phone = SshJob(
        node=gwnode,
        commands=[
            RunScript(locate_local_script("faraday.sh"),
                      "macphone",
                      "r2lab/infra/user-env/macphone.sh",
                      "phone-on",
                      includes=includes),
            RunScript(locate_local_script("faraday.sh"),
                      "macphone",
                      "r2lab/infra/user-env/macphone.sh",
                      "phone-start-app",
                      includes=includes),
        ],
        label="start phone 4g and speedtest app",
        required=job_wait_enb,
    )

    job_ping_phone_from_epc = SshJob(
        node=epcnode,
        commands=[
            Run("sleep 10"),
            Run("ping -c 100 -s 100 -i .05 172.16.0.2 &> /root/ping-phone"),
        ],
        label="ping phone from EPC",
        critical=False,
        required=job_wait_enb,
    )

    jobs_exp = job_wait_enb, job_start_phone, job_ping_phone_from_epc

    ########## extra nodes
    # ssh -X not yet supported in apssh, so one option is to start them using
    # a local process
    # xxx to update: The following code kind of works, but it needs to be
    # turned off, because the process in question would be killed
    # at the end of the Scheduler orchestration (at the end of the run function)
    # which is the exact time where it would be useful :)
    # however the code for LocalJob appears to work fine, it would be nice to
    # move it around - maybe in apssh ?

    commands = []
    if not extras:
        commands.append(Run("echo no extra nodes specified - ignored"))
    else:
        if load_nodes:
            commands.append(Run("rhubarbe", "usrpoff", *extra_hostnames))
            commands.append(
                Run("rhubarbe", "load", "-i", image_extra, *extra_hostnames))
            commands.append(
                Run("rhubarbe", "wait", "-t", 120, *extra_hostnames))
            commands.append(Run("rhubarbe", "usrpon", *extra_hostnames))
        elif reset_nodes:
            commands.append(Run("rhubarbe", "reset", extra_hostnames))
        commands.append(Run("rhubarbe", "wait", "-t", "120", *extra_hostnames))
    job_load_extras = SshJob(
        node=gwnode,
        commands=commands,
        label="load and wait extra nodes",
        required=job_check_for_lease,
    )

    jobs_extras = [job_load_extras]

    colors = ["wheat", "gray", "white"]

    if spawn_xterms:
        jobs_xterms_extras = [
            SshJob(
                node=extra_node,
                command=Run("xterm -fn -*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*"
                            " -bg {} -geometry 90x10".format(color),
                            x11=True),
                label="xterm on node {}".format(extra_node.hostname),
                required=job_load_extras,
                # don't set forever; if we do, then these xterms get killed
                # when all other tasks have completed
                # forever = True,
            )
            for extra_node, color in zip(extra_nodes, itertools.cycle(colors))
        ]
        jobs_extras += jobs_xterms_extras

    # schedule the load phases only if required
    sched = Scheduler(verbose=verbose)
    # this is just a way to add a collection of jobs to the scheduler
    sched.update(jobs_prepare)
    sched.update(jobs_infra)
    sched.update(jobs_enb)
    sched.update(jobs_exp)
    sched.update(jobs_extras)
    # remove dangling requirements - if any - should not be needed but won't hurt either
    sched.sanitize()

    print(40 * "*")
    if load_nodes:
        print("LOADING IMAGES: (gw->{}, enb->{}, extras->{})".format(
            load_nodes, image_gw, image_enb, image_extra))
    elif reset_nodes:
        print("RESETTING NODES")
    else:
        print("NODES ARE USED AS IS (no image loaded, no reset)")

    sched.rain_check()
    # Update the .dot and .png file for illustration purposes
    if verbose:
        sched.list()
        name = "scenario-load" if load_nodes else \
               "scenario-reset" if reset_nodes else \
               "scenario"
        sched.export_as_dotfile("{}.dot".format(name))
        os.system("dot -Tpng {}.dot -o {}.png".format(name, name))

    sched.list()

    if not sched.orchestrate():
        print("RUN KO : {}".format(sched.why()))
        sched.debrief()
        return False
    else:
        print("RUN OK")
        return True