# --- Example 1 ---
def mcmd(cmdid: str, mods: list):
    """
    Build an appfwk Command addressed to a set of modules.

    :param      cmdid:  The command id
    :type       cmdid:  str
    :param      mods:   List of (module name, data) structures
    :type       mods:   list

    :returns:   A constructed Command object
    :rtype:     dunedaq.appfwk.cmd.Command
    """
    # Delegate the per-module addressing to the acmd helper, then wrap it
    # in a plain (non run-control) Command.
    addressed = acmd(mods)
    return ccmd.Command(id=ccmd.CmdId(cmdid), data=addressed)
# --- Example 2 ---
def mrccmd(cmdid: str, instate: str, outstate: str, mods: list):
    """
    Helper function to create appfwk's run-control Commands addressed to modules.

    :param      cmdid:  The command id
    :type       cmdid:  str
    :param      instate:  The state before command execution
    :type       instate:  str
    :param      outstate:  The state after command execution
    :type       outstate:  str
    :param      mods:   List of (match, data) module name/data structures
    :type       mods:   list

    :returns:   A constructed Command object
    :rtype:     dunedaq.rcif.cmd.RCCommand
    """
    # Each (match, data) pair becomes an AddressedCmd; `match` appears to be
    # a pattern selecting target modules (callers pass e.g. ".*") — confirm
    # against the appfwk dispatch semantics.
    return rccmd.RCCommand(id=ccmd.CmdId(cmdid),
                           entry_state=rccmd.State(instate),
                           exit_state=rccmd.State(outstate),
                           data=cmd.CmdObj(modules=cmd.AddressedCmds(
                               cmd.AddressedCmd(match=m, data=o)
                               for m, o in mods)))
# --- Example 3 ---
def generate(QUEUE_PAIRS=1, BYTES_TO_SEND=4096, OUTPUT_DIR="output"):
    """Generate the json command sequence for a producer/consumer queue test.

    :param QUEUE_PAIRS: Number of producer/consumer pairs (one queue each).
    :param BYTES_TO_SEND: Message size passed to the producer configuration.
    :param OUTPUT_DIR: Output directory passed to the producer configuration.
    :returns: JSON string with the serialized command sequence.
    """
    # One single-producer/single-consumer queue per pair.
    queue_bare_specs = [
        app.QueueSpec(inst=f"queue_{idx}",
                      kind="FollySPSCQueue",
                      capacity=100)
        for idx in range(QUEUE_PAIRS)
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    # A RandomProducer and a Consumer attached to each queue.
    mod_specs = []
    for idx in range(QUEUE_PAIRS):
        producer = mspec(f"prod_{idx}", "RandomProducer", [
            app.QueueInfo(
                name="outputQueue", inst=f"queue_{idx}", dir="output")
        ])
        consumer = mspec(f"cons_{idx}", "Consumer", [
            app.QueueInfo(
                name="inputQueue", inst=f"queue_{idx}", dir="input")
        ])
        mod_specs.extend((producer, consumer))

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    print(json.dumps(init_specs.pod(), indent=4, sort_keys=True))

    initcmd = rc.RCCommand(id=bcmd.CmdId("init"),
                           entry_state=rc.State("NONE"),
                           exit_state=rc.State("INITIAL"),
                           data=init_specs)

    producer_conf = daq_test_conf.Conf(message_size=BYTES_TO_SEND,
                                       output_dir=OUTPUT_DIR)
    startcmd = mrccmd("start", "INITIAL", "RUNNING", [(".*", producer_conf)])

    stopcmd = mrccmd("stop", "RUNNING", "INITIAL", [(".*", None)])

    start_measurement_cmd = mrccmd("start_measurement", "RUNNING",
                                   "MEASURING", [(".*", None)])

    stop_measurement_cmd = mrccmd("stop_measurement", "MEASURING",
                                  "RUNNING", [(".*", None)])

    print(json.dumps(initcmd.pod(), indent=4, sort_keys=True))

    # Create a list of commands
    cmd_seq = [
        initcmd, startcmd, stopcmd, start_measurement_cmd, stop_measurement_cmd
    ]

    # Print them as json (to be improved/moved out)
    return json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
# --- Example 4 ---
def generate(
    PARTITION="hsi_readout_test",
    OUTPUT_PATH=".",
    TRIGGER_RATE_HZ: int = 1,
    CLOCK_SPEED_HZ: int = 50000000,
    # Offset for HSIEvent timestamps in units of clock ticks. Positive offset
    # increases the timestamp estimate.
    HSI_TIMESTAMP_OFFSET: int = 0,
    HSI_DEVICE_ID: int = 0,
    MEAN_SIGNAL_MULTIPLICITY: int = 0,
    SIGNAL_EMULATION_MODE: int = 0,
    ENABLED_SIGNALS: int = 0b00000001,
):
    """Generate the json command sequence for a FakeHSIEventGenerator app.

    :param PARTITION: Partition name; prefixes the hsievent network connection.
    :param OUTPUT_PATH: Not used in this generator; kept for interface
        compatibility with sibling generators.
    :param TRIGGER_RATE_HZ: Emulated trigger rate; 0 disables periodic triggers.
    :param CLOCK_SPEED_HZ: Clock frequency used to convert the rate to ticks.
    :param HSI_TIMESTAMP_OFFSET: Timestamp offset in clock ticks.
    :param HSI_DEVICE_ID: Not used in this generator; kept for interface
        compatibility.
    :param MEAN_SIGNAL_MULTIPLICITY: Mean signal multiplicity for emulation.
    :param SIGNAL_EMULATION_MODE: Signal emulation mode flag.
    :param ENABLED_SIGNALS: Bitmask of enabled HSI signals.
    :returns: JSON string with the serialized command sequence.
    """

    # network connection used to publish HSIEvents
    nw_specs = [
        nwmgr.Connection(name=PARTITION + ".hsievent",
                         topics=[],
                         address="tcp://127.0.0.1:12344")
    ]

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_from_netq",
                      kind='FollySPSCQueue',
                      capacity=100),
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("ntoq_timesync", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst="time_sync_from_netq", dir="output")
        ]),
        mspec("fhsig", "FakeHSIEventGenerator", [
            app.QueueInfo(name="time_sync_source",
                          inst="time_sync_from_netq",
                          dir="input"),
        ]),
    ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rcif.RCCommand(id=cmdlib.CmdId("init"),
                             entry_state="NONE",
                             exit_state="INITIAL",
                             data=init_specs)

    # Convert the requested rate into an interval in clock ticks; 0 keeps
    # periodic triggering disabled.
    trigger_interval_ticks = 0
    if TRIGGER_RATE_HZ > 0:
        trigger_interval_ticks = math.floor(
            (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ)

    mods = [
        ("fhsig",
         fhsig.Conf(
             clock_frequency=CLOCK_SPEED_HZ,
             trigger_interval_ticks=trigger_interval_ticks,
             timestamp_offset=HSI_TIMESTAMP_OFFSET,
             mean_signal_multiplicity=MEAN_SIGNAL_MULTIPLICITY,
             signal_emulation_mode=SIGNAL_EMULATION_MODE,
             enabled_signals=ENABLED_SIGNALS,
             timesync_topic="Timesync",
             hsievent_connection_name=PARTITION + ".hsievent",
         )),
    ]

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED", mods)

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    startpars = rcif.StartParams(run=33, disable_data_storage=False)

    startcmd = mrccmd("start", "CONFIGURED", "RUNNING",
                      [("ntoq_timesync", startpars), ("fhsig", startpars)])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [("ntoq_timesync", None),
                                                       ("fhsig", None)])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStop\n\n", jstr)

    scrapcmd = mcmd("scrap", [("", None)])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nScrap\n\n", jstr)

    # Create a list of commands.  NOTE(review): scrapcmd was previously built
    # and printed but left out of the sequence; it is now included so the
    # emitted sequence matches the sibling generators in this file.
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
# --- Example 5 ---
def generate(RUN_NUMBER=333):
    """Generate the json command sequence for a FakeTimeStampedDataGenerator app.

    :param RUN_NUMBER: Run number placed in the start command.
    :returns: JSON string with the serialized command sequence.
    """

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="hsievent_q", kind='FollyMPMCQueue', capacity=1000),
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("ftsdgen", "FakeTimeStampedDataGenerator", [
            app.QueueInfo(
                name="hsievent_sink", inst="hsievent_q", dir="output"),
        ]),
    ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rc.RCCommand(id=bcmd.CmdId("init"),
                           entry_state=rc.State("NONE"),
                           exit_state=rc.State("INITIAL"),
                           data=init_specs)

    confcmd = mrccmd(
        "conf", "INITIAL", "CONFIGURED",
        [("ftsdgen", ftsdg.Conf(sleep_time=1000000000, frequency=50000000))])

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    # Fix: previously an identical StartParams was rebuilt inline and this
    # local was unused; reuse it instead.
    startpars = rc.StartParams(run=RUN_NUMBER)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
        (".*", startpars),
    ])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
        (".*", None),
    ])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStop\n\n", jstr)

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [(".*", None)])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nScrap\n\n", jstr)

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
# --- Example 6 ---
def generate_df(
        network_endpoints,
        NUMBER_OF_DATA_PRODUCERS=2,
        EMULATOR_MODE=False,
        DATA_RATE_SLOWDOWN_FACTOR = 1,
        RUN_NUMBER = 333, 
        TRIGGER_RATE_HZ = 1.0,
        DATA_FILE="./frames.bin",
        OUTPUT_PATH=".",
        DISABLE_OUTPUT=False,
        FLX_INPUT=True,
        TOKEN_COUNT=0
    ):
    """Generate the json configuration for the readout and DF process.

    :param network_endpoints: Mapping of endpoint names ("trigdec",
        "triginh", "timesync") to network addresses.
    :param NUMBER_OF_DATA_PRODUCERS: Number of readout links / data producers.
    :param EMULATOR_MODE: Forwarded to each DataLinkHandler's emulator_mode.
    :param DATA_RATE_SLOWDOWN_FACTOR: Divides the effective clock rate used
        for trigger spacing, readout rate, and latency-buffer sizing.
    :param RUN_NUMBER: Run number placed in the start command.
    :param TRIGGER_RATE_HZ: Desired trigger rate, converted to ticks below.
    :param DATA_FILE: Raw frame file read by the fake card reader.
    :param OUTPUT_PATH: Directory for the HDF5 data store.
    :param DISABLE_OUTPUT: If True, data storage is disabled via StartParams.
    :param FLX_INPUT: If True configure FELIX card reader(s), else a fake one.
    :param TOKEN_COUNT: Initial token count for the DataWriter.
    :returns: JSON string with the serialized command sequence.

    NOTE: CLOCK_SPEED_HZ and QUEUE_POP_WAIT_MS are module-level constants
    defined elsewhere in this file.
    """
   
    # Desired trigger interval expressed in (slowed-down) clock ticks.
    trg_interval_ticks = math.floor((1/TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR)

    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="time_sync_q", kind='FollyMPMCQueue', capacity=100),
            app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_from_netq", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_copy_for_bookkeeping", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_record_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
        ] + [
            # Per-link request queues (request generator -> data handlers).
            app.QueueSpec(inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=100)
                for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ] + [

            # Per-link raw-data queues (card reader -> data handlers).
            app.QueueSpec(inst=f"wib_link_{idx}", kind='FollySPSCQueue', capacity=100000)
                for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]
    

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))


    # Network bridges, request generator, event builder, writer, and one
    # DataLinkHandler per link.
    mod_specs = [
        mspec("ntoq_trigdec", "NetworkToQueue", [
                        app.QueueInfo(name="output", inst="trigger_decision_from_netq", dir="output")
                    ]),

        mspec("qton_token", "QueueToNetwork", [
                        app.QueueInfo(name="input", inst="token_q", dir="input")
                    ]),

        mspec("qton_timesync", "QueueToNetwork", [
                        app.QueueInfo(name="input", inst="time_sync_q", dir="input")
                    ]),

        mspec("rqg", "RequestGenerator", [
                        app.QueueInfo(name="trigger_decision_input_queue", inst="trigger_decision_from_netq", dir="input"),
                        app.QueueInfo(name="trigger_decision_for_event_building", inst="trigger_decision_copy_for_bookkeeping", dir="output"),
                    ] + [
                        app.QueueInfo(name=f"data_request_{idx}_output_queue", inst=f"data_requests_{idx}", dir="output")
                            for idx in range(NUMBER_OF_DATA_PRODUCERS)
                    ]),

        mspec("ffr", "FragmentReceiver", [
                        app.QueueInfo(name="trigger_decision_input_queue", inst="trigger_decision_copy_for_bookkeeping", dir="input"),
                        app.QueueInfo(name="trigger_record_output_queue", inst="trigger_record_q", dir="output"),
                        app.QueueInfo(name="data_fragment_input_queue", inst="data_fragments_q", dir="input"),
                    ]),

        mspec("datawriter", "DataWriter", [
                        app.QueueInfo(name="trigger_record_input_queue", inst="trigger_record_q", dir="input"),
                        app.QueueInfo(name="token_output_queue", inst="token_q", dir="output"),
                    ]),

        ] + [
                mspec(f"datahandler_{idx}", "DataLinkHandler", [

                            app.QueueInfo(name="raw_input", inst=f"wib_link_{idx}", dir="input"),
                            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
                            app.QueueInfo(name="requests", inst=f"data_requests_{idx}", dir="input"),
                            app.QueueInfo(name="fragments", inst="data_fragments_q", dir="output"),
                            ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]

    # Card readers: each FELIX card serves at most 5 links, so a second card
    # is configured when more producers are requested.
    if FLX_INPUT:
        mod_specs.append(mspec("flxcard_0", "FelixCardReader", [
                        app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                            for idx in range(0,min(5, NUMBER_OF_DATA_PRODUCERS))
                        ]))
        if NUMBER_OF_DATA_PRODUCERS>5 :
            mod_specs.append(mspec("flxcard_1", "FelixCardReader", [
                            app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                                for idx in range(5, NUMBER_OF_DATA_PRODUCERS)
                            ]))
    else:
        mod_specs.append(mspec("fake_source", "FakeCardReader", [
                        app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                            for idx in range(NUMBER_OF_DATA_PRODUCERS)
                        ]))

    


    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    initcmd = rccmd.RCCommand(
        id=basecmd.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    # NOTE(review): conf entries for "fake_source" AND both "flxcard_*"
    # modules are always emitted regardless of FLX_INPUT; presumably entries
    # whose match hits no instantiated module are ignored at dispatch time —
    # confirm against appfwk's addressed-command semantics.
    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED",[
                ("ntoq_trigdec", ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                                           msg_module_name="TriggerDecisionNQ",
                                           receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                                                    address=network_endpoints["trigdec"])
                                           )
                 ),

                ("qton_token", qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                                           msg_module_name="TriggerDecisionTokenNQ",
                                           sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                                                  address=network_endpoints["triginh"],
                                                                  stype="msgpack")
                                           )
                 ),

                ("qton_timesync", qton.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                                            msg_module_name="TimeSyncNQ",
                                            sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                                                   address=network_endpoints["timesync"],
                                                                   stype="msgpack")
                                           )
                ),
        
                # Map (apa=0, link=idx) geo ids onto the per-link request queues.
                ("rqg", rqg.ConfParams(
                        map=rqg.mapgeoidqueue([
                                rqg.geoidinst(apa=0, link=idx, queueinstance=f"data_requests_{idx}") for idx in range(NUMBER_OF_DATA_PRODUCERS)
                            ])  
                        )),
                ("ffr", ffr.ConfParams(
                            general_queue_timeout=QUEUE_POP_WAIT_MS
                        )),
                ("datawriter", dw.ConfParams(
                            initial_token_count=TOKEN_COUNT,
                            data_store_parameters=hdf5ds.ConfParams(
                                name="data_store",
                                # type = "HDF5DataStore", # default
                                directory_path = OUTPUT_PATH, # default
                                # mode = "all-per-file", # default
                                max_file_size_bytes = 1073741824,
                                disable_unique_filename_suffix = False,
                                filename_parameters = hdf5ds.HDF5DataStoreFileNameParams(
                                    overall_prefix = "swtest",
                                    digits_for_run_number = 6,
                                    file_index_prefix = "",
                                    digits_for_file_index = 4,
                                ),
                                file_layout_parameters = hdf5ds.HDF5DataStoreFileLayoutParams(
                                    trigger_record_name_prefix= "TriggerRecord",
                                    digits_for_trigger_number = 5,
                                    digits_for_apa_number = 3,
                                    digits_for_link_number = 2,
                                )
                            )
                        )),
                ("fake_source",fakecr.Conf(
                            link_ids=list(range(NUMBER_OF_DATA_PRODUCERS)),
                            # input_limit=10485100, # default
                            # WIB frames: 25 ticks/frame, 12 frames/superchunk — TODO confirm.
                            rate_khz = CLOCK_SPEED_HZ/(25*12*DATA_RATE_SLOWDOWN_FACTOR*1000),
                            raw_type = "wib",
                            data_filename = DATA_FILE,
                            queue_timeout_ms = QUEUE_POP_WAIT_MS
                        )),
                ("flxcard_0",flxcr.Conf(
                            card_id=0,
                            logical_unit=0,
                            dma_id=0,
                            chunk_trailer_size= 32,
                            dma_block_size_kb= 4,
                            dma_memory_size_gb= 4,
                            numa_id=0,
                            num_links=min(5,NUMBER_OF_DATA_PRODUCERS)
                        )),
                ("flxcard_1",flxcr.Conf(
                            card_id=0,
                            logical_unit=1,
                            dma_id=0,
                            chunk_trailer_size= 32,
                            dma_block_size_kb= 4,
                            dma_memory_size_gb= 4,
                            numa_id=0,
                            num_links=max(0, NUMBER_OF_DATA_PRODUCERS-5)
                        )),
            ] + [
                (f"datahandler_{idx}", dlh.Conf(
                        raw_type = "wib",
                        emulator_mode = EMULATOR_MODE,
                        # fake_trigger_flag=0, # default
                        source_queue_timeout_ms= QUEUE_POP_WAIT_MS,
                        # ~3 seconds of data at the slowed-down rate — TODO confirm.
                        latency_buffer_size = 3*CLOCK_SPEED_HZ/(25*12*DATA_RATE_SLOWDOWN_FACTOR),
                        pop_limit_pct = 0.8,
                        pop_size_pct = 0.1,
                        apa_number = 0,
                        link_number = idx
                        )) for idx in range(NUMBER_OF_DATA_PRODUCERS)
            ])

    startpars = rccmd.StartParams(run=RUN_NUMBER, trigger_interval_ticks=trg_interval_ticks, disable_data_storage=DISABLE_OUTPUT)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("qton_token", startpars),
            ("datawriter", startpars),
            ("ffr", startpars),
            ("qton_timesync", startpars),
            ("datahandler_.*", startpars),
            ("fake_source", startpars),
            ("flxcard.*", startpars),
            ("rqg", startpars),
            ("ntoq_trigdec", startpars),
        ])

    # Stop order is the reverse of start order (downstream modules stopped last).
    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("ntoq_trigdec", None),
            ("rqg", None),
            ("flxcard.*", None),
            ("fake_source", None),
            ("datahandler_.*", None),
            ("qton_timesync", None),
            ("ffr", None),
            ("datawriter", None),
            ("qton_token", None),
        ])

    # Empty match string — presumably addresses no specific module; confirm
    # appfwk's behavior for "".
    pausecmd = mrccmd("pause", "RUNNING", "RUNNING", [
            ("", None)
        ])

    # NOTE(review): "tde" is not a module of this app; this resume entry is
    # presumably consumed by the trigger-emulator process — confirm.
    resumecmd = mrccmd("resume", "RUNNING", "RUNNING", [
            ("tde", tde.ResumeParams(
                            trigger_interval_ticks=trg_interval_ticks
                        ))
        ])

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [
            ("", None)
        ])

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, pausecmd, resumecmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
# --- Example 7 ---
def generate_trigemu(
        network_endpoints,
        NUMBER_OF_DATA_PRODUCERS=2,          
        DATA_RATE_SLOWDOWN_FACTOR = 1,
        RUN_NUMBER = 333, 
        TRIGGER_RATE_HZ = 1.0,
        DATA_FILE="./frames.bin",
        OUTPUT_PATH=".",
        TOKEN_COUNT=10
    ):
    """Generate the json config for the TriggerDecisionEmulator process.

    :param network_endpoints: Mapping of endpoint names ("trigdec",
        "triginh", "timesync") to network addresses.
    :param NUMBER_OF_DATA_PRODUCERS: Number of readout links requested in
        each trigger decision.
    :param DATA_RATE_SLOWDOWN_FACTOR: Divides the effective clock rate.
    :param RUN_NUMBER: Run number placed in the start command.
    :param TRIGGER_RATE_HZ: Desired trigger rate, converted to ticks below.
    :param DATA_FILE: Unused here — presumably kept for interface symmetry
        with generate_df; TODO confirm.
    :param OUTPUT_PATH: Unused here — same note as DATA_FILE.
    :param TOKEN_COUNT: Initial token count for the trigger emulator.
    :returns: JSON string with the serialized command sequence.

    NOTE: CLOCK_SPEED_HZ is a module-level constant defined elsewhere in
    this file.
    """
    
    # Desired trigger interval expressed in (slowed-down) clock ticks.
    trg_interval_ticks = math.floor((1/TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR)

    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="time_sync_from_netq", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="token_from_netq", kind='FollySPSCQueue', capacity=20),
            app.QueueSpec(inst="trigger_decision_to_netq", kind='FollySPSCQueue', capacity=20),
        ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))


    # Network bridges plus the trigger decision emulator itself.
    mod_specs = [
        mspec("qton_trigdec", "QueueToNetwork", [
                        app.QueueInfo(name="input", inst="trigger_decision_to_netq", dir="input")
                    ]),

        mspec("ntoq_token", "NetworkToQueue", [
                        app.QueueInfo(name="output", inst="token_from_netq", dir="output")
                    ]),

        mspec("ntoq_timesync", "NetworkToQueue", [
                        app.QueueInfo(name="output", inst="time_sync_from_netq", dir="output")
                    ]),

        mspec("tde", "TriggerDecisionEmulator", [
                        app.QueueInfo(name="time_sync_source", inst="time_sync_from_netq", dir="input"),
                        app.QueueInfo(name="token_source", inst="token_from_netq", dir="input"),
                        app.QueueInfo(name="trigger_decision_sink", inst="trigger_decision_to_netq", dir="output"),
                    ]),
        ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    initcmd = rccmd.RCCommand(
        id=basecmd.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED",[
                ("qton_trigdec", qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                                           msg_module_name="TriggerDecisionNQ",
                                           sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                                                  address=network_endpoints["trigdec"],
                                                                  stype="msgpack")
                                           )
                 ),

                 ("ntoq_token", ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                                            msg_module_name="TriggerDecisionTokenNQ",
                                            receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                                                     address=network_endpoints["triginh"])
                                            )
                 ),

                ("ntoq_timesync", ntoq.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                                           msg_module_name="TimeSyncNQ",
                                           receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                                                    address=network_endpoints["timesync"])
                                           )
                ),

                ("tde", tde.ConfParams(
                        links=[idx for idx in range(NUMBER_OF_DATA_PRODUCERS)],
                        min_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                        max_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                        # 1 ms readout window at the slowed-down clock rate.
                        min_readout_window_ticks=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*1000)),
                        max_readout_window_ticks=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*1000)),
                        trigger_window_offset=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*2000)),
                        # The delay is set to put the trigger well within the latency buff
                        trigger_delay_ticks=math.floor(CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR),
                        # We divide the trigger interval by
                        # DATA_RATE_SLOWDOWN_FACTOR so the triggers are still
                        # emitted per (wall-clock) second, rather than being
                        # spaced out further
                        trigger_interval_ticks=trg_interval_ticks,
                        clock_frequency_hz=CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR,
                        initial_token_count=TOKEN_COUNT                    
                        )),
            ])

    startpars = rccmd.StartParams(run=RUN_NUMBER, disable_data_storage=False)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("qton_trigdec", startpars),
            ("ntoq_token", startpars),
            ("ntoq_timesync", startpars),
            ("tde", startpars),
        ])

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("qton_trigdec", None),
            ("ntoq_timesync", None),
            ("ntoq_token", None),
            ("tde", None),
        ])

    # Empty match string — presumably addresses no specific module; confirm
    # appfwk's behavior for "".
    pausecmd = mrccmd("pause", "RUNNING", "RUNNING", [
            ("", None)
        ])

    # Resume restores periodic triggering at the configured interval.
    resumecmd = mrccmd("resume", "RUNNING", "RUNNING", [
            ("tde", tde.ResumeParams(
                            trigger_interval_ticks=trg_interval_ticks
                        ))
        ])

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [
            ("", None)
        ])

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, pausecmd, resumecmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
# --- Example 8 ---
def generate(
    FRONTEND_TYPE="pacman",
    NUMBER_OF_DATA_PRODUCERS=1,
    NUMBER_OF_TP_PRODUCERS=1,
    DATA_RATE_SLOWDOWN_FACTOR=1,
    ENABLE_SOFTWARE_TPG=False,
    RUN_NUMBER=333,
    DATA_FILE="./frames.bin",
    TP_DATA_FILE="./tp_frames.bin",
):
    """
    Build the appfwk command sequence for a PACMAN (ND-LAr) readout app.

    Assembles queues, module specs and network connections for a
    PacmanCardReader feeding DataLinkHandlers (plus software/raw
    trigger-primitive handlers, TPSet publishers and consumer modules),
    then constructs the init/conf/start/stop/scrap/record commands and
    returns the whole sequence serialised as a JSON string.

    :param FRONTEND_TYPE:  Frontend tag used to name the raw-data link queues
    :param NUMBER_OF_DATA_PRODUCERS:  Number of raw data links to instantiate
    :param NUMBER_OF_TP_PRODUCERS:  Number of raw trigger-primitive links
    :param DATA_RATE_SLOWDOWN_FACTOR:  Divisor applied to CLOCK_SPEED_HZ when
        sizing latency buffers
    :param ENABLE_SOFTWARE_TPG:  Enable software trigger-primitive generation
        in the raw-data processors
    :param RUN_NUMBER:  Run number placed in the start command
    :param DATA_FILE:  Not referenced in this body -- kept for interface
        compatibility with the sibling generators
    :param TP_DATA_FILE:  Not referenced in this body -- kept for interface
        compatibility with the sibling generators

    :returns:  JSON string with the list of commands
    """

    # Define modules and queues
    queue_bare_specs = ([
        app.QueueSpec(inst="time_sync_q", kind="FollyMPMCQueue", capacity=100),
        app.QueueSpec(
            inst="data_fragments_q", kind="FollyMPMCQueue", capacity=100),
        app.QueueSpec(
            inst="errored_frames_q", kind="FollyMPMCQueue", capacity=10000),
    ] + [
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind="FollySPSCQueue", capacity=1000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"{FRONTEND_TYPE}_link_{idx}",
            kind="FollySPSCQueue",
            capacity=100000,
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        # Raw TP links are numbered after the data-producer links
        app.QueueSpec(
            inst=f"raw_tp_link_{idx}", kind="FollySPSCQueue", capacity=100000)
        for idx in range(
            NUMBER_OF_DATA_PRODUCERS,
            NUMBER_OF_DATA_PRODUCERS + NUMBER_OF_TP_PRODUCERS,
        )
    ] + [
        app.QueueSpec(
            inst=f"sw_tp_queue_{idx}", kind="FollySPSCQueue", capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"tp_data_requests", kind="FollySPSCQueue", capacity=1000)
    ] + [
        app.QueueSpec(
            inst=f"tpset_link_{idx}", kind="FollySPSCQueue", capacity=10000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = ([
        mspec("fake_source", "PacmanCardReader", [
            app.QueueInfo(
                name=f"output_{idx}",
                inst=f"{FRONTEND_TYPE}_link_{idx}",
                dir="output",
            ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
    ] + [
        mspec(
            f"datahandler_{idx}",
            "DataLinkHandler",
            [
                app.QueueInfo(
                    name="raw_input",
                    inst=f"{FRONTEND_TYPE}_link_{idx}",
                    dir="input",
                ),
                app.QueueInfo(
                    name="timesync", inst="time_sync_q", dir="output"),
                # NOTE(review): the queue-info name is hard-coded to
                # "data_requests_0" for every idx while the instance is
                # indexed -- confirm this matches DataLinkHandler's
                # expected queue name.
                app.QueueInfo(name="data_requests_0",
                              inst=f"data_requests_{idx}",
                              dir="input"),
                app.QueueInfo(name="fragment_queue",
                              inst="data_fragments_q",
                              dir="output"),
                app.QueueInfo(
                    name="tp_out", inst=f"sw_tp_queue_{idx}", dir="output"),
                app.QueueInfo(
                    name="tpset_out", inst=f"tpset_link_{idx}", dir="output"),
                app.QueueInfo(name="errored_frames",
                              inst="errored_frames_q",
                              dir="output"),
            ],
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(
            f"timesync_consumer",
            "TimeSyncConsumer",
            [
                app.QueueInfo(
                    name="input_queue", inst=f"time_sync_q", dir="input")
            ],
        )
    ] + [
        mspec(
            f"fragment_consumer",
            "FragmentConsumer",
            [
                app.QueueInfo(
                    name="input_queue", inst=f"data_fragments_q", dir="input")
            ],
        )
    ] + [
        mspec(
            f"sw_tp_handler_{idx}",
            "DataLinkHandler",
            [
                app.QueueInfo(
                    name="raw_input", inst=f"sw_tp_queue_{idx}", dir="input"),
                app.QueueInfo(
                    name="timesync", inst="time_sync_q", dir="output"),
                app.QueueInfo(
                    name="requests", inst="tp_data_requests", dir="input"),
                app.QueueInfo(name="fragment_queue",
                              inst="data_fragments_q",
                              dir="output"),
            ],
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(
            f"tpset_publisher_{idx}",
            "QueueToNetwork",
            [
                app.QueueInfo(
                    name="input", inst=f"tpset_link_{idx}", dir="input")
            ],
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(
            f"raw_tp_handler_{idx}",
            "DataLinkHandler",
            [
                app.QueueInfo(
                    name="raw_input", inst=f"raw_tp_link_{idx}", dir="input"),
                app.QueueInfo(
                    name="timesync", inst="time_sync_q", dir="output"),
            ],
        ) for idx in range(
            NUMBER_OF_DATA_PRODUCERS,
            NUMBER_OF_DATA_PRODUCERS + NUMBER_OF_TP_PRODUCERS,
        )
    ] + [
        mspec(
            "errored_frame_consumer",
            "ErroredFrameConsumer",
            [
                app.QueueInfo(
                    name="input_queue", inst="errored_frames_q", dir="input")
            ],
        )
    ])

    # Network connections: one tpsets_<idx> connection per data producer,
    # plus a single timesync connection.
    nw_specs = [
        nwmgr.Connection(name=f"tpsets_{idx}",
                         topics=["foo"],
                         address="tcp://127.0.0.1:" + str(5000 + idx))
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]
    nw_specs.append(
        nwmgr.Connection(name="timesync",
                         topics=["Timesync"],
                         address="tcp://127.0.0.1:6000"))

    init_specs = app.Init(queues=queue_specs,
                          modules=mod_specs,
                          nwconnections=nw_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rccmd.RCCommand(
        id=basecmd.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs,
    )

    # Per-module configuration payloads for the "conf" transition.
    confcmd = mrccmd(
        "conf",
        "INITIAL",
        "CONFIGURED",
        [
            (
                "fake_source",
                pcr.Conf(link_confs=[
                    pcr.LinkConfiguration(geoid=pcr.GeoID(
                        system="kNDLarTPC", region=0, element=idx), )
                    for idx in range(NUMBER_OF_DATA_PRODUCERS)
                ] + [
                    # NOTE(review): uses sec.GeoID here but pcr.GeoID above --
                    # presumably intentional (different schema for TP links);
                    # confirm.
                    pcr.LinkConfiguration(geoid=sec.GeoID(
                        system="TPC", region=0, element=idx), )
                    for idx in range(
                        NUMBER_OF_DATA_PRODUCERS,
                        NUMBER_OF_DATA_PRODUCERS + NUMBER_OF_TP_PRODUCERS,
                    )
                ],
                         # input_limit=10485100, # default
                         ),
            ),
        ] + [(
            f"datahandler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    fake_trigger_flag=1,
                    region_id=0,
                    element_id=idx,
                    timesync_connection_name=f"timesync",
                    timesync_topic_name="Timesync",
                ),
                # NOTE(review): this division yields a float; presumably the
                # schema coerces latency_buffer_size to an integer -- confirm.
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    region_id=0,
                    element_id=idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=0,
                    element_id=idx,
                    enable_software_tpg=ENABLE_SOFTWARE_TPG,
                    error_counter_threshold=100,
                    error_reset_freq=10000,
                ),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=0,
                    element_id=idx,
                    output_file=f"output_{idx}.out",
                    stream_buffer_size=8388608,
                    enable_raw_recording=True,
                    use_o_direct=False,
                ),
            ),
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)] + [(
            f"sw_tp_handler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    fake_trigger_flag=1,
                    region_id=0,
                    element_id=idx,
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    region_id=0,
                    element_id=idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=0,
                    element_id=idx,
                    enable_software_tpg=ENABLE_SOFTWARE_TPG,
                ),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=0,
                    element_id=idx,
                    output_file=f"output_{idx}.out",
                    stream_buffer_size=8388608,
                    enable_raw_recording=False,
                    use_o_direct=False,
                ),
            ),
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)] + [(
            f"raw_tp_handler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    fake_trigger_flag=1,
                    region_id=0,
                    element_id=idx,
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    region_id=0,
                    element_id=idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=0,
                    element_id=idx,
                    enable_software_tpg=ENABLE_SOFTWARE_TPG,
                ),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=0,
                    element_id=idx,
                    output_file=f"output_{idx}.out",
                    stream_buffer_size=8388608,
                    enable_raw_recording=False,
                    use_o_direct=False,
                ),
            ),
        ) for idx in range(
            NUMBER_OF_DATA_PRODUCERS,
            NUMBER_OF_DATA_PRODUCERS + NUMBER_OF_TP_PRODUCERS,
        )] + [(
            f"tpset_publisher_{idx}",
            qton.Conf(
                msg_type="dunedaq::trigger::TPSet",
                msg_module_name="TPSetNQ",
                sender_config=nos.Conf(
                    name=f"tpsets_{idx}",
                    topic="foo",
                    stype="msgpack",
                ),
            ),
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)],
    )

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    # Start/stop address modules by regex match on their names.
    startpars = rccmd.StartParams(run=RUN_NUMBER)
    startcmd = mrccmd(
        "start",
        "CONFIGURED",
        "RUNNING",
        [
            ("datahandler_.*", startpars),
            ("fake_source", startpars),
            ("data_recorder_.*", startpars),
            ("timesync_consumer", startpars),
            ("fragment_consumer", startpars),
            ("sw_tp_handler_.*", startpars),
            ("raw_tp_handler_.*", startpars),
            ("tpset_publisher_.*", startpars),
            ("errored_frame_consumer", startpars),
        ],
    )

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStart\n\n", jstr)

    stopcmd = mrccmd(
        "stop",
        "RUNNING",
        "CONFIGURED",
        [
            ("fake_source", None),
            ("datahandler_.*", None),
            ("data_recorder_.*", None),
            ("timesync_consumer", None),
            ("fragment_consumer", None),
            ("sw_tp_handler_.*", None),
            ("raw_tp_handler_.*", None),
            ("tpset_publisher_.*", None),
            ("errored_frame_consumer", None),
        ],
    )

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStop\n\n", jstr)

    # Empty match string addresses all modules.
    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [("", None)])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nScrap\n\n", jstr)

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, scrapcmd]

    record_cmd = mrccmd(
        "record",
        "RUNNING",
        "RUNNING",
        [("datahandler_.*", rconf.RecordingParams(duration=10))],
    )

    jstr = json.dumps(record_cmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nRecord\n\n", jstr)

    cmd_seq.append(record_cmd)

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
# Esempio n. 9
# 0
def generate(
        RUN_NUMBER = 333,
        GATHER_INTERVAL = 1e6,
        GATHER_INTERVAL_DEBUG = 10e6,
        MASTER_DEVICE_NAME="",
        MASTER_SEND_DELAYS_PERIOD=0,
        MASTER_CLOCK_FILE="",
        MASTER_CLOCK_MODE=-1,
        PARTITION_IDS=None,
        FANOUT_DEVICES_NAMES=None,
        FANOUT_CLOCK_FILE="",
        ENDPOINT_DEVICE_NAME="",
        ENDPOINT_CLOCK_FILE="",
        ENDPOINT_ADDRESS=0,
        ENDPOINT_PARTITION=0,
        HSI_DEVICE_NAME="",
        HSI_ENDPOINT_ADDRESS=0,
        HSI_ENDPOINT_PARTITION=0,
        HSI_CLOCK_FILE="",
        HSI_RE_MASK=0x0,
        HSI_FE_MASK=0x0,
        HSI_INV_MASK=0x0,
        HSI_RANDOM_RATE=1.0,
        HSI_SOURCE=0x0,
        PART_TRIGGER_MASK=0xff,
        PART_SPILL_GATE_ENABLED=True,
        PART_RATE_CONTROL_ENABLED=True,
        UHAL_LOG_LEVEL="notice",
        OUTPUT_PATH=".",
    ):
    """
    Build the command sequence for the timing-hardware control app.

    Instantiates a TimingHardwareManagerPDI plus (conditionally) master,
    partition, fanout, endpoint and HSI controller modules, constructs
    the standard init/conf/start/stop/scrap commands and the
    timing-specific commands (io_reset, print_status, partition and HSI
    control, ...), and returns the selected command sequence serialised
    as a JSON string.  Controller modules and their commands are only
    included when the corresponding *_DEVICE_NAME / PARTITION_IDS /
    FANOUT_DEVICES_NAMES argument is non-empty.

    :returns: JSON string with the list of commands
    """
    # Fix for the mutable-default-argument pitfall: the previous defaults
    # were PARTITION_IDS=[] and FANOUT_DEVICES_NAMES=[], which are shared
    # across calls.  Behaviour for callers is unchanged.
    if PARTITION_IDS is None:
        PARTITION_IDS = []
    if FANOUT_DEVICES_NAMES is None:
        FANOUT_DEVICES_NAMES = []

    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="hardware_commands", kind='StdDeQueue', capacity=100),
        ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    mod_specs = [
                    mspec("thi", "TimingHardwareManagerPDI", [app.QueueInfo(name="timing_cmds_queue", inst="hardware_commands", dir="input")])
                ]
    conf_cmds = [
                ("thi", thi.ConfParams(
                                           connections_file="${TIMING_SHARE}/config/etc/connections.xml",
                                           gather_interval=GATHER_INTERVAL,
                                           gather_interval_debug=GATHER_INTERVAL_DEBUG,
                                           monitored_device_name_master=MASTER_DEVICE_NAME,
                                           monitored_device_names_fanout=FANOUT_DEVICES_NAMES,
                                           monitored_device_name_endpoint=ENDPOINT_DEVICE_NAME,
                                           monitored_device_name_hsi=HSI_DEVICE_NAME,
                                           uhal_log_level=UHAL_LOG_LEVEL
                                    )),
            ]

    ## master and partition controllers
    if MASTER_DEVICE_NAME != "":

        mod_specs.extend( [ mspec("tmc0", "TimingMasterController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) ] )

        tpc_mods=[]
        for partition_id in PARTITION_IDS:

            tpc_mods.append( mspec("tpc{}".format(partition_id), "TimingPartitionController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) )
        mod_specs.extend( tpc_mods )

    ## fanout controller
    # NOTE(review): fanout modules are created whenever FANOUT_DEVICES_NAMES
    # is non-empty, but their conf commands are only added under the
    # MASTER_DEVICE_NAME branch below -- confirm fanout devices are always
    # used together with a master device.
    for i,fanout_device_name in enumerate(FANOUT_DEVICES_NAMES):
        mod_specs.extend( [ mspec("tfc{}".format(i), "TimingFanoutController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) ] )

    ## endpoint controllers
    if ENDPOINT_DEVICE_NAME != "":
        mod_specs.extend( [ mspec("tec0", "TimingEndpointController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) ] )

    ## hsi controllers
    if HSI_DEVICE_NAME != "":
        mod_specs.extend( [ mspec("hsi0", "HSIController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) ] )

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rcif.RCCommand(
        id=cmdlib.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    ## conf command

    if MASTER_DEVICE_NAME != "":
        conf_cmds.extend( [
                        ("tmc0", tmc.ConfParams(
                                    device=MASTER_DEVICE_NAME,
                                    send_endpoint_delays_period=MASTER_SEND_DELAYS_PERIOD,
                                    clock_config=MASTER_CLOCK_FILE,
                                    fanout_mode=MASTER_CLOCK_MODE,
                                 )),
                     ] )

        for partition_id in PARTITION_IDS:
            conf_cmds.extend( [
                            ("tpc{}".format(partition_id), tpc.PartitionConfParams(
                                                            device=MASTER_DEVICE_NAME,
                                                            partition_id=partition_id,
                                                            trigger_mask=PART_TRIGGER_MASK,
                                                            spill_gate_enabled=PART_SPILL_GATE_ENABLED,
                                                            rate_control_enabled=PART_RATE_CONTROL_ENABLED,
                                                        )),
                        ] )

        for i,fanout_device_name in enumerate(FANOUT_DEVICES_NAMES):
            conf_cmds.extend( [
                            ("tfc{}".format(i), tfc.ConfParams(
                                    device=fanout_device_name,
                                    )),
                         ] )

    if ENDPOINT_DEVICE_NAME != "":
        conf_cmds.extend( [
                        ("tec0", tec.ConfParams(
                                device=ENDPOINT_DEVICE_NAME,
                                address=ENDPOINT_ADDRESS,
                                partition=ENDPOINT_PARTITION
                                )),
                     ] )

    # Derive the emulated trigger interval from the requested random rate;
    # 0 means "no HSI / rate not set" and is passed through to StartParams.
    trigger_interval_ticks=0
    if HSI_DEVICE_NAME != "":
        if HSI_RANDOM_RATE > 0:
            trigger_interval_ticks=math.floor((1/HSI_RANDOM_RATE) * CLOCK_SPEED_HZ)
        else:
            console.log('WARNING! Emulated trigger rate of 0 will not disable signal emulation in real HSI hardware! To disable emulated HSI triggers, use  option: "--hsi-source 0" or mask all signal bits', style="bold red")

        conf_cmds.extend( [
                        ("hsi0", hsi.ConfParams(
                                device=HSI_DEVICE_NAME,
                                clock_frequency=CLOCK_SPEED_HZ,
                                trigger_interval_ticks=trigger_interval_ticks,
                                address=HSI_ENDPOINT_ADDRESS,
                                partition=HSI_ENDPOINT_PARTITION,
                                rising_edge_mask=HSI_RE_MASK,
                                falling_edge_mask=HSI_FE_MASK,
                                invert_edge_mask=HSI_INV_MASK,
                                data_source=HSI_SOURCE,
                                )),
                     ] )

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED", conf_cmds)

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    # Start/stop address modules by regex match on their names.
    startpars = rcif.StartParams(run=RUN_NUMBER, trigger_interval_ticks = trigger_interval_ticks)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("thi", startpars),
            ("tmc.*", startpars),
            ("tpc.*", startpars),
            ("tfc.*", startpars),
            ("tec.*", startpars),
            ("hsi.*", startpars),
        ])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("thi", None),
            ("tmc.*", None),
            ("tpc.*", None),
            ("tfc.*", None),
            ("tec.*", None),
            ("hsi.*", None),
        ])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nStop\n\n", jstr)


    scrapcmd = mcmd("scrap", [
            ("thi", None),
            ("tmc.*", None),
            ("tpc.*", None),
            ("tfc.*", None),
            ("tec.*", None),
            ("hsi.*", None),
        ])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nScrap\n\n", jstr)

    ## timing specific commands

    # master commands
    master_io_reset_cmd = mcmd("master_io_reset", [
            ("tmc.*", tcmd.IOResetCmdPayload(
                      clock_config=MASTER_CLOCK_FILE,
                      fanout_mode=MASTER_CLOCK_MODE,
                      soft=False
                      )),
        ])
    jstr = json.dumps(master_io_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nMaster IO reset\n\n", jstr)


    master_set_timestamp_cmd = mcmd("master_set_timestamp", [
            ("tmc.*", None),
        ])
    jstr = json.dumps(master_set_timestamp_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nMaster set timestamp\n\n", jstr)


    master_print_status_cmd = mcmd("master_print_status", [
            ("tmc.*", None),
        ])
    jstr = json.dumps(master_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nMaster print status\n\n", jstr)


    # partition commands
    partition_configure_cmd = mcmd("partition_configure", [
            ("tpc.*", tpc.PartitionConfParams(
                      trigger_mask=PART_TRIGGER_MASK,
                      spill_gate_enabled=PART_SPILL_GATE_ENABLED,
                      rate_control_enabled=PART_RATE_CONTROL_ENABLED,
                      )),
        ])
    jstr = json.dumps(partition_configure_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition configure\n\n", jstr)


    partition_enable_cmd = mcmd("partition_enable", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_enable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition enable\n\n", jstr)


    partition_disable_cmd = mcmd("partition_disable", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_disable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition disable\n\n", jstr)


    partition_start_cmd = mcmd("partition_start", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_start_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition start\n\n", jstr)


    partition_stop_cmd = mcmd("partition_stop", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_stop_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition stop\n\n", jstr)


    partition_enable_triggers_cmd = mcmd("partition_enable_triggers", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_enable_triggers_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition enable triggers\n\n", jstr)


    partition_disable_triggers_cmd = mcmd("partition_disable_triggers", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_disable_triggers_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition disable triggers\n\n", jstr)


    partition_print_status_cmd = mcmd("partition_print_status", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition print status\n\n", jstr)

    # fanout commands
    fanout_io_reset_cmd = mcmd("fanout_io_reset", [
            ("tfc.*", tcmd.IOResetCmdPayload(
                      clock_config=FANOUT_CLOCK_FILE,
                      soft=False
                      )),
        ])
    jstr = json.dumps(fanout_io_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nFanout IO reset\n\n", jstr)


    fanout_print_status_cmd = mcmd("fanout_print_status", [
            ("tfc.*", None),
        ])
    jstr = json.dumps(fanout_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nFanout print status\n\n", jstr)

    # hsi commands
    hsi_io_reset_cmd = mcmd("hsi_io_reset", [
            ("hsi.*", tcmd.IOResetCmdPayload(
                      clock_config=HSI_CLOCK_FILE,
                      soft=False
                      )),
        ])
    jstr = json.dumps(hsi_io_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI IO reset\n\n", jstr)


    hsi_endpoint_enable_cmd = mcmd("hsi_endpoint_enable", [
            ("hsi.*", tcmd.TimingEndpointConfigureCmdPayload(
                      address=HSI_ENDPOINT_ADDRESS,
                      partition=HSI_ENDPOINT_PARTITION
                      )),
        ])
    jstr = json.dumps(hsi_endpoint_enable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI endpoint enable\n\n", jstr)


    hsi_endpoint_disable_cmd = mcmd("hsi_endpoint_disable", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_endpoint_disable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI endpoint disable\n\n", jstr)


    hsi_endpoint_reset_cmd = mcmd("hsi_endpoint_reset", [
            ("hsi.*", tcmd.TimingEndpointConfigureCmdPayload(
                      address=HSI_ENDPOINT_ADDRESS,
                      partition=HSI_ENDPOINT_PARTITION
                      )),
        ])
    jstr = json.dumps(hsi_endpoint_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI endpoint reset\n\n", jstr)


    hsi_reset_cmd = mcmd("hsi_reset", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI reset\n\n", jstr)


    hsi_configure_cmd = mcmd("hsi_configure", [
            ("hsi.*", tcmd.HSIConfigureCmdPayload(
                      rising_edge_mask=HSI_RE_MASK,
                      falling_edge_mask=HSI_FE_MASK,
                      invert_edge_mask=HSI_INV_MASK,
                      data_source=HSI_SOURCE,
                      random_rate=HSI_RANDOM_RATE
                      )),
        ])
    jstr = json.dumps(hsi_configure_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI configure\n\n", jstr)

    hsi_start_cmd = mcmd("hsi_start", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_start_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI start\n\n", jstr)

    hsi_stop_cmd = mcmd("hsi_stop", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_stop_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI stop\n\n", jstr)


    hsi_print_status_cmd = mcmd("hsi_print_status", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI print status\n\n", jstr)



    # endpoint commands
    endpoint_io_reset_cmd = mcmd("endpoint_io_reset", [
            ("tec.*", tcmd.IOResetCmdPayload(
                      clock_config=ENDPOINT_CLOCK_FILE,
                      soft=False
                      )),
        ])
    jstr = json.dumps(endpoint_io_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint IO reset\n\n", jstr)


    endpoint_enable_cmd = mcmd("endpoint_enable", [
            ("tec.*", tcmd.TimingEndpointConfigureCmdPayload(
                      address=ENDPOINT_ADDRESS,
                      partition=ENDPOINT_PARTITION
                      )),
        ])
    jstr = json.dumps(endpoint_enable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint enable\n\n", jstr)


    endpoint_disable_cmd = mcmd("endpoint_disable", [
            ("tec.*", None),
        ])
    jstr = json.dumps(endpoint_disable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint disable\n\n", jstr)


    endpoint_reset_cmd = mcmd("endpoint_reset", [
            ("tec.*", tcmd.TimingEndpointConfigureCmdPayload(
                      address=ENDPOINT_ADDRESS,
                      partition=ENDPOINT_PARTITION
                      )),
        ])
    jstr = json.dumps(endpoint_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint reset\n\n", jstr)


    endpoint_print_status_cmd = mcmd("endpoint_print_status", [
            ("tec.*", None),
        ])
    jstr = json.dumps(endpoint_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint print status\n\n", jstr)
    #####

    # Create a list of commands; device-specific command groups are appended
    # only when the corresponding hardware is configured.
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, scrapcmd]

    if MASTER_DEVICE_NAME != "":
        cmd_seq.extend( [
                        master_io_reset_cmd, master_set_timestamp_cmd, master_print_status_cmd,
                        partition_configure_cmd, partition_enable_cmd, partition_disable_cmd,
                        partition_start_cmd, partition_stop_cmd,
                        partition_enable_triggers_cmd, partition_disable_triggers_cmd,
                        partition_print_status_cmd
                        ] )

    if len(FANOUT_DEVICES_NAMES) != 0:
        cmd_seq.extend( [
                        fanout_io_reset_cmd, fanout_print_status_cmd,
                        ] )

    if ENDPOINT_DEVICE_NAME != "":
        cmd_seq.extend( [
                        endpoint_io_reset_cmd,
                        endpoint_enable_cmd, endpoint_disable_cmd,
                        endpoint_reset_cmd, endpoint_print_status_cmd
                        ] )

    if HSI_DEVICE_NAME != "":
        cmd_seq.extend( [
                        hsi_io_reset_cmd,
                        hsi_endpoint_enable_cmd,
                        hsi_endpoint_disable_cmd,
                        hsi_endpoint_reset_cmd,
                        hsi_reset_cmd,
                        hsi_configure_cmd,
                        hsi_start_cmd,
                        hsi_stop_cmd,
                        hsi_print_status_cmd,
                        ] )

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
# Esempio n. 10
# 0
def generate(
        PARTITION = "hsi_readout_test",
        RUN_NUMBER = 333, 
        CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
        READOUT_PERIOD = 1e3,
        HSI_DEVICE_NAME="BOREAS_FMC",
        TTCM_S1: int = 1,
        TTCM_S2: int = 2,
        UHAL_LOG_LEVEL="notice",
        OUTPUT_PATH=".",
    ):
    """
    Generate the init/conf/start/stop command sequence for a standalone
    HSI (Hardware Signal Interface) readout test application.

    :param PARTITION:         Partition name; also used to derive the
                              "<PARTITION>.hsievent" network connection name.
    :param RUN_NUMBER:        Run number embedded in the start command.
    :param CONNECTIONS_FILE:  uHAL connections file for the timing hardware.
    :param READOUT_PERIOD:    Readout period passed to the HSIReadout module.
    :param HSI_DEVICE_NAME:   Name of the HSI timing device.
    :param TTCM_S1:           Signal type for TTCM map slot s1.
    :param TTCM_S2:           Signal type for TTCM map slot s2.
    :param UHAL_LOG_LEVEL:    uHAL logging level.
    :param OUTPUT_PATH:       Unused in this variant; kept for interface
                              compatibility with the sibling generate()s.

    :returns:   The full command sequence serialized as a JSON string.
    :rtype:     str
    """
    
    # Network connection over which the HSI module publishes HSIEvents.
    nw_specs = [nwmgr.Connection(name=PARTITION + ".hsievent",topics=[],  address="tcp://127.0.0.1:12344")]

    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="trigger_candidate_q", kind='FollySPSCQueue', capacity=2000),

        ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    mod_specs = [   
                    mspec("hsi", "HSIReadout", []),

                    mspec("ttcm", "TimingTriggerCandidateMaker", [
                                    app.QueueInfo(name="output", inst="trigger_candidate_q", dir="output"),
                                ]),
                ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs, nwconnections=nw_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rcif.RCCommand(
        id=cmdlib.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    # Per-module configuration payloads for the "conf" command.
    mods = [
                ("hsi", hsi.ConfParams(
                        connections_file=CONNECTIONS_FILE,
                        readout_period=READOUT_PERIOD,
                        hsi_device_name=HSI_DEVICE_NAME,
                        uhal_log_level=UHAL_LOG_LEVEL,
                        hsievent_connection_name = f"{PARTITION}.hsievent",
                        )),
                
                ("ttcm", ttcm.Conf(
                        s1=ttcm.map_t(signal_type=TTCM_S1,
                                      time_before=100000,
                                      time_after=200000),
                        s2=ttcm.map_t(signal_type=TTCM_S2,
                                      time_before=100000,
                                      time_after=200000),
                        hsievent_connection_name = PARTITION+".hsievent",
                        )),
            ]

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED", mods)

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    # BUGFIX: run was hard-coded to 1, silently ignoring the RUN_NUMBER
    # parameter (the sibling generator below correctly uses run=RUN_NUMBER).
    startpars = rcif.StartParams(run=RUN_NUMBER, disable_data_storage=False)

    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("hsi", None),
            ("ttcm", startpars),
        ])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("hsi", None),
            ("ttcm", None),
        ])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nStop\n\n", jstr)


    scrapcmd = mcmd("scrap", [
            ("", None)
        ])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nScrap\n\n", jstr)

    # Create a list of commands
    # NOTE(review): scrapcmd is built and printed above but not included in
    # the returned sequence — confirm whether that is intentional.
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
def generate(NUMBER_OF_DATA_PRODUCERS=2,
             EMULATOR_MODE=False,
             DATA_RATE_SLOWDOWN_FACTOR=10,
             RUN_NUMBER=333,
             TRIGGER_RATE_HZ=1.0,
             DATA_FILE="./frames.bin",
             OUTPUT_PATH=".",
             DISABLE_OUTPUT=False,
             TOKEN_COUNT=10):
    """
    Generate the full command sequence (init/conf/start/stop/pause/resume/
    scrap) for a fake-data readout + dataflow application and return it as a
    JSON string.

    :param NUMBER_OF_DATA_PRODUCERS:   Number of emulated links, each with its
                                       own data-request and fake-link queue
                                       plus a DataLinkHandler instance.
    :param EMULATOR_MODE:              Passed to each DataLinkHandler's Conf.
    :param DATA_RATE_SLOWDOWN_FACTOR:  Divides clock-derived rates/ticks so
                                       triggers stay at wall-clock rate.
    :param RUN_NUMBER:                 Run number placed in the start command.
    :param TRIGGER_RATE_HZ:            Desired trigger rate; must be non-zero
                                       (used as a divisor below).
    :param DATA_FILE:                  Raw WIB frame file read by the
                                       FakeCardReader.
    :param OUTPUT_PATH:                Directory for the HDF5 data store.
    :param DISABLE_OUTPUT:             Disables data storage in StartParams.
    :param TOKEN_COUNT:                >0: tokens start at the trigger
                                       emulator; <=0: |TOKEN_COUNT| tokens
                                       start at the DataWriter (see below).

    :returns:   The command sequence serialized as JSON.
    :rtype:     str

    NOTE(review): relies on module-level CLOCK_SPEED_HZ and QUEUE_POP_WAIT_MS
    constants defined elsewhere in this file — confirm their values/units.
    """

    # Ticks between triggers: (seconds per trigger) * effective tick rate.
    trigger_interval_ticks = math.floor(
        (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR)

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_q", kind='FollyMPMCQueue', capacity=100),
        app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(
            inst="trigger_decision_q", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(inst="trigger_decision_copy_for_bookkeeping",
                      kind='FollySPSCQueue',
                      capacity=20),
        app.QueueSpec(
            inst="trigger_record_q", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(
            inst="data_fragments_q", kind='FollyMPMCQueue', capacity=100),
    ] + [
        # One data-request queue per emulated link.
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=20)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        # One fake raw-data link queue per emulated link.
        app.QueueSpec(inst=f"wib_fake_link_{idx}",
                      kind='FollySPSCQueue',
                      capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    # Module graph: tde -> rqg -> datahandlers -> ffr -> datawriter,
    # with fake_source feeding the datahandlers' raw inputs.
    mod_specs = [
        mspec("tde", "TriggerDecisionEmulator", [
            app.QueueInfo(
                name="time_sync_source", inst="time_sync_q", dir="input"),
            app.QueueInfo(name="token_source", inst="token_q", dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_q",
                          dir="output"),
        ]),
        mspec("rqg", "RequestGenerator", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_q",
                          dir="input"),
            app.QueueInfo(name="trigger_decision_for_event_building",
                          inst="trigger_decision_copy_for_bookkeeping",
                          dir="output"),
        ] + [
            app.QueueInfo(name=f"data_request_{idx}_output_queue",
                          inst=f"data_requests_{idx}",
                          dir="output")
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
        mspec("ffr", "FragmentReceiver", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_copy_for_bookkeeping",
                          dir="input"),
            app.QueueInfo(name="trigger_record_output_queue",
                          inst="trigger_record_q",
                          dir="output"),
            app.QueueInfo(name="data_fragment_input_queue",
                          inst="data_fragments_q",
                          dir="input"),
        ]),
        mspec("datawriter", "DataWriter", [
            app.QueueInfo(name="trigger_record_input_queue",
                          inst="trigger_record_q",
                          dir="input"),
            app.QueueInfo(
                name="token_output_queue", inst="token_q", dir="output"),
        ]),
        mspec("fake_source", "FakeCardReader", [
            app.QueueInfo(name=f"output_{idx}",
                          inst=f"wib_fake_link_{idx}",
                          dir="output")
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
    ] + [
        mspec(f"datahandler_{idx}", "DataLinkHandler", [
            app.QueueInfo(
                name="raw_input", inst=f"wib_fake_link_{idx}", dir="input"),
            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
            app.QueueInfo(
                name="requests", inst=f"data_requests_{idx}", dir="input"),
            app.QueueInfo(
                name="fragments", inst="data_fragments_q", dir="output"),
        ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rccmd.RCCommand(id=basecmd.CmdId("init"),
                              entry_state="NONE",
                              exit_state="INITIAL",
                              data=init_specs)

    # TOKEN_COUNT sign selects where the initial flow-control tokens live:
    # positive -> trigger emulator, non-positive -> DataWriter (negated).
    if TOKEN_COUNT > 0:
        df_token_count = 0
        trigemu_token_count = TOKEN_COUNT
    else:
        df_token_count = -1 * TOKEN_COUNT
        trigemu_token_count = 0

    confcmd = mrccmd(
        "conf",
        "INITIAL",
        "CONFIGURED",
        [
            (
                "tde",
                tde.ConfParams(
                    links=[idx for idx in range(NUMBER_OF_DATA_PRODUCERS)],
                    min_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                    max_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                    min_readout_window_ticks=1200,
                    max_readout_window_ticks=1200,
                    trigger_window_offset=1000,
                    # The delay is set to put the trigger well within the latency buff
                    trigger_delay_ticks=math.floor(
                        2 * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR),
                    # We divide the trigger interval by
                    # DATA_RATE_SLOWDOWN_FACTOR so the triggers are still
                    # emitted per (wall-clock) second, rather than being
                    # spaced out further
                    trigger_interval_ticks=trigger_interval_ticks,
                    clock_frequency_hz=CLOCK_SPEED_HZ /
                    DATA_RATE_SLOWDOWN_FACTOR,
                    initial_token_count=trigemu_token_count)),
            ("rqg",
             rqg.ConfParams(map=rqg.mapgeoidqueue([
                 rqg.geoidinst(
                     apa=0, link=idx, queueinstance=f"data_requests_{idx}")
                 for idx in range(NUMBER_OF_DATA_PRODUCERS)
             ]))),
            ("ffr", ffr.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS)),
            (
                "datawriter",
                dw.ConfParams(
                    initial_token_count=df_token_count,
                    data_store_parameters=hdf5ds.ConfParams(
                        name="data_store",
                        # type = "HDF5DataStore", # default
                        directory_path=OUTPUT_PATH,  # default
                        # mode = "all-per-file", # default
                        max_file_size_bytes=1073741824,
                        filename_parameters=hdf5ds.HDF5DataStoreFileNameParams(
                            overall_prefix="swtest",
                            digits_for_run_number=6,
                            file_index_prefix="",
                            digits_for_file_index=4,
                        ),
                        file_layout_parameters=hdf5ds.
                        HDF5DataStoreFileLayoutParams(
                            trigger_record_name_prefix="TriggerRecord",
                            digits_for_trigger_number=5,
                            digits_for_apa_number=3,
                            digits_for_link_number=2,
                        )))),
            (
                "fake_source",
                fcr.Conf(
                    link_ids=list(range(NUMBER_OF_DATA_PRODUCERS)),
                    # input_limit=10485100, # default
                    # NOTE(review): 25*12 presumably converts WIB frame
                    # size/packing to a kHz rate — confirm the derivation.
                    rate_khz=CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR * 1000),
                    raw_type="wib",
                    data_filename=DATA_FILE,
                    queue_timeout_ms=QUEUE_POP_WAIT_MS)),
        ] + [
            (
                f"datahandler_{idx}",
                dlh.Conf(
                    raw_type="wib",
                    emulator_mode=EMULATOR_MODE,
                    # fake_trigger_flag=0, # default
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    apa_number=0,
                    link_number=idx))
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ])

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    startpars = rccmd.StartParams(
        run=RUN_NUMBER,
        trigger_interval_ticks=trigger_interval_ticks,
        disable_data_storage=DISABLE_OUTPUT)
    # Start downstream-first so consumers are running before producers.
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
        ("datawriter", startpars),
        ("ffr", startpars),
        ("datahandler_.*", startpars),
        ("fake_source", startpars),
        ("rqg", startpars),
        ("tde", startpars),
    ])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStart\n\n", jstr)

    # Stop upstream-first (reverse of start) to drain queues cleanly.
    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
        ("tde", None),
        ("rqg", None),
        ("fake_source", None),
        ("datahandler_.*", None),
        ("ffr", None),
        ("datawriter", None),
    ])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStop\n\n", jstr)

    # Empty match pattern — presumably addresses every module; verify
    # against the command dispatch's regex semantics.
    pausecmd = mrccmd("pause", "RUNNING", "RUNNING", [("", None)])

    jstr = json.dumps(pausecmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nPause\n\n", jstr)

    resumecmd = mrccmd(
        "resume", "RUNNING", "RUNNING",
        [("tde",
          tde.ResumeParams(trigger_interval_ticks=trigger_interval_ticks))])

    jstr = json.dumps(resumecmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nResume\n\n", jstr)

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [("", None)])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nScrap\n\n", jstr)

    # Create a list of commands
    cmd_seq = [
        initcmd, confcmd, startcmd, stopcmd, pausecmd, resumecmd, scrapcmd
    ]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr