Example #1
def add_one_command_data(command_data, command, default_params, app,
                         module_order):
    """Add the command data for one command in one app to the command_data object. The modules to be sent the command are listed in `module_order`. If the module has an entry in its extra_commands dictionary for this command, then that entry is used as the parameters to pass to the command, otherwise the `default_params` object is passed"""
    mod_and_params = []
    for module in module_order:
        extra_commands = app.modulegraph.get_module(module).extra_commands
        if command in extra_commands:
            mod_and_params.append((module, extra_commands[command]))
        else:
            mod_and_params.append((module, default_params))

    command_data[command] = acmd(mod_and_params)
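

# A minimal, self-contained usage sketch of add_one_command_data. The
# SimpleNamespace objects are hypothetical stand-ins for the real
# App/ModuleGraph classes, and this toy acmd just returns its input (the
# real acmd builds an addressed-command object instead):
from types import SimpleNamespace

def acmd(mod_and_params):  # stand-in for the real acmd helper
    return mod_and_params

mod_plain = SimpleNamespace(extra_commands={})
mod_custom = SimpleNamespace(extra_commands={"start": {"run": 2}})
modules = {"plain": mod_plain, "custom": mod_custom}
fake_app = SimpleNamespace(modulegraph=SimpleNamespace(get_module=modules.get))

command_data = {}
add_one_command_data(command_data, "start", {"run": 1}, fake_app, ["plain", "custom"])
assert command_data["start"] == [("plain", {"run": 1}), ("custom", {"run": 2})]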
Example #2
def generate(INPUT_FILE: str, SLOWDOWN_FACTOR: float, NETWORK_ENDPOINTS: dict):
    cmd_data = {}

    # Derived parameters
    CLOCK_FREQUENCY_HZ = 50000000 / SLOWDOWN_FACTOR
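    # e.g. SLOWDOWN_FACTOR=10 emulates a 5 MHz clock (50 MHz is the nominal
    # hardware clock)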

    # Define modules and queues
    queue_specs = [
        app.QueueSpec(inst="tpset_q", kind='FollySPSCQueue', capacity=1000)
    ]

    mod_specs = [
        mspec(
            "tpm", "TriggerPrimitiveMaker",
            [app.QueueInfo(name="tpset_sink", inst="tpset_q", dir="output")]),
        mspec("qton", "QueueToNetwork",
              [app.QueueInfo(name="input", inst="tpset_q", dir="input")])
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        (
            "tpm",
            tpm.ConfParams(
                filename=INPUT_FILE,
                number_of_loops=-1,  # Infinite
                tpset_time_offset=0,
                tpset_time_width=10000,  # 0.2 ms
                clock_frequency_hz=CLOCK_FREQUENCY_HZ)),
        ("qton",
         qton.Conf(msg_type="dunedaq::trigger::TPSet",
                   msg_module_name="TPSetNQ",
                   sender_config=nos.Conf(ipm_plugin_type="ZmqPublisher",
                                          address=NETWORK_ENDPOINTS["tpset"],
                                          topic="foo",
                                          stype="msgpack")))
    ])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("qton", startpars),
        ("tpm", startpars),
    ])

    cmd_data['pause'] = acmd([])

    cmd_data['resume'] = acmd([])

    cmd_data['stop'] = acmd([
        ("tpm", None),
        ("qton", None),
    ])

    cmd_data['scrap'] = acmd([
        #     ("tpm", None),
    ])

    return cmd_data
Example #3
def generate(NETWORK_ENDPOINTS: dict):
    cmd_data = {}

    # Define modules and queues
    queue_specs = [
        app.QueueSpec(inst="tpset_q", kind='FollySPSCQueue', capacity=10000)
    ]

    mod_specs = [
        mspec(
            "tps_sink", "TPSetSink",
            [app.QueueInfo(name="tpset_source", inst="tpset_q", dir="input")]),
        mspec("ntoq", "NetworkToQueue",
              [app.QueueInfo(name="output", inst="tpset_q", dir="output")])
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([(
        "ntoq",
        ntoq.Conf(
            msg_type="dunedaq::trigger::TPSet",
            msg_module_name="TPSetNQ",
            receiver_config=nor.Conf(
                ipm_plugin_type="ZmqSubscriber",
                address=NETWORK_ENDPOINTS["tpset"],
                # "foo" matches the topic set by the publisher in example #2;
                # an empty subscriptions list would subscribe to everything
                subscriptions=["foo"])
        ))])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("ntoq", startpars),
        ("tps_sink", startpars),
    ])

    cmd_data['pause'] = acmd([])

    cmd_data['resume'] = acmd([])

    cmd_data['stop'] = acmd([
        ("tps_sink", None),
        ("ntoq", None),
    ])

    cmd_data['scrap'] = acmd([
        #     ("tpm", None),
    ])

    return cmd_data
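
# Examples #2 and #3 are the two ends of one link: the "qton" sender in #2
# publishes TPSets on NETWORK_ENDPOINTS["tpset"] with topic "foo", and the
# "ntoq" receiver above subscribes to the same address and topic. A sketch of
# the shared endpoint map (the ZeroMQ address is purely illustrative):
#
#     endpoints = {"tpset": "tcp://127.0.0.1:12345"}
#     sender_cmds = generate("tps.txt", 10, endpoints)  # example #2's generate
#     receiver_cmds = generate(endpoints)               # example #3's generate
#
# (Both generators are named `generate`; in the codebase they live in
# separate modules.)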
Example #4
def generate(NW_SPECS: list,
             SOFTWARE_TPG_ENABLED: bool = False,
             RU_CONFIG: list = [],
             ACTIVITY_PLUGIN: str = 'TriggerActivityMakerPrescalePlugin',
             ACTIVITY_CONFIG: dict = dict(prescale=10000),
             CANDIDATE_PLUGIN: str = 'TriggerCandidateMakerPrescalePlugin',
             CANDIDATE_CONFIG: dict = dict(prescale=10),
             TOKEN_COUNT: int = 10,
             DF_COUNT: int = 1,
             SYSTEM_TYPE='wib',
             TTCM_S1: int = 1,
             TTCM_S2: int = 2,
             TRIGGER_WINDOW_BEFORE_TICKS: int = 1000,
             TRIGGER_WINDOW_AFTER_TICKS: int = 1000,
             PARTITION="UNKNOWN"):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = {PARTITION + '.hsievent'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst='trigger_candidate_q',
                      kind='FollyMPMCQueue',
                      capacity=1000),
        app.QueueSpec(inst='trigger_decision_q',
                      kind='FollySPSCQueue',
                      capacity=2)
    ]

    if SOFTWARE_TPG_ENABLED:
        queue_bare_specs.extend([
            app.QueueSpec(inst=f"fragment_q",
                          kind='FollyMPMCQueue',
                          capacity=1000),
            app.QueueSpec(inst='taset_q',
                          kind='FollyMPMCQueue',
                          capacity=1000),
        ])
        for ru in range(len(RU_CONFIG)):
            queue_bare_specs.extend([
                app.QueueSpec(inst=f"tpsets_from_netq_{ru}",
                              kind='FollySPSCQueue',
                              capacity=1000),
                app.QueueSpec(inst=f'zipped_tpset_q_{ru}',
                              kind='FollySPSCQueue',
                              capacity=1000),
            ])
            for idx in range(RU_CONFIG[ru]["channel_count"]):
                queue_bare_specs.extend([
                    app.QueueSpec(inst=f"tpset_q_for_buf{ru}_{idx}",
                                  kind='FollySPSCQueue',
                                  capacity=1000),
                    app.QueueSpec(inst=f"data_request_q{ru}_{idx}",
                                  kind='FollySPSCQueue',
                                  capacity=1000),
                ])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = []

    if SOFTWARE_TPG_ENABLED:
        mod_specs.extend([
            mspec(f"request_receiver", "RequestReceiver", [
                app.QueueInfo(name="output",
                              inst=f"data_request_q{ru}_{idy}",
                              dir="output") for ru in range(len(RU_CONFIG))
                for idy in range(RU_CONFIG[ru]["channel_count"])
            ])
        ] + [
            mspec(f"tpset_receiver", "TPSetReceiver", [
                app.QueueInfo(name="output",
                              inst=f"tpset_q_for_buf{ru}_{idy}",
                              dir="output") for ru in range(len(RU_CONFIG))
                for idy in range(RU_CONFIG[ru]["channel_count"])
            ])
        ] + [
            mspec(f"fragment_sender", "FragmentSender", [
                app.QueueInfo(
                    name="input_queue", inst=f"fragment_q", dir="input")
            ]),
            mspec(
                'tcm',
                'TriggerCandidateMaker',
                [  # TASet -> TC
                    app.QueueInfo(name='input', inst='taset_q', dir='input'),
                    app.QueueInfo(name='output',
                                  inst='trigger_candidate_q',
                                  dir='output'),
                ])
        ])
        for ru in range(len(RU_CONFIG)):
            mod_specs.extend([
                mspec(f"tpset_subscriber_{ru}", "NetworkToQueue", [
                    app.QueueInfo(name="output",
                                  inst=f"tpsets_from_netq_{ru}",
                                  dir="output")
                ]),
                mspec(
                    f"zip_{ru}",
                    "TPZipper",
                    [
                        app.QueueInfo(name="input",
                                      inst=f"tpsets_from_netq_{ru}",
                                      dir="input"),
                        app.QueueInfo(
                            name="output",
                            inst=f"zipped_tpset_q_{ru}",
                            dir="output"
                        ),  # FIXME: need to fan out this zipped_tpset_q if using multiple algorithms
                    ]),

                ### Algorithm(s)
                mspec(
                    f'tam_{ru}',
                    'TriggerActivityMaker',
                    [  # TPSet -> TASet
                        app.QueueInfo(name='input',
                                      inst=f'zipped_tpset_q_{ru}',
                                      dir='input'),
                        app.QueueInfo(
                            name='output', inst='taset_q', dir='output'),
                    ]),
            ])
            for idy in range(RU_CONFIG[ru]["channel_count"]):
                mod_specs.extend([
                    mspec(f"buf{ru}_{idy}", "TPSetBufferCreator", [
                        app.QueueInfo(name="tpset_source",
                                      inst=f"tpset_q_for_buf{ru}_{idy}",
                                      dir="input"),
                        app.QueueInfo(name="data_request_source",
                                      inst=f"data_request_q{ru}_{idy}",
                                      dir="input"),
                        app.QueueInfo(name="fragment_sink",
                                      inst=f"fragment_q",
                                      dir="output"),
                    ])
                ])

    mod_specs += ([

        ### Timing TCs
        mspec("ttcm", "TimingTriggerCandidateMaker", [
            app.QueueInfo(
                name="output", inst="trigger_candidate_q", dir="output"),
        ]),

        ### Module level trigger
        mspec("mlt", "ModuleLevelTrigger", [
            app.QueueInfo(name="trigger_candidate_source",
                          inst="trigger_candidate_q",
                          dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_q",
                          dir="output"),
        ]),

        ### DFO
        mspec("dfo", "DataFlowOrchestrator", [
            app.QueueInfo(name="trigger_decision_queue",
                          inst="trigger_decision_q",
                          dir="input"),
        ]),
    ])

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    # Generate schema for the maker plugins on the fly in the temptypes module
    make_moo_record(ACTIVITY_CONFIG, 'ActivityConf', 'temptypes')
    make_moo_record(CANDIDATE_CONFIG, 'CandidateConf', 'temptypes')
    import temptypes
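    # After the two make_moo_record calls above, temptypes provides
    # ActivityConf and CandidateConf record types whose fields mirror the
    # keys of ACTIVITY_CONFIG and CANDIDATE_CONFIG (e.g. prescale), so the
    # dicts can be splatted into them further down.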

    tp_confs = []

    if SOFTWARE_TPG_ENABLED:
        tp_confs.extend([
            ("request_receiver",
             rrcv.ConfParams(map=[
                 rrcv.geoidinst(region=RU_CONFIG[ru]["region_id"],
                                element=idy + RU_CONFIG[ru]["start_channel"],
                                system="DataSelection",
                                queueinstance=f"data_request_q{ru}_{idy}")
                 for ru in range(len(RU_CONFIG))
                 for idy in range(RU_CONFIG[ru]["channel_count"])
             ],
                             general_queue_timeout=100,
                             connection_name=f"{PARTITION}.ds_tp_datareq_0")),
            ("tpset_receiver",
             tpsrcv.ConfParams(map=[
                 tpsrcv.geoidinst(region=RU_CONFIG[ru]["region_id"],
                                  element=idy + RU_CONFIG[ru]["start_channel"],
                                  system=SYSTEM_TYPE,
                                  queueinstance=f"tpset_q_for_buf{ru}_{idy}")
                 for ru in range(len(RU_CONFIG))
                 for idy in range(RU_CONFIG[ru]["channel_count"])
             ],
                               general_queue_timeout=100,
                               topic=f"TPSets")),
            (f"fragment_sender", None),
            ('tcm',
             tcm.Conf(candidate_maker=CANDIDATE_PLUGIN,
                      candidate_maker_config=temptypes.CandidateConf(
                          **CANDIDATE_CONFIG))),
        ])
        for idx in range(len(RU_CONFIG)):
            tp_confs.extend([
                (f"tpset_subscriber_{idx}",
                 ntoq.Conf(msg_type="dunedaq::trigger::TPSet",
                           msg_module_name="TPSetNQ",
                           receiver_config=nor.Conf(
                               name=f'{PARTITION}.tpsets_{idx}',
                               subscriptions=["TPSets"]))),
                (
                    f"zip_{idx}",
                    tzip.ConfParams(
                        cardinality=RU_CONFIG[idx]["channel_count"],
                        max_latency_ms=1000,
                        region_id=0,  # Fake placeholder
                        element_id=0  # Fake placeholder
                    )),

                ### Algorithms
                (
                    f'tam_{idx}',
                    tam.Conf(
                        activity_maker=ACTIVITY_PLUGIN,
                        geoid_region=0,  # Fake placeholder
                        geoid_element=0,  # Fake placeholder
                        window_time=10000,  # should match whatever makes TPSets, in principle
                        buffer_time=625000,  # 10ms in 62.5 MHz ticks
                        activity_maker_config=temptypes.ActivityConf(
                            **ACTIVITY_CONFIG))),
            ])
            for idy in range(RU_CONFIG[idx]["channel_count"]):
                tp_confs.extend([
                    (f"buf{idx}_{idy}",
                     buf.Conf(tpset_buffer_size=10000,
                              region=RU_CONFIG[idx]["region_id"],
                              element=idy + RU_CONFIG[idx]["start_channel"]))
                ])

    total_link_count = 0
    for ru in range(len(RU_CONFIG)):
        total_link_count += RU_CONFIG[ru]["channel_count"]

    cmd_data['conf'] = acmd(tp_confs + [

        ### Timing TCs
        ("ttcm",
         ttcm.Conf(
             s1=ttcm.map_t(signal_type=TTCM_S1,
                           time_before=TRIGGER_WINDOW_BEFORE_TICKS,
                           time_after=TRIGGER_WINDOW_AFTER_TICKS),
             s2=ttcm.map_t(signal_type=TTCM_S2,
                           time_before=TRIGGER_WINDOW_BEFORE_TICKS,
                           time_after=TRIGGER_WINDOW_AFTER_TICKS),
             hsievent_connection_name=PARTITION + ".hsievent",
         )),

        # Module level trigger
        (
            "mlt",
            mlt.ConfParams(
                # This line requests the raw data from upstream DAQ _and_ the raw TPs from upstream DAQ
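                # When software TPG is enabled, the TP links reuse the same
                # regions but are offset in element id by total_link_count,
                # and matching DataSelection GeoIDs are added for the TPs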
                links=[
                    mlt.GeoID(system=SYSTEM_TYPE,
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx)
                    for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] + ([
                    mlt.GeoID(system="DataSelection",
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx)
                    for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] if SOFTWARE_TPG_ENABLED else []) + ([
                    mlt.GeoID(system=SYSTEM_TYPE,
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx +
                              total_link_count) for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] if SOFTWARE_TPG_ENABLED else []), )),
        ("dfo",
         dfo.ConfParams(
             token_connection=PARTITION + ".triginh",
             dataflow_applications=[
                 dfo.app_config(
                     decision_connection=f"{PARTITION}.trigdec_{dfidx}",
                     capacity=TOKEN_COUNT) for dfidx in range(DF_COUNT)
             ])),
    ])

    # We start modules in "downstream-to-upstream" order, so that each
    # module is ready before its input starts sending data. The stop
    # order is the reverse (upstream-to-downstream), so each module
    # can process all of its input then stop, ensuring all data gets
    # processed
    start_order = ["buf.*", "dfo", "mlt", "ttcm", "ntoq_token"]

    if SOFTWARE_TPG_ENABLED:
        start_order += [
            "fragment_sender", "tcm", "tam_.*", "zip_.*",
            "tpset_subscriber_.*", "tpset_receiver", "request_receiver"
        ]

    stop_order = start_order[::-1]
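    # e.g. with SOFTWARE_TPG_ENABLED, stop_order begins with
    # "request_receiver" and ends with "buf.*": the exact reverse of
    # start_order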

    startpars = rccmd.StartParams(run=1)
    cmd_data['start'] = acmd([(m, startpars) for m in start_order])
    cmd_data['stop'] = acmd([(m, None) for m in stop_order])

    cmd_data['pause'] = acmd([("mlt", None)])

    resumepars = rccmd.ResumeParams(trigger_interval_ticks=50000000)
    cmd_data['resume'] = acmd([("mlt", resumepars)])

    cmd_data['scrap'] = acmd([("dfo", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Example #5
def generate(RUN_NUMBER: int,
             NW_SPECS: list,
             TIMING_CMD_NETWORK_ENDPOINTS: set,
             GATHER_INTERVAL=1e6,
             GATHER_INTERVAL_DEBUG=10e6,
             HSI_DEVICE_NAME="",
             CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
             UHAL_LOG_LEVEL="notice",
             PARTITION="UNKNOWN"):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = TIMING_CMD_NETWORK_ENDPOINTS
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}")

    # Define modules and queues
    queue_bare_specs = [app.QueueSpec(inst="ntoq_timing_cmds", kind='FollyMPMCQueue', capacity=100),]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    thi_init_data = thi.InitParams(
        qinfos=app.QueueInfos([
            app.QueueInfo(name="hardware_commands_in",
                          inst="ntoq_timing_cmds",
                          dir="input")
        ]),
        connections_file=CONNECTIONS_FILE,
        gather_interval=GATHER_INTERVAL,
        gather_interval_debug=GATHER_INTERVAL_DEBUG,
        monitored_device_name_master="",
        monitored_device_names_fanout=[],
        monitored_device_name_endpoint="",
        monitored_device_name_hsi=HSI_DEVICE_NAME,
        uhal_log_level=UHAL_LOG_LEVEL)

    mod_specs = [app.ModSpec(inst="thi", plugin="TimingHardwareManagerPDI", data=thi_init_data),]
    for cmd_nw_endpoint in TIMING_CMD_NETWORK_ENDPOINTS:
        nq_mod_name_suffix = cmd_nw_endpoint.split('.')[-1]
        mod_specs.append(
            mspec(f'ntoq_{nq_mod_name_suffix}', "NetworkToQueue",
                  [app.QueueInfo(name="output", inst="ntoq_timing_cmds", dir="output")]))

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs, nwconnections=NW_SPECS)

    conf_cmds = []
    for cmd_nw_endpoint in TIMING_CMD_NETWORK_ENDPOINTS:
        nq_mod_name_suffix = cmd_nw_endpoint.split('.')[-1]
        conf_cmds.append(
            (f'ntoq_{nq_mod_name_suffix}',
             ntoq.Conf(msg_type="dunedaq::timinglibs::timingcmd::TimingHwCmd",
                       msg_module_name="TimingHwCmdNQ",
                       receiver_config=nor.Conf(name=cmd_nw_endpoint))))
    cmd_data['conf'] = acmd(conf_cmds)
 
    startpars = rccmd.StartParams(run=RUN_NUMBER)

    cmd_data['start'] = acmd([("", startpars),])

    cmd_data['stop'] = acmd([("", None),])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Example #6
def generate(NW_SPECS,
             RU_CONFIG=[],
             EMULATOR_MODE=False,
             RUN_NUMBER=333,
             DATA_FILE="./frames.bin",
             CLOCK_SPEED_HZ=50000000,
             RUIDX=0,
             SYSTEM_TYPE='TPC',
             DQM_ENABLED=False,
             DQM_KAFKA_ADDRESS='',
             DQM_CMAP='HD',
             DQM_RAWDISPLAY_PARAMS=[60, 10, 50],
             DQM_MEANRMS_PARAMS=[10, 1, 100],
             DQM_FOURIER_PARAMS=[600, 60, 100],
             DQM_FOURIERSUM_PARAMS=[10, 1, 8192],
             PARTITION="UNKNOWN"):
    """Generate the json configuration for the dqm process"""

    cmd_data = {}

    required_eps = {f'{PARTITION}.timesync_{RUIDX}'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}")

    MIN_LINK = RU_CONFIG[RUIDX]["start_channel"]
    MAX_LINK = MIN_LINK + RU_CONFIG[RUIDX]["channel_count"]
    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
        app.QueueSpec(inst="trigger_decision_q_dqm", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(inst="trigger_record_q_dqm", kind='FollySPSCQueue', capacity=20)
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    mod_specs = [mspec("trb_dqm", "TriggerRecordBuilder", [
                    app.QueueInfo(name="trigger_decision_input_queue", inst="trigger_decision_q_dqm", dir="input"),
                    app.QueueInfo(name="trigger_record_output_queue", inst="trigger_record_q_dqm", dir="output"),
                    app.QueueInfo(name="data_fragment_input_queue", inst="data_fragments_q", dir="input")
                ]),
    ]
    mod_specs += [mspec("dqmprocessor", "DQMProcessor", [
                    app.QueueInfo(name="trigger_record_dqm_processor", inst="trigger_record_q_dqm", dir="input"),
                    app.QueueInfo(name="trigger_decision_dqm_processor", inst="trigger_decision_q_dqm", dir="output"),
                ]),
    ]

    mod_specs += [
        mspec(f"fragment_receiver_dqm", "FragmentReceiver",
              [app.QueueInfo(name="output", inst="data_fragments_q", dir="output")
               ])]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs, nwconnections=NW_SPECS)

    conf_list = [("fragment_receiver_dqm", frcv.ConfParams(
                    general_queue_timeout=QUEUE_POP_WAIT_MS,
                    connection_name=f"{PARTITION}.fragx_dqm_{RUIDX}"))
            ] + [
                ("trb_dqm", trb.ConfParams(
                        general_queue_timeout=QUEUE_POP_WAIT_MS,
                        reply_connection_name = f"{PARTITION}.fragx_dqm_{RUIDX}",
                        map=trb.mapgeoidconnections([
                                trb.geoidinst(region=RU_CONFIG[RUIDX]["region_id"], element=idx, system=SYSTEM_TYPE, connection_name=f"{PARTITION}.datareq_{RUIDX}") for idx in range(MIN_LINK, MAX_LINK)
                            ]),
                        ))
            ] + [
                ('dqmprocessor', dqmprocessor.Conf(
                        region=RU_CONFIG[RUIDX]["region_id"],
                        channel_map=DQM_CMAP, # 'HD' for horizontal drift or 'VD' for vertical drift
                        sdqm_hist=dqmprocessor.StandardDQM(**{'how_often' : DQM_RAWDISPLAY_PARAMS[0], 'unavailable_time' : DQM_RAWDISPLAY_PARAMS[1], 'num_frames' : DQM_RAWDISPLAY_PARAMS[2]}),
                        sdqm_mean_rms=dqmprocessor.StandardDQM(**{'how_often' : DQM_MEANRMS_PARAMS[0], 'unavailable_time' : DQM_MEANRMS_PARAMS[1], 'num_frames' : DQM_MEANRMS_PARAMS[2]}),
                        sdqm_fourier=dqmprocessor.StandardDQM(**{'how_often' : DQM_FOURIER_PARAMS[0], 'unavailable_time' : DQM_FOURIER_PARAMS[1], 'num_frames' : DQM_FOURIER_PARAMS[2]}),
                        sdqm_fourier_sum=dqmprocessor.StandardDQM(**{'how_often' : DQM_FOURIERSUM_PARAMS[0], 'unavailable_time' : DQM_FOURIERSUM_PARAMS[1], 'num_frames' : DQM_FOURIERSUM_PARAMS[2]}),
                        kafka_address=DQM_KAFKA_ADDRESS,
                        link_idx=list(range(MIN_LINK, MAX_LINK)),
                        clock_frequency=CLOCK_SPEED_HZ,
                        timesync_connection_name = f"{PARTITION}.timesync_{RUIDX}",
                        ))
            ]

    cmd_data['conf'] = acmd(conf_list)

    startpars = rccmd.StartParams(run=RUN_NUMBER)
    cmd_data['start'] = acmd([
            ("fragment_receiver_dqm", startpars),
            ("dqmprocessor", startpars),
            ("trb_dqm", startpars),
            ])

    cmd_data['stop'] = acmd([
            ("trb_dqm", None), 
            ("dqmprocessor", None),
            ("fragment_receiver_dqm", None),
            ])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Example #7
def generate(NW_SPECS,
             RU_CONFIG=[],
             HOSTIDX=0,
             RUN_NUMBER=333,
             OUTPUT_PATH=".",
             TOKEN_COUNT=0,
             SYSTEM_TYPE="TPC",
             SOFTWARE_TPG_ENABLED=False,
             TPSET_WRITING_ENABLED=False,
             PARTITION="UNKNOWN",
             OPERATIONAL_ENVIRONMENT="swtest",
             TPC_REGION_NAME_PREFIX="APA",
             MAX_FILE_SIZE=4 * 1024 * 1024 * 1024):
    """Generate the json configuration for the readout and DF process"""

    cmd_data = {}

    required_eps = {PARTITION + f'.trigdec_{HOSTIDX}', PARTITION + '.triginh'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(
            inst="trigger_decision_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="trigger_record_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
    ] + ([
        app.QueueSpec(
            inst="tpsets_from_netq", kind='FollyMPMCQueue', capacity=1000),
    ] if TPSET_WRITING_ENABLED else [])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("trigdec_receiver", "TriggerDecisionReceiver", [
            app.QueueInfo(
                name="output", inst="trigger_decision_q", dir="output")
        ]),
        mspec("fragment_receiver", "FragmentReceiver", [
            app.QueueInfo(name="output", inst="data_fragments_q", dir="output")
        ]),
        mspec("trb", "TriggerRecordBuilder", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_q",
                          dir="input"),
            app.QueueInfo(name="trigger_record_output_queue",
                          inst="trigger_record_q",
                          dir="output"),
            app.QueueInfo(name="data_fragment_input_queue",
                          inst="data_fragments_q",
                          dir="input")
        ]),
        mspec("datawriter", "DataWriter", [
            app.QueueInfo(name="trigger_record_input_queue",
                          inst="trigger_record_q",
                          dir="input")
        ]),
    ] + ([
        mspec(f"tpset_subscriber_{idx}", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst=f"tpsets_from_netq", dir="output")
        ]) for idx in range(len(RU_CONFIG))
    ] if TPSET_WRITING_ENABLED else []) + ([
        mspec("tpswriter", "TPSetWriter", [
            app.QueueInfo(
                name="tpset_source", inst="tpsets_from_netq", dir="input")
        ])
    ] if TPSET_WRITING_ENABLED else [])

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    total_link_count = 0
    for ru in range(len(RU_CONFIG)):
        total_link_count += RU_CONFIG[ru]["channel_count"]

    cmd_data['conf'] = acmd([
        ("trigdec_receiver",
         tdrcv.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS,
                          connection_name=f"{PARTITION}.trigdec_{HOSTIDX}")),
        ("trb",
         trb.ConfParams(
             general_queue_timeout=QUEUE_POP_WAIT_MS,
             reply_connection_name=f"{PARTITION}.frags_{HOSTIDX}",
             map=trb.mapgeoidconnections([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"],
                               system=SYSTEM_TYPE,
                               connection_name=f"{PARTITION}.datareq_{ru}")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] + ([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"] +
                               total_link_count,
                               system=SYSTEM_TYPE,
                               connection_name=f"{PARTITION}.datareq_{ru}")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] if SOFTWARE_TPG_ENABLED else []) + ([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"],
                               system="DataSelection",
                               connection_name=f"{PARTITION}.ds_tp_datareq_0")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] if SOFTWARE_TPG_ENABLED else [])))),
        ("datawriter",
         dw.ConfParams(
             decision_connection=f"{PARTITION}.trigdec_{HOSTIDX}",
             token_connection=PARTITION + ".triginh",
             data_store_parameters=hdf5ds.ConfParams(
                 name="data_store",
                 version=3,
                 operational_environment=OPERATIONAL_ENVIRONMENT,
                 directory_path=OUTPUT_PATH,
                 max_file_size_bytes=MAX_FILE_SIZE,
                 disable_unique_filename_suffix=False,
                 filename_parameters=hdf5ds.FileNameParams(
                     overall_prefix=OPERATIONAL_ENVIRONMENT,
                     digits_for_run_number=6,
                     file_index_prefix="",
                     digits_for_file_index=4,
                 ),
                 file_layout_parameters=hdf5ds.FileLayoutParams(
                     trigger_record_name_prefix="TriggerRecord",
                     digits_for_trigger_number=5,
                     path_param_list=hdf5ds.PathParamList([
                         hdf5ds.PathParams(
                             detector_group_type="TPC",
                             detector_group_name="TPC",
                             region_name_prefix=TPC_REGION_NAME_PREFIX,
                             element_name_prefix="Link"),
                         hdf5ds.PathParams(detector_group_type="PDS",
                                           detector_group_name="PDS"),
                         hdf5ds.PathParams(detector_group_type="NDLArTPC",
                                           detector_group_name="NDLArTPC"),
                         hdf5ds.PathParams(detector_group_type="Trigger",
                                           detector_group_name="Trigger"),
                         hdf5ds.PathParams(detector_group_type="TPC_TP",
                                           detector_group_name="TPC",
                                           region_name_prefix="TP_APA",
                                           element_name_prefix="Link")
                     ]))))),
    ] + [
        ("fragment_receiver",
         frcv.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS,
                         connection_name=f"{PARTITION}.frags_{HOSTIDX}")),
    ] + [(f"tpset_subscriber_{idx}",
          ntoq.Conf(msg_type="dunedaq::trigger::TPSet",
                    msg_module_name="TPSetNQ",
                    receiver_config=nor.Conf(name=f'{PARTITION}.tpsets_{idx}',
                                             subscriptions=["TPSets"])))
         for idx in range(len(RU_CONFIG))] + (
             [("tpswriter", tpsw.ConfParams(max_file_size_bytes=1000000000, )
               )] if TPSET_WRITING_ENABLED else []))

    startpars = rccmd.StartParams(run=RUN_NUMBER)
    cmd_data['start'] = acmd(
        ([("tpswriter", startpars), ("tpset_subscriber_.*", startpars)]
         if TPSET_WRITING_ENABLED else []) +
        [("datawriter", startpars), ("fragment_receiver", startpars),
         ("trb", startpars), ("trigdec_receiver", startpars)])

    cmd_data['stop'] = acmd([
        ("trigdec_receiver", None),
        ("trb", None),
        ("fragment_receiver", None),
        ("datawriter", None),
    ] + ([("tpset_subscriber_.*",
           None), ("tpswriter", None)] if TPSET_WRITING_ENABLED else []))

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("fragment_receiver", None),
                              ("trigdec_receiver", None),
                              ("qton_token", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Example #8
def make_app_command_data(system, app, verbose=False):
    """Given an App instance, create the 'command data' suitable for
    feeding to nanorc. The needed queues are inferred from the
    connections between modules, as are the start and stop orders of the
    modules.

    TODO: This should probably be split up into separate stages of
    inferring/creating the queues (which can be part of validation)
    and actually making the command data objects for nanorc.

    """

    if verbose:
        console.log(f"Making app command data for {app.name}")

    modules = app.modulegraph.modules

    module_deps = make_module_deps(modules)
    if verbose:
        console.log(f"inter-module dependencies are: {module_deps}")

    stop_order = list(nx.algorithms.dag.topological_sort(module_deps))
    start_order = stop_order[::-1]

    if verbose:
        console.log(f"Inferred module start order is {start_order}")
        console.log(f"Inferred module stop order is {stop_order}")

    command_data = {}

    queue_specs = []

    app_qinfos = defaultdict(list)

    # Infer the queues we need based on the connections between modules

    # Terminology: an "endpoint" is "module.name"
    for mod in modules:
        name = mod.name
        for from_name, downstream_connection in mod.connections.items():
            # The name might be prefixed with a "!" to indicate that it
            # doesn't participate in dependencies. Remove that here, because
            # "!" is illegal in actual queue names
            from_name = from_name.replace("!", "")
            from_endpoint = ".".join([name, from_name])
            to_endpoint = downstream_connection.to
            if verbose:
                console.log(
                    f"Making connection from {from_endpoint} to {to_endpoint}")
            if to_endpoint is None:
                continue
            to_mod, to_name = to_endpoint.split(".")
            queue_inst = f"{from_endpoint}_to_{to_endpoint}".replace(".", "")
            # Is there already a queue connecting either endpoint? If so, we reuse it

            # TODO: This is a bit complicated. Might be nicer to find
            # the list of necessary queues in a first step, and then
            # actually make the QueueSpec/QueueInfo objects
            found_from = False
            found_to = False
            for k, v in app_qinfos.items():
                for qi in v:
                    test_endpoint = ".".join([k, qi.name])
                    if test_endpoint == from_endpoint:
                        found_from = True
                        queue_inst = qi.inst
                    if test_endpoint == to_endpoint:
                        found_to = True
                        queue_inst = qi.inst

            if not (found_from or found_to):
                queue_inst = queue_inst if downstream_connection.queue_name is None else downstream_connection.queue_name
                if verbose:
                    console.log(
                        f"downstream_connection is {downstream_connection}, its queue_name is {downstream_connection.queue_name}"
                    )
                    console.log(
                        f"Creating {downstream_connection.queue_kind}({downstream_connection.queue_capacity}) queue with name {queue_inst} connecting {from_endpoint} to {to_endpoint}"
                    )
                queue_specs.append(
                    appfwk.QueueSpec(
                        inst=queue_inst,
                        kind=downstream_connection.queue_kind,
                        capacity=downstream_connection.queue_capacity))

            if not found_from:
                if verbose:
                    console.log(
                        f"Adding output queue to module {name}: inst={queue_inst}, name={from_name}"
                    )
                app_qinfos[name].append(
                    appfwk.QueueInfo(name=from_name,
                                     inst=queue_inst,
                                     dir="output"))
            if not found_to:
                if verbose:
                    console.log(
                        f"Adding input queue to module {to_mod}: inst={queue_inst}, name={to_name}"
                    )
                app_qinfos[to_mod].append(
                    appfwk.QueueInfo(name=to_name,
                                     inst=queue_inst,
                                     dir="input"))

    if verbose:
        console.log(
            f"Creating mod_specs for {[ (mod.name, mod.plugin) for mod in modules ]}"
        )
    mod_specs = [
        mspec(mod.name, mod.plugin, app_qinfos[mod.name]) for mod in modules
    ]

    # Fill in the "standard" command entries in the command_data structure

    command_data['init'] = appfwk.Init(queues=queue_specs,
                                       modules=mod_specs,
                                       nwconnections=system.network_endpoints)

    # TODO: Conf ordering
    command_data['conf'] = acmd([(mod.name, mod.conf) for mod in modules])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    resumepars = rccmd.ResumeParams()

    add_one_command_data(command_data, "start", startpars, app, start_order)
    add_one_command_data(command_data, "stop", None, app, stop_order)
    add_one_command_data(command_data, "scrap", None, app, stop_order)
    add_one_command_data(command_data, "resume", resumepars, app, start_order)
    add_one_command_data(command_data, "pause", None, app, stop_order)

    # TODO: handle modules' `extra_commands`, including "record"

    return command_data
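
# A hypothetical usage sketch (the `apps` attribute on `system` is an
# assumption; `network_endpoints` is what the function body above actually
# reads from `system`):
#
#     for app_name, the_app in system.apps.items():
#         command_data = make_app_command_data(system, the_app, verbose=True)
#         # -> {'init': ..., 'conf': ..., 'start': ..., 'stop': ...,
#         #     'scrap': ..., 'resume': ..., 'pause': ...}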
Example #9
def generate(NW_SPECS: list,
             RUN_NUMBER=333,
             CLOCK_SPEED_HZ: int = 50000000,
             TRIGGER_RATE_HZ: int = 1,
             CONTROL_HSI_HARDWARE=False,
             READOUT_PERIOD_US: int = 1000,
             HSI_ENDPOINT_ADDRESS=1,
             HSI_ENDPOINT_PARTITION=0,
             HSI_RE_MASK=0x20000,
             HSI_FE_MASK=0,
             HSI_INV_MASK=0,
             HSI_SOURCE=1,
             CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
             HSI_DEVICE_NAME="BOREAS_TLU",
             UHAL_LOG_LEVEL="notice",
             PARTITION="UNKNOWN"):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = {PARTITION + '.hsievent'}
    if CONTROL_HSI_HARDWARE:
        required_eps.add(PARTITION + '.hsicmds')

    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = []

    if CONTROL_HSI_HARDWARE:
        queue_bare_specs.extend([
            app.QueueSpec(inst="hw_cmds_q_to_net",
                          kind='FollySPSCQueue',
                          capacity=100)
        ])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("hsir", "HSIReadout", []),
    ]

    if CONTROL_HSI_HARDWARE:
        hsi_controller_init_data = hsic.InitParams(
            qinfos=app.QueueInfos([
                app.QueueInfo(name="hardware_commands_out",
                              inst="hw_cmds_q_to_net",
                              dir="output")
            ]),
            device=HSI_DEVICE_NAME,
        )
        mod_specs.extend([
            mspec("qton_hw_cmds", "QueueToNetwork", [
                app.QueueInfo(
                    name="input", inst="hw_cmds_q_to_net", dir="input")
            ]),
            app.ModSpec(inst="hsic",
                        plugin="HSIController",
                        data=hsi_controller_init_data)
        ])

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    conf_cmds = [
        ("hsir",
         hsi.ConfParams(
             connections_file=CONNECTIONS_FILE,
             readout_period=READOUT_PERIOD_US,
             hsi_device_name=HSI_DEVICE_NAME,
             uhal_log_level=UHAL_LOG_LEVEL,
             hsievent_connection_name=f"{PARTITION}.hsievent",
         )),
    ]

    trigger_interval_ticks = 0
    if TRIGGER_RATE_HZ > 0:
        trigger_interval_ticks = math.floor(
            (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ)
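        # e.g. TRIGGER_RATE_HZ=1 at the nominal CLOCK_SPEED_HZ of 50000000
        # gives trigger_interval_ticks = 50000000, i.e. one trigger per second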
    elif CONTROL_HSI_HARDWARE:
        console.log(
            'WARNING! Emulated trigger rate of 0 will not disable signal emulation in real HSI hardware! To disable emulated HSI triggers, use option: "--hsi-source 0" or mask all signal bits',
            style="bold red")

    if CONTROL_HSI_HARDWARE:
        conf_cmds.extend([
            ("qton_hw_cmds",
             qton.Conf(msg_type="dunedaq::timinglibs::timingcmd::TimingHwCmd",
                       msg_module_name="TimingHwCmdNQ",
                       sender_config=nos.Conf(name=PARTITION + ".hsicmds",
                                              stype="msgpack"))),
            ("hsic",
             hsic.ConfParams(
                 clock_frequency=CLOCK_SPEED_HZ,
                 trigger_interval_ticks=trigger_interval_ticks,
                 address=HSI_ENDPOINT_ADDRESS,
                 partition=HSI_ENDPOINT_PARTITION,
                 rising_edge_mask=HSI_RE_MASK,
                 falling_edge_mask=HSI_FE_MASK,
                 invert_edge_mask=HSI_INV_MASK,
                 data_source=HSI_SOURCE,
             )),
        ])
    cmd_data['conf'] = acmd(conf_cmds)

    startpars = rccmd.StartParams(
        run=RUN_NUMBER, trigger_interval_ticks=trigger_interval_ticks)
    resumepars = rccmd.ResumeParams(
        trigger_interval_ticks=trigger_interval_ticks)

    cmd_data['start'] = acmd([("hsi.*", startpars), ("qton_.*", startpars)])

    cmd_data['stop'] = acmd([("hsi.*", None), ("qton.*", None)])

    cmd_data['pause'] = acmd([("", None)])

    if CONTROL_HSI_HARDWARE:
        cmd_data['resume'] = acmd([("hsic", resumepars)])
    else:
        cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Example #10
def generate(NW_SPECS: list,
             RUN_NUMBER=333,
             CLOCK_SPEED_HZ: int = 50000000,
             DATA_RATE_SLOWDOWN_FACTOR: int = 1,
             TRIGGER_RATE_HZ: int = 1,
             HSI_DEVICE_ID: int = 0,
             MEAN_SIGNAL_MULTIPLICITY: int = 0,
             SIGNAL_EMULATION_MODE: int = 0,
             ENABLED_SIGNALS: int = 0b00000001,
             PARTITION="UNKNOWN"):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = {PARTITION + '.hsievent'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = []

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("fhsig", "FakeHSIEventGenerator", []),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    trigger_interval_ticks = 0
    if TRIGGER_RATE_HZ > 0:
        trigger_interval_ticks = math.floor(
            (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR)
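        # e.g. TRIGGER_RATE_HZ=1, CLOCK_SPEED_HZ=50000000 and
        # DATA_RATE_SLOWDOWN_FACTOR=10 give 5000000 ticks between triggers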

    cmd_data['conf'] = acmd([
        ("fhsig",
         fhsig.Conf(
             clock_frequency=CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR,
             trigger_interval_ticks=trigger_interval_ticks,
             mean_signal_multiplicity=MEAN_SIGNAL_MULTIPLICITY,
             signal_emulation_mode=SIGNAL_EMULATION_MODE,
             enabled_signals=ENABLED_SIGNALS,
             timesync_topic="Timesync",
             hsievent_connection_name=PARTITION + ".hsievent",
         )),
    ])

    startpars = rccmd.StartParams(
        run=RUN_NUMBER, trigger_interval_ticks=trigger_interval_ticks)
    resumepars = rccmd.ResumeParams(
        trigger_interval_ticks=trigger_interval_ticks)

    cmd_data['start'] = acmd([
        ("fhsig", startpars),
    ])

    cmd_data['stop'] = acmd([
        ("fhsig", None),
    ])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("fhsig", resumepars)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Example #11
def generate(NW_SPECS,
             RU_CONFIG=[],
             EMULATOR_MODE=False,
             DATA_RATE_SLOWDOWN_FACTOR=1,
             RUN_NUMBER=333,
             DATA_FILE="./frames.bin",
             FLX_INPUT=False,
             SSP_INPUT=True,
             CLOCK_SPEED_HZ=50000000,
             RUIDX=0,
             RAW_RECORDING_ENABLED=False,
             RAW_RECORDING_OUTPUT_DIR=".",
             FRONTEND_TYPE='wib',
             SYSTEM_TYPE='TPC',
             SOFTWARE_TPG_ENABLED=False,
             USE_FAKE_DATA_PRODUCERS=False,
             PARTITION="UNKNOWN",
             LATENCY_BUFFER_SIZE=499968):
    """Generate the json configuration for the readout and DF process"""

    cmd_data = {}

    required_eps = {f'{PARTITION}.timesync_{RUIDX}'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    RATE_KHZ = CLOCK_SPEED_HZ / (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR * 1000)

    MIN_LINK = RU_CONFIG[RUIDX]["start_channel"]
    MAX_LINK = MIN_LINK + RU_CONFIG[RUIDX]["channel_count"]
    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=100)
        for idx in range(MIN_LINK, MAX_LINK)
    ] + [
        app.QueueSpec(inst="fragment_q", kind="FollyMPMCQueue", capacity=100)
    ]

    if not USE_FAKE_DATA_PRODUCERS:
        queue_bare_specs += [
            app.QueueSpec(inst=f"{FRONTEND_TYPE}_link_{idx}",
                          kind='FollySPSCQueue',
                          capacity=100000)
            for idx in range(MIN_LINK, MAX_LINK)
        ]
    if SOFTWARE_TPG_ENABLED:
        queue_bare_specs += [
            app.QueueSpec(inst=f"sw_tp_link_{idx}",
                          kind='FollySPSCQueue',
                          capacity=100000)
            for idx in range(MIN_LINK, MAX_LINK)
        ] + [
            app.QueueSpec(
                inst=f"tpset_queue", kind='FollyMPMCQueue', capacity=10000)
        ] + [
            app.QueueSpec(
                inst=f"tp_requests_{idx}", kind='FollySPSCQueue', capacity=100)
            for idx in range(MIN_LINK, MAX_LINK)
        ]

    if FRONTEND_TYPE == 'wib':
        queue_bare_specs += [
            app.QueueSpec(inst="errored_frames_q",
                          kind="FollyMPMCQueue",
                          capacity=10000)
        ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec(f"fragment_sender", "FragmentSender", [
            app.QueueInfo(name="input_queue", inst="fragment_q", dir="input")
        ])
    ]

    if SOFTWARE_TPG_ENABLED:
        mod_specs += [
            mspec(f"request_receiver", "RequestReceiver", [
                app.QueueInfo(
                    name="output", inst=f"data_requests_{idx}", dir="output")
                for idx in range(MIN_LINK, MAX_LINK)
            ] + [
                app.QueueInfo(
                    name="output", inst=f"tp_requests_{idx}", dir="output")
                for idx in range(MIN_LINK, MAX_LINK)
            ])
        ] + [
            mspec(f"tp_datahandler_{idx}", "DataLinkHandler", [
                app.QueueInfo(
                    name="raw_input", inst=f"sw_tp_link_{idx}", dir="input"),
                app.QueueInfo(name="data_requests_0",
                              inst=f"tp_requests_{idx}",
                              dir="input"),
                app.QueueInfo(
                    name="fragment_queue", inst="fragment_q", dir="output")
            ]) for idx in range(MIN_LINK, MAX_LINK)
        ] + [
            mspec(f"tpset_publisher", "QueueToNetwork", [
                app.QueueInfo(name="input", inst=f"tpset_queue", dir="input")
            ])
        ]
    else:
        mod_specs += [
            mspec(f"request_receiver", "RequestReceiver", [
                app.QueueInfo(
                    name="output", inst=f"data_requests_{idx}", dir="output")
                for idx in range(MIN_LINK, MAX_LINK)
            ])
        ]

    if FRONTEND_TYPE == 'wib':
        mod_specs += [
            mspec("errored_frame_consumer", "ErroredFrameConsumer", [
                app.QueueInfo(
                    name="input_queue", inst="errored_frames_q", dir="input")
            ])
        ]

    # Two flags (USE_FAKE_DATA_PRODUCERS and SOFTWARE_TPG_ENABLED) are
    # checked per link, so a for loop over the links is the closest match to
    # the per-link blocks being built here

    for idx in range(MIN_LINK, MAX_LINK):
        if USE_FAKE_DATA_PRODUCERS:
            mod_specs = mod_specs + [
                mspec(f"fakedataprod_{idx}", "FakeDataProd", [
                    app.QueueInfo(name="data_request_input_queue",
                                  inst=f"data_requests_{idx}",
                                  dir="input"),
                ])
            ]
        else:
            ls = [
                app.QueueInfo(name="raw_input",
                              inst=f"{FRONTEND_TYPE}_link_{idx}",
                              dir="input"),
                app.QueueInfo(name="data_requests_0",
                              inst=f"data_requests_{idx}",
                              dir="input"),
                app.QueueInfo(name="fragment_queue",
                              inst="fragment_q",
                              dir="output")
            ]
            if SOFTWARE_TPG_ENABLED:
                ls.extend([
                    app.QueueInfo(name="tp_out",
                                  inst=f"sw_tp_link_{idx}",
                                  dir="output"),
                    app.QueueInfo(name="tpset_out",
                                  inst=f"tpset_queue",
                                  dir="output")
                ])

            if FRONTEND_TYPE == 'wib':
                ls.extend([
                    app.QueueInfo(name="errored_frames",
                                  inst="errored_frames_q",
                                  dir="output")
                ])

            mod_specs += [mspec(f"datahandler_{idx}", "DataLinkHandler", ls)]

    if not USE_FAKE_DATA_PRODUCERS:
        if FLX_INPUT:
            mod_specs.append(
                mspec("flxcard_0", "FelixCardReader", [
                    app.QueueInfo(name=f"output_{idx}",
                                  inst=f"{FRONTEND_TYPE}_link_{idx}",
                                  dir="output")
                    for idx in range(
                        MIN_LINK, MIN_LINK +
                        min(5, RU_CONFIG[RUIDX]["channel_count"]))
                ]))
            if RU_CONFIG[RUIDX]["channel_count"] > 5:
                mod_specs.append(
                    mspec("flxcard_1", "FelixCardReader", [
                        app.QueueInfo(name=f"output_{idx}",
                                      inst=f"{FRONTEND_TYPE}_link_{idx}",
                                      dir="output")
                        for idx in range(MIN_LINK + 5, MAX_LINK)
                    ]))
        elif SSP_INPUT:
            mod_specs.append(
                mspec("ssp_0", "SSPCardReader", [
                    app.QueueInfo(name=f"output_{idx}",
                                  inst=f"{FRONTEND_TYPE}_link_{idx}",
                                  dir="output")
                    for idx in range(MIN_LINK, MAX_LINK)
                ]))

        else:
            fake_source = "fake_source"
            card_reader = "FakeCardReader"
            if FRONTEND_TYPE == 'pacman':
                fake_source = "pacman_source"
                card_reader = "PacmanCardReader"

            mod_specs.append(
                mspec(fake_source, card_reader, [
                    app.QueueInfo(name=f"output_{idx}",
                                  inst=f"{FRONTEND_TYPE}_link_{idx}",
                                  dir="output")
                    for idx in range(MIN_LINK, MAX_LINK)
                ]))

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    total_link_count = 0
    for ru in range(len(RU_CONFIG)):
        total_link_count += RU_CONFIG[ru]["channel_count"]

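    # Confs are listed below for every possible source module (fake, pacman,
    # felix, ssp); presumably only the entries whose names match a module
    # instantiated above take effect when this addressed command is applied.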
    conf_list = [
        (
            "fake_source",
            sec.Conf(
                link_confs=[
                    sec.LinkConfiguration(
                        geoid=sec.GeoID(system=SYSTEM_TYPE,
                                        region=RU_CONFIG[RUIDX]["region_id"],
                                        element=idx),
                        slowdown=DATA_RATE_SLOWDOWN_FACTOR,
                        queue_name=f"output_{idx}",
                        data_filename=DATA_FILE,
                        emu_frame_error_rate=0,
                    ) for idx in range(MIN_LINK, MAX_LINK)
                ],
                # input_limit=10485100, # default
                queue_timeout_ms=QUEUE_POP_WAIT_MS)),
        ("pacman_source",
         pcr.Conf(link_confs=[
             pcr.LinkConfiguration(geoid=pcr.GeoID(
                 system=SYSTEM_TYPE,
                 region=RU_CONFIG[RUIDX]["region_id"],
                 element=idx), ) for idx in range(MIN_LINK, MAX_LINK)
         ],
                  zmq_receiver_timeout=10000)),
        ("flxcard_0",
         flxcr.Conf(card_id=RU_CONFIG[RUIDX]["card_id"],
                    logical_unit=0,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=min(5, RU_CONFIG[RUIDX]["channel_count"]))),
        ("flxcard_1",
         flxcr.Conf(card_id=RU_CONFIG[RUIDX]["card_id"],
                    logical_unit=1,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=max(0, RU_CONFIG[RUIDX]["channel_count"] - 5))),
        ("ssp_0",
         flxcr.Conf(card_id=RU_CONFIG[RUIDX]["card_id"],
                    logical_unit=0,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=RU_CONFIG[RUIDX]["channel_count"])),
    ] + [
        ("request_receiver",
         rrcv.ConfParams(map=[
             rrcv.geoidinst(region=RU_CONFIG[RUIDX]["region_id"],
                            element=idx,
                            system=SYSTEM_TYPE,
                            queueinstance=f"data_requests_{idx}")
             for idx in range(MIN_LINK, MAX_LINK)
         ] + [
             rrcv.geoidinst(region=RU_CONFIG[RUIDX]["region_id"],
                            element=idx + total_link_count,
                            system=SYSTEM_TYPE,
                            queueinstance=f"tp_requests_{idx}")
             for idx in range(MIN_LINK, MAX_LINK) if SOFTWARE_TPG_ENABLED
         ],
                         general_queue_timeout=QUEUE_POP_WAIT_MS,
                         connection_name=f"{PARTITION}.datareq_{RUIDX}"))
    ] + [
        (
            f"datahandler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    # fake_trigger_flag=0, # default
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=idx,
                    timesync_connection_name=f"{PARTITION}.timesync_{RUIDX}",
                    timesync_topic_name="Timesync",
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_alignment_size=4096,
                    latency_buffer_size=LATENCY_BUFFER_SIZE,
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=idx,
                    enable_software_tpg=SOFTWARE_TPG_ENABLED,
                    emulator_mode=EMULATOR_MODE,
                    error_counter_threshold=100,
                    error_reset_freq=10000),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=LATENCY_BUFFER_SIZE,
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=idx,
                    output_file=path.join(RAW_RECORDING_OUTPUT_DIR,
                                          f"output_{RUIDX}_{idx}.out"),
                    stream_buffer_size=8388608,
                    enable_raw_recording=RAW_RECORDING_ENABLED,
                ))) for idx in range(MIN_LINK, MAX_LINK)
    ] + [
        (
            f"tp_datahandler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    # fake_trigger_flag=0, # default
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=total_link_count + idx,
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_size=LATENCY_BUFFER_SIZE,
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=total_link_count + idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=total_link_count + idx,
                    enable_software_tpg=False,
                ),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=LATENCY_BUFFER_SIZE,
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=total_link_count + idx,
                    # output_file = f"output_{idx + MIN_LINK}.out",
                    stream_buffer_size=100
                    if FRONTEND_TYPE == 'pacman' else 8388608,
                    enable_raw_recording=False,
                ))) for idx in range(MIN_LINK, MAX_LINK)
    ]

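    # With software TPG enabled, publish the merged TPSet stream to the
    # network via QueueToNetwork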
    if SOFTWARE_TPG_ENABLED:
        conf_list.extend([("tpset_publisher",
                           qton.Conf(msg_type="dunedaq::trigger::TPSet",
                                     msg_module_name="TPSetNQ",
                                     sender_config=nos.Conf(
                                         name=f"{PARTITION}.tpsets_{RUIDX}",
                                         topic="TPSets",
                                         stype="msgpack")))])

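    # Fake data producers answer data requests directly with generated
    # fragments, bypassing the real readout chain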
    if USE_FAKE_DATA_PRODUCERS:
        conf_list.extend([
            (f"fakedataprod_{idx}",
             fdp.ConfParams(
                 system_type=SYSTEM_TYPE,
                 apa_number=RU_CONFIG[RUIDX]["region_id"],
                 link_number=idx,
                 time_tick_diff=25,
                 frame_size=464,
                 response_delay=0,
                 timesync_connection_name=f"{PARTITION}.timesync_{RUIDX}",
                 timesync_topic_name="Timesync",
                 fragment_type="FakeData"))
            for idx in range(MIN_LINK, MAX_LINK)
        ])

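    # fragment_sender takes no configuration but must still receive the conf
    # command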
    conf_list.extend([("fragment_sender", None)])

    cmd_data['conf'] = acmd(conf_list)

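    # Module names in the start/stop commands are regex patterns, so a single
    # entry such as "datahandler_.*" addresses every per-link instance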
    startpars = rccmd.StartParams(run=RUN_NUMBER)
    cmd_data['start'] = acmd([("datahandler_.*", startpars),
                              ("fake_source", startpars),
                              ("pacman_source", startpars),
                              ("flxcard.*", startpars),
                              ("request_receiver", startpars),
                              ("ssp.*", startpars),
                              ("ntoq_trigdec", startpars),
                              (f"tp_datahandler_.*", startpars),
                              (f"tpset_publisher", startpars),
                              ("fakedataprod_.*", startpars),
                              ("fragment_sender", startpars),
                              ("errored_frame_consumer", startpars)])

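    # Stop the upstream modules (request receiver, card readers, sources)
    # before the downstream handlers, roughly reversing the start order so
    # in-flight data can drain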
    cmd_data['stop'] = acmd([("request_receiver", None), ("flxcard.*", None),
                             ("ssp.*", None), ("fake_source", None),
                             ("pacman_source", None), ("datahandler_.*", None),
                             (f"tp_datahandler_.*", None),
                             (f"tpset_publisher", None),
                             ("fakedataprod_.*", None),
                             ("fragment_sender", None),
                             ("errored_frame_consumer", None)])

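    # The remaining commands carry no per-module parameters; an empty match
    # string addresses every module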
    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
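

# A minimal, hypothetical serialization sketch (added for illustration; it is
# not part of the original example). It assumes the command objects built by
# acmd()/app.Init() are moo-generated schema types exposing a .pod() method
# that yields plain, JSON-serializable data -- treat that as an assumption.
def write_command_files(cmd_data, prefix="readout"):
    import json
    for cmd_name, cmd_obj in cmd_data.items():
        # .pod() (assumed) converts the schema object to nested dicts/lists
        with open(f"{prefix}_{cmd_name}.json", "w") as f:
            json.dump(cmd_obj.pod(), f, indent=4)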