Beispiel #1
0
def generate(NETWORK_ENDPOINTS: dict):
    """Build the run-control command data for a TPSet network sink app.

    NETWORK_ENDPOINTS must provide a "tpset" entry giving the address the
    subscriber connects to. Returns a dict keyed by command name
    (init/conf/start/pause/resume/stop/scrap).
    """
    cmd_data = {}

    # A single SPSC queue carries TPSets from the network receiver to the sink.
    queue_specs = [
        app.QueueSpec(inst="tpset_q", kind='FollySPSCQueue', capacity=10000),
    ]

    mod_specs = [
        mspec("tps_sink", "TPSetSink",
              [app.QueueInfo(name="tpset_source", inst="tpset_q", dir="input")]),
        mspec("ntoq", "NetworkToQueue",
              [app.QueueInfo(name="output", inst="tpset_q", dir="output")]),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    # NOTE(review): an empty subscription list would mean "subscribe to
    # everything"; here a literal "foo" topic is subscribed -- confirm intent.
    receiver_conf = nor.Conf(ipm_plugin_type="ZmqSubscriber",
                             address=NETWORK_ENDPOINTS["tpset"],
                             subscriptions=["foo"])
    cmd_data['conf'] = acmd([
        ("ntoq",
         ntoq.Conf(msg_type="dunedaq::trigger::TPSet",
                   msg_module_name="TPSetNQ",
                   receiver_config=receiver_conf)),
    ])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("ntoq", startpars),
        ("tps_sink", startpars),
    ])

    cmd_data['pause'] = acmd([])
    cmd_data['resume'] = acmd([])

    # Stop in the reverse order of start: sink first, then the receiver.
    cmd_data['stop'] = acmd([
        ("tps_sink", None),
        ("ntoq", None),
    ])

    cmd_data['scrap'] = acmd([])

    return cmd_data
Beispiel #2
0
    def __init__(self,
                 RUN_NUMBER: int,
                 NW_SPECS: list,
                 TIMING_CMD_NETWORK_ENDPOINTS: set,
                 GATHER_INTERVAL=1e6,
                 GATHER_INTERVAL_DEBUG=10e6,
                 HSI_DEVICE_NAME="",
                 CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
                 UHAL_LOG_LEVEL="notice",
                 PARTITION="UNKNOWN"):
        """Build the timing-hardware-manager app module graph.

        Creates the queue/module specs for one TimingHardwareManagerPDI
        instance plus a NetworkToQueue module per command endpoint, stores
        the 'init' command data, and hands the module graph to the parent
        constructor.

        Raises:
            RuntimeError: if any name in TIMING_CMD_NETWORK_ENDPOINTS is
                missing from NW_SPECS.
        """
        cmd_data = {}
        ## TODO: Everything?
        required_eps = TIMING_CMD_NETWORK_ENDPOINTS
        if not required_eps.issubset([nw.name for nw in NW_SPECS]):
            raise RuntimeError(f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}")

        # Define modules and queues. queue_specs was previously commented out,
        # but app.Init below requires it (referencing it raised NameError).
        queue_bare_specs = [app.QueueSpec(inst="ntoq_timing_cmds", kind='FollyMPMCQueue', capacity=100),]

        # Only needed to reproduce the same order as when using jsonnet
        queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

        thi_init_data = thi.InitParams(qinfos=app.QueueInfos([app.QueueInfo(name="hardware_commands_in", inst="ntoq_timing_cmds", dir="input")]),
                                       connections_file=CONNECTIONS_FILE,
                                       gather_interval=GATHER_INTERVAL,
                                       gather_interval_debug=GATHER_INTERVAL_DEBUG,
                                       monitored_device_name_master="",
                                       monitored_device_names_fanout=[],
                                       monitored_device_name_endpoint="",
                                       monitored_device_name_hsi=HSI_DEVICE_NAME,
                                       uhal_log_level=UHAL_LOG_LEVEL)

        modules = {}
        modules["thi"] = Module("TimingHardwareManagerPDI")
        # Initialise mod_specs before extending it in the loop below (the
        # initializing line was commented out, leaving mod_specs undefined
        # and thi_init_data unused -> NameError at runtime).
        mod_specs = [app.ModSpec(inst="thi", plugin="TimingHardwareManagerPDI", data=thi_init_data),]
        for cmd_nw_endpoint in TIMING_CMD_NETWORK_ENDPOINTS:
            mod_specs.extend([mspec(f'ntoq_{cmd_nw_endpoint}', "NetworkToQueue", [app.QueueInfo(name="output", inst="ntoq_timing_cmds", dir="output")]),])

        cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs, nwconnections=NW_SPECS)

        conf_cmds = []
        for cmd_nw_endpoint in TIMING_CMD_NETWORK_ENDPOINTS:
            conf_cmds.extend([(f'ntoq_{cmd_nw_endpoint}', ntoq.Conf(msg_type="dunedaq::timinglibs::timingcmd::TimingHwCmd",
                                                   msg_module_name="TimingHwCmdNQ",
                                                   receiver_config=nor.Conf(name=cmd_nw_endpoint))),])
        # NOTE(review): conf_cmds is built but never attached to anything;
        # the standalone generate() variant does cmd_data['conf'] =
        # acmd(conf_cmds) -- confirm whether that is intended here.
        mgraph = ModuleGraph(modules)
        # NOTE(review): HOST is not defined in this scope -- presumably a
        # module-level constant; verify before use.
        super().__init__(modulegraph=mgraph, host=HOST)
Beispiel #3
0
def generate(NW_SPECS: list,
             SOFTWARE_TPG_ENABLED: bool = False,
             RU_CONFIG: list = [],
             ACTIVITY_PLUGIN: str = 'TriggerActivityMakerPrescalePlugin',
             ACTIVITY_CONFIG: dict = dict(prescale=10000),
             CANDIDATE_PLUGIN: str = 'TriggerCandidateMakerPrescalePlugin',
             CANDIDATE_CONFIG: dict = dict(prescale=10),
             TOKEN_COUNT: int = 10,
             DF_COUNT: int = 1,
             SYSTEM_TYPE='wib',
             TTCM_S1: int = 1,
             TTCM_S2: int = 2,
             TRIGGER_WINDOW_BEFORE_TICKS: int = 1000,
             TRIGGER_WINDOW_AFTER_TICKS: int = 1000,
             PARTITION="UNKNOWN"):
    """Build the run-control command data for the trigger application.

    Assembles queue and module specs (with an optional software-TPG chain:
    subscribers, zippers, activity/candidate makers and TPSet buffers per
    readout unit), then fills the init/conf/start/stop/pause/resume/
    scrap/record command payloads.

    Raises:
        RuntimeError: if the required '<PARTITION>.hsievent' endpoint is
            missing from NW_SPECS.
    """
    cmd_data = {}

    # The HSI event endpoint for this partition must exist among the
    # network specs, since ttcm's conf below references it by name.
    required_eps = {PARTITION + '.hsievent'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst='trigger_candidate_q',
                      kind='FollyMPMCQueue',
                      capacity=1000),
        app.QueueSpec(inst='trigger_decision_q',
                      kind='FollySPSCQueue',
                      capacity=2)
    ]

    if SOFTWARE_TPG_ENABLED:
        # Shared fragment/TASet queues, plus per-RU and per-channel queues.
        queue_bare_specs.extend([
            app.QueueSpec(inst=f"fragment_q",
                          kind='FollyMPMCQueue',
                          capacity=1000),
            app.QueueSpec(inst=f'taset_q',
                          kind='FollyMPMCQueue',
                          capacity=1000),
        ])
        for ru in range(len(RU_CONFIG)):
            queue_bare_specs.extend([
                app.QueueSpec(inst=f"tpsets_from_netq_{ru}",
                              kind='FollySPSCQueue',
                              capacity=1000),
                app.QueueSpec(inst=f'zipped_tpset_q_{ru}',
                              kind='FollySPSCQueue',
                              capacity=1000),
            ])
            for idx in range(RU_CONFIG[ru]["channel_count"]):
                queue_bare_specs.extend([
                    app.QueueSpec(inst=f"tpset_q_for_buf{ru}_{idx}",
                                  kind='FollySPSCQueue',
                                  capacity=1000),
                    app.QueueSpec(inst=f"data_request_q{ru}_{idx}",
                                  kind='FollySPSCQueue',
                                  capacity=1000),
                ])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = []

    if SOFTWARE_TPG_ENABLED:
        mod_specs.extend([
            mspec(f"request_receiver", "RequestReceiver", [
                app.QueueInfo(name="output",
                              inst=f"data_request_q{ru}_{idy}",
                              dir="output") for ru in range(len(RU_CONFIG))
                for idy in range(RU_CONFIG[ru]["channel_count"])
            ])
        ] + [
            mspec(f"tpset_receiver", "TPSetReceiver", [
                app.QueueInfo(name="output",
                              inst=f"tpset_q_for_buf{ru}_{idy}",
                              dir="output") for ru in range(len(RU_CONFIG))
                for idy in range(RU_CONFIG[ru]["channel_count"])
            ])
        ] + [
            mspec(f"fragment_sender", "FragmentSender", [
                app.QueueInfo(
                    name="input_queue", inst=f"fragment_q", dir="input")
            ]),
            mspec(
                f'tcm',
                'TriggerCandidateMaker',
                [  # TASet -> TC
                    app.QueueInfo(name='input', inst=f'taset_q', dir='input'),
                    app.QueueInfo(name='output',
                                  inst=f'trigger_candidate_q',
                                  dir='output'),
                ])
        ])
        # Per-RU chain: network subscriber -> zipper -> activity maker.
        for ru in range(len(RU_CONFIG)):
            mod_specs.extend([
                mspec(f"tpset_subscriber_{ru}", "NetworkToQueue", [
                    app.QueueInfo(name="output",
                                  inst=f"tpsets_from_netq_{ru}",
                                  dir="output")
                ]),
                mspec(
                    f"zip_{ru}",
                    "TPZipper",
                    [
                        app.QueueInfo(name="input",
                                      inst=f"tpsets_from_netq_{ru}",
                                      dir="input"),
                        app.QueueInfo(
                            name="output",
                            inst=f"zipped_tpset_q_{ru}",
                            dir="output"
                        ),  #FIXME need to fanout this zipped_tpset_q if using multiple algorithms
                    ]),

                ### Algorithm(s)
                mspec(
                    f'tam_{ru}',
                    'TriggerActivityMaker',
                    [  # TPSet -> TASet
                        app.QueueInfo(name='input',
                                      inst=f'zipped_tpset_q_{ru}',
                                      dir='input'),
                        app.QueueInfo(
                            name='output', inst=f'taset_q', dir='output'),
                    ]),
            ])
            # One TPSet buffer per channel, fed by the receiver modules above.
            for idy in range(RU_CONFIG[ru]["channel_count"]):
                mod_specs.extend([
                    mspec(f"buf{ru}_{idy}", "TPSetBufferCreator", [
                        app.QueueInfo(name="tpset_source",
                                      inst=f"tpset_q_for_buf{ru}_{idy}",
                                      dir="input"),
                        app.QueueInfo(name="data_request_source",
                                      inst=f"data_request_q{ru}_{idy}",
                                      dir="input"),
                        app.QueueInfo(name="fragment_sink",
                                      inst=f"fragment_q",
                                      dir="output"),
                    ])
                ])

    mod_specs += ([

        ### Timing TCs
        mspec("ttcm", "TimingTriggerCandidateMaker", [
            app.QueueInfo(
                name="output", inst="trigger_candidate_q", dir="output"),
        ]),

        ### Module level trigger
        mspec("mlt", "ModuleLevelTrigger", [
            app.QueueInfo(name="trigger_candidate_source",
                          inst="trigger_candidate_q",
                          dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_q",
                          dir="output"),
        ]),

        ### DFO
        mspec("dfo", "DataFlowOrchestrator", [
            app.QueueInfo(name="trigger_decision_queue",
                          inst="trigger_decision_q",
                          dir="input"),
        ]),
    ])

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    # Generate schema for the maker plugins on the fly in the temptypes module
    make_moo_record(ACTIVITY_CONFIG, 'ActivityConf', 'temptypes')
    make_moo_record(CANDIDATE_CONFIG, 'CandidateConf', 'temptypes')
    import temptypes

    tp_confs = []

    if SOFTWARE_TPG_ENABLED:
        tp_confs.extend([
            ("request_receiver",
             rrcv.ConfParams(map=[
                 rrcv.geoidinst(region=RU_CONFIG[ru]["region_id"],
                                element=idy + RU_CONFIG[ru]["start_channel"],
                                system="DataSelection",
                                queueinstance=f"data_request_q{ru}_{idy}")
                 for ru in range(len(RU_CONFIG))
                 for idy in range(RU_CONFIG[ru]["channel_count"])
             ],
                             general_queue_timeout=100,
                             connection_name=f"{PARTITION}.ds_tp_datareq_0")),
            ("tpset_receiver",
             tpsrcv.ConfParams(map=[
                 tpsrcv.geoidinst(region=RU_CONFIG[ru]["region_id"],
                                  element=idy + RU_CONFIG[ru]["start_channel"],
                                  system=SYSTEM_TYPE,
                                  queueinstance=f"tpset_q_for_buf{ru}_{idy}")
                 for ru in range(len(RU_CONFIG))
                 for idy in range(RU_CONFIG[ru]["channel_count"])
             ],
                               general_queue_timeout=100,
                               topic=f"TPSets")),
            (f"fragment_sender", None),
            (f'tcm',
             tcm.Conf(candidate_maker=CANDIDATE_PLUGIN,
                      candidate_maker_config=temptypes.CandidateConf(
                          **CANDIDATE_CONFIG))),
        ])
        for idx in range(len(RU_CONFIG)):
            tp_confs.extend([
                (f"tpset_subscriber_{idx}",
                 ntoq.Conf(msg_type="dunedaq::trigger::TPSet",
                           msg_module_name="TPSetNQ",
                           receiver_config=nor.Conf(
                               name=f'{PARTITION}.tpsets_{idx}',
                               subscriptions=["TPSets"]))),
                (
                    f"zip_{idx}",
                    tzip.ConfParams(
                        cardinality=RU_CONFIG[idx]["channel_count"],
                        max_latency_ms=1000,
                        region_id=0,  # Fake placeholder
                        element_id=0  # Fake placeholder
                    )),

                ### Algorithms
                (
                    f'tam_{idx}',
                    tam.Conf(
                        activity_maker=ACTIVITY_PLUGIN,
                        geoid_region=0,  # Fake placeholder
                        geoid_element=0,  # Fake placeholder
                        window_time=
                        10000,  # should match whatever makes TPSets, in principle
                        buffer_time=625000,  # 10ms in 62.5 MHz ticks
                        activity_maker_config=temptypes.ActivityConf(
                            **ACTIVITY_CONFIG))),
            ])
            for idy in range(RU_CONFIG[idx]["channel_count"]):
                tp_confs.extend([
                    (f"buf{idx}_{idy}",
                     buf.Conf(tpset_buffer_size=10000,
                              region=RU_CONFIG[idx]["region_id"],
                              element=idy + RU_CONFIG[idx]["start_channel"]))
                ])

    # Total number of readout channels across all readout units; used below
    # to offset the DataSelection GeoID elements.
    total_link_count = 0
    for ru in range(len(RU_CONFIG)):
        total_link_count += RU_CONFIG[ru]["channel_count"]

    cmd_data['conf'] = acmd(tp_confs + [

        ### Timing TCs
        ("ttcm",
         ttcm.Conf(
             s1=ttcm.map_t(signal_type=TTCM_S1,
                           time_before=TRIGGER_WINDOW_BEFORE_TICKS,
                           time_after=TRIGGER_WINDOW_AFTER_TICKS),
             s2=ttcm.map_t(signal_type=TTCM_S2,
                           time_before=TRIGGER_WINDOW_BEFORE_TICKS,
                           time_after=TRIGGER_WINDOW_AFTER_TICKS),
             hsievent_connection_name=PARTITION + ".hsievent",
         )),

        # Module level trigger
        (
            "mlt",
            mlt.ConfParams(
                # This line requests the raw data from upstream DAQ _and_ the raw TPs from upstream DAQ
                links=[
                    mlt.GeoID(system=SYSTEM_TYPE,
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx)
                    for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] + ([
                    mlt.GeoID(system="DataSelection",
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx)
                    for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] if SOFTWARE_TPG_ENABLED else []) + ([
                    mlt.GeoID(system=SYSTEM_TYPE,
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx +
                              total_link_count) for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] if SOFTWARE_TPG_ENABLED else []), )),
        ("dfo",
         dfo.ConfParams(
             token_connection=PARTITION + ".triginh",
             dataflow_applications=[
                 dfo.app_config(
                     decision_connection=f"{PARTITION}.trigdec_{dfidx}",
                     capacity=TOKEN_COUNT) for dfidx in range(DF_COUNT)
             ])),
    ])

    # We start modules in "downstream-to-upstream" order, so that each
    # module is ready before its input starts sending data. The stop
    # order is the reverse (upstream-to-downstream), so each module
    # can process all of its input then stop, ensuring all data gets
    # processed
    # NOTE(review): "ntoq_token" matches no module defined above --
    # presumably a regex matched against another app's modules or stale;
    # confirm.
    start_order = ["buf.*", "dfo", "mlt", "ttcm", "ntoq_token"]

    if SOFTWARE_TPG_ENABLED:
        start_order += [
            "fragment_sender", "tcm", "tam_.*", "zip_.*",
            "tpset_subscriber_.*", "tpset_receiver", "request_receiver"
        ]

    stop_order = start_order[::-1]

    startpars = rccmd.StartParams(run=1)
    cmd_data['start'] = acmd([(m, startpars) for m in start_order])
    cmd_data['stop'] = acmd([(m, None) for m in stop_order])

    cmd_data['pause'] = acmd([("mlt", None)])

    resumepars = rccmd.ResumeParams(trigger_interval_ticks=50000000)
    cmd_data['resume'] = acmd([("mlt", resumepars)])

    cmd_data['scrap'] = acmd([("dfo", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Beispiel #4
0
def generate(RUN_NUMBER: int,
        NW_SPECS: list,
        TIMING_CMD_NETWORK_ENDPOINTS: set,
        GATHER_INTERVAL=1e6,
        GATHER_INTERVAL_DEBUG=10e6,
        HSI_DEVICE_NAME="",
        CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
        UHAL_LOG_LEVEL="notice",
        PARTITION="UNKNOWN"):
    """Build the run-control command data for the timing-hardware app.

    Creates one TimingHardwareManagerPDI module plus a NetworkToQueue
    module per command endpoint, and returns a dict of command payloads
    (init/conf/start/stop/pause/resume/scrap/record).

    Raises:
        RuntimeError: if any name in TIMING_CMD_NETWORK_ENDPOINTS is
            missing from NW_SPECS.
    """
    cmd_data = {}

    # Every requested command endpoint must appear among the network specs.
    required_eps = TIMING_CMD_NETWORK_ENDPOINTS
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}")

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="ntoq_timing_cmds", kind='FollyMPMCQueue', capacity=100),
    ]
    # Sorting reproduces the ordering produced by the jsonnet tooling.
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    thi_init_data = thi.InitParams(
        qinfos=app.QueueInfos([app.QueueInfo(name="hardware_commands_in", inst="ntoq_timing_cmds", dir="input")]),
        connections_file=CONNECTIONS_FILE,
        gather_interval=GATHER_INTERVAL,
        gather_interval_debug=GATHER_INTERVAL_DEBUG,
        monitored_device_name_master="",
        monitored_device_names_fanout=[],
        monitored_device_name_endpoint="",
        monitored_device_name_hsi=HSI_DEVICE_NAME,
        uhal_log_level=UHAL_LOG_LEVEL)

    # One NetworkToQueue per endpoint; module names use the last dotted
    # component of the endpoint name as a suffix.
    mod_specs = [app.ModSpec(inst="thi", plugin="TimingHardwareManagerPDI", data=thi_init_data)]
    mod_specs += [
        mspec(f"ntoq_{ep.split('.')[-1]}", "NetworkToQueue",
              [app.QueueInfo(name="output", inst="ntoq_timing_cmds", dir="output")])
        for ep in TIMING_CMD_NETWORK_ENDPOINTS
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs, nwconnections=NW_SPECS)

    cmd_data['conf'] = acmd([
        (f"ntoq_{ep.split('.')[-1]}",
         ntoq.Conf(msg_type="dunedaq::timinglibs::timingcmd::TimingHwCmd",
                   msg_module_name="TimingHwCmdNQ",
                   receiver_config=nor.Conf(name=ep)))
        for ep in TIMING_CMD_NETWORK_ENDPOINTS
    ])

    startpars = rccmd.StartParams(run=RUN_NUMBER)

    # The empty-string match addresses every module in the app.
    cmd_data['start'] = acmd([("", startpars),])
    cmd_data['stop'] = acmd([("", None),])
    cmd_data['pause'] = acmd([("", None)])
    cmd_data['resume'] = acmd([("", None)])
    cmd_data['scrap'] = acmd([("", None)])
    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Beispiel #5
0
def generate(NETWORK_ENDPOINTS,
             NUMBER_OF_DATA_PRODUCERS=2,
             EMULATOR_MODE=False,
             DATA_RATE_SLOWDOWN_FACTOR=1,
             RUN_NUMBER=333,
             DATA_FILE="./frames.bin",
             OUTPUT_PATH=".",
             DISABLE_OUTPUT=False,
             FLX_INPUT=True,
             TOKEN_COUNT=0,
             CLOCK_SPEED_HZ=50000000):
    """Generate the json configuration for the readout and DF process.

    NETWORK_ENDPOINTS must provide 'trigdec', 'triginh' and 'timesync'
    addresses. Returns a dict of command payloads keyed by command name
    (init/conf/start/stop/pause/resume/scrap).

    Raises:
        RuntimeError: if a required network endpoint is missing.
    """

    cmd_data = {}

    required_eps = {'trigdec', 'triginh', 'timesync'}
    if not required_eps.issubset(NETWORK_ENDPOINTS):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join(NETWORK_ENDPOINTS.keys())}"
        )

    # NOTE(review): true division yields floats here -- confirm the
    # downstream conf schema accepts non-integer values for these.
    LATENCY_BUFFER_SIZE = 3 * CLOCK_SPEED_HZ / (25 * 12 *
                                                DATA_RATE_SLOWDOWN_FACTOR)
    RATE_KHZ = CLOCK_SPEED_HZ / (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR * 1000)

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_q", kind='FollyMPMCQueue', capacity=100),
        app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="trigger_decision_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(inst="trigger_decision_from_netq",
                      kind='FollySPSCQueue',
                      capacity=100),
        app.QueueSpec(inst="trigger_decision_copy_for_bookkeeping",
                      kind='FollySPSCQueue',
                      capacity=100),
        app.QueueSpec(
            inst="trigger_record_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
    ] + [
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=100)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"wib_link_{idx}", kind='FollySPSCQueue', capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("ntoq_trigdec", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst="trigger_decision_from_netq", dir="output")
        ]),
        mspec("qton_token", "QueueToNetwork",
              [app.QueueInfo(name="input", inst="token_q", dir="input")]),
        mspec("qton_timesync", "QueueToNetwork",
              [app.QueueInfo(name="input", inst="time_sync_q", dir="input")]),
        mspec("rqg", "RequestGenerator", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_from_netq",
                          dir="input"),
            app.QueueInfo(name="trigger_decision_for_event_building",
                          inst="trigger_decision_copy_for_bookkeeping",
                          dir="output"),
        ] + [
            app.QueueInfo(name=f"data_request_{idx}_output_queue",
                          inst=f"data_requests_{idx}",
                          dir="output")
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
        mspec("ffr", "FragmentReceiver", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_copy_for_bookkeeping",
                          dir="input"),
            app.QueueInfo(name="trigger_record_output_queue",
                          inst="trigger_record_q",
                          dir="output"),
            app.QueueInfo(name="data_fragment_input_queue",
                          inst="data_fragments_q",
                          dir="input"),
        ]),
        mspec("datawriter", "DataWriter", [
            app.QueueInfo(name="trigger_record_input_queue",
                          inst="trigger_record_q",
                          dir="input"),
            app.QueueInfo(
                name="token_output_queue", inst="token_q", dir="output"),
        ]),
    ] + [
        mspec(f"datahandler_{idx}", "DataLinkHandler", [
            app.QueueInfo(
                name="raw_input", inst=f"wib_link_{idx}", dir="input"),
            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
            app.QueueInfo(
                name="requests", inst=f"data_requests_{idx}", dir="input"),
            app.QueueInfo(
                name="fragments", inst="data_fragments_q", dir="output"),
        ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    # Real FELIX input uses up to two cards of 5 links each; otherwise a
    # single fake card reader feeds all links from file.
    if FLX_INPUT:
        mod_specs.append(
            mspec("flxcard_0", "FelixCardReader", [
                app.QueueInfo(
                    name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                for idx in range(0, min(5, NUMBER_OF_DATA_PRODUCERS))
            ]))
        if NUMBER_OF_DATA_PRODUCERS > 5:
            mod_specs.append(
                mspec("flxcard_1", "FelixCardReader", [
                    app.QueueInfo(name=f"output_{idx}",
                                  inst=f"wib_link_{idx}",
                                  dir="output")
                    for idx in range(5, NUMBER_OF_DATA_PRODUCERS)
                ]))
    else:
        mod_specs.append(
            mspec("fake_source", "FakeCardReader", [
                app.QueueInfo(
                    name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                for idx in range(NUMBER_OF_DATA_PRODUCERS)
            ]))

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    # NOTE(review): QUEUE_POP_WAIT_MS is not defined in this function --
    # presumably a module-level constant; verify.
    # Conf entries for both fake_source and flxcard_* are always listed;
    # presumably only the modules instantiated in 'init' receive them --
    # confirm against the command-matching logic.
    cmd_data['conf'] = acmd([
        ("ntoq_trigdec",
         ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                   msg_module_name="TriggerDecisionNQ",
                   receiver_config=nor.Conf(
                       ipm_plugin_type="ZmqReceiver",
                       address=NETWORK_ENDPOINTS["trigdec"]))),
        ("qton_token",
         qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                   msg_module_name="TriggerDecisionTokenNQ",
                   sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                          address=NETWORK_ENDPOINTS["triginh"],
                                          stype="msgpack"))),
        ("qton_timesync",
         qton.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                   msg_module_name="TimeSyncNQ",
                   sender_config=nos.Conf(
                       ipm_plugin_type="ZmqSender",
                       address=NETWORK_ENDPOINTS["timesync"],
                       stype="msgpack"))),
        ("rqg",
         rqg.ConfParams(map=rqg.mapgeoidqueue([
             rqg.geoidinst(
                 apa=0, link=idx, queueinstance=f"data_requests_{idx}")
             for idx in range(NUMBER_OF_DATA_PRODUCERS)
         ]))),
        ("ffr", ffr.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS)),
        (
            "datawriter",
            dw.ConfParams(
                initial_token_count=TOKEN_COUNT,
                data_store_parameters=hdf5ds.ConfParams(
                    name="data_store",
                    # type = "HDF5DataStore", # default
                    directory_path=OUTPUT_PATH,  # default
                    # mode = "all-per-file", # default
                    max_file_size_bytes=1073741824,
                    disable_unique_filename_suffix=False,
                    filename_parameters=hdf5ds.HDF5DataStoreFileNameParams(
                        overall_prefix="swtest",
                        digits_for_run_number=6,
                        file_index_prefix="",
                        digits_for_file_index=4,
                    ),
                    file_layout_parameters=hdf5ds.
                    HDF5DataStoreFileLayoutParams(
                        trigger_record_name_prefix="TriggerRecord",
                        digits_for_trigger_number=5,
                        digits_for_apa_number=3,
                        digits_for_link_number=2,
                    )))),
        (
            "fake_source",
            fakecr.Conf(
                link_ids=list(range(NUMBER_OF_DATA_PRODUCERS)),
                # input_limit=10485100, # default
                rate_khz=RATE_KHZ,
                raw_type="wib",
                data_filename=DATA_FILE,
                queue_timeout_ms=QUEUE_POP_WAIT_MS)),
        ("flxcard_0",
         flxcr.Conf(card_id=0,
                    logical_unit=0,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=min(5, NUMBER_OF_DATA_PRODUCERS))),
        ("flxcard_1",
         flxcr.Conf(card_id=0,
                    logical_unit=1,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=max(0, NUMBER_OF_DATA_PRODUCERS - 5))),
    ] + [
        (
            f"datahandler_{idx}",
            dlh.Conf(
                raw_type="wib",
                emulator_mode=EMULATOR_MODE,
                # fake_trigger_flag=0, # default
                source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                latency_buffer_size=LATENCY_BUFFER_SIZE,
                pop_limit_pct=0.8,
                pop_size_pct=0.1,
                apa_number=0,
                link_number=idx)) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ])

    startpars = rccmd.StartParams(run=RUN_NUMBER,
                                  disable_data_storage=DISABLE_OUTPUT)
    # Start downstream-to-upstream so each consumer is ready before its
    # producer starts; stop in the reverse order.
    cmd_data['start'] = acmd([
        ("qton_token", startpars),
        ("datawriter", startpars),
        ("ffr", startpars),
        ("qton_timesync", startpars),
        ("datahandler_.*", startpars),
        ("fake_source", startpars),
        ("flxcard.*", startpars),
        ("rqg", startpars),
        ("ntoq_trigdec", startpars),
    ])

    cmd_data['stop'] = acmd([
        ("ntoq_trigdec", None),
        ("rqg", None),
        ("flxcard.*", None),
        ("fake_source", None),
        ("datahandler_.*", None),
        ("qton_timesync", None),
        ("ffr", None),
        ("datawriter", None),
        ("qton_token", None),
    ])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    return cmd_data
Beispiel #6
0
def generate(NW_SPECS,
             RU_CONFIG=None,
             HOSTIDX=0,
             RUN_NUMBER=333,
             OUTPUT_PATH=".",
             TOKEN_COUNT=0,
             SYSTEM_TYPE="TPC",
             SOFTWARE_TPG_ENABLED=False,
             TPSET_WRITING_ENABLED=False,
             PARTITION="UNKNOWN",
             OPERATIONAL_ENVIRONMENT="swtest",
             TPC_REGION_NAME_PREFIX="APA",
             MAX_FILE_SIZE=4 * 1024 * 1024 * 1024):
    """Generate the json configuration for the readout and DF process.

    Builds the per-command payloads (init/conf/start/stop/...) for one
    dataflow application: trigger-decision reception, fragment reception,
    trigger-record building and HDF5 data writing, plus optional TPSet
    subscription and writing when TPSET_WRITING_ENABLED is set.

    Args:
        NW_SPECS: network connection specs; each element must expose a
            ``name`` attribute.
        RU_CONFIG: per-readout-unit dicts with keys ``region_id``,
            ``start_channel`` and ``channel_count``. Defaults to an empty
            list (no readout units).
        HOSTIDX: index of this DF host; selects its endpoint names.
        RUN_NUMBER: run number placed in the start command.
        OUTPUT_PATH: directory for the HDF5 output files.
        TOKEN_COUNT: unused in this body; kept for interface compatibility.
        SYSTEM_TYPE: GeoID system type for the readout links.
        SOFTWARE_TPG_ENABLED: when True, add the TP-fragment and
            DataSelection GeoID map entries for the TRB.
        TPSET_WRITING_ENABLED: when True, add TPSet subscriber modules
            (one per RU) and a TPSetWriter.
        PARTITION: partition name prefixed to every endpoint name.
        OPERATIONAL_ENVIRONMENT: tag used in HDF5 metadata and filenames.
        TPC_REGION_NAME_PREFIX: region prefix for the TPC file layout.
        MAX_FILE_SIZE: maximum HDF5 file size in bytes.

    Returns:
        dict mapping command name to the corresponding command data.

    Raises:
        RuntimeError: if the required endpoints are missing from NW_SPECS.
    """
    # Fix for the shared mutable-default-argument pitfall: the previous
    # default RU_CONFIG=[] was one list object shared across all calls.
    if RU_CONFIG is None:
        RU_CONFIG = []

    cmd_data = {}

    # This app needs its own trigger-decision endpoint plus the shared
    # trigger-inhibit (token) endpoint.
    required_eps = {PARTITION + f'.trigdec_{HOSTIDX}', PARTITION + '.triginh'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(
            inst="trigger_decision_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="trigger_record_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
    ] + ([
        app.QueueSpec(
            inst="tpsets_from_netq", kind='FollyMPMCQueue', capacity=1000),
    ] if TPSET_WRITING_ENABLED else [])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("trigdec_receiver", "TriggerDecisionReceiver", [
            app.QueueInfo(
                name="output", inst="trigger_decision_q", dir="output")
        ]),
        mspec("fragment_receiver", "FragmentReceiver", [
            app.QueueInfo(name="output", inst="data_fragments_q", dir="output")
        ]),
        mspec("trb", "TriggerRecordBuilder", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_q",
                          dir="input"),
            app.QueueInfo(name="trigger_record_output_queue",
                          inst="trigger_record_q",
                          dir="output"),
            app.QueueInfo(name="data_fragment_input_queue",
                          inst="data_fragments_q",
                          dir="input")
        ]),
        mspec("datawriter", "DataWriter", [
            app.QueueInfo(name="trigger_record_input_queue",
                          inst="trigger_record_q",
                          dir="input")
        ]),
    ] + ([
        mspec(f"tpset_subscriber_{idx}", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst=f"tpsets_from_netq", dir="output")
        ]) for idx in range(len(RU_CONFIG))
    ] if TPSET_WRITING_ENABLED else []) + ([
        mspec("tpswriter", "TPSetWriter", [
            app.QueueInfo(
                name="tpset_source", inst="tpsets_from_netq", dir="input")
        ])
    ] if TPSET_WRITING_ENABLED else [])

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    # Total number of links across all RUs; used to offset the element ids
    # of the software-TPG fragment entries so they don't collide with the
    # raw-data entries.
    total_link_count = 0
    for ru in range(len(RU_CONFIG)):
        total_link_count += RU_CONFIG[ru]["channel_count"]

    cmd_data['conf'] = acmd([
        ("trigdec_receiver",
         tdrcv.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS,
                          connection_name=f"{PARTITION}.trigdec_{HOSTIDX}")),
        ("trb",
         trb.ConfParams(
             general_queue_timeout=QUEUE_POP_WAIT_MS,
             reply_connection_name=f"{PARTITION}.frags_{HOSTIDX}",
             # GeoID -> data-request connection map: raw links first, then
             # (if software TPG is on) the TP fragments and the
             # DataSelection entries.
             map=trb.mapgeoidconnections([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"],
                               system=SYSTEM_TYPE,
                               connection_name=f"{PARTITION}.datareq_{ru}")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] + ([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"] +
                               total_link_count,
                               system=SYSTEM_TYPE,
                               connection_name=f"{PARTITION}.datareq_{ru}")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] if SOFTWARE_TPG_ENABLED else []) + ([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"],
                               system="DataSelection",
                               connection_name=f"{PARTITION}.ds_tp_datareq_0")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] if SOFTWARE_TPG_ENABLED else [])))),
        ("datawriter",
         dw.ConfParams(
             decision_connection=f"{PARTITION}.trigdec_{HOSTIDX}",
             token_connection=PARTITION + ".triginh",
             data_store_parameters=hdf5ds.ConfParams(
                 name="data_store",
                 version=3,
                 operational_environment=OPERATIONAL_ENVIRONMENT,
                 directory_path=OUTPUT_PATH,
                 max_file_size_bytes=MAX_FILE_SIZE,
                 disable_unique_filename_suffix=False,
                 filename_parameters=hdf5ds.FileNameParams(
                     overall_prefix=OPERATIONAL_ENVIRONMENT,
                     digits_for_run_number=6,
                     file_index_prefix="",
                     digits_for_file_index=4,
                 ),
                 file_layout_parameters=hdf5ds.FileLayoutParams(
                     trigger_record_name_prefix="TriggerRecord",
                     digits_for_trigger_number=5,
                     path_param_list=hdf5ds.PathParamList([
                         hdf5ds.PathParams(
                             detector_group_type="TPC",
                             detector_group_name="TPC",
                             region_name_prefix=TPC_REGION_NAME_PREFIX,
                             element_name_prefix="Link"),
                         hdf5ds.PathParams(detector_group_type="PDS",
                                           detector_group_name="PDS"),
                         hdf5ds.PathParams(detector_group_type="NDLArTPC",
                                           detector_group_name="NDLArTPC"),
                         hdf5ds.PathParams(detector_group_type="Trigger",
                                           detector_group_name="Trigger"),
                         hdf5ds.PathParams(detector_group_type="TPC_TP",
                                           detector_group_name="TPC",
                                           region_name_prefix="TP_APA",
                                           element_name_prefix="Link")
                     ]))))),
    ] + [
        ("fragment_receiver",
         frcv.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS,
                         connection_name=f"{PARTITION}.frags_{HOSTIDX}")),
    ] + [(f"tpset_subscriber_{idx}",
          ntoq.Conf(msg_type="dunedaq::trigger::TPSet",
                    msg_module_name="TPSetNQ",
                    receiver_config=nor.Conf(name=f'{PARTITION}.tpsets_{idx}',
                                             subscriptions=["TPSets"])))
         for idx in range(len(RU_CONFIG))] + (
             [("tpswriter", tpsw.ConfParams(max_file_size_bytes=1000000000, )
               )] if TPSET_WRITING_ENABLED else []))

    startpars = rccmd.StartParams(run=RUN_NUMBER)
    # TPSet modules (if any) are started first so they are ready before the
    # core DF chain starts.
    cmd_data['start'] = acmd(
        ([("tpswriter", startpars), ("tpset_subscriber_.*", startpars)]
         if TPSET_WRITING_ENABLED else []) +
        [("datawriter", startpars), ("fragment_receiver", startpars),
         ("trb", startpars), ("trigdec_receiver", startpars)])

    # Stop in the reverse order of start.
    cmd_data['stop'] = acmd([
        ("trigdec_receiver", None),
        ("trb", None),
        ("fragment_receiver", None),
        ("datawriter", None),
    ] + ([("tpset_subscriber_.*",
           None), ("tpswriter", None)] if TPSET_WRITING_ENABLED else []))

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    # NOTE(review): "qton_token" is not one of the modules declared in this
    # app's mod_specs — this looks like a leftover from an older layout;
    # confirm whether the entry can be removed.
    cmd_data['scrap'] = acmd([("fragment_receiver", None),
                              ("trigdec_receiver", None),
                              ("qton_token", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
# ---- Example #7 ----
def add_network(app_name, the_system, verbose=False):
    """Add the necessary QueueToNetwork and NetworkToQueue objects to the
       application named `app_name`, based on the inter-application
       connections specified in `the_system`. NB `the_system` is modified
       in-place: nwmgr connections (with freshly assigned tcp addresses)
       are appended to `the_system.network_endpoints`, and the app's
       module graph is replaced by a copy containing the added QtoN/NtoQ
       shim modules.

       Args:
           app_name: key of the application in `the_system.apps`.
           the_system: system description; must provide `apps`,
               `app_connections`, `network_endpoints`,
               `has_network_endpoint`, `get_network_endpoint` and
               `next_unassigned_port`.
           verbose: when True, log progress to `console`.
    """

    # if the_system.network_endpoints is None:
    #     the_system.network_endpoints=assign_network_endpoints(the_system)

    if verbose:
        console.log(f"---- add_network for {app_name} ----")
    # NOTE(review): this local `app` shadows any module-level name `app`
    # within this function body — confusing but harmless here.
    app = the_system.apps[app_name]

    # Work on a copy so the original module graph is only replaced once,
    # at the end, after all shims have been added.
    modules_with_network = deepcopy(app.modulegraph.modules)

    # Endpoints still lacking a network connection; drained as we go and
    # reported as a warning at the end if non-empty.
    unconnected_endpoints = set(app.modulegraph.endpoints.keys())

    if verbose:
        console.log(f"Endpoints to connect are: {unconnected_endpoints}")

    for conn_name, app_connection in the_system.app_connections.items():
        if verbose:
            console.log(
                f"conn_name {conn_name}, app_connection {app_connection}")

        # Create the nwmgr connection if it doesn't already exist
        if not the_system.has_network_endpoint(
                app_connection.nwmgr_connection):
            # IPM connections have the following confusing behaviour:
            # whether the connection is pub/sub or direct connection
            # is determined by whether the list of topics is empty;
            # and the end that binds is upstream for pub/sub
            # connections and downstream for direct connections
            is_pubsub = app_connection.topics != []
            bind_host = app_name if is_pubsub else app_connection.receivers[
                0].split(".")[0]
            port = the_system.next_unassigned_port()
            address = f"tcp://{{host_{bind_host}}}:{port}"
            if verbose:
                console.log(
                    f"Assigning address {address} for connection {app_connection.nwmgr_connection}"
                )
            the_system.network_endpoints.append(
                nwmgr.Connection(name=app_connection.nwmgr_connection,
                                 topics=app_connection.topics,
                                 address=address))
        # conn_name has the form "<sending app>.<endpoint>".
        from_app, from_endpoint = conn_name.split(".", maxsplit=1)

        if from_app == app_name:
            if from_endpoint in unconnected_endpoints:
                unconnected_endpoints.remove(from_endpoint)
            from_endpoint_internal = resolve_endpoint(app, from_endpoint,
                                                      Direction.OUT)
            if from_endpoint_internal is None:
                # The module.endpoint for this external endpoint was
                # specified as None, so we assume it was a direct
                # nwmgr sender, and don't make a qton for it
                if verbose:
                    console.log(
                        f"{conn_name} specifies its internal endpoint as None, so not creating a QtoN for it"
                    )
                continue
            from_endpoint_module_name, from_endpoint_sink = from_endpoint_internal.split(
                ".")
            # We're a publisher or sender. Make the queue to network
            qton_name = conn_name.replace(".", "_")
            qton_name = make_unique_name(qton_name, modules_with_network)

            if verbose:
                console.log(
                    f"Adding QueueToNetwork named {qton_name} connected to {from_endpoint_internal} in app {app_name}"
                )
            nwmgr_connection_name = app_connection.nwmgr_connection
            nwmgr_connection = the_system.get_network_endpoint(
                nwmgr_connection_name)
            # First topic wins for the sender side; "" means direct send.
            topic = nwmgr_connection.topics[
                0] if nwmgr_connection.topics else ""
            modules_with_network.append(
                DAQModule(
                    name=qton_name,
                    plugin="QueueToNetwork",
                    connections={},  # No outgoing connections
                    conf=qton.Conf(
                        msg_type=app_connection.msg_type,
                        msg_module_name=app_connection.msg_module_name,
                        sender_config=nos.Conf(name=nwmgr_connection_name,
                                               topic=topic))))
            # Connect the module to the QueueToNetwork
            from_endpoint_module = None
            for mod in modules_with_network:
                if mod.name == from_endpoint_module_name:
                    from_endpoint_module = mod
                    break
            # NOTE(review): if no module matched from_endpoint_module_name,
            # from_endpoint_module is still None and the next line raises
            # AttributeError — an explicit error would be friendlier.
            mod_connections = from_endpoint_module.connections
            mod_connections[from_endpoint_sink] = Connection(
                f"{qton_name}.input")

        if verbose:
            console.log(
                f"app_connection.receivers is {app_connection.receivers}")
        for receiver in app_connection.receivers:
            # Each receiver has the form "<receiving app>.<endpoint>".
            to_app, to_endpoint = receiver.split(".", maxsplit=1)
            if to_app == app_name:
                if to_endpoint in unconnected_endpoints:
                    unconnected_endpoints.remove(to_endpoint)
                to_endpoint_internal = resolve_endpoint(
                    app, to_endpoint, Direction.IN)
                if to_endpoint_internal is None:
                    # The module.endpoint for this external endpoint was
                    # specified as None, so we assume it was a direct
                    # nwmgr sender, and don't make a ntoq for it
                    if verbose:
                        console.log(
                            f"{to_endpoint} specifies its internal endpoint as None, so not creating a NtoQ for it"
                        )
                    continue

                ntoq_name = receiver.replace(".", "_")
                ntoq_name = make_unique_name(ntoq_name, modules_with_network)

                if verbose:
                    console.log(
                        f"Adding NetworkToQueue named {ntoq_name} connected to {to_endpoint_internal} in app {app_name}"
                    )

                nwmgr_connection_name = app_connection.nwmgr_connection
                nwmgr_connection = the_system.get_network_endpoint(
                    nwmgr_connection_name)

                modules_with_network.append(
                    DAQModule(
                        name=ntoq_name,
                        plugin="NetworkToQueue",
                        connections={
                            "output": Connection(to_endpoint_internal)
                        },
                        conf=ntoq.Conf(
                            msg_type=app_connection.msg_type,
                            msg_module_name=app_connection.msg_module_name,
                            receiver_config=nor.Conf(
                                name=nwmgr_connection_name,
                                subscriptions=nwmgr_connection.topics))))

    if unconnected_endpoints:
        # TODO: Use proper logging
        console.log(
            f"Warning: the following endpoints of {app_name} were not connected to anything: {unconnected_endpoints}"
        )

    # Publish the augmented module graph back onto the app (in-place).
    app.modulegraph.modules = modules_with_network
def generate_df(
        network_endpoints,
        NUMBER_OF_DATA_PRODUCERS=2,
        EMULATOR_MODE=False,
        DATA_RATE_SLOWDOWN_FACTOR = 1,
        RUN_NUMBER = 333, 
        TRIGGER_RATE_HZ = 1.0,
        DATA_FILE="./frames.bin",
        OUTPUT_PATH=".",
        DISABLE_OUTPUT=False,
        FLX_INPUT=True,
        TOKEN_COUNT=0
    ):
    """Generate the json configuration for the readout and DF process.

    Builds the full command sequence (init, conf, start, stop, pause,
    resume, scrap) for a combined readout + dataflow application and
    returns it serialised as a JSON string.

    Args:
        network_endpoints: dict with at least the keys "trigdec",
            "triginh" and "timesync" mapping to IPM addresses.
        NUMBER_OF_DATA_PRODUCERS: number of WIB links / data handlers.
        EMULATOR_MODE: passed through to each DataLinkHandler conf.
        DATA_RATE_SLOWDOWN_FACTOR: scales all tick-based quantities.
        RUN_NUMBER: run number placed in the start command.
        TRIGGER_RATE_HZ: desired wall-clock trigger rate; must be
            non-zero (zero raises ZeroDivisionError below).
        DATA_FILE: raw-frame file used by the fake card reader.
        OUTPUT_PATH: directory for the HDF5 output files.
        DISABLE_OUTPUT: disables data storage in the start command.
        FLX_INPUT: True -> FELIX card reader modules; False -> fake reader.
        TOKEN_COUNT: initial token count for the DataWriter.

    Returns:
        str: JSON-encoded list of the command objects.
    """
   
    # Ticks between triggers at the requested (slowed-down) clock.
    trg_interval_ticks = math.floor((1/TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR)

    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="time_sync_q", kind='FollyMPMCQueue', capacity=100),
            app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_from_netq", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_copy_for_bookkeeping", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_record_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
        ] + [
            # One request queue per link...
            app.QueueSpec(inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=100)
                for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ] + [
            # ...and one raw-data queue per link.
            app.QueueSpec(inst=f"wib_link_{idx}", kind='FollySPSCQueue', capacity=100000)
                for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]
    

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))


    mod_specs = [
        mspec("ntoq_trigdec", "NetworkToQueue", [
                        app.QueueInfo(name="output", inst="trigger_decision_from_netq", dir="output")
                    ]),

        mspec("qton_token", "QueueToNetwork", [
                        app.QueueInfo(name="input", inst="token_q", dir="input")
                    ]),

        mspec("qton_timesync", "QueueToNetwork", [
                        app.QueueInfo(name="input", inst="time_sync_q", dir="input")
                    ]),

        mspec("rqg", "RequestGenerator", [
                        app.QueueInfo(name="trigger_decision_input_queue", inst="trigger_decision_from_netq", dir="input"),
                        app.QueueInfo(name="trigger_decision_for_event_building", inst="trigger_decision_copy_for_bookkeeping", dir="output"),
                    ] + [
                        app.QueueInfo(name=f"data_request_{idx}_output_queue", inst=f"data_requests_{idx}", dir="output")
                            for idx in range(NUMBER_OF_DATA_PRODUCERS)
                    ]),

        mspec("ffr", "FragmentReceiver", [
                        app.QueueInfo(name="trigger_decision_input_queue", inst="trigger_decision_copy_for_bookkeeping", dir="input"),
                        app.QueueInfo(name="trigger_record_output_queue", inst="trigger_record_q", dir="output"),
                        app.QueueInfo(name="data_fragment_input_queue", inst="data_fragments_q", dir="input"),
                    ]),

        mspec("datawriter", "DataWriter", [
                        app.QueueInfo(name="trigger_record_input_queue", inst="trigger_record_q", dir="input"),
                        app.QueueInfo(name="token_output_queue", inst="token_q", dir="output"),
                    ]),

        ] + [
                mspec(f"datahandler_{idx}", "DataLinkHandler", [

                            app.QueueInfo(name="raw_input", inst=f"wib_link_{idx}", dir="input"),
                            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
                            app.QueueInfo(name="requests", inst=f"data_requests_{idx}", dir="input"),
                            app.QueueInfo(name="fragments", inst="data_fragments_q", dir="output"),
                            ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]

    # Input source: either real FELIX cards (up to 5 links per logical unit,
    # so a second card module is added beyond 5) or a single fake reader.
    if FLX_INPUT:
        mod_specs.append(mspec("flxcard_0", "FelixCardReader", [
                        app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                            for idx in range(0,min(5, NUMBER_OF_DATA_PRODUCERS))
                        ]))
        if NUMBER_OF_DATA_PRODUCERS>5 :
            mod_specs.append(mspec("flxcard_1", "FelixCardReader", [
                            app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                                for idx in range(5, NUMBER_OF_DATA_PRODUCERS)
                            ]))
    else:
        mod_specs.append(mspec("fake_source", "FakeCardReader", [
                        app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                            for idx in range(NUMBER_OF_DATA_PRODUCERS)
                        ]))

    


    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    initcmd = rccmd.RCCommand(
        id=basecmd.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    # NOTE(review): both "fake_source" and "flxcard_*" entries are always
    # present below, even though only one family of modules is instantiated
    # above (depending on FLX_INPUT) — presumably entries with no matching
    # module are ignored; confirm against mrccmd's matching behaviour.
    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED",[
                ("ntoq_trigdec", ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                                           msg_module_name="TriggerDecisionNQ",
                                           receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                                                    address=network_endpoints["trigdec"])
                                           )
                 ),

                ("qton_token", qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                                           msg_module_name="TriggerDecisionTokenNQ",
                                           sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                                                  address=network_endpoints["triginh"],
                                                                  stype="msgpack")
                                           )
                 ),

                ("qton_timesync", qton.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                                            msg_module_name="TimeSyncNQ",
                                            sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                                                   address=network_endpoints["timesync"],
                                                                   stype="msgpack")
                                           )
                ),
        
                ("rqg", rqg.ConfParams(
                        map=rqg.mapgeoidqueue([
                                rqg.geoidinst(apa=0, link=idx, queueinstance=f"data_requests_{idx}") for idx in range(NUMBER_OF_DATA_PRODUCERS)
                            ])  
                        )),
                ("ffr", ffr.ConfParams(
                            general_queue_timeout=QUEUE_POP_WAIT_MS
                        )),
                ("datawriter", dw.ConfParams(
                            initial_token_count=TOKEN_COUNT,
                            data_store_parameters=hdf5ds.ConfParams(
                                name="data_store",
                                # type = "HDF5DataStore", # default
                                directory_path = OUTPUT_PATH, # default
                                # mode = "all-per-file", # default
                                max_file_size_bytes = 1073741824,
                                disable_unique_filename_suffix = False,
                                filename_parameters = hdf5ds.HDF5DataStoreFileNameParams(
                                    overall_prefix = "swtest",
                                    digits_for_run_number = 6,
                                    file_index_prefix = "",
                                    digits_for_file_index = 4,
                                ),
                                file_layout_parameters = hdf5ds.HDF5DataStoreFileLayoutParams(
                                    trigger_record_name_prefix= "TriggerRecord",
                                    digits_for_trigger_number = 5,
                                    digits_for_apa_number = 3,
                                    digits_for_link_number = 2,
                                )
                            )
                        )),
                ("fake_source",fakecr.Conf(
                            link_ids=list(range(NUMBER_OF_DATA_PRODUCERS)),
                            # input_limit=10485100, # default
                            rate_khz = CLOCK_SPEED_HZ/(25*12*DATA_RATE_SLOWDOWN_FACTOR*1000),
                            raw_type = "wib",
                            data_filename = DATA_FILE,
                            queue_timeout_ms = QUEUE_POP_WAIT_MS
                        )),
                ("flxcard_0",flxcr.Conf(
                            card_id=0,
                            logical_unit=0,
                            dma_id=0,
                            chunk_trailer_size= 32,
                            dma_block_size_kb= 4,
                            dma_memory_size_gb= 4,
                            numa_id=0,
                            num_links=min(5,NUMBER_OF_DATA_PRODUCERS)
                        )),
                ("flxcard_1",flxcr.Conf(
                            card_id=0,
                            logical_unit=1,
                            dma_id=0,
                            chunk_trailer_size= 32,
                            dma_block_size_kb= 4,
                            dma_memory_size_gb= 4,
                            numa_id=0,
                            num_links=max(0, NUMBER_OF_DATA_PRODUCERS-5)
                        )),
            ] + [
                (f"datahandler_{idx}", dlh.Conf(
                        raw_type = "wib",
                        emulator_mode = EMULATOR_MODE,
                        # fake_trigger_flag=0, # default
                        source_queue_timeout_ms= QUEUE_POP_WAIT_MS,
                        # NOTE(review): this expression is a float (true
                        # division) — confirm the conf schema accepts it.
                        latency_buffer_size = 3*CLOCK_SPEED_HZ/(25*12*DATA_RATE_SLOWDOWN_FACTOR),
                        pop_limit_pct = 0.8,
                        pop_size_pct = 0.1,
                        apa_number = 0,
                        link_number = idx
                        )) for idx in range(NUMBER_OF_DATA_PRODUCERS)
            ])

    startpars = rccmd.StartParams(run=RUN_NUMBER, trigger_interval_ticks=trg_interval_ticks, disable_data_storage=DISABLE_OUTPUT)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("qton_token", startpars),
            ("datawriter", startpars),
            ("ffr", startpars),
            ("qton_timesync", startpars),
            ("datahandler_.*", startpars),
            ("fake_source", startpars),
            ("flxcard.*", startpars),
            ("rqg", startpars),
            ("ntoq_trigdec", startpars),
        ])

    # Stop in the reverse order of start.
    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("ntoq_trigdec", None),
            ("rqg", None),
            ("flxcard.*", None),
            ("fake_source", None),
            ("datahandler_.*", None),
            ("qton_timesync", None),
            ("ffr", None),
            ("datawriter", None),
            ("qton_token", None),
        ])

    pausecmd = mrccmd("pause", "RUNNING", "RUNNING", [
            ("", None)
        ])

    # NOTE(review): "tde" is not a module of this app (it belongs to the
    # trigger-emulator app) — this resume entry looks copy-pasted from
    # generate_trigemu; confirm whether it is intentionally a no-op here.
    resumecmd = mrccmd("resume", "RUNNING", "RUNNING", [
            ("tde", tde.ResumeParams(
                            trigger_interval_ticks=trg_interval_ticks
                        ))
        ])

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [
            ("", None)
        ])

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, pausecmd, resumecmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
def generate_trigemu(
        network_endpoints,
        NUMBER_OF_DATA_PRODUCERS=2,
        DATA_RATE_SLOWDOWN_FACTOR = 1,
        RUN_NUMBER = 333,
        TRIGGER_RATE_HZ = 1.0,
        DATA_FILE="./frames.bin",
        OUTPUT_PATH=".",
        TOKEN_COUNT=10
    ):
    """Build the command sequence for the TriggerDecisionEmulator process.

    Returns the commands (init/conf/start/stop/pause/resume/scrap) for an
    application holding one TriggerDecisionEmulator plus the three
    queue<->network shims it needs, serialised as a JSON string.
    """
    # Clock ticks between emulated triggers; the slowdown factor keeps the
    # wall-clock rate at TRIGGER_RATE_HZ.
    interval_ticks = math.floor(
        (1/TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR)

    # Queues, sorted by instance name to reproduce the jsonnet ordering.
    raw_queues = [
        app.QueueSpec(inst="time_sync_from_netq", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(inst="token_from_netq", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(inst="trigger_decision_to_netq", kind='FollySPSCQueue', capacity=20),
    ]
    queue_specs = app.QueueSpecs(sorted(raw_queues, key=lambda q: q.inst))

    # Modules: three network shims plus the emulator itself.
    mod_specs = [
        mspec("qton_trigdec", "QueueToNetwork",
              [app.QueueInfo(name="input", inst="trigger_decision_to_netq", dir="input")]),
        mspec("ntoq_token", "NetworkToQueue",
              [app.QueueInfo(name="output", inst="token_from_netq", dir="output")]),
        mspec("ntoq_timesync", "NetworkToQueue",
              [app.QueueInfo(name="output", inst="time_sync_from_netq", dir="output")]),
        mspec("tde", "TriggerDecisionEmulator", [
            app.QueueInfo(name="time_sync_source", inst="time_sync_from_netq", dir="input"),
            app.QueueInfo(name="token_source", inst="token_from_netq", dir="input"),
            app.QueueInfo(name="trigger_decision_sink", inst="trigger_decision_to_netq", dir="output"),
        ]),
    ]

    initcmd = rccmd.RCCommand(
        id=basecmd.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=app.Init(queues=queue_specs, modules=mod_specs))

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED", [
        ("qton_trigdec",
         qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                   msg_module_name="TriggerDecisionNQ",
                   sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                          address=network_endpoints["trigdec"],
                                          stype="msgpack"))),
        ("ntoq_token",
         ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                   msg_module_name="TriggerDecisionTokenNQ",
                   receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                            address=network_endpoints["triginh"]))),
        ("ntoq_timesync",
         ntoq.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                   msg_module_name="TimeSyncNQ",
                   receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                            address=network_endpoints["timesync"]))),
        ("tde",
         tde.ConfParams(
             links=[idx for idx in range(NUMBER_OF_DATA_PRODUCERS)],
             min_links_in_request=NUMBER_OF_DATA_PRODUCERS,
             max_links_in_request=NUMBER_OF_DATA_PRODUCERS,
             min_readout_window_ticks=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*1000)),
             max_readout_window_ticks=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*1000)),
             trigger_window_offset=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*2000)),
             # The delay is set to put the trigger well within the latency buff
             trigger_delay_ticks=math.floor(CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR),
             # We divide the trigger interval by DATA_RATE_SLOWDOWN_FACTOR so
             # the triggers are still emitted per (wall-clock) second, rather
             # than being spaced out further
             trigger_interval_ticks=interval_ticks,
             clock_frequency_hz=CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR,
             initial_token_count=TOKEN_COUNT)),
    ])

    start_params = rccmd.StartParams(run=RUN_NUMBER, disable_data_storage=False)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
        ("qton_trigdec", start_params),
        ("ntoq_token", start_params),
        ("ntoq_timesync", start_params),
        ("tde", start_params),
    ])

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
        ("qton_trigdec", None),
        ("ntoq_timesync", None),
        ("ntoq_token", None),
        ("tde", None),
    ])

    pausecmd = mrccmd("pause", "RUNNING", "RUNNING", [("", None)])

    resumecmd = mrccmd("resume", "RUNNING", "RUNNING", [
        ("tde", tde.ResumeParams(trigger_interval_ticks=interval_ticks)),
    ])

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [("", None)])

    # Serialise the whole command sequence as pretty-printed JSON.
    command_sequence = [initcmd, confcmd, startcmd, stopcmd,
                        pausecmd, resumecmd, scrapcmd]
    return json.dumps([c.pod() for c in command_sequence],
                      indent=4, sort_keys=True)
# ---- Example #10 ----
def generate(
    NETWORK_ENDPOINTS: dict,
    NUMBER_OF_DATA_PRODUCERS: int = 2,
    DATA_RATE_SLOWDOWN_FACTOR: int = 1,
    RUN_NUMBER: int = 333,
    TRIGGER_RATE_HZ: float = 1.0,
    DATA_FILE: str = "./frames.bin",
    OUTPUT_PATH: str = ".",
    TOKEN_COUNT: int = 10,
    CLOCK_SPEED_HZ: int = 50000000,
):
    """
    Generate per-command configuration data for a stand-alone trigger app.

    The app consists of a TriggerDecisionEmulator ("tde") wired to three
    network<->queue adapters: outgoing trigger decisions ("qton_trigdec"),
    incoming trigger-decision tokens ("ntoq_token") and incoming time-sync
    messages ("ntoq_timesync").

    Args:
        NETWORK_ENDPOINTS: mapping of endpoint name -> address; must contain
            the keys 'trigdec', 'triginh' and 'timesync'.
        NUMBER_OF_DATA_PRODUCERS: number of readout links requested per
            trigger (used for both min and max links in a request).
        DATA_RATE_SLOWDOWN_FACTOR: divisor applied to the clock-derived tick
            quantities so triggers are still emitted once per wall-clock
            second in slowed-down runs.
        RUN_NUMBER: run number placed in the start command.
        TRIGGER_RATE_HZ: desired trigger rate in (wall-clock) Hz.
        DATA_FILE: unused here; kept for interface compatibility with the
            other generate() variants in this file.
        OUTPUT_PATH: unused here; kept for interface compatibility.
        TOKEN_COUNT: initial number of trigger tokens given to the TDE.
        CLOCK_SPEED_HZ: hardware clock frequency in Hz.

    Returns:
        dict mapping command name ('init', 'conf', 'start', 'stop', 'pause',
        'resume', 'scrap') to the corresponding command object.

    Raises:
        RuntimeError: if a required endpoint is missing from
            NETWORK_ENDPOINTS.
    """
    cmd_data = {}

    # Fail early if the caller did not provide every endpoint wired up below.
    required_eps = {'trigdec', 'triginh', 'timesync'}
    if not required_eps.issubset(NETWORK_ENDPOINTS):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join(NETWORK_ENDPOINTS.keys())}"
        )

    # Derived parameters (all expressed in hardware clock ticks)
    TRG_INTERVAL_TICKS = math.floor(
        (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR)
    MIN_READOUT_WINDOW_TICKS = math.floor(CLOCK_SPEED_HZ /
                                          (DATA_RATE_SLOWDOWN_FACTOR * 1000))
    # NOTE(review): min and max readout windows use the same divisor (1000),
    # so they are identical — confirm max was not meant to differ.
    MAX_READOUT_WINDOW_TICKS = math.floor(CLOCK_SPEED_HZ /
                                          (DATA_RATE_SLOWDOWN_FACTOR * 1000))
    TRIGGER_WINDOW_OFFSET = math.floor(CLOCK_SPEED_HZ /
                                       (DATA_RATE_SLOWDOWN_FACTOR * 2000))
    # The delay is set to put the trigger well within the latency buff
    TRIGGER_DELAY_TICKS = math.floor(CLOCK_SPEED_HZ /
                                     DATA_RATE_SLOWDOWN_FACTOR)

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_from_netq",
                      kind='FollySPSCQueue',
                      capacity=100),
        app.QueueSpec(inst="token_from_netq",
                      kind='FollySPSCQueue',
                      capacity=20),
        app.QueueSpec(inst="trigger_decision_to_netq",
                      kind='FollySPSCQueue',
                      capacity=20),
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("qton_trigdec", "QueueToNetwork", [
            app.QueueInfo(
                name="input", inst="trigger_decision_to_netq", dir="input")
        ]),
        mspec("ntoq_token", "NetworkToQueue", [
            app.QueueInfo(name="output", inst="token_from_netq", dir="output")
        ]),
        mspec("ntoq_timesync", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst="time_sync_from_netq", dir="output")
        ]),
        mspec("tde", "TriggerDecisionEmulator", [
            app.QueueInfo(name="time_sync_source",
                          inst="time_sync_from_netq",
                          dir="input"),
            app.QueueInfo(
                name="token_source", inst="token_from_netq", dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_to_netq",
                          dir="output"),
        ]),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        ("qton_trigdec",
         qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                   msg_module_name="TriggerDecisionNQ",
                   sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                          address=NETWORK_ENDPOINTS["trigdec"],
                                          stype="msgpack"))),
        ("ntoq_token",
         ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                   msg_module_name="TriggerDecisionTokenNQ",
                   receiver_config=nor.Conf(
                       ipm_plugin_type="ZmqReceiver",
                       address=NETWORK_ENDPOINTS["triginh"]))),
        ("ntoq_timesync",
         ntoq.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                   msg_module_name="TimeSyncNQ",
                   receiver_config=nor.Conf(
                       ipm_plugin_type="ZmqReceiver",
                       address=NETWORK_ENDPOINTS["timesync"]))),
        (
            "tde",
            tde.ConfParams(
                links=[idx for idx in range(NUMBER_OF_DATA_PRODUCERS)],
                min_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                max_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                min_readout_window_ticks=MIN_READOUT_WINDOW_TICKS,
                max_readout_window_ticks=MAX_READOUT_WINDOW_TICKS,
                trigger_window_offset=TRIGGER_WINDOW_OFFSET,
                # The delay is set to put the trigger well within the latency buff
                trigger_delay_ticks=TRIGGER_DELAY_TICKS,
                # We divide the trigger interval by
                # DATA_RATE_SLOWDOWN_FACTOR so the triggers are still
                # emitted per (wall-clock) second, rather than being
                # spaced out further
                trigger_interval_ticks=TRG_INTERVAL_TICKS,
                clock_frequency_hz=CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR,
                initial_token_count=TOKEN_COUNT)),
    ])

    startpars = rccmd.StartParams(run=RUN_NUMBER, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("qton_trigdec", startpars),
        ("ntoq_token", startpars),
        ("ntoq_timesync", startpars),
        ("tde", startpars),
    ])

    cmd_data['stop'] = acmd([
        ("qton_trigdec", None),
        ("ntoq_timesync", None),
        ("ntoq_token", None),
        ("tde", None),
    ])

    cmd_data['pause'] = acmd([("", None)])

    # Resume re-applies the trigger interval so rate changes take effect.
    cmd_data['resume'] = acmd([
        ("tde", tde.ResumeParams(trigger_interval_ticks=TRG_INTERVAL_TICKS))
    ])

    cmd_data['scrap'] = acmd([("", None)])

    return cmd_data
Beispiel #11
0
    def __init__(self,
                 SOFTWARE_TPG_ENABLED: bool = False,
                 RU_CONFIG: list = [],

                 ACTIVITY_PLUGIN: str = 'TriggerActivityMakerPrescalePlugin',
                 ACTIVITY_CONFIG: dict = dict(prescale=10000),

                 CANDIDATE_PLUGIN: str = 'TriggerCandidateMakerPrescalePlugin',
                 CANDIDATE_CONFIG: dict = dict(prescale=10),

                 TOKEN_COUNT: int = 10,
                 SYSTEM_TYPE: str = 'wib',
                 TTCM_S1: int = 1,
                 TTCM_S2: int = 2,
                 TRIGGER_WINDOW_BEFORE_TICKS: int = 1000,
                 TRIGGER_WINDOW_AFTER_TICKS: int = 1000,
                 PARTITION: str = "UNKNOWN",
                 HOST: str = "localhost"
                 ):
        """
        Build the module graph for the trigger application.

        Always creates a TimingTriggerCandidateMaker ("ttcm") and a
        ModuleLevelTrigger ("mlt"). When SOFTWARE_TPG_ENABLED is True, also
        wires up the software TP chain: a request receiver, a TPSet receiver,
        a fragment QueueToNetwork, a TriggerCandidateMaker, and per-readout-
        unit TPSet subscribers, zippers, activity makers and TPSet buffers.

        Args:
            SOFTWARE_TPG_ENABLED: enable the software trigger-primitive chain.
            RU_CONFIG: one dict per readout unit; each entry must provide
                'channel_count', 'region_id' and 'start_channel'.
            ACTIVITY_PLUGIN / ACTIVITY_CONFIG: TriggerActivityMaker plugin
                name and its config dict (schema generated on the fly).
            CANDIDATE_PLUGIN / CANDIDATE_CONFIG: TriggerCandidateMaker plugin
                name and its config dict (schema generated on the fly).
            TOKEN_COUNT: unused here; kept for interface compatibility.
            SYSTEM_TYPE: geoid system type used by the TPSet receiver map.
            TTCM_S1 / TTCM_S2: signal types for the two timing-trigger maps.
            TRIGGER_WINDOW_BEFORE_TICKS / TRIGGER_WINDOW_AFTER_TICKS: readout
                window extent around a timing trigger, in clock ticks.
            PARTITION: partition name used to build connection/topic names.
            HOST: host this app runs on.
        """
        # NOTE(review): RU_CONFIG / ACTIVITY_CONFIG / CANDIDATE_CONFIG are
        # mutable defaults shared across calls — confirm make_moo_record and
        # the Conf constructors never mutate them.

        # Generate schema for the maker plugins on the fly in the temptypes module
        make_moo_record(ACTIVITY_CONFIG , 'ActivityConf' , 'temptypes')
        make_moo_record(CANDIDATE_CONFIG, 'CandidateConf', 'temptypes')
        import temptypes

        modules = []

        if SOFTWARE_TPG_ENABLED:
            # One data-request and one TPSet connection per (RU, channel) pair.
            connections_request_receiver = {}
            connections_tpset_receiver = {}
            for ru in range(len(RU_CONFIG)):
                for idy in range(RU_CONFIG[ru]["channel_count"]):
                    connections_request_receiver[f'output_{ru}_{idy}'] = Connection(f'buf{ru}_{idy}.data_request_q{ru}_{idy}')
                    connections_tpset_receiver  [f'output_{ru}_{idy}'] = Connection(f'buf{ru}_{idy}.tpset_q_for_buf{ru}_{idy}')

            config_request_receiver = rrcv.ConfParams(map = [rrcv.geoidinst(region=RU_CONFIG[ru]["region_id"],
                                                                            element=idy+RU_CONFIG[ru]["start_channel"],
                                                                            system="DataSelection",
                                                                            queueinstance=f"data_request_q{ru}_{idy}")
                                                             for ru in range(len(RU_CONFIG)) for idy in range(RU_CONFIG[ru]["channel_count"])],
                                                      general_queue_timeout = 100,
                                                      connection_name = f"{PARTITION}.ds_tp_datareq_0")

            config_tpset_receiver = tpsrcv.ConfParams(map = [tpsrcv.geoidinst(region=RU_CONFIG[ru]["region_id"],
                                                                              element=idy+RU_CONFIG[ru]["start_channel"],
                                                                              system=SYSTEM_TYPE,
                                                                              queueinstance=f"tpset_q_for_buf{ru}_{idy}")
                                                             for ru in range(len(RU_CONFIG)) for idy in range(RU_CONFIG[ru]["channel_count"])],
                                                      general_queue_timeout = 100,
                                                      topic = "TPSets")

            config_qton_fragment = qton.Conf(msg_type="std::unique_ptr<dunedaq::daqdataformats::Fragment>",
                                             msg_module_name="FragmentNQ",
                                             sender_config=nos.Conf(name=f"{PARTITION}.frags_tpset_ds_0",stype="msgpack"))

            config_tcm =  tcm.Conf(candidate_maker=CANDIDATE_PLUGIN,
                                   candidate_maker_config=temptypes.CandidateConf(**CANDIDATE_CONFIG))

            modules += [DAQModule(name = 'request_receiver',
                               plugin = 'RequestReceiver',
                               connections = connections_request_receiver,
                               conf = config_request_receiver),

                        DAQModule(name = 'tpset_receiver',
                               plugin = 'TPSetReceiver',
                               connections = connections_tpset_receiver,
                               conf = config_tpset_receiver),

                        DAQModule(name = 'qton_fragments',
                               plugin = 'QueueToNetwork',
                               connections = {}, # all the incoming links in TPSetBufferCreators
                               conf = config_qton_fragment),

                        DAQModule(name = 'tcm',
                               plugin = 'TriggerCandidateMaker',
                               connections = {#'input' : Connection(f'tcm.taset_q'),
                                   'output': Connection(f'mlt.trigger_candidate_q')},
                               conf = config_tcm)]

            # Per-RU chain: subscriber -> zipper -> activity maker, plus one
            # TPSet buffer per channel.
            for ru in range(len(RU_CONFIG)):

                modules += [DAQModule(name = f'tpset_subscriber_{ru}',
                                   plugin = 'NetworkToQueue',
                                   connections = {'output': Connection(f'zip_{ru}.tpsets_from_netq_{ru}')},
                                   conf = ntoq.Conf(msg_type="dunedaq::trigger::TPSet",
                                                    msg_module_name="TPSetNQ",
                                                    receiver_config=nor.Conf(name=f'{PARTITION}.tpsets_{ru}',
                                                                             subscriptions=["TPSets"]))),

                            DAQModule(name = f'zip_{ru}',
                                   plugin = 'TPZipper',
                                   connections = {# 'input' are App.network_endpoints, from RU
                                       'output': Connection(f'tam_{ru}.input')},
                                   conf = tzip.ConfParams(cardinality=RU_CONFIG[ru]['channel_count'],
                                                          max_latency_ms=1000,
                                                          region_id=0,
                                                          element_id=0)),

                            DAQModule(name = f'tam_{ru}',
                                   plugin = 'TriggerActivityMaker',
                                   connections = {'output': Connection('tcm.taset_q')},
                                   conf = tam.Conf(activity_maker=ACTIVITY_PLUGIN,
                                                   geoid_region=0,  # Fake placeholder
                                                   geoid_element=0,  # Fake placeholder
                                                   window_time=10000,  # should match whatever makes TPSets, in principle
                                                   buffer_time=625000,  # 10ms in 62.5 MHz ticks
                                                   activity_maker_config=temptypes.ActivityConf(**ACTIVITY_CONFIG)))]

                for idy in range(RU_CONFIG[ru]["channel_count"]):
                    modules += [DAQModule(name = f'buf{ru}_{idy}',
                                       plugin = 'TPSetBufferCreator',
                                       connections = {#'tpset_source': Connection(f"tpset_q_for_buf{ru}_{idy}"),#already in request_receiver
                                                      #'data_request_source': Connection(f"data_request_q{ru}_{idy}"), #ditto
                                                      'fragment_sink': Connection('qton_fragments.fragment_q')},
                                       conf = buf.Conf(tpset_buffer_size=10000, region=RU_CONFIG[ru]["region_id"], element=idy + RU_CONFIG[ru]["start_channel"]))]

        modules += [DAQModule(name = 'ttcm',
                           plugin = 'TimingTriggerCandidateMaker',
                           connections={"output": Connection("mlt.trigger_candidate_q")},
                           conf=ttcm.Conf(s1=ttcm.map_t(signal_type=TTCM_S1,
                                                        time_before=TRIGGER_WINDOW_BEFORE_TICKS,
                                                        time_after=TRIGGER_WINDOW_AFTER_TICKS),
                                          s2=ttcm.map_t(signal_type=TTCM_S2,
                                                        time_before=TRIGGER_WINDOW_BEFORE_TICKS,
                                                        time_after=TRIGGER_WINDOW_AFTER_TICKS),
                                          hsievent_connection_name = PARTITION+".hsievent"))]

        # We need to populate the list of links based on the fragment
        # producers available in the system. This is a bit of a
        # chicken-and-egg problem, because the trigger app itself creates
        # fragment producers (see below). Eventually when the MLT is its
        # own process, this problem will probably go away, but for now, we
        # leave the list of links here blank, and replace it in
        # util.connect_fragment_producers
        modules += [DAQModule(name = 'mlt',
                              plugin = 'ModuleLevelTrigger',
                              conf=mlt.ConfParams(links=[]))] # To be updated later - see comment above

        mgraph = ModuleGraph(modules)
        mgraph.add_endpoint("hsievents",  "ttcm.input", Direction.IN)
        if SOFTWARE_TPG_ENABLED:
            for idx in range(len(RU_CONFIG)):
                mgraph.add_endpoint(f"tpsets_into_chain_link{idx}", f"tpset_receiver.input", Direction.IN)
                mgraph.add_endpoint(f"tpsets_into_buffer_link{idx}", f"tpset_subscriber_{idx}.tpset_source", Direction.IN)

                mgraph.add_fragment_producer(region=0, element=idx, system="DataSelection",
                                             requests_in=f"request_receiver.data_request_source",
                                             fragments_out=f"qton_fragments.fragment_sink")


        mgraph.add_endpoint("trigger_decisions", "mlt.trigger_decision_sink", Direction.OUT)
        mgraph.add_endpoint("tokens", "mlt.token_source", Direction.IN)

        super().__init__(modulegraph=mgraph, host=HOST, name='TriggerApp')
        # Dump the module graph for debugging/inspection.
        self.export("trigger_app.dot")