Example no. 1
def generate(INPUT_FILE: str, SLOWDOWN_FACTOR: float, NETWORK_ENDPOINTS: dict):
    cmd_data = {}

    # Derived parameters
    CLOCK_FREQUENCY_HZ = 50000000 / SLOWDOWN_FACTOR

    # Define modules and queues
    queue_specs = [
        app.QueueSpec(inst="tpset_q", kind='FollySPSCQueue', capacity=1000)
    ]

    mod_specs = [
        mspec(
            "tpm", "TriggerPrimitiveMaker",
            [app.QueueInfo(name="tpset_sink", inst="tpset_q", dir="output")]),
        mspec("qton", "QueueToNetwork",
              [app.QueueInfo(name="input", inst="tpset_q", dir="input")])
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        (
            "tpm",
            tpm.ConfParams(
                filename=INPUT_FILE,
                number_of_loops=-1,  # Infinite
                tpset_time_offset=0,
                tpset_time_width=10000,  # 0.2 ms
                clock_frequency_hz=CLOCK_FREQUENCY_HZ)),
        ("qton",
         qton.Conf(msg_type="dunedaq::trigger::TPSet",
                   msg_module_name="TPSetNQ",
                   sender_config=nos.Conf(ipm_plugin_type="ZmqPublisher",
                                          address=NETWORK_ENDPOINTS["tpset"],
                                          topic="foo",
                                          stype="msgpack")))
    ])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("qton", startpars),
        ("tpm", startpars),
    ])

    cmd_data['pause'] = acmd([])

    cmd_data['resume'] = acmd([])

    cmd_data['stop'] = acmd([
        ("tpm", None),
        ("qton", None),
    ])

    cmd_data['scrap'] = acmd([
        #     ("tpm", None),
    ])

    return cmd_data
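
For reference, a minimal, hypothetical driver for this publisher-side generator is sketched below. The endpoint address and file name are assumptions, the dunedaq modules imported above (app, tpm, qton, nos, rccmd) must resolve, and the command objects are assumed to expose .pod() as in Example no. 3.

if __name__ == "__main__":
    import json
    endpoints = {"tpset": "tcp://127.0.0.1:12345"}  # assumed address
    cmd_data = generate(INPUT_FILE="tp_frames.txt",  # assumed input file
                        SLOWDOWN_FACTOR=10.0,  # 50 MHz -> 5 MHz effective clock
                        NETWORK_ENDPOINTS=endpoints)
    for cmd, payload in cmd_data.items():
        print(cmd, json.dumps(payload.pod(), indent=4, sort_keys=True))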
Example no. 2
def generate(NETWORK_ENDPOINTS: dict):
    cmd_data = {}

    # Define modules and queues
    queue_specs = [
        app.QueueSpec(inst="tpset_q", kind='FollySPSCQueue', capacity=10000)
    ]

    mod_specs = [
        mspec(
            "tps_sink", "TPSetSink",
            [app.QueueInfo(name="tpset_source", inst="tpset_q", dir="input")]),
        mspec("ntoq", "NetworkToQueue",
              [app.QueueInfo(name="output", inst="tpset_q", dir="output")])
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([(
        "ntoq",
        ntoq.Conf(
            msg_type="dunedaq::trigger::TPSet",
            msg_module_name="TPSetNQ",
            receiver_config=nor.Conf(
                ipm_plugin_type="ZmqSubscriber",
                address=NETWORK_ENDPOINTS["tpset"],
                subscriptions=[
                    "foo"
                ])  # "foo" matches the publisher's topic; an empty list would subscribe to everything
        ))])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("ntoq", startpars),
        ("tps_sink", startpars),
    ])

    cmd_data['pause'] = acmd([])

    cmd_data['resume'] = acmd([])

    cmd_data['stop'] = acmd([
        ("tps_sink", None),
        ("ntoq", None),
    ])

    cmd_data['scrap'] = acmd([
        #     ("tpm", None),
    ])

    return cmd_data
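
This subscriber-side generator is the counterpart of Example no. 1: both must be fed the same "tpset" endpoint so the ZmqSubscriber connects where the ZmqPublisher binds. A minimal sketch, with an assumed address:

endpoints = {"tpset": "tcp://127.0.0.1:12345"}  # assumed address, shared with the publisher app
sub_cmd_data = generate(NETWORK_ENDPOINTS=endpoints)
# The publisher app of Example no. 1 would be generated with the same dict.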
Example no. 3
def generate(RUN_NUMBER=333):

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="hsievent_q", kind='FollyMPMCQueue', capacity=1000),
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("ftsdgen", "FakeTimeStampedDataGenerator", [
            app.QueueInfo(
                name="hsievent_sink", inst="hsievent_q", dir="output"),
        ]),
    ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rc.RCCommand(id=bcmd.CmdId("init"),
                           entry_state=rc.State("NONE"),
                           exit_state=rc.State("INITIAL"),
                           data=init_specs)

    confcmd = mrccmd(
        "conf", "INITIAL", "CONFIGURED",
        [("ftsdgen", ftsdg.Conf(sleep_time=1000000000, frequency=50000000))])

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    startpars = rc.StartParams(run=RUN_NUMBER)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
        (".*", startpars),
    ])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
        (".*", None),
    ])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStop\n\n", jstr)

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [(".*", None)])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nScrap\n\n", jstr)

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
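
Unlike the generators that return a dict of command objects, this one returns the serialized JSON directly, so a hypothetical driver only needs to write the string out (the file name below is an assumption):

if __name__ == "__main__":
    with open("fake_timestamped_data_gen_app.json", "w") as f:
        f.write(generate(RUN_NUMBER=42))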
Example no. 4
def make_app_command_data(system, app, verbose=False):
    """Given an App instance, create the 'command data' suitable for
    feeding to nanorc. The needed queues are inferred from from
    connections between modules, as are the start and stop order of the
    modules

    TODO: This should probably be split up into separate stages of
    inferring/creating the queues (which can be part of validation)
    and actually making the command data objects for nanorc.

    """

    if verbose:
        console.log(f"Making app command data for {app.name}")

    modules = app.modulegraph.modules

    module_deps = make_module_deps(modules)
    if verbose:
        console.log(f"inter-module dependencies are: {module_deps}")

    stop_order = list(nx.algorithms.dag.topological_sort(module_deps))
    start_order = stop_order[::-1]

    if verbose:
        console.log(f"Inferred module start order is {start_order}")
        console.log(f"Inferred module stop order is {stop_order}")

    command_data = {}

    queue_specs = []

    app_qinfos = defaultdict(list)

    # Infer the queues we need based on the connections between modules

    # Terminology: an "endpoint" is "module.name"
    for mod in modules:
        name = mod.name
        for from_name, downstream_connection in mod.connections.items():
            # The name might be prefixed with a "!" to indicate that it
            # doesn't participate in dependencies. Remove it here, because
            # "!" is illegal in actual queue names.
            from_name = from_name.replace("!", "")
            from_endpoint = ".".join([name, from_name])
            to_endpoint = downstream_connection.to
            if verbose:
                console.log(
                    f"Making connection from {from_endpoint} to {to_endpoint}")
            if to_endpoint is None:
                continue
            to_mod, to_name = to_endpoint.split(".")
            queue_inst = f"{from_endpoint}_to_{to_endpoint}".replace(".", "")
            # Is there already a queue connecting either endpoint? If so, we reuse it

            # TODO: This is a bit complicated. Might be nicer to find
            # the list of necessary queues in a first step, and then
            # actually make the QueueSpec/QueueInfo objects
            found_from = False
            found_to = False
            for k, v in app_qinfos.items():
                for qi in v:
                    test_endpoint = ".".join([k, qi.name])
                    if test_endpoint == from_endpoint:
                        found_from = True
                        queue_inst = qi.inst
                    if test_endpoint == to_endpoint:
                        found_to = True
                        queue_inst = qi.inst

            if not (found_from or found_to):
                if downstream_connection.queue_name is not None:
                    queue_inst = downstream_connection.queue_name
                if verbose:
                    console.log(
                        f"downstream_connection is {downstream_connection}, its queue_name is {downstream_connection.queue_name}"
                    )
                    console.log(
                        f"Creating {downstream_connection.queue_kind}({downstream_connection.queue_capacity}) queue with name {queue_inst} connecting {from_endpoint} to {to_endpoint}"
                    )
                queue_specs.append(
                    appfwk.QueueSpec(
                        inst=queue_inst,
                        kind=downstream_connection.queue_kind,
                        capacity=downstream_connection.queue_capacity))

            if not found_from:
                if verbose:
                    console.log(
                        f"Adding output queue to module {name}: inst={queue_inst}, name={from_name}"
                    )
                app_qinfos[name].append(
                    appfwk.QueueInfo(name=from_name,
                                     inst=queue_inst,
                                     dir="output"))
            if not found_to:
                if verbose:
                    console.log(
                        f"Adding input queue to module {to_mod}: inst={queue_inst}, name={to_name}"
                    )
                app_qinfos[to_mod].append(
                    appfwk.QueueInfo(name=to_name,
                                     inst=queue_inst,
                                     dir="input"))

    if verbose:
        console.log(
            f"Creating mod_specs for {[ (mod.name, mod.plugin) for mod in modules ]}"
        )
    mod_specs = [
        mspec(mod.name, mod.plugin, app_qinfos[mod.name]) for mod in modules
    ]

    # Fill in the "standard" command entries in the command_data structure

    command_data['init'] = appfwk.Init(queues=queue_specs,
                                       modules=mod_specs,
                                       nwconnections=system.network_endpoints)

    # TODO: Conf ordering
    command_data['conf'] = acmd([(mod.name, mod.conf) for mod in modules])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    resumepars = rccmd.ResumeParams()

    add_one_command_data(command_data, "start", startpars, app, start_order)
    add_one_command_data(command_data, "stop", None, app, stop_order)
    add_one_command_data(command_data, "scrap", None, app, stop_order)
    add_one_command_data(command_data, "resume", resumepars, app, start_order)
    add_one_command_data(command_data, "pause", None, app, stop_order)

    # TODO: handle modules' `extra_commands`, including "record"

    return command_data
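
The start/stop ordering above hinges on a topological sort of the inter-module dependency graph. The self-contained sketch below illustrates the idea with made-up module names, assuming (as the code suggests) that make_module_deps points edges from upstream to downstream:

import networkx as nx

g = nx.DiGraph()
g.add_edge("reader", "handler")  # reader feeds handler
g.add_edge("handler", "writer")  # handler feeds writer

stop_order = list(nx.algorithms.dag.topological_sort(g))
start_order = stop_order[::-1]
print(start_order)  # ['writer', 'handler', 'reader']: downstream modules start first
print(stop_order)   # ['reader', 'handler', 'writer']: upstream modules stop first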
Example no. 5
def generate(NW_SPECS: list,
             RUN_NUMBER=333,
             CLOCK_SPEED_HZ: int = 50000000,
             TRIGGER_RATE_HZ: int = 1,
             CONTROL_HSI_HARDWARE=False,
             READOUT_PERIOD_US: int = 1000,
             HSI_ENDPOINT_ADDRESS=1,
             HSI_ENDPOINT_PARTITION=0,
             HSI_RE_MASK=0x20000,
             HSI_FE_MASK=0,
             HSI_INV_MASK=0,
             HSI_SOURCE=1,
             CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
             HSI_DEVICE_NAME="BOREAS_TLU",
             UHAL_LOG_LEVEL="notice",
             PARTITION="UNKNOWN"):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = {PARTITION + '.hsievent'}
    if CONTROL_HSI_HARDWARE:
        required_eps.add(PARTITION + '.hsicmds')

    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = []

    if CONTROL_HSI_HARDWARE:
        queue_bare_specs.extend([
            app.QueueSpec(inst="hw_cmds_q_to_net",
                          kind='FollySPSCQueue',
                          capacity=100)
        ])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("hsir", "HSIReadout", []),
    ]

    if CONTROL_HSI_HARDWARE:
        hsi_controller_init_data = hsic.InitParams(
            qinfos=app.QueueInfos([
                app.QueueInfo(name="hardware_commands_out",
                              inst="hw_cmds_q_to_net",
                              dir="output")
            ]),
            device=HSI_DEVICE_NAME,
        )
        mod_specs.extend([
            mspec("qton_hw_cmds", "QueueToNetwork", [
                app.QueueInfo(
                    name="input", inst="hw_cmds_q_to_net", dir="input")
            ]),
            app.ModSpec(inst="hsic",
                        plugin="HSIController",
                        data=hsi_controller_init_data)
        ])

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    conf_cmds = [
        ("hsir",
         hsi.ConfParams(
             connections_file=CONNECTIONS_FILE,
             readout_period=READOUT_PERIOD_US,
             hsi_device_name=HSI_DEVICE_NAME,
             uhal_log_level=UHAL_LOG_LEVEL,
             hsievent_connection_name=f"{PARTITION}.hsievent",
         )),
    ]

    trigger_interval_ticks = 0
    if TRIGGER_RATE_HZ > 0:
        trigger_interval_ticks = math.floor(
            (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ)
    elif CONTROL_HSI_HARDWARE:
        console.log(
            'WARNING! An emulated trigger rate of 0 will not disable signal emulation in real HSI hardware! To disable emulated HSI triggers, use the option "--hsi-source 0" or mask all signal bits.',
            style="bold red")

    if CONTROL_HSI_HARDWARE:
        conf_cmds.extend([
            ("qton_hw_cmds",
             qton.Conf(msg_type="dunedaq::timinglibs::timingcmd::TimingHwCmd",
                       msg_module_name="TimingHwCmdNQ",
                       sender_config=nos.Conf(name=PARTITION + ".hsicmds",
                                              stype="msgpack"))),
            ("hsic",
             hsic.ConfParams(
                 clock_frequency=CLOCK_SPEED_HZ,
                 trigger_interval_ticks=trigger_interval_ticks,
                 address=HSI_ENDPOINT_ADDRESS,
                 partition=HSI_ENDPOINT_PARTITION,
                 rising_edge_mask=HSI_RE_MASK,
                 falling_edge_mask=HSI_FE_MASK,
                 invert_edge_mask=HSI_INV_MASK,
                 data_source=HSI_SOURCE,
             )),
        ])
    cmd_data['conf'] = acmd(conf_cmds)

    startpars = rccmd.StartParams(
        run=RUN_NUMBER, trigger_interval_ticks=trigger_interval_ticks)
    resumepars = rccmd.ResumeParams(
        trigger_interval_ticks=trigger_interval_ticks)

    cmd_data['start'] = acmd([("hsi.*", startpars), ("qton_.*", startpars)])

    cmd_data['stop'] = acmd([("hsi.*", None), ("qton.*", None)])

    cmd_data['pause'] = acmd([("", None)])

    if CONTROL_HSI_HARDWARE:
        cmd_data['resume'] = acmd([("hsic", resumepars)])
    else:
        cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
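
The endpoint validation at the top of this generator can be exercised in isolation; for that check the NW spec objects only need a .name attribute (the partition and endpoint names below are made up):

from types import SimpleNamespace

nw_specs = [SimpleNamespace(name="mypart.hsievent"),
            SimpleNamespace(name="mypart.hsicmds")]
required_eps = {"mypart.hsievent", "mypart.hsicmds"}
assert required_eps.issubset([nw.name for nw in nw_specs])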
Example no. 6
def generate(OUTPUT_PATH: str, INPUT_FILES: list, SLOWDOWN_FACTOR: float):
    cmd_data = {}

    # Derived parameters
    CLOCK_FREQUENCY_HZ = 50000000 / SLOWDOWN_FACTOR

    # Define modules and queues
    queue_specs = [
        app.QueueSpec(inst=f"tpset_q{i}", kind='FollySPSCQueue', capacity=1000)
        for i in range(len(INPUT_FILES))
    ] + [
        app.QueueSpec(
            inst="tpset_plus_hb_q", kind='FollyMPMCQueue', capacity=1000),
        app.QueueSpec(
            inst="zipped_tpset_q", kind='FollyMPMCQueue', capacity=1000),
    ]

    mod_specs = [
        mspec(f"tpm{i}", "TriggerPrimitiveMaker", [
            app.QueueInfo(name="tpset_sink", inst=f"tpset_q{i}", dir="output"),
        ]) for i in range(len(INPUT_FILES))
    ] + [
        mspec(f"ftpchm{i}", "FakeTPCreatorHeartbeatMaker", [
            app.QueueInfo(name="tpset_source", inst=f"tpset_q{i}",
                          dir="input"),
            app.QueueInfo(
                name="tpset_sink", inst="tpset_plus_hb_q", dir="output"),
        ]) for i in range(len(INPUT_FILES))
    ] + [
        mspec("zip", "TPZipper", [
            app.QueueInfo(name="input", inst="tpset_plus_hb_q", dir="input"),
            app.QueueInfo(name="output", inst="zipped_tpset_q", dir="output"),
        ]),
        mspec("tps_sink", "TPSetSink", [
            app.QueueInfo(
                name="tpset_source", inst="zipped_tpset_q", dir="input"),
        ]),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        (
            f"tpm{i}",
            tpm.ConfParams(
                filename=input_file,
                number_of_loops=-1,  # Infinite
                tpset_time_offset=0,
                tpset_time_width=10000,
                clock_frequency_hz=CLOCK_FREQUENCY_HZ,
                maximum_wait_time_us=1000,
                region_id=0,
                element_id=i,
            )) for i, input_file in enumerate(INPUT_FILES)
    ] + [(f"ftpchm{i}", ftpchm.Conf(heartbeat_interval=50000))
         for i in range(len(INPUT_FILES))] +
                            [("zip",
                              tzip.ConfParams(cardinality=len(INPUT_FILES),
                                              max_latency_ms=1000,
                                              region_id=0,
                                              element_id=0))])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("zip", startpars),
        ("tps_sink", startpars),
    ] + [(f'ftpchm{i}', startpars) for i in range(len(INPUT_FILES))] +
                             [(f'tpm{i}', startpars)
                              for i in range(len(INPUT_FILES))])

    cmd_data['pause'] = acmd([])

    cmd_data['resume'] = acmd([])

    cmd_data['stop'] = acmd([(f'tpm{i}', None)
                             for i in range(len(INPUT_FILES))] +
                            [(f'ftpchm{i}', None)
                             for i in range(len(INPUT_FILES))] + [
                                 ("zip", None),
                                 ("tps_sink", None),
                             ])

    cmd_data['scrap'] = acmd([(f'ftpchm{i}', None)
                              for i in range(len(INPUT_FILES))] +
                             [(f'tpm{i}', None)
                              for i in range(len(INPUT_FILES))] +
                             [("zip", None)])

    return cmd_data
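
A hypothetical invocation of this generator; each input file gets its own tpm/ftpchm pair, all funnelled through the single zip module into tps_sink (the file names below are assumptions, and OUTPUT_PATH is accepted but unused by this generator):

cmd_data = generate(OUTPUT_PATH=".",
                    INPUT_FILES=["tp0.txt", "tp1.txt"],
                    SLOWDOWN_FACTOR=10.0)
# Two files -> modules tpm0, tpm1, ftpchm0, ftpchm1, plus zip and tps_sink.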
Example no. 7
def generate(NW_SPECS: list,
             SOFTWARE_TPG_ENABLED: bool = False,
             RU_CONFIG: list = [],
             ACTIVITY_PLUGIN: str = 'TriggerActivityMakerPrescalePlugin',
             ACTIVITY_CONFIG: dict = dict(prescale=10000),
             CANDIDATE_PLUGIN: str = 'TriggerCandidateMakerPrescalePlugin',
             CANDIDATE_CONFIG: dict = dict(prescale=10),
             TOKEN_COUNT: int = 10,
             DF_COUNT: int = 1,
             SYSTEM_TYPE='wib',
             TTCM_S1: int = 1,
             TTCM_S2: int = 2,
             TRIGGER_WINDOW_BEFORE_TICKS: int = 1000,
             TRIGGER_WINDOW_AFTER_TICKS: int = 1000,
             PARTITION="UNKNOWN"):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = {PARTITION + '.hsievent'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst='trigger_candidate_q',
                      kind='FollyMPMCQueue',
                      capacity=1000),
        app.QueueSpec(inst='trigger_decision_q',
                      kind='FollySPSCQueue',
                      capacity=2)
    ]

    if SOFTWARE_TPG_ENABLED:
        queue_bare_specs.extend([
            app.QueueSpec(inst=f"fragment_q",
                          kind='FollyMPMCQueue',
                          capacity=1000),
            app.QueueSpec(inst=f'taset_q',
                          kind='FollyMPMCQueue',
                          capacity=1000),
        ])
        for ru in range(len(RU_CONFIG)):
            queue_bare_specs.extend([
                app.QueueSpec(inst=f"tpsets_from_netq_{ru}",
                              kind='FollySPSCQueue',
                              capacity=1000),
                app.QueueSpec(inst=f'zipped_tpset_q_{ru}',
                              kind='FollySPSCQueue',
                              capacity=1000),
            ])
            for idx in range(RU_CONFIG[ru]["channel_count"]):
                queue_bare_specs.extend([
                    app.QueueSpec(inst=f"tpset_q_for_buf{ru}_{idx}",
                                  kind='FollySPSCQueue',
                                  capacity=1000),
                    app.QueueSpec(inst=f"data_request_q{ru}_{idx}",
                                  kind='FollySPSCQueue',
                                  capacity=1000),
                ])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = []

    if SOFTWARE_TPG_ENABLED:
        mod_specs.extend([
            mspec(f"request_receiver", "RequestReceiver", [
                app.QueueInfo(name="output",
                              inst=f"data_request_q{ru}_{idy}",
                              dir="output") for ru in range(len(RU_CONFIG))
                for idy in range(RU_CONFIG[ru]["channel_count"])
            ])
        ] + [
            mspec(f"tpset_receiver", "TPSetReceiver", [
                app.QueueInfo(name="output",
                              inst=f"tpset_q_for_buf{ru}_{idy}",
                              dir="output") for ru in range(len(RU_CONFIG))
                for idy in range(RU_CONFIG[ru]["channel_count"])
            ])
        ] + [
            mspec(f"fragment_sender", "FragmentSender", [
                app.QueueInfo(
                    name="input_queue", inst=f"fragment_q", dir="input")
            ]),
            mspec(
                'tcm',
                'TriggerCandidateMaker',
                [  # TASet -> TC
                    app.QueueInfo(name='input', inst='taset_q', dir='input'),
                    app.QueueInfo(name='output',
                                  inst='trigger_candidate_q',
                                  dir='output'),
                ])
        ])
        for ru in range(len(RU_CONFIG)):
            mod_specs.extend([
                mspec(f"tpset_subscriber_{ru}", "NetworkToQueue", [
                    app.QueueInfo(name="output",
                                  inst=f"tpsets_from_netq_{ru}",
                                  dir="output")
                ]),
                mspec(
                    f"zip_{ru}",
                    "TPZipper",
                    [
                        app.QueueInfo(name="input",
                                      inst=f"tpsets_from_netq_{ru}",
                                      dir="input"),
                        app.QueueInfo(
                            name="output",
                            inst=f"zipped_tpset_q_{ru}",
                            dir="output"
                        ),  #FIXME need to fanout this zipped_tpset_q if using multiple algorithms
                    ]),

                ### Algorithm(s)
                mspec(
                    f'tam_{ru}',
                    'TriggerActivityMaker',
                    [  # TPSet -> TASet
                        app.QueueInfo(name='input',
                                      inst=f'zipped_tpset_q_{ru}',
                                      dir='input'),
                        app.QueueInfo(
                            name='output', inst='taset_q', dir='output'),
                    ]),
            ])
            for idy in range(RU_CONFIG[ru]["channel_count"]):
                mod_specs.extend([
                    mspec(f"buf{ru}_{idy}", "TPSetBufferCreator", [
                        app.QueueInfo(name="tpset_source",
                                      inst=f"tpset_q_for_buf{ru}_{idy}",
                                      dir="input"),
                        app.QueueInfo(name="data_request_source",
                                      inst=f"data_request_q{ru}_{idy}",
                                      dir="input"),
                        app.QueueInfo(name="fragment_sink",
                                      inst=f"fragment_q",
                                      dir="output"),
                    ])
                ])

    mod_specs += ([

        ### Timing TCs
        mspec("ttcm", "TimingTriggerCandidateMaker", [
            app.QueueInfo(
                name="output", inst="trigger_candidate_q", dir="output"),
        ]),

        ### Module level trigger
        mspec("mlt", "ModuleLevelTrigger", [
            app.QueueInfo(name="trigger_candidate_source",
                          inst="trigger_candidate_q",
                          dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_q",
                          dir="output"),
        ]),

        ### DFO
        mspec("dfo", "DataFlowOrchestrator", [
            app.QueueInfo(name="trigger_decision_queue",
                          inst="trigger_decision_q",
                          dir="input"),
        ]),
    ])

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    # Generate schema for the maker plugins on the fly in the temptypes module
    make_moo_record(ACTIVITY_CONFIG, 'ActivityConf', 'temptypes')
    make_moo_record(CANDIDATE_CONFIG, 'CandidateConf', 'temptypes')
    import temptypes

    tp_confs = []

    if SOFTWARE_TPG_ENABLED:
        tp_confs.extend([
            ("request_receiver",
             rrcv.ConfParams(map=[
                 rrcv.geoidinst(region=RU_CONFIG[ru]["region_id"],
                                element=idy + RU_CONFIG[ru]["start_channel"],
                                system="DataSelection",
                                queueinstance=f"data_request_q{ru}_{idy}")
                 for ru in range(len(RU_CONFIG))
                 for idy in range(RU_CONFIG[ru]["channel_count"])
             ],
                             general_queue_timeout=100,
                             connection_name=f"{PARTITION}.ds_tp_datareq_0")),
            ("tpset_receiver",
             tpsrcv.ConfParams(map=[
                 tpsrcv.geoidinst(region=RU_CONFIG[ru]["region_id"],
                                  element=idy + RU_CONFIG[ru]["start_channel"],
                                  system=SYSTEM_TYPE,
                                  queueinstance=f"tpset_q_for_buf{ru}_{idy}")
                 for ru in range(len(RU_CONFIG))
                 for idy in range(RU_CONFIG[ru]["channel_count"])
             ],
                               general_queue_timeout=100,
                               topic=f"TPSets")),
            (f"fragment_sender", None),
            (f'tcm',
             tcm.Conf(candidate_maker=CANDIDATE_PLUGIN,
                      candidate_maker_config=temptypes.CandidateConf(
                          **CANDIDATE_CONFIG))),
        ])
        for idx in range(len(RU_CONFIG)):
            tp_confs.extend([
                (f"tpset_subscriber_{idx}",
                 ntoq.Conf(msg_type="dunedaq::trigger::TPSet",
                           msg_module_name="TPSetNQ",
                           receiver_config=nor.Conf(
                               name=f'{PARTITION}.tpsets_{idx}',
                               subscriptions=["TPSets"]))),
                (
                    f"zip_{idx}",
                    tzip.ConfParams(
                        cardinality=RU_CONFIG[idx]["channel_count"],
                        max_latency_ms=1000,
                        region_id=0,  # Fake placeholder
                        element_id=0  # Fake placeholder
                    )),

                ### Algorithms
                (
                    f'tam_{idx}',
                    tam.Conf(
                        activity_maker=ACTIVITY_PLUGIN,
                        geoid_region=0,  # Fake placeholder
                        geoid_element=0,  # Fake placeholder
                        # should match whatever makes TPSets, in principle
                        window_time=10000,
                        buffer_time=625000,  # 10ms in 62.5 MHz ticks
                        activity_maker_config=temptypes.ActivityConf(
                            **ACTIVITY_CONFIG))),
            ])
            for idy in range(RU_CONFIG[idx]["channel_count"]):
                tp_confs.extend([
                    (f"buf{idx}_{idy}",
                     buf.Conf(tpset_buffer_size=10000,
                              region=RU_CONFIG[idx]["region_id"],
                              element=idy + RU_CONFIG[idx]["start_channel"]))
                ])

    total_link_count = 0
    for ru in range(len(RU_CONFIG)):
        total_link_count += RU_CONFIG[ru]["channel_count"]

    cmd_data['conf'] = acmd(tp_confs + [

        ### Timing TCs
        ("ttcm",
         ttcm.Conf(
             s1=ttcm.map_t(signal_type=TTCM_S1,
                           time_before=TRIGGER_WINDOW_BEFORE_TICKS,
                           time_after=TRIGGER_WINDOW_AFTER_TICKS),
             s2=ttcm.map_t(signal_type=TTCM_S2,
                           time_before=TRIGGER_WINDOW_BEFORE_TICKS,
                           time_after=TRIGGER_WINDOW_AFTER_TICKS),
             hsievent_connection_name=PARTITION + ".hsievent",
         )),

        # Module level trigger
        (
            "mlt",
            mlt.ConfParams(
                # This line requests the raw data from upstream DAQ _and_ the raw TPs from upstream DAQ
                links=[
                    mlt.GeoID(system=SYSTEM_TYPE,
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx)
                    for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] + ([
                    mlt.GeoID(system="DataSelection",
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx)
                    for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] if SOFTWARE_TPG_ENABLED else []) + ([
                    mlt.GeoID(system=SYSTEM_TYPE,
                              region=RU_CONFIG[ru]["region_id"],
                              element=RU_CONFIG[ru]["start_channel"] + idx +
                              total_link_count) for ru in range(len(RU_CONFIG))
                    for idx in range(RU_CONFIG[ru]["channel_count"])
                ] if SOFTWARE_TPG_ENABLED else []), )),
        ("dfo",
         dfo.ConfParams(
             token_connection=PARTITION + ".triginh",
             dataflow_applications=[
                 dfo.app_config(
                     decision_connection=f"{PARTITION}.trigdec_{dfidx}",
                     capacity=TOKEN_COUNT) for dfidx in range(DF_COUNT)
             ])),
    ])

    # We start modules in "downstream-to-upstream" order, so that each
    # module is ready before its input starts sending data. The stop
    # order is the reverse (upstream-to-downstream), so each module
    # can process all of its input then stop, ensuring all data gets
    # processed
    start_order = ["buf.*", "dfo", "mlt", "ttcm", "ntoq_token"]

    if SOFTWARE_TPG_ENABLED:
        start_order += [
            "fragment_sender", "tcm", "tam_.*", "zip_.*",
            "tpset_subscriber_.*", "tpset_receiver", "request_receiver"
        ]

    stop_order = start_order[::-1]

    startpars = rccmd.StartParams(run=1)
    cmd_data['start'] = acmd([(m, startpars) for m in start_order])
    cmd_data['stop'] = acmd([(m, None) for m in stop_order])

    cmd_data['pause'] = acmd([("mlt", None)])

    resumepars = rccmd.ResumeParams(trigger_interval_ticks=50000000)
    cmd_data['resume'] = acmd([("mlt", resumepars)])

    cmd_data['scrap'] = acmd([("dfo", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
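
The (pattern, params) pairs passed to acmd for start and stop are regular expressions matched against module instance names, which is how "hsi.*" or "buf.*" address whole families of modules at once. An illustrative stand-in using Python's re module (the exact matching semantics inside acmd are an assumption):

import re

modules = ["buf0_0", "buf0_1", "dfo", "mlt", "ttcm", "tam_0", "zip_0"]
matched = [m for m in modules if re.fullmatch("buf.*", m)]
print(matched)  # ['buf0_0', 'buf0_1']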
Example no. 8
def generate(
        INPUT_FILES: list,
        SLOWDOWN_FACTOR: float,
        
        ACTIVITY_PLUGIN: str = 'TriggerActivityMakerPrescalePlugin',
        ACTIVITY_CONFIG: dict = dict(prescale=1000),
        
        CANDIDATE_PLUGIN: str = 'TriggerCandidateMakerPrescalePlugin',
        CANDIDATE_CONFIG: dict = dict(prescale=1000),
        
        TOKEN_COUNT: int = 10,
        
        FORGET_DECISION_PROB: float = 0.0,
        HOLD_DECISION_PROB: float = 0.0,
        HOLD_MAX_SIZE: int = 0,
        HOLD_MIN_SIZE: int = 0,
        HOLD_MIN_MS: int = 0,
        RELEASE_RANDOMLY_PROB: float = 0.0,
        
        CLOCK_SPEED_HZ: int = 50000000
):
    cmd_data = {}

    # Derived parameters
    CLOCK_FREQUENCY_HZ = CLOCK_SPEED_HZ / SLOWDOWN_FACTOR

    # Define modules and queues
    queue_specs = [
        app.QueueSpec(inst=f"tpset_q{i}", kind='FollySPSCQueue', capacity=1000)
        for i in range(len(INPUT_FILES))
    ] +  [
        app.QueueSpec(inst="tpset_plus_hb_q", kind='FollyMPMCQueue', capacity=1000),
        app.QueueSpec(inst='zipped_tpset_q', kind='FollyMPMCQueue', capacity=1000),
        app.QueueSpec(inst='taset_q', kind='FollySPSCQueue', capacity=1000),
        app.QueueSpec(inst='trigger_candidate_q', kind='FollyMPMCQueue', capacity=1000),
        app.QueueSpec(inst='trigger_decision_q', kind='FollySPSCQueue', capacity=1000),
        app.QueueSpec(inst='token_q', kind='FollySPSCQueue', capacity=1000),
    ]

    mod_specs = [
        mspec(f'tpm{i}', 'TriggerPrimitiveMaker', [ # File -> TPSet
            app.QueueInfo(name='tpset_sink', inst=f'tpset_q{i}', dir='output'),
        ])
        for i in range(len(INPUT_FILES))
    ] + [

        mspec(f"ftpchm{i}", "FakeTPCreatorHeartbeatMaker", [
            app.QueueInfo(name="tpset_source", inst=f"tpset_q{i}", dir="input"),
            app.QueueInfo(name="tpset_sink", inst="tpset_plus_hb_q", dir="output"),
        ]) for i in range(len(INPUT_FILES))

    ] +  [

        mspec("zip", "TPZipper", [
            app.QueueInfo(name="input", inst="tpset_plus_hb_q", dir="input"),
            app.QueueInfo(name="output", inst="zipped_tpset_q", dir="output"),
        ]),

        mspec('tam', 'TriggerActivityMaker', [ # TPSet -> TASet
            app.QueueInfo(name='input', inst='zipped_tpset_q', dir='input'),
            app.QueueInfo(name='output', inst='taset_q', dir='output'),
        ]),
        
        mspec('tcm', 'TriggerCandidateMaker', [ # TASet -> TC
            app.QueueInfo(name='input', inst='taset_q', dir='input'),
            app.QueueInfo(name='output', inst='trigger_candidate_q', dir='output'),
        ]),
        
        mspec('mlt', 'ModuleLevelTrigger', [ # TC -> TD (with sufficient tokens)
            app.QueueInfo(name='trigger_candidate_source', inst='trigger_candidate_q', dir='input'),
            app.QueueInfo(name='trigger_decision_sink', inst='trigger_decision_q', dir='output'),
            app.QueueInfo(name='token_source', inst='token_q', dir='input'),
        ]),
        
        mspec('fdf', 'FakeDataFlow', [ # TD -> Token
            app.QueueInfo(name='trigger_decision_source', inst='trigger_decision_q', dir='input'),
            app.QueueInfo(name='trigger_complete_sink', inst='token_q', dir='output'),
        ]),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    make_moo_record(ACTIVITY_CONFIG,'ActivityConf','temptypes')
    make_moo_record(CANDIDATE_CONFIG,'CandidateConf','temptypes')
    import temptypes

    cmd_data['conf'] = acmd([
        (f'tpm{i}', tpm.ConfParams(
            filename=input_file,
            number_of_loops=-1, # Infinite
            tpset_time_offset=0,
            tpset_time_width=10000,
            clock_frequency_hz=CLOCK_FREQUENCY_HZ,
            maximum_wait_time_us=1000
        )) for i,input_file in enumerate(INPUT_FILES)
    ] + [
        (f"ftpchm{i}", ftpchm.Conf(
          heartbeat_interval = 100000
        )) for i in range(len(INPUT_FILES))
    ] + [
        ("zip", tzip.ConfParams(
            cardinality=len(INPUT_FILES),
            max_latency_ms=1000,
            region_id=0, # Fake placeholder
            element_id=0 # Fake placeholder
        )),
        ('tam', tam.Conf(
            activity_maker=ACTIVITY_PLUGIN,
            geoid_region=0, # Fake placeholder
            geoid_element=0, # Fake placeholder
            window_time=10000, # should match whatever makes TPSets, in principle
            buffer_time=625000, # 10ms in 62.5 MHz ticks
            activity_maker_config=temptypes.ActivityConf(**ACTIVITY_CONFIG)
        )),
        ('tcm', tcm.Conf(
            candidate_maker=CANDIDATE_PLUGIN,
            candidate_maker_config=temptypes.CandidateConf(**CANDIDATE_CONFIG)
        )),
        ('mlt', mlt.ConfParams(
            links=[],
            initial_token_count=TOKEN_COUNT                    
        )),
        ('fdf', fdf.ConfParams(
          hold_max_size = HOLD_MAX_SIZE,
          hold_min_size = HOLD_MIN_SIZE,
          hold_min_ms = HOLD_MIN_MS,
          release_randomly_prob = RELEASE_RANDOMLY_PROB,
          forget_decision_prob = FORGET_DECISION_PROB,
          hold_decision_prob = HOLD_DECISION_PROB
        )),
    ])

    startpars = rccmd.StartParams(run=1)
    cmd_data['start'] = acmd(
        [
            ('fdf', startpars),
            ('mlt', startpars),
            ('tcm', startpars),
            ('tam', startpars),
            ('zip', startpars),
        ] +
        [ (f'ftpchm{i}', startpars) for i in range(len(INPUT_FILES)) ] +
        [ (f'tpm{i}', startpars) for i in range(len(INPUT_FILES)) ]
    )

    cmd_data['pause'] = acmd([
        ('mlt', None)
    ])
    
    resumepars = rccmd.ResumeParams(trigger_interval_ticks=50000000)
    cmd_data['resume'] = acmd([
        ('mlt', resumepars)
    ])
    
    cmd_data['stop'] = acmd(
        [ (f'tpm{i}', None) for i in range(len(INPUT_FILES)) ] +
        [ (f'ftpchm{i}', None) for i in range(len(INPUT_FILES)) ] +
        [
            ('zip', None),
            ('tam', None),
            ('tcm', None),
            ('mlt', None),
            ('fdf', None)
        ]
    )

    cmd_data['scrap'] = acmd(
        [ (f'tpm{i}', None) for i in range(len(INPUT_FILES)) ] +
        [ (f'ftpchm{i}', None) for i in range(len(INPUT_FILES)) ] +
        [
            ('zip', None),
            ('tam', None),
            ('tcm', None),
            ('mlt', None),
            ('fdf', None)
        ]
    )

    return cmd_data
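
A hypothetical standalone invocation; note that the generator builds the temptypes schema module on the fly via make_moo_record before importing it, so ACTIVITY_CONFIG and CANDIDATE_CONFIG must contain exactly the fields those records declare:

cmd_data = generate(INPUT_FILES=["tp0.txt"],  # assumed file name
                    SLOWDOWN_FACTOR=10.0,
                    ACTIVITY_CONFIG=dict(prescale=500),
                    CANDIDATE_CONFIG=dict(prescale=500),
                    TOKEN_COUNT=10)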
Example no. 9
def generate(NETWORK_ENDPOINTS,
             NUMBER_OF_DATA_PRODUCERS=2,
             EMULATOR_MODE=False,
             DATA_RATE_SLOWDOWN_FACTOR=1,
             RUN_NUMBER=333,
             DATA_FILE="./frames.bin",
             OUTPUT_PATH=".",
             DISABLE_OUTPUT=False,
             FLX_INPUT=True,
             TOKEN_COUNT=0,
             CLOCK_SPEED_HZ=50000000):
    """Generate the json configuration for the readout and DF process"""

    cmd_data = {}
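    # QUEUE_POP_WAIT_MS, used in the conf command below, is assumed to be a
    # module-level constant defined elsewhere in this file.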

    required_eps = {'trigdec', 'triginh', 'timesync'}
    if not required_eps.issubset(NETWORK_ENDPOINTS):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join(NETWORK_ENDPOINTS.keys())}"
        )

    LATENCY_BUFFER_SIZE = 3 * CLOCK_SPEED_HZ / (25 * 12 *
                                                DATA_RATE_SLOWDOWN_FACTOR)
    RATE_KHZ = CLOCK_SPEED_HZ / (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR * 1000)

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_q", kind='FollyMPMCQueue', capacity=100),
        app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="trigger_decision_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(inst="trigger_decision_from_netq",
                      kind='FollySPSCQueue',
                      capacity=100),
        app.QueueSpec(inst="trigger_decision_copy_for_bookkeeping",
                      kind='FollySPSCQueue',
                      capacity=100),
        app.QueueSpec(
            inst="trigger_record_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
    ] + [
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=100)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"wib_link_{idx}", kind='FollySPSCQueue', capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("ntoq_trigdec", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst="trigger_decision_from_netq", dir="output")
        ]),
        mspec("qton_token", "QueueToNetwork",
              [app.QueueInfo(name="input", inst="token_q", dir="input")]),
        mspec("qton_timesync", "QueueToNetwork",
              [app.QueueInfo(name="input", inst="time_sync_q", dir="input")]),
        mspec("rqg", "RequestGenerator", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_from_netq",
                          dir="input"),
            app.QueueInfo(name="trigger_decision_for_event_building",
                          inst="trigger_decision_copy_for_bookkeeping",
                          dir="output"),
        ] + [
            app.QueueInfo(name=f"data_request_{idx}_output_queue",
                          inst=f"data_requests_{idx}",
                          dir="output")
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
        mspec("ffr", "FragmentReceiver", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_copy_for_bookkeeping",
                          dir="input"),
            app.QueueInfo(name="trigger_record_output_queue",
                          inst="trigger_record_q",
                          dir="output"),
            app.QueueInfo(name="data_fragment_input_queue",
                          inst="data_fragments_q",
                          dir="input"),
        ]),
        mspec("datawriter", "DataWriter", [
            app.QueueInfo(name="trigger_record_input_queue",
                          inst="trigger_record_q",
                          dir="input"),
            app.QueueInfo(
                name="token_output_queue", inst="token_q", dir="output"),
        ]),
    ] + [
        mspec(f"datahandler_{idx}", "DataLinkHandler", [
            app.QueueInfo(
                name="raw_input", inst=f"wib_link_{idx}", dir="input"),
            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
            app.QueueInfo(
                name="requests", inst=f"data_requests_{idx}", dir="input"),
            app.QueueInfo(
                name="fragments", inst="data_fragments_q", dir="output"),
        ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    if FLX_INPUT:
        mod_specs.append(
            mspec("flxcard_0", "FelixCardReader", [
                app.QueueInfo(
                    name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                for idx in range(0, min(5, NUMBER_OF_DATA_PRODUCERS))
            ]))
        if NUMBER_OF_DATA_PRODUCERS > 5:
            mod_specs.append(
                mspec("flxcard_1", "FelixCardReader", [
                    app.QueueInfo(name=f"output_{idx}",
                                  inst=f"wib_link_{idx}",
                                  dir="output")
                    for idx in range(5, NUMBER_OF_DATA_PRODUCERS)
                ]))
    else:
        mod_specs.append(
            mspec("fake_source", "FakeCardReader", [
                app.QueueInfo(
                    name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                for idx in range(NUMBER_OF_DATA_PRODUCERS)
            ]))

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        ("ntoq_trigdec",
         ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                   msg_module_name="TriggerDecisionNQ",
                   receiver_config=nor.Conf(
                       ipm_plugin_type="ZmqReceiver",
                       address=NETWORK_ENDPOINTS["trigdec"]))),
        ("qton_token",
         qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                   msg_module_name="TriggerDecisionTokenNQ",
                   sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                          address=NETWORK_ENDPOINTS["triginh"],
                                          stype="msgpack"))),
        ("qton_timesync",
         qton.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                   msg_module_name="TimeSyncNQ",
                   sender_config=nos.Conf(
                       ipm_plugin_type="ZmqSender",
                       address=NETWORK_ENDPOINTS["timesync"],
                       stype="msgpack"))),
        ("rqg",
         rqg.ConfParams(map=rqg.mapgeoidqueue([
             rqg.geoidinst(
                 apa=0, link=idx, queueinstance=f"data_requests_{idx}")
             for idx in range(NUMBER_OF_DATA_PRODUCERS)
         ]))),
        ("ffr", ffr.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS)),
        (
            "datawriter",
            dw.ConfParams(
                initial_token_count=TOKEN_COUNT,
                data_store_parameters=hdf5ds.ConfParams(
                    name="data_store",
                    # type = "HDF5DataStore", # default
                    directory_path=OUTPUT_PATH,  # default
                    # mode = "all-per-file", # default
                    max_file_size_bytes=1073741824,
                    disable_unique_filename_suffix=False,
                    filename_parameters=hdf5ds.HDF5DataStoreFileNameParams(
                        overall_prefix="swtest",
                        digits_for_run_number=6,
                        file_index_prefix="",
                        digits_for_file_index=4,
                    ),
                    file_layout_parameters=hdf5ds.
                    HDF5DataStoreFileLayoutParams(
                        trigger_record_name_prefix="TriggerRecord",
                        digits_for_trigger_number=5,
                        digits_for_apa_number=3,
                        digits_for_link_number=2,
                    )))),
        (
            "fake_source",
            fakecr.Conf(
                link_ids=list(range(NUMBER_OF_DATA_PRODUCERS)),
                # input_limit=10485100, # default
                rate_khz=RATE_KHZ,
                raw_type="wib",
                data_filename=DATA_FILE,
                queue_timeout_ms=QUEUE_POP_WAIT_MS)),
        ("flxcard_0",
         flxcr.Conf(card_id=0,
                    logical_unit=0,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=min(5, NUMBER_OF_DATA_PRODUCERS))),
        ("flxcard_1",
         flxcr.Conf(card_id=0,
                    logical_unit=1,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=max(0, NUMBER_OF_DATA_PRODUCERS - 5))),
    ] + [
        (
            f"datahandler_{idx}",
            dlh.Conf(
                raw_type="wib",
                emulator_mode=EMULATOR_MODE,
                # fake_trigger_flag=0, # default
                source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                latency_buffer_size=LATENCY_BUFFER_SIZE,
                pop_limit_pct=0.8,
                pop_size_pct=0.1,
                apa_number=0,
                link_number=idx)) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ])

    startpars = rccmd.StartParams(run=RUN_NUMBER,
                                  disable_data_storage=DISABLE_OUTPUT)
    cmd_data['start'] = acmd([
        ("qton_token", startpars),
        ("datawriter", startpars),
        ("ffr", startpars),
        ("qton_timesync", startpars),
        ("datahandler_.*", startpars),
        ("fake_source", startpars),
        ("flxcard.*", startpars),
        ("rqg", startpars),
        ("ntoq_trigdec", startpars),
    ])

    cmd_data['stop'] = acmd([
        ("ntoq_trigdec", None),
        ("rqg", None),
        ("flxcard.*", None),
        ("fake_source", None),
        ("datahandler_.*", None),
        ("qton_timesync", None),
        ("ffr", None),
        ("datawriter", None),
        ("qton_token", None),
    ])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    return cmd_data
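
Here NETWORK_ENDPOINTS is a plain dict, so the issubset check at the top of the function operates on its keys. A minimal sketch with assumed addresses:

endpoints = {
    "trigdec": "tcp://127.0.0.1:12345",
    "triginh": "tcp://127.0.0.1:12346",
    "timesync": "tcp://127.0.0.1:12347",
}
assert {"trigdec", "triginh", "timesync"}.issubset(endpoints)  # checks the keys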
Example no. 10
def generate(NUMBER_OF_DATA_PRODUCERS=2,
             EMULATOR_MODE=False,
             DATA_RATE_SLOWDOWN_FACTOR=10,
             RUN_NUMBER=333,
             TRIGGER_RATE_HZ=1.0,
             DATA_FILE="./frames.bin",
             OUTPUT_PATH=".",
             DISABLE_OUTPUT=False,
             TOKEN_COUNT=10):
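    # Note: CLOCK_SPEED_HZ (used below) and QUEUE_POP_WAIT_MS are assumed to
    # be module-level constants defined elsewhere in this file.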

    trigger_interval_ticks = math.floor(
        (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR)

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_q", kind='FollyMPMCQueue', capacity=100),
        app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(
            inst="trigger_decision_q", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(inst="trigger_decision_copy_for_bookkeeping",
                      kind='FollySPSCQueue',
                      capacity=20),
        app.QueueSpec(
            inst="trigger_record_q", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(
            inst="data_fragments_q", kind='FollyMPMCQueue', capacity=100),
    ] + [
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=20)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(inst=f"wib_fake_link_{idx}",
                      kind='FollySPSCQueue',
                      capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("tde", "TriggerDecisionEmulator", [
            app.QueueInfo(
                name="time_sync_source", inst="time_sync_q", dir="input"),
            app.QueueInfo(name="token_source", inst="token_q", dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_q",
                          dir="output"),
        ]),
        mspec("rqg", "RequestGenerator", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_q",
                          dir="input"),
            app.QueueInfo(name="trigger_decision_for_event_building",
                          inst="trigger_decision_copy_for_bookkeeping",
                          dir="output"),
        ] + [
            app.QueueInfo(name=f"data_request_{idx}_output_queue",
                          inst=f"data_requests_{idx}",
                          dir="output")
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
        mspec("ffr", "FragmentReceiver", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_copy_for_bookkeeping",
                          dir="input"),
            app.QueueInfo(name="trigger_record_output_queue",
                          inst="trigger_record_q",
                          dir="output"),
            app.QueueInfo(name="data_fragment_input_queue",
                          inst="data_fragments_q",
                          dir="input"),
        ]),
        mspec("datawriter", "DataWriter", [
            app.QueueInfo(name="trigger_record_input_queue",
                          inst="trigger_record_q",
                          dir="input"),
            app.QueueInfo(
                name="token_output_queue", inst="token_q", dir="output"),
        ]),
        mspec("fake_source", "FakeCardReader", [
            app.QueueInfo(name=f"output_{idx}",
                          inst=f"wib_fake_link_{idx}",
                          dir="output")
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
    ] + [
        mspec(f"datahandler_{idx}", "DataLinkHandler", [
            app.QueueInfo(
                name="raw_input", inst=f"wib_fake_link_{idx}", dir="input"),
            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
            app.QueueInfo(
                name="requests", inst=f"data_requests_{idx}", dir="input"),
            app.QueueInfo(
                name="fragments", inst="data_fragments_q", dir="output"),
        ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rccmd.RCCommand(id=basecmd.CmdId("init"),
                              entry_state="NONE",
                              exit_state="INITIAL",
                              data=init_specs)

    if TOKEN_COUNT > 0:
        df_token_count = 0
        trigemu_token_count = TOKEN_COUNT
    else:
        df_token_count = -1 * TOKEN_COUNT
        trigemu_token_count = 0

    confcmd = mrccmd(
        "conf",
        "INITIAL",
        "CONFIGURED",
        [
            (
                "tde",
                tde.ConfParams(
                    links=[idx for idx in range(NUMBER_OF_DATA_PRODUCERS)],
                    min_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                    max_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                    min_readout_window_ticks=1200,
                    max_readout_window_ticks=1200,
                    trigger_window_offset=1000,
                    # The delay is set to put the trigger well within the latency buff
                    trigger_delay_ticks=math.floor(
                        2 * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR),
                    # We divide the trigger interval by
                    # DATA_RATE_SLOWDOWN_FACTOR so the triggers are still
                    # emitted per (wall-clock) second, rather than being
                    # spaced out further
                    trigger_interval_ticks=trigger_interval_ticks,
                    clock_frequency_hz=CLOCK_SPEED_HZ /
                    DATA_RATE_SLOWDOWN_FACTOR,
                    initial_token_count=trigemu_token_count)),
            ("rqg",
             rqg.ConfParams(map=rqg.mapgeoidqueue([
                 rqg.geoidinst(
                     apa=0, link=idx, queueinstance=f"data_requests_{idx}")
                 for idx in range(NUMBER_OF_DATA_PRODUCERS)
             ]))),
            ("ffr", ffr.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS)),
            (
                "datawriter",
                dw.ConfParams(
                    initial_token_count=df_token_count,
                    data_store_parameters=hdf5ds.ConfParams(
                        name="data_store",
                        # type = "HDF5DataStore", # default
                        directory_path=OUTPUT_PATH,  # default
                        # mode = "all-per-file", # default
                        max_file_size_bytes=1073741824,
                        filename_parameters=hdf5ds.HDF5DataStoreFileNameParams(
                            overall_prefix="swtest",
                            digits_for_run_number=6,
                            file_index_prefix="",
                            digits_for_file_index=4,
                        ),
                        file_layout_parameters=hdf5ds.HDF5DataStoreFileLayoutParams(
                            trigger_record_name_prefix="TriggerRecord",
                            digits_for_trigger_number=5,
                            digits_for_apa_number=3,
                            digits_for_link_number=2,
                        )))),
            (
                "fake_source",
                fcr.Conf(
                    link_ids=list(range(NUMBER_OF_DATA_PRODUCERS)),
                    # input_limit=10485100, # default
                    rate_khz=CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR * 1000),
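                    # The 25 * 12 factor is assumed to be 25 ticks/frame x
                    # 12 frames/superchunk for WIB data, which turns the
                    # clock rate into superchunks per millisecond (kHz).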
                    raw_type="wib",
                    data_filename=DATA_FILE,
                    queue_timeout_ms=QUEUE_POP_WAIT_MS)),
        ] + [
            (
                f"datahandler_{idx}",
                dlh.Conf(
                    raw_type="wib",
                    emulator_mode=EMULATOR_MODE,
                    # fake_trigger_flag=0, # default
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    apa_number=0,
                    link_number=idx))
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ])

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    startpars = rccmd.StartParams(
        run=RUN_NUMBER,
        trigger_interval_ticks=trigger_interval_ticks,
        disable_data_storage=DISABLE_OUTPUT)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
        ("datawriter", startpars),
        ("ffr", startpars),
        ("datahandler_.*", startpars),
        ("fake_source", startpars),
        ("rqg", startpars),
        ("tde", startpars),
    ])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
        ("tde", None),
        ("rqg", None),
        ("fake_source", None),
        ("datahandler_.*", None),
        ("ffr", None),
        ("datawriter", None),
    ])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStop\n\n", jstr)

    pausecmd = mrccmd("pause", "RUNNING", "RUNNING", [("", None)])

    jstr = json.dumps(pausecmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nPause\n\n", jstr)

    resumecmd = mrccmd(
        "resume", "RUNNING", "RUNNING",
        [("tde",
          tde.ResumeParams(trigger_interval_ticks=trigger_interval_ticks))])

    jstr = json.dumps(resumecmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nResume\n\n", jstr)

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [("", None)])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nScrap\n\n", jstr)

    # Create a list of commands
    cmd_seq = [
        initcmd, confcmd, startcmd, stopcmd, pausecmd, resumecmd, scrapcmd
    ]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
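
A minimal driver sketch for generators in this style (hypothetical: the file
name is invented and all keyword arguments of generate() are assumed to have
usable defaults). The returned JSON string would normally be persisted for a
run-control tool to replay:

# Hypothetical driver for the generator above.
import pathlib

json_blob = generate()
pathlib.Path("commands.json").write_text(json_blob)
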
Example 11
def generate(
    FRONTEND_TYPE="pacman",
    NUMBER_OF_DATA_PRODUCERS=1,
    NUMBER_OF_TP_PRODUCERS=1,
    DATA_RATE_SLOWDOWN_FACTOR=1,
    ENABLE_SOFTWARE_TPG=False,
    RUN_NUMBER=333,
    DATA_FILE="./frames.bin",
    TP_DATA_FILE="./tp_frames.bin",
):

    # Define modules and queues
    queue_bare_specs = ([
        app.QueueSpec(inst="time_sync_q", kind="FollyMPMCQueue", capacity=100),
        app.QueueSpec(
            inst="data_fragments_q", kind="FollyMPMCQueue", capacity=100),
        app.QueueSpec(
            inst="errored_frames_q", kind="FollyMPMCQueue", capacity=10000),
    ] + [
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind="FollySPSCQueue", capacity=1000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"{FRONTEND_TYPE}_link_{idx}",
            kind="FollySPSCQueue",
            capacity=100000,
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"raw_tp_link_{idx}", kind="FollySPSCQueue", capacity=100000)
        for idx in range(
            NUMBER_OF_DATA_PRODUCERS,
            NUMBER_OF_DATA_PRODUCERS + NUMBER_OF_TP_PRODUCERS,
        )
    ] + [
        app.QueueSpec(
            inst=f"sw_tp_queue_{idx}", kind="FollySPSCQueue", capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"tp_data_requests", kind="FollySPSCQueue", capacity=1000)
    ] + [
        app.QueueSpec(
            inst=f"tpset_link_{idx}", kind="FollySPSCQueue", capacity=10000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = ([
        mspec("fake_source", "PacmanCardReader", [
            app.QueueInfo(
                name=f"output_{idx}",
                inst=f"{FRONTEND_TYPE}_link_{idx}",
                dir="output",
            ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
    ] + [
        mspec(
            f"datahandler_{idx}",
            "DataLinkHandler",
            [
                app.QueueInfo(
                    name="raw_input",
                    inst=f"{FRONTEND_TYPE}_link_{idx}",
                    dir="input",
                ),
                app.QueueInfo(
                    name="timesync", inst="time_sync_q", dir="output"),
                app.QueueInfo(name="data_requests_0",
                              inst=f"data_requests_{idx}",
                              dir="input"),
                app.QueueInfo(name="fragment_queue",
                              inst="data_fragments_q",
                              dir="output"),
                app.QueueInfo(
                    name="tp_out", inst=f"sw_tp_queue_{idx}", dir="output"),
                app.QueueInfo(
                    name="tpset_out", inst=f"tpset_link_{idx}", dir="output"),
                app.QueueInfo(name="errored_frames",
                              inst="errored_frames_q",
                              dir="output"),
            ],
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(
            f"timesync_consumer",
            "TimeSyncConsumer",
            [
                app.QueueInfo(
                    name="input_queue", inst=f"time_sync_q", dir="input")
            ],
        )
    ] + [
        mspec(
            f"fragment_consumer",
            "FragmentConsumer",
            [
                app.QueueInfo(
                    name="input_queue", inst=f"data_fragments_q", dir="input")
            ],
        )
    ] + [
        mspec(
            f"sw_tp_handler_{idx}",
            "DataLinkHandler",
            [
                app.QueueInfo(
                    name="raw_input", inst=f"sw_tp_queue_{idx}", dir="input"),
                app.QueueInfo(
                    name="timesync", inst="time_sync_q", dir="output"),
                app.QueueInfo(
                    name="requests", inst="tp_data_requests", dir="input"),
                app.QueueInfo(name="fragment_queue",
                              inst="data_fragments_q",
                              dir="output"),
            ],
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(
            f"tpset_publisher_{idx}",
            "QueueToNetwork",
            [
                app.QueueInfo(
                    name="input", inst=f"tpset_link_{idx}", dir="input")
            ],
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(
            f"raw_tp_handler_{idx}",
            "DataLinkHandler",
            [
                app.QueueInfo(
                    name="raw_input", inst=f"raw_tp_link_{idx}", dir="input"),
                app.QueueInfo(
                    name="timesync", inst="time_sync_q", dir="output"),
            ],
        ) for idx in range(
            NUMBER_OF_DATA_PRODUCERS,
            NUMBER_OF_DATA_PRODUCERS + NUMBER_OF_TP_PRODUCERS,
        )
    ] + [
        mspec(
            "errored_frame_consumer",
            "ErroredFrameConsumer",
            [
                app.QueueInfo(
                    name="input_queue", inst="errored_frames_q", dir="input")
            ],
        )
    ])

    nw_specs = [
        nwmgr.Connection(name=f"tpsets_{idx}",
                         topics=["foo"],
                         address="tcp://127.0.0.1:" + str(5000 + idx))
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]
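    # One publisher connection per data producer (ports 5000, 5001, ...);
    # the shared "timesync" connection is appended below.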
    nw_specs.append(
        nwmgr.Connection(name="timesync",
                         topics=["Timesync"],
                         address="tcp://127.0.0.1:6000"))

    init_specs = app.Init(queues=queue_specs,
                          modules=mod_specs,
                          nwconnections=nw_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rccmd.RCCommand(
        id=basecmd.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs,
    )

    confcmd = mrccmd(
        "conf",
        "INITIAL",
        "CONFIGURED",
        [
            (
                "fake_source",
                pcr.Conf(link_confs=[
                    pcr.LinkConfiguration(geoid=pcr.GeoID(
                        system="kNDLarTPC", region=0, element=idx), )
                    for idx in range(NUMBER_OF_DATA_PRODUCERS)
                ] + [
                    pcr.LinkConfiguration(geoid=pcr.GeoID(
                        system="TPC", region=0, element=idx), )
                    for idx in range(
                        NUMBER_OF_DATA_PRODUCERS,
                        NUMBER_OF_DATA_PRODUCERS + NUMBER_OF_TP_PRODUCERS,
                    )
                ],
                         # input_limit=10485100, # default
                         ),
            ),
        ] + [(
            f"datahandler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    fake_trigger_flag=1,
                    region_id=0,
                    element_id=idx,
                    timesync_connection_name=f"timesync",
                    timesync_topic_name="Timesync",
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    region_id=0,
                    element_id=idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=0,
                    element_id=idx,
                    enable_software_tpg=ENABLE_SOFTWARE_TPG,
                    error_counter_threshold=100,
                    error_reset_freq=10000,
                ),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=0,
                    element_id=idx,
                    output_file=f"output_{idx}.out",
                    stream_buffer_size=8388608,
                    enable_raw_recording=True,
                    use_o_direct=False,
                ),
            ),
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)] + [(
            f"sw_tp_handler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    fake_trigger_flag=1,
                    region_id=0,
                    element_id=idx,
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    region_id=0,
                    element_id=idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=0,
                    element_id=idx,
                    enable_software_tpg=ENABLE_SOFTWARE_TPG,
                ),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=0,
                    element_id=idx,
                    output_file=f"output_{idx}.out",
                    stream_buffer_size=8388608,
                    enable_raw_recording=False,
                    use_o_direct=False,
                ),
            ),
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)] + [(
            f"raw_tp_handler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    fake_trigger_flag=1,
                    region_id=0,
                    element_id=idx,
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    region_id=0,
                    element_id=idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=0,
                    element_id=idx,
                    enable_software_tpg=ENABLE_SOFTWARE_TPG,
                ),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=3 * CLOCK_SPEED_HZ /
                    (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=0,
                    element_id=idx,
                    output_file=f"output_{idx}.out",
                    stream_buffer_size=8388608,
                    enable_raw_recording=False,
                    use_o_direct=False,
                ),
            ),
        ) for idx in range(
            NUMBER_OF_DATA_PRODUCERS,
            NUMBER_OF_DATA_PRODUCERS + NUMBER_OF_TP_PRODUCERS,
        )] + [(
            f"tpset_publisher_{idx}",
            qton.Conf(
                msg_type="dunedaq::trigger::TPSet",
                msg_module_name="TPSetNQ",
                sender_config=nos.Conf(
                    name=f"tpsets_{idx}",
                    topic="foo",
                    stype="msgpack",
                ),
            ),
        ) for idx in range(NUMBER_OF_DATA_PRODUCERS)],
    )

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    startpars = rccmd.StartParams(run=RUN_NUMBER)
    startcmd = mrccmd(
        "start",
        "CONFIGURED",
        "RUNNING",
        [
            ("datahandler_.*", startpars),
            ("fake_source", startpars),
            ("data_recorder_.*", startpars),
            ("timesync_consumer", startpars),
            ("fragment_consumer", startpars),
            ("sw_tp_handler_.*", startpars),
            ("raw_tp_handler_.*", startpars),
            ("tpset_publisher_.*", startpars),
            ("errored_frame_consumer", startpars),
        ],
    )

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStart\n\n", jstr)

    stopcmd = mrccmd(
        "stop",
        "RUNNING",
        "CONFIGURED",
        [
            ("fake_source", None),
            ("datahandler_.*", None),
            ("data_recorder_.*", None),
            ("timesync_consumer", None),
            ("fragment_consumer", None),
            ("sw_tp_handler_.*", None),
            ("raw_tp_handler_.*", None),
            ("tpset_publisher_.*", None),
            ("errored_frame_consumer", None),
        ],
    )

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStop\n\n", jstr)

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [("", None)])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nScrap\n\n", jstr)

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, scrapcmd]

    record_cmd = mrccmd(
        "record",
        "RUNNING",
        "RUNNING",
        [("datahandler_.*", rconf.RecordingParams(duration=10))],
    )
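    # "record" is issued while RUNNING and asks every DataLinkHandler to
    # record raw data for a duration of 10 (assumed here to be seconds).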

    jstr = json.dumps(record_cmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nRecord\n\n", jstr)

    cmd_seq.append(record_cmd)

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
Example 12
def generate(TRIGGER_RATE_HZ: float = 1.0,
             OUTPUT_PATH: str = ".",
             TOKEN_COUNT: int = 10,
             CLOCK_SPEED_HZ: int = 50000000,
             FORGET_DECISION_PROB: float = 0.0,
             HOLD_DECISION_PROB: float = 0.0,
             HOLD_MAX_SIZE: int = 0,
             HOLD_MIN_SIZE: int = 0,
             HOLD_MIN_MS: int = 0,
             RELEASE_RANDOMLY_PROB: float = 0.0):
    """
    { item_description }
    """
    cmd_data = {}

    # Derived parameters
    TRG_INTERVAL_TICKS = math.floor((1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ)
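    # e.g. with the defaults TRIGGER_RATE_HZ=1.0 and CLOCK_SPEED_HZ=50000000,
    # TRG_INTERVAL_TICKS = floor(1.0 * 50000000) = 50000000 ticks (1 s).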

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(inst="trigger_decision_q",
                      kind='FollySPSCQueue',
                      capacity=20),
        app.QueueSpec(inst="trigger_candidate_q",
                      kind='FollyMPMCQueue',
                      capacity=20),  #No MPSC Queue?
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("fdf", "FakeDataFlow", [
            app.QueueInfo(name="trigger_decision_source",
                          inst="trigger_decision_q",
                          dir="input"),
            app.QueueInfo(
                name="trigger_complete_sink", inst="token_q", dir="output"),
        ]),
        mspec("mlt", "ModuleLevelTrigger", [
            app.QueueInfo(name="token_source", inst="token_q", dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_q",
                          dir="output"),
            app.QueueInfo(name="trigger_candidate_source",
                          inst="trigger_candidate_q",
                          dir="output"),
        ]),
        mspec("rtcm_poisson", "RandomTriggerCandidateMaker", [
            app.QueueInfo(
                name="time_sync_source", inst="time_sync_q", dir="input"),
            app.QueueInfo(name="trigger_candidate_sink",
                          inst="trigger_candidate_q",
                          dir="output"),
        ]),
        mspec("rtcm_uniform", "RandomTriggerCandidateMaker", [
            app.QueueInfo(
                name="time_sync_source", inst="time_sync_q", dir="input"),
            app.QueueInfo(name="trigger_candidate_sink",
                          inst="trigger_candidate_q",
                          dir="output"),
        ]),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        ("fdf",
         fdf.ConfParams(hold_max_size=HOLD_MAX_SIZE,
                        hold_min_size=HOLD_MIN_SIZE,
                        hold_min_ms=HOLD_MIN_MS,
                        release_randomly_prob=RELEASE_RANDOMLY_PROB,
                        forget_decision_prob=FORGET_DECISION_PROB,
                        hold_decision_prob=HOLD_DECISION_PROB)),
        ("mlt",
         mlt.ConfParams(links=[idx for idx in range(3)],
                        initial_token_count=TOKEN_COUNT)),
        ("rtcm_poisson",
         rtcm.ConfParams(trigger_interval_ticks=TRG_INTERVAL_TICKS,
                         clock_frequency_hz=CLOCK_SPEED_HZ,
                         timestamp_method="kSystemClock",
                         time_distribution="kPoisson")),
        ("rtcm_uniform",
         rtcm.ConfParams(trigger_interval_ticks=TRG_INTERVAL_TICKS,
                         clock_frequency_hz=CLOCK_SPEED_HZ,
                         timestamp_method="kSystemClock",
                         time_distribution="kUniform")),
    ])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("fdf", startpars),
        ("mlt", startpars),
        ("rtcm_poisson", startpars),
        ("rtcm_uniform", startpars),
    ])

    cmd_data['stop'] = acmd([
        ("fdf", None),
        ("mlt", None),
        ("rtcm_poisson", None),
        ("rtcm_uniform", None),
    ])

    cmd_data['pause'] = acmd([("", None)])

    resumepars = rccmd.ResumeParams(trigger_interval_ticks=50000000)
    cmd_data['resume'] = acmd([("mlt", resumepars)])

    cmd_data['scrap'] = acmd([("", None)])

    return cmd_data
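
Unlike the JSON-string generators above, this one returns a dict of
per-command payloads; a hypothetical caller could serialize each entry itself
(the .pod() call mirrors its use elsewhere in these examples):

# Hypothetical driver: dump every command produced by the generator above.
import json

cmd_data = generate(TRIGGER_RATE_HZ=2.0)
for name, payload in cmd_data.items():
    print(name, json.dumps(payload.pod(), indent=4, sort_keys=True))
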
Example 13
def generate(
    NETWORK_ENDPOINTS: dict,
    NUMBER_OF_DATA_PRODUCERS: int = 2,
    DATA_RATE_SLOWDOWN_FACTOR: int = 1,
    RUN_NUMBER: int = 333,
    TRIGGER_RATE_HZ: float = 1.0,
    DATA_FILE: str = "./frames.bin",
    OUTPUT_PATH: str = ".",
    TOKEN_COUNT: int = 10,
    CLOCK_SPEED_HZ: int = 50000000,
):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = {'trigdec', 'triginh', 'timesync'}
    if not required_eps.issubset(NETWORK_ENDPOINTS):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join(NETWORK_ENDPOINTS.keys())}"
        )

    # Derived parameters
    TRG_INTERVAL_TICKS = math.floor(
        (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR)
    MIN_READOUT_WINDOW_TICKS = math.floor(CLOCK_SPEED_HZ /
                                          (DATA_RATE_SLOWDOWN_FACTOR * 1000))
    MAX_READOUT_WINDOW_TICKS = math.floor(CLOCK_SPEED_HZ /
                                          (DATA_RATE_SLOWDOWN_FACTOR * 1000))
    TRIGGER_WINDOW_OFFSET = math.floor(CLOCK_SPEED_HZ /
                                       (DATA_RATE_SLOWDOWN_FACTOR * 2000))
    # The delay is set to put the trigger well within the latency buffer
    TRIGGER_DELAY_TICKS = math.floor(CLOCK_SPEED_HZ /
                                     DATA_RATE_SLOWDOWN_FACTOR)
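    # With the defaults (50 MHz clock, slowdown factor 1) these evaluate to a
    # 50,000-tick (1 ms) readout window, a 25,000-tick (0.5 ms) window offset
    # and a 50,000,000-tick (1 s) trigger delay.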

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_from_netq",
                      kind='FollySPSCQueue',
                      capacity=100),
        app.QueueSpec(inst="token_from_netq",
                      kind='FollySPSCQueue',
                      capacity=20),
        app.QueueSpec(inst="trigger_decision_to_netq",
                      kind='FollySPSCQueue',
                      capacity=20),
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("qton_trigdec", "QueueToNetwork", [
            app.QueueInfo(
                name="input", inst="trigger_decision_to_netq", dir="input")
        ]),
        mspec("ntoq_token", "NetworkToQueue", [
            app.QueueInfo(name="output", inst="token_from_netq", dir="output")
        ]),
        mspec("ntoq_timesync", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst="time_sync_from_netq", dir="output")
        ]),
        mspec("tde", "TriggerDecisionEmulator", [
            app.QueueInfo(name="time_sync_source",
                          inst="time_sync_from_netq",
                          dir="input"),
            app.QueueInfo(
                name="token_source", inst="token_from_netq", dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_to_netq",
                          dir="output"),
        ]),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        ("qton_trigdec",
         qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                   msg_module_name="TriggerDecisionNQ",
                   sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                          address=NETWORK_ENDPOINTS["trigdec"],
                                          stype="msgpack"))),
        ("ntoq_token",
         ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                   msg_module_name="TriggerDecisionTokenNQ",
                   receiver_config=nor.Conf(
                       ipm_plugin_type="ZmqReceiver",
                       address=NETWORK_ENDPOINTS["triginh"]))),
        ("ntoq_timesync",
         ntoq.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                   msg_module_name="TimeSyncNQ",
                   receiver_config=nor.Conf(
                       ipm_plugin_type="ZmqReceiver",
                       address=NETWORK_ENDPOINTS["timesync"]))),
        (
            "tde",
            tde.ConfParams(
                links=[idx for idx in range(NUMBER_OF_DATA_PRODUCERS)],
                min_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                max_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                min_readout_window_ticks=MIN_READOUT_WINDOW_TICKS,
                max_readout_window_ticks=MAX_READOUT_WINDOW_TICKS,
                trigger_window_offset=TRIGGER_WINDOW_OFFSET,
                # The delay is set to put the trigger well within the latency buffer
                trigger_delay_ticks=TRIGGER_DELAY_TICKS,
                # We divide the trigger interval by
                # DATA_RATE_SLOWDOWN_FACTOR so the triggers are still
                # emitted per (wall-clock) second, rather than being
                # spaced out further
                trigger_interval_ticks=TRG_INTERVAL_TICKS,
                clock_frequency_hz=CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR,
                initial_token_count=TOKEN_COUNT)),
    ])

    startpars = rccmd.StartParams(run=RUN_NUMBER, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("qton_trigdec", startpars),
        ("ntoq_token", startpars),
        ("ntoq_timesync", startpars),
        ("tde", startpars),
    ])

    cmd_data['stop'] = acmd([
        ("qton_trigdec", None),
        ("ntoq_timesync", None),
        ("ntoq_token", None),
        ("tde", None),
    ])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([
        ("tde", tde.ResumeParams(trigger_interval_ticks=TRG_INTERVAL_TICKS))
    ])

    cmd_data['scrap'] = acmd([("", None)])

    return cmd_data
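
Because of the required_eps check at the top, a minimal (hypothetical)
invocation must supply at least the three required endpoints; the addresses
below are placeholders:

# Hypothetical endpoint map satisfying the required_eps check above.
endpoints = {
    "trigdec": "tcp://127.0.0.1:12345",
    "triginh": "tcp://127.0.0.1:12346",
    "timesync": "tcp://127.0.0.1:12347",
}
cmd_data = generate(NETWORK_ENDPOINTS=endpoints)
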
Example 14
def generate(
        RUN_NUMBER = 333, 
        GATHER_INTERVAL = 1e6,
        GATHER_INTERVAL_DEBUG = 10e6,
        MASTER_DEVICE_NAME="",
        MASTER_SEND_DELAYS_PERIOD=0,
        MASTER_CLOCK_FILE="",
        MASTER_CLOCK_MODE=-1,
        PARTITION_IDS=[],
        FANOUT_DEVICES_NAMES=[],
        FANOUT_CLOCK_FILE="",
        ENDPOINT_DEVICE_NAME="",
        ENDPOINT_CLOCK_FILE="",
        ENDPOINT_ADDRESS=0,
        ENDPOINT_PARTITION=0,
        HSI_DEVICE_NAME="",
        HSI_ENDPOINT_ADDRESS=0,
        HSI_ENDPOINT_PARTITION=0,
        HSI_CLOCK_FILE="",
        HSI_RE_MASK=0x0,
        HSI_FE_MASK=0x0,
        HSI_INV_MASK=0x0,
        HSI_RANDOM_RATE=1.0,
        HSI_SOURCE=0x0,
        PART_TRIGGER_MASK=0xff,
        PART_SPILL_GATE_ENABLED=True,
        PART_RATE_CONTROL_ENABLED=True,
        UHAL_LOG_LEVEL="notice",
        OUTPUT_PATH=".",
    ):
    
    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="hardware_commands", kind='StdDeQueue', capacity=100),
        ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    mod_specs = [
                    mspec("thi", "TimingHardwareManagerPDI", [app.QueueInfo(name="timing_cmds_queue", inst="hardware_commands", dir="input")])
                ]
    conf_cmds = [
                ("thi", thi.ConfParams(
                                           connections_file="${TIMING_SHARE}/config/etc/connections.xml",
                                           gather_interval=GATHER_INTERVAL,
                                           gather_interval_debug=GATHER_INTERVAL_DEBUG,
                                           monitored_device_name_master=MASTER_DEVICE_NAME,
                                           monitored_device_names_fanout=FANOUT_DEVICES_NAMES,
                                           monitored_device_name_endpoint=ENDPOINT_DEVICE_NAME,
                                           monitored_device_name_hsi=HSI_DEVICE_NAME,
                                           uhal_log_level=UHAL_LOG_LEVEL
                                    )),
            ]
    
    ## master and partition controllers
    if MASTER_DEVICE_NAME != "":

        mod_specs.extend( [ mspec("tmc0", "TimingMasterController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) ] )

        tpc_mods=[]
        for partition_id in PARTITION_IDS:

            tpc_mods.append( mspec("tpc{}".format(partition_id), "TimingPartitionController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) )
        mod_specs.extend( tpc_mods )

    ## fanout controller
    for i,fanout_device_name in enumerate(FANOUT_DEVICES_NAMES):
        mod_specs.extend( [ mspec("tfc{}".format(i), "TimingFanoutController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) ] )

    ## endpoint controllers
    if ENDPOINT_DEVICE_NAME != "":
        mod_specs.extend( [ mspec("tec0", "TimingEndpointController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) ] )

    ## hsi controllers
    if HSI_DEVICE_NAME != "":
        mod_specs.extend( [ mspec("hsi0", "HSIController", [app.QueueInfo(name="hardware_commands_out", inst="hardware_commands", dir="output")]) ] )

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)
    
    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rcif.RCCommand(
        id=cmdlib.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    ## conf command

    if MASTER_DEVICE_NAME != "":
        conf_cmds.extend( [
                        ("tmc0", tmc.ConfParams(
                                    device=MASTER_DEVICE_NAME,
                                    send_endpoint_delays_period=MASTER_SEND_DELAYS_PERIOD,
                                    clock_config=MASTER_CLOCK_FILE,
                                    fanout_mode=MASTER_CLOCK_MODE,
                                 )),
                     ] )

        for partition_id in PARTITION_IDS:
            conf_cmds.extend( [
                            ("tpc{}".format(partition_id), tpc.PartitionConfParams(
                                                            device=MASTER_DEVICE_NAME,
                                                            partition_id=partition_id,
                                                            trigger_mask=PART_TRIGGER_MASK,
                                                            spill_gate_enabled=PART_SPILL_GATE_ENABLED,
                                                            rate_control_enabled=PART_RATE_CONTROL_ENABLED,
                                                        )),
                        ] )
    
        for i,fanout_device_name in enumerate(FANOUT_DEVICES_NAMES):
            conf_cmds.extend( [
                            ("tfc{}".format(i), tfc.ConfParams(
                                    device=fanout_device_name,
                                    )),
                         ] )

    if ENDPOINT_DEVICE_NAME != "":
        conf_cmds.extend( [
                        ("tec0", tec.ConfParams(
                                device=ENDPOINT_DEVICE_NAME,
                                address=ENDPOINT_ADDRESS,
                                partition=ENDPOINT_PARTITION
                                )),
                     ] )

    trigger_interval_ticks=0
    if HSI_DEVICE_NAME != "":
        if HSI_RANDOM_RATE > 0:
            trigger_interval_ticks=math.floor((1/HSI_RANDOM_RATE) * CLOCK_SPEED_HZ)
        else:
            console.log('WARNING! Emulated trigger rate of 0 will not disable signal emulation in real HSI hardware! To disable emulated HSI triggers, use the option "--hsi-source 0" or mask all signal bits', style="bold red")

        conf_cmds.extend( [
                        ("hsi0", hsi.ConfParams(
                                device=HSI_DEVICE_NAME,
                                clock_frequency=CLOCK_SPEED_HZ,
                                trigger_interval_ticks=trigger_interval_ticks,
                                address=HSI_ENDPOINT_ADDRESS,
                                partition=HSI_ENDPOINT_PARTITION,
                                rising_edge_mask=HSI_RE_MASK,                   
                                falling_edge_mask=HSI_FE_MASK,
                                invert_edge_mask=HSI_INV_MASK,
                                data_source=HSI_SOURCE,
                                )),
                     ] )

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED", conf_cmds)

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    startpars = rcif.StartParams(run=RUN_NUMBER, trigger_interval_ticks = trigger_interval_ticks)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("thi", startpars),
            ("tmc.*", startpars),
            ("tpc.*", startpars),
            ("tfc.*", startpars),
            ("tec.*", startpars),
            ("hsi.*", startpars),
        ])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("thi", None),
            ("tmc.*", None),
            ("tpc.*", None),
            ("tfc.*", None),
            ("tec.*", None),
            ("hsi.*", None),
        ])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nStop\n\n", jstr)


    scrapcmd = mcmd("scrap", [
            ("thi", None),
            ("tmc.*", None),
            ("tpc.*", None),
            ("tfc.*", None),
            ("tec.*", None),
            ("hsi.*", None),
        ])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nScrap\n\n", jstr)

    ## timing specific commands

    # master commands
    master_io_reset_cmd = mcmd("master_io_reset", [
            ("tmc.*", tcmd.IOResetCmdPayload(
                      clock_config=MASTER_CLOCK_FILE,
                      fanout_mode=MASTER_CLOCK_MODE,
                      soft=False
                      )),
        ])
    jstr = json.dumps(master_io_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nMaster IO reset\n\n", jstr)


    master_set_timestamp_cmd = mcmd("master_set_timestamp", [
            ("tmc.*", None),
        ])
    jstr = json.dumps(master_set_timestamp_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nMaster set timestamp\n\n", jstr)


    master_print_status_cmd = mcmd("master_print_status", [
            ("tmc.*", None),
        ])
    jstr = json.dumps(master_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nMaster print status\n\n", jstr)


    # partition commands
    partition_configure_cmd = mcmd("partition_configure", [
            ("tpc.*", tpc.PartitionConfParams(
                      trigger_mask=PART_TRIGGER_MASK,
                      spill_gate_enabled=PART_SPILL_GATE_ENABLED,
                      rate_control_enabled=PART_RATE_CONTROL_ENABLED,
                      )),
        ])
    jstr = json.dumps(partition_configure_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition configure\n\n", jstr)


    partition_enable_cmd = mcmd("partition_enable", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_enable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition enable\n\n", jstr)


    partition_disable_cmd = mcmd("partition_disable", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_disable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition disable\n\n", jstr)


    partition_start_cmd = mcmd("partition_start", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_start_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition start\n\n", jstr)


    partition_stop_cmd = mcmd("partition_stop", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_stop_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition stop\n\n", jstr)


    partition_enable_triggers_cmd = mcmd("partition_enable_triggers", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_enable_triggers_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition enable triggers\n\n", jstr)


    partition_disable_triggers_cmd = mcmd("partition_disable_triggers", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_disable_triggers_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition disable triggers\n\n", jstr)


    partition_print_status_cmd = mcmd("partition_print_status", [
            ("tpc.*", None),
        ])
    jstr = json.dumps(partition_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nPartition print status\n\n", jstr)

    # fanout commands
    fanout_io_reset_cmd = mcmd("fanout_io_reset", [
            ("tfc.*", tcmd.IOResetCmdPayload(
                      clock_config=FANOUT_CLOCK_FILE,
                      soft=False
                      )),
        ])
    jstr = json.dumps(fanout_io_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nFanout IO reset\n\n", jstr)


    fanout_print_status_cmd = mcmd("fanout_print_status", [
            ("tfc.*", None),
        ])
    jstr = json.dumps(fanout_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nFanout print status\n\n", jstr)

    # hsi commands
    hsi_io_reset_cmd = mcmd("hsi_io_reset", [
            ("hsi.*", tcmd.IOResetCmdPayload(
                      clock_config=HSI_CLOCK_FILE,
                      soft=False
                      )),
        ])
    jstr = json.dumps(hsi_io_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI IO reset\n\n", jstr)


    hsi_endpoint_enable_cmd = mcmd("hsi_endpoint_enable", [
            ("hsi.*", tcmd.TimingEndpointConfigureCmdPayload(
                      address=HSI_ENDPOINT_ADDRESS,
                      partition=HSI_ENDPOINT_PARTITION
                      )),
        ])
    jstr = json.dumps(hsi_endpoint_enable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI endpoint enable\n\n", jstr)


    hsi_endpoint_disable_cmd = mcmd("hsi_endpoint_disable", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_endpoint_disable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI endpoint disable\n\n", jstr)


    hsi_endpoint_reset_cmd = mcmd("hsi_endpoint_reset", [
            ("hsi.*", tcmd.TimingEndpointConfigureCmdPayload(
                      address=HSI_ENDPOINT_ADDRESS,
                      partition=HSI_ENDPOINT_PARTITION
                      )),
        ])
    jstr = json.dumps(hsi_endpoint_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI endpoint reset\n\n", jstr)


    hsi_reset_cmd = mcmd("hsi_reset", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI reset\n\n", jstr)


    hsi_configure_cmd = mcmd("hsi_configure", [
            ("hsi.*", tcmd.HSIConfigureCmdPayload(
                      rising_edge_mask=HSI_RE_MASK,                   
                      falling_edge_mask=HSI_FE_MASK,
                      invert_edge_mask=HSI_INV_MASK,
                      data_source=HSI_SOURCE,
                      random_rate=HSI_RANDOM_RATE
                      )),
        ])
    jstr = json.dumps(hsi_configure_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI configure\n\n", jstr)

    hsi_start_cmd = mcmd("hsi_start", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_start_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI start\n\n", jstr)

    hsi_stop_cmd = mcmd("hsi_stop", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_stop_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI stop\n\n", jstr)


    hsi_print_status_cmd = mcmd("hsi_print_status", [
            ("hsi.*", None),
        ])
    jstr = json.dumps(hsi_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nHSI print status\n\n", jstr)



    # endpoint commands
    endpoint_io_reset_cmd = mcmd("endpoint_io_reset", [
            ("tec.*", tcmd.IOResetCmdPayload(
                      clock_config=ENDPOINT_CLOCK_FILE,
                      soft=False
                      )),
        ])
    jstr = json.dumps(endpoint_io_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint IO reset\n\n", jstr)


    endpoint_enable_cmd = mcmd("endpoint_enable", [
            ("tec.*", tcmd.TimingEndpointConfigureCmdPayload(
                      address=ENDPOINT_ADDRESS,
                      partition=ENDPOINT_PARTITION
                      )),
        ])
    jstr = json.dumps(endpoint_enable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint enable\n\n", jstr)


    endpoint_disable_cmd = mcmd("endpoint_disable", [
            ("tec.*", None),
        ])
    jstr = json.dumps(endpoint_disable_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint disable\n\n", jstr)


    endpoint_reset_cmd = mcmd("endpoint_reset", [
            ("tec.*", tcmd.TimingEndpointConfigureCmdPayload(
                      address=ENDPOINT_ADDRESS,
                      partition=ENDPOINT_PARTITION
                      )),
        ])
    jstr = json.dumps(endpoint_reset_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint reset\n\n", jstr)


    endpoint_print_status_cmd = mcmd("endpoint_print_status", [
            ("tec.*", None),
        ])
    jstr = json.dumps(endpoint_print_status_cmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nEndpoint print status\n\n", jstr)
    #####

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, scrapcmd]

    if MASTER_DEVICE_NAME != "":
        cmd_seq.extend( [
                        master_io_reset_cmd, master_set_timestamp_cmd, master_print_status_cmd,
                        partition_configure_cmd, partition_enable_cmd, partition_disable_cmd, 
                        partition_start_cmd, partition_stop_cmd, 
                        partition_enable_triggers_cmd, partition_disable_triggers_cmd, 
                        partition_print_status_cmd
                        ] )
    
    if len(FANOUT_DEVICES_NAMES) != 0:
        cmd_seq.extend( [
                        fanout_io_reset_cmd, fanout_print_status_cmd,
                        ] )

    if ENDPOINT_DEVICE_NAME != "":
        cmd_seq.extend( [
                        endpoint_io_reset_cmd, 
                        endpoint_enable_cmd, endpoint_disable_cmd, 
                        endpoint_reset_cmd, endpoint_print_status_cmd
                        ] )

    if HSI_DEVICE_NAME != "":
        cmd_seq.extend( [
                        hsi_io_reset_cmd,
                        hsi_endpoint_enable_cmd,
                        hsi_endpoint_disable_cmd,
                        hsi_endpoint_reset_cmd,
                        hsi_reset_cmd,
                        hsi_configure_cmd,
                        hsi_start_cmd,
                        hsi_stop_cmd,
                        hsi_print_status_cmd,
                        ] )

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
Example 15
def generate(
        PARTITION = "hsi_readout_test",
        RUN_NUMBER = 333, 
        CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
        READOUT_PERIOD = 1e3,
        HSI_DEVICE_NAME="BOREAS_FMC",
        TTCM_S1: int = 1,
        TTCM_S2: int = 2,
        UHAL_LOG_LEVEL="notice",
        OUTPUT_PATH=".",
    ):
    
    # network connection
    nw_specs = [nwmgr.Connection(name=PARTITION + ".hsievent", topics=[], address="tcp://127.0.0.1:12344")]

    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="trigger_candidate_q", kind='FollySPSCQueue', capacity=2000),

        ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    mod_specs = [   
                    mspec("hsi", "HSIReadout", []),

                    mspec("ttcm", "TimingTriggerCandidateMaker", [
                                    app.QueueInfo(name="output", inst="trigger_candidate_q", dir="output"),
                                ]),
                ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs, nwconnections=nw_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rcif.RCCommand(
        id=cmdlib.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    mods = [
                ("hsi", hsi.ConfParams(
                        connections_file=CONNECTIONS_FILE,
                        readout_period=READOUT_PERIOD,
                        hsi_device_name=HSI_DEVICE_NAME,
                        uhal_log_level=UHAL_LOG_LEVEL,
                        hsievent_connection_name = f"{PARTITION}.hsievent",
                        )),
                
                ("ttcm", ttcm.Conf(
                        s1=ttcm.map_t(signal_type=TTCM_S1,
                                      time_before=100000,
                                      time_after=200000),
                        s2=ttcm.map_t(signal_type=TTCM_S2,
                                      time_before=100000,
                                      time_after=200000),
                        hsievent_connection_name = PARTITION+".hsievent",
                        )),
            ]
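    # Both signal maps open the same readout window (100000 before and 200000
    # after the HSI timestamp, in what are assumed to be clock ticks); only
    # signal_type differs between s1 and s2.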

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED", mods)

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    startpars = rcif.StartParams(run=RUN_NUMBER, disable_data_storage=False)

    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("hsi", startpars),
            ("ttcm", startpars),
        ])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("hsi", None),
            ("ttcm", None),
        ])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nStop\n\n", jstr)


    scrapcmd = mcmd("scrap", [
            ("", None)
        ])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("="*80+"\nScrap\n\n", jstr)

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
Example 16
def generate(NW_SPECS,
             RU_CONFIG=[],
             HOSTIDX=0,
             RUN_NUMBER=333,
             OUTPUT_PATH=".",
             TOKEN_COUNT=0,
             SYSTEM_TYPE="TPC",
             SOFTWARE_TPG_ENABLED=False,
             TPSET_WRITING_ENABLED=False,
             PARTITION="UNKNOWN",
             OPERATIONAL_ENVIRONMENT="swtest",
             TPC_REGION_NAME_PREFIX="APA",
             MAX_FILE_SIZE=4 * 1024 * 1024 * 1024):
    """Generate the json configuration for the readout and DF process"""

    cmd_data = {}

    required_eps = {PARTITION + f'.trigdec_{HOSTIDX}', PARTITION + '.triginh'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(
            inst="trigger_decision_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="trigger_record_q", kind='FollySPSCQueue', capacity=100),
        app.QueueSpec(
            inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
    ] + ([
        app.QueueSpec(
            inst="tpsets_from_netq", kind='FollyMPMCQueue', capacity=1000),
    ] if TPSET_WRITING_ENABLED else [])

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("trigdec_receiver", "TriggerDecisionReceiver", [
            app.QueueInfo(
                name="output", inst="trigger_decision_q", dir="output")
        ]),
        mspec("fragment_receiver", "FragmentReceiver", [
            app.QueueInfo(name="output", inst="data_fragments_q", dir="output")
        ]),
        mspec("trb", "TriggerRecordBuilder", [
            app.QueueInfo(name="trigger_decision_input_queue",
                          inst="trigger_decision_q",
                          dir="input"),
            app.QueueInfo(name="trigger_record_output_queue",
                          inst="trigger_record_q",
                          dir="output"),
            app.QueueInfo(name="data_fragment_input_queue",
                          inst="data_fragments_q",
                          dir="input")
        ]),
        mspec("datawriter", "DataWriter", [
            app.QueueInfo(name="trigger_record_input_queue",
                          inst="trigger_record_q",
                          dir="input")
        ]),
    ] + ([
        mspec(f"tpset_subscriber_{idx}", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst=f"tpsets_from_netq", dir="output")
        ]) for idx in range(len(RU_CONFIG))
    ] if TPSET_WRITING_ENABLED else []) + ([
        mspec("tpswriter", "TPSetWriter", [
            app.QueueInfo(
                name="tpset_source", inst="tpsets_from_netq", dir="input")
        ])
    ] if TPSET_WRITING_ENABLED else [])

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    total_link_count = 0
    for ru in range(len(RU_CONFIG)):
        total_link_count += RU_CONFIG[ru]["channel_count"]
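    # total_link_count offsets the GeoID element numbers of the software-TPG
    # streams in the map below so they cannot collide with the raw links.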

    cmd_data['conf'] = acmd([
        ("trigdec_receiver",
         tdrcv.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS,
                          connection_name=f"{PARTITION}.trigdec_{HOSTIDX}")),
        ("trb",
         trb.ConfParams(
             general_queue_timeout=QUEUE_POP_WAIT_MS,
             reply_connection_name=f"{PARTITION}.frags_{HOSTIDX}",
             map=trb.mapgeoidconnections([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"],
                               system=SYSTEM_TYPE,
                               connection_name=f"{PARTITION}.datareq_{ru}")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] + ([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"] +
                               total_link_count,
                               system=SYSTEM_TYPE,
                               connection_name=f"{PARTITION}.datareq_{ru}")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] if SOFTWARE_TPG_ENABLED else []) + ([
                 trb.geoidinst(region=RU_CONFIG[ru]["region_id"],
                               element=idx + RU_CONFIG[ru]["start_channel"],
                               system="DataSelection",
                               connection_name=f"{PARTITION}.ds_tp_datareq_0")
                 for ru in range(len(RU_CONFIG))
                 for idx in range(RU_CONFIG[ru]["channel_count"])
             ] if SOFTWARE_TPG_ENABLED else [])))),
        ("datawriter",
         dw.ConfParams(
             decision_connection=f"{PARTITION}.trigdec_{HOSTIDX}",
             token_connection=PARTITION + ".triginh",
             data_store_parameters=hdf5ds.ConfParams(
                 name="data_store",
                 version=3,
                 operational_environment=OPERATIONAL_ENVIRONMENT,
                 directory_path=OUTPUT_PATH,
                 max_file_size_bytes=MAX_FILE_SIZE,
                 disable_unique_filename_suffix=False,
                 filename_parameters=hdf5ds.FileNameParams(
                     overall_prefix=OPERATIONAL_ENVIRONMENT,
                     digits_for_run_number=6,
                     file_index_prefix="",
                     digits_for_file_index=4,
                 ),
                 file_layout_parameters=hdf5ds.FileLayoutParams(
                     trigger_record_name_prefix="TriggerRecord",
                     digits_for_trigger_number=5,
                     path_param_list=hdf5ds.PathParamList([
                         hdf5ds.PathParams(
                             detector_group_type="TPC",
                             detector_group_name="TPC",
                             region_name_prefix=TPC_REGION_NAME_PREFIX,
                             element_name_prefix="Link"),
                         hdf5ds.PathParams(detector_group_type="PDS",
                                           detector_group_name="PDS"),
                         hdf5ds.PathParams(detector_group_type="NDLArTPC",
                                           detector_group_name="NDLArTPC"),
                         hdf5ds.PathParams(detector_group_type="Trigger",
                                           detector_group_name="Trigger"),
                         hdf5ds.PathParams(detector_group_type="TPC_TP",
                                           detector_group_name="TPC",
                                           region_name_prefix="TP_APA",
                                           element_name_prefix="Link")
                     ]))))),
    ] + [
        ("fragment_receiver",
         frcv.ConfParams(general_queue_timeout=QUEUE_POP_WAIT_MS,
                         connection_name=f"{PARTITION}.frags_{HOSTIDX}")),
    ] + [(f"tpset_subscriber_{idx}",
          ntoq.Conf(msg_type="dunedaq::trigger::TPSet",
                    msg_module_name="TPSetNQ",
                    receiver_config=nor.Conf(name=f'{PARTITION}.tpsets_{idx}',
                                             subscriptions=["TPSets"])))
         for idx in range(len(RU_CONFIG))] + (
             [("tpswriter", tpsw.ConfParams(max_file_size_bytes=1000000000, )
               )] if TPSET_WRITING_ENABLED else []))

    startpars = rccmd.StartParams(run=RUN_NUMBER)
    cmd_data['start'] = acmd(
        ([("tpswriter", startpars),
          ("tpset_subscriber_.*", startpars)] if TPSET_WRITING_ENABLED else []) +
        [("datawriter", startpars), ("fragment_receiver", startpars),
         ("trb", startpars), ("trigdec_receiver", startpars)])

    cmd_data['stop'] = acmd([
        ("trigdec_receiver", None),
        ("trb", None),
        ("fragment_receiver", None),
        ("datawriter", None),
    ] + ([("tpset_subscriber_.*",
           None), ("tpswriter", None)] if TPSET_WRITING_ENABLED else []))

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("fragment_receiver", None),
                              ("trigdec_receiver", None),
                              ("qton_token", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
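The generators in this collection return a cmd_data dict keyed by command name. Below is a minimal serialization sketch, assuming each command object exposes the .pod() method that the RCCommand objects in the later examples provide; the helper name and output layout are illustrative only.

import json
import os

def write_cmd_data(cmd_data, out_dir="json"):
    # One file per command: init.json, conf.json, start.json, ...
    os.makedirs(out_dir, exist_ok=True)
    for name, cmd in cmd_data.items():
        with open(os.path.join(out_dir, f"{name}.json"), "w") as f:
            json.dump(cmd.pod(), f, indent=4, sort_keys=True)
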
Example n. 17
def generate(TRIGGER_RATE_HZ: float = 1.0,
             OUTPUT_PATH: str = ".",
             TOKEN_COUNT: int = 10,
             CLOCK_SPEED_HZ: int = 50000000,
             FORGET_DECISION_PROB: float = 0.0,
             HOLD_DECISION_PROB: float = 0.0,
             HOLD_MAX_SIZE: int = 0,
             HOLD_MIN_SIZE: int = 0,
             HOLD_MIN_MS: int = 0,
             RELEASE_RANDOMLY_PROB: float = 0.0):
    """
    { item_description }
    """
    cmd_data = {}

    # Derived parameters
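    # One second is 1e9 ns, so the per-trigger period is 1e9 / TRIGGER_RATE_HZ ns.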
    TRIGGER_INTERVAL_NS = math.floor((1e9 / TRIGGER_RATE_HZ))

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="hsievent_q", kind='FollyMPMCQueue', capacity=1000),
        app.QueueSpec(inst="time_sync_q", kind='FollySPSCQueue',
                      capacity=1000),
        app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=2000),
        app.QueueSpec(inst="trigger_decision_q",
                      kind='FollySPSCQueue',
                      capacity=2000),
        app.QueueSpec(inst="trigger_candidate_q",
                      kind='FollyMPMCQueue',
                      capacity=2000),  # no MPSC queue kind is available, so use MPMC
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("fdf", "FakeDataFlow", [
            app.QueueInfo(name="trigger_decision_source",
                          inst="trigger_decision_q",
                          dir="input"),
            app.QueueInfo(
                name="trigger_complete_sink", inst="token_q", dir="output"),
        ]),
        mspec("mlt", "ModuleLevelTrigger", [
            app.QueueInfo(name="token_source", inst="token_q", dir="input"),
            app.QueueInfo(name="trigger_decision_sink",
                          inst="trigger_decision_q",
                          dir="output"),
            app.QueueInfo(name="trigger_candidate_source",
                          inst="trigger_candidate_q",
                          dir="output"),
        ]),
        mspec("ftsdg", "FakeTimeStampedDataGenerator", [
            app.QueueInfo(
                name="hsievent_sink", inst="hsievent_q", dir="output"),
        ]),
        mspec("ttcm", "TimingTriggerCandidateMaker", [
            app.QueueInfo(name="input", inst="hsievent_q", dir="input"),
            app.QueueInfo(
                name="output", inst="trigger_candidate_q", dir="output"),
        ]),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        ("fdf",
         fdf.ConfParams(hold_max_size=HOLD_MAX_SIZE,
                        hold_min_size=HOLD_MIN_SIZE,
                        hold_min_ms=HOLD_MIN_MS,
                        release_randomly_prob=RELEASE_RANDOMLY_PROB,
                        forget_decision_prob=FORGET_DECISION_PROB,
                        hold_decision_prob=HOLD_DECISION_PROB)),
        ("mlt",
         mlt.ConfParams(links=list(range(3)),
                        initial_token_count=TOKEN_COUNT)),
        ("ftsdg",
         ftsdg.Conf(sleep_time=TRIGGER_INTERVAL_NS, frequency=CLOCK_SPEED_HZ)),
        ("ttcm", ttcm.Conf()),
    ])

    startpars = rccmd.StartParams(run=1, disable_data_storage=False)
    cmd_data['start'] = acmd([
        ("fdf", startpars),
        ("mlt", startpars),
        ("ftsdg", startpars),
        ("ttcm", startpars),
    ])

    # We issue stop commands in the order "upstream to downstream" so
    # that each module can drain its input queue at stop, and be
    # guaranteed to get all inputs (at least when everything lives in
    # the same module)
    cmd_data['stop'] = acmd([
        ("ftsdg", None),
        ("ttcm", None),
        ("mlt", None),
        ("fdf", None),
    ])

    cmd_data['pause'] = acmd([("", None)])

    resumepars = rccmd.ResumeParams(trigger_interval_ticks=50000000)
    cmd_data['resume'] = acmd([("mlt", resumepars)])

    cmd_data['scrap'] = acmd([("", None)])

    return cmd_data
Example n. 18
def generate(NW_SPECS,
             RU_CONFIG=[],
             EMULATOR_MODE=False,
             RUN_NUMBER=333,
             DATA_FILE="./frames.bin",
             CLOCK_SPEED_HZ=50000000,
             RUIDX=0,
             SYSTEM_TYPE='TPC',
             DQM_ENABLED=False,
             DQM_KAFKA_ADDRESS='',
             DQM_CMAP='HD',
             DQM_RAWDISPLAY_PARAMS=[60, 10, 50],
             DQM_MEANRMS_PARAMS=[10, 1, 100],
             DQM_FOURIER_PARAMS=[600, 60, 100],
             DQM_FOURIERSUM_PARAMS=[10, 1, 8192],
             PARTITION="UNKNOWN"):
    """Generate the json configuration for the dqm process"""

    cmd_data = {}

    required_eps = {f'{PARTITION}.timesync_{RUIDX}'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}")

    MIN_LINK = RU_CONFIG[RUIDX]["start_channel"]
    MAX_LINK = MIN_LINK + RU_CONFIG[RUIDX]["channel_count"]
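    # This readout unit handles the links in [MIN_LINK, MAX_LINK).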
    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
        app.QueueSpec(inst="trigger_decision_q_dqm", kind='FollySPSCQueue', capacity=20),
        app.QueueSpec(inst="trigger_record_q_dqm", kind='FollySPSCQueue', capacity=20)
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    mod_specs = [mspec("trb_dqm", "TriggerRecordBuilder", [
                    app.QueueInfo(name="trigger_decision_input_queue", inst="trigger_decision_q_dqm", dir="input"),
                    app.QueueInfo(name="trigger_record_output_queue", inst="trigger_record_q_dqm", dir="output"),
                    app.QueueInfo(name="data_fragment_input_queue", inst="data_fragments_q", dir="input")
                ]),
    ]
    mod_specs += [mspec("dqmprocessor", "DQMProcessor", [
                    app.QueueInfo(name="trigger_record_dqm_processor", inst="trigger_record_q_dqm", dir="input"),
                    app.QueueInfo(name="trigger_decision_dqm_processor", inst="trigger_decision_q_dqm", dir="output"),
                ]),
    ]

    mod_specs += [
        mspec(f"fragment_receiver_dqm", "FragmentReceiver",
              [app.QueueInfo(name="output", inst="data_fragments_q", dir="output")
               ])]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs, nwconnections=NW_SPECS)

    conf_list = [("fragment_receiver_dqm", frcv.ConfParams(
                    general_queue_timeout=QUEUE_POP_WAIT_MS,
                    connection_name=f"{PARTITION}.fragx_dqm_{RUIDX}"))
            ] + [
                ("trb_dqm", trb.ConfParams(
                        general_queue_timeout=QUEUE_POP_WAIT_MS,
                        reply_connection_name = f"{PARTITION}.fragx_dqm_{RUIDX}",
                        map=trb.mapgeoidconnections([
                                trb.geoidinst(region=RU_CONFIG[RUIDX]["region_id"], element=idx, system=SYSTEM_TYPE, connection_name=f"{PARTITION}.datareq_{RUIDX}") for idx in range(MIN_LINK, MAX_LINK)
                            ]),
                        ))
            ] + [
                ('dqmprocessor', dqmprocessor.Conf(
                        region=RU_CONFIG[RUIDX]["region_id"],
                        channel_map=DQM_CMAP, # 'HD' for horizontal drift or 'VD' for vertical drift
                        sdqm_hist=dqmprocessor.StandardDQM(**{'how_often' : DQM_RAWDISPLAY_PARAMS[0], 'unavailable_time' : DQM_RAWDISPLAY_PARAMS[1], 'num_frames' : DQM_RAWDISPLAY_PARAMS[2]}),
                        sdqm_mean_rms=dqmprocessor.StandardDQM(**{'how_often' : DQM_MEANRMS_PARAMS[0], 'unavailable_time' : DQM_MEANRMS_PARAMS[1], 'num_frames' : DQM_MEANRMS_PARAMS[2]}),
                        sdqm_fourier=dqmprocessor.StandardDQM(**{'how_often' : DQM_FOURIER_PARAMS[0], 'unavailable_time' : DQM_FOURIER_PARAMS[1], 'num_frames' : DQM_FOURIER_PARAMS[2]}),
                        sdqm_fourier_sum=dqmprocessor.StandardDQM(**{'how_often' : DQM_FOURIERSUM_PARAMS[0], 'unavailable_time' : DQM_FOURIERSUM_PARAMS[1], 'num_frames' : DQM_FOURIERSUM_PARAMS[2]}),
                        kafka_address=DQM_KAFKA_ADDRESS,
                        link_idx=list(range(MIN_LINK, MAX_LINK)),
                        clock_frequency=CLOCK_SPEED_HZ,
                        timesync_connection_name = f"{PARTITION}.timesync_{RUIDX}",
                        ))
            ]

    cmd_data['conf'] = acmd(conf_list)

    startpars = rccmd.StartParams(run=RUN_NUMBER)
    cmd_data['start'] = acmd([
            ("fragment_receiver_dqm", startpars),
            ("dqmprocessor", startpars),
            ("trb_dqm", startpars),
            ])

    cmd_data['stop'] = acmd([
            ("trb_dqm", None), 
            ("dqmprocessor", None),
            ("fragment_receiver_dqm", None),
            ])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
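The four StandardDQM blocks above differ only in their three-element parameter lists ([how_often, unavailable_time, num_frames]). A small helper, sketched here under the assumption that dqmprocessor.StandardDQM keeps the keyword names used above, would remove the repetition.

def standard_dqm(params):
    # params is a [how_often, unavailable_time, num_frames] triple,
    # e.g. DQM_RAWDISPLAY_PARAMS.
    how_often, unavailable_time, num_frames = params
    return dqmprocessor.StandardDQM(how_often=how_often,
                                    unavailable_time=unavailable_time,
                                    num_frames=num_frames)

# Usage: sdqm_hist=standard_dqm(DQM_RAWDISPLAY_PARAMS), and so on.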
Example n. 19
def generate(NW_SPECS: list,
             RUN_NUMBER=333,
             CLOCK_SPEED_HZ: int = 50000000,
             DATA_RATE_SLOWDOWN_FACTOR: int = 1,
             TRIGGER_RATE_HZ: int = 1,
             HSI_DEVICE_ID: int = 0,
             MEAN_SIGNAL_MULTIPLICITY: int = 0,
             SIGNAL_EMULATION_MODE: int = 0,
             ENABLED_SIGNALS: int = 0b00000001,
             PARTITION="UNKNOWN"):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = {PARTITION + '.hsievent'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    # Define modules and queues
    queue_bare_specs = []

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("fhsig", "FakeHSIEventGenerator", []),
    ]

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    trigger_interval_ticks = 0
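    # Period in ticks = (seconds per trigger) x (clock frequency / slowdown factor).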
    if TRIGGER_RATE_HZ > 0:
        trigger_interval_ticks = math.floor(
            (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR)

    cmd_data['conf'] = acmd([
        ("fhsig",
         fhsig.Conf(
             clock_frequency=CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR,
             trigger_interval_ticks=trigger_interval_ticks,
             mean_signal_multiplicity=MEAN_SIGNAL_MULTIPLICITY,
             signal_emulation_mode=SIGNAL_EMULATION_MODE,
             enabled_signals=ENABLED_SIGNALS,
             timesync_topic="Timesync",
             hsievent_connection_name=PARTITION + ".hsievent",
         )),
    ])

    startpars = rccmd.StartParams(
        run=RUN_NUMBER, trigger_interval_ticks=trigger_interval_ticks)
    resumepars = rccmd.ResumeParams(
        trigger_interval_ticks=trigger_interval_ticks)

    cmd_data['start'] = acmd([
        ("fhsig", startpars),
    ])

    cmd_data['stop'] = acmd([
        ("fhsig", None),
    ])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("fhsig", resumepars)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
Example n. 20
def generate(
    PARTITION="hsi_readout_test",
    OUTPUT_PATH=".",
    TRIGGER_RATE_HZ: int = 1,
    CLOCK_SPEED_HZ: int = 50000000,
    # Offset for HSIEvent timestamps in units of clock ticks. A positive
    # offset increases the timestamp estimate.
    HSI_TIMESTAMP_OFFSET: int = 0,
    HSI_DEVICE_ID: int = 0,
    MEAN_SIGNAL_MULTIPLICITY: int = 0,
    SIGNAL_EMULATION_MODE: int = 0,
    ENABLED_SIGNALS: int = 0b00000001,
):

    # network connection
    nw_specs = [
        nwmgr.Connection(name=PARTITION + ".hsievent",
                         topics=[],
                         address="tcp://127.0.0.1:12344")
    ]

    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(inst="time_sync_from_netq",
                      kind='FollySPSCQueue',
                      capacity=100),
    ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec("ntoq_timesync", "NetworkToQueue", [
            app.QueueInfo(
                name="output", inst="time_sync_from_netq", dir="output")
        ]),
        mspec("fhsig", "FakeHSIEventGenerator", [
            app.QueueInfo(name="time_sync_source",
                          inst="time_sync_from_netq",
                          dir="input"),
        ]),
    ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    jstr = json.dumps(init_specs.pod(), indent=4, sort_keys=True)
    print(jstr)

    initcmd = rcif.RCCommand(id=cmdlib.CmdId("init"),
                             entry_state="NONE",
                             exit_state="INITIAL",
                             data=init_specs)

    trigger_interval_ticks = 0
    if TRIGGER_RATE_HZ > 0:
        trigger_interval_ticks = math.floor(
            (1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ)

    mods = [
        ("fhsig",
         fhsig.Conf(
             clock_frequency=CLOCK_SPEED_HZ,
             trigger_interval_ticks=trigger_interval_ticks,
             timestamp_offset=HSI_TIMESTAMP_OFFSET,
             mean_signal_multiplicity=MEAN_SIGNAL_MULTIPLICITY,
             signal_emulation_mode=SIGNAL_EMULATION_MODE,
             enabled_signals=ENABLED_SIGNALS,
             timesync_topic="Timesync",
             hsievent_connection_name=PARTITION + ".hsievent",
         )),
    ]

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED", mods)

    jstr = json.dumps(confcmd.pod(), indent=4, sort_keys=True)
    print(jstr)

    startpars = rcif.StartParams(run=33, disable_data_storage=False)

    startcmd = mrccmd("start", "CONFIGURED", "RUNNING",
                      [("ntoq_timesync", startpars), ("fhsig", startpars)])

    jstr = json.dumps(startcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStart\n\n", jstr)

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [("ntoq_timesync", None),
                                                       ("fhsig", None)])

    jstr = json.dumps(stopcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nStop\n\n", jstr)

    scrapcmd = mcmd("scrap", [("", None)])

    jstr = json.dumps(scrapcmd.pod(), indent=4, sort_keys=True)
    print("=" * 80 + "\nScrap\n\n", jstr)

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
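Unlike the cmd_data-returning generators, this variant returns the whole command sequence as one JSON string, so a caller can persist it directly. A sketch, with the output filename chosen arbitrarily:

jstr = generate(TRIGGER_RATE_HZ=1)
with open("hsi_readout_test.json", "w") as f:
    f.write(jstr)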
Example n. 21
def generate(FRONTEND_TYPE='wib',
             NUMBER_OF_DATA_PRODUCERS=1,
             NUMBER_OF_TP_PRODUCERS=1,
             DATA_RATE_SLOWDOWN_FACTOR=1,
             RUN_NUMBER=333,
             DATA_FILE="./frames.bin"):

    cmd_data = {}

    # Define modules and queues
    queue_specs = [
        app.QueueSpec(inst="time_sync_q", kind='FollyMPMCQueue', capacity=100),
        app.QueueSpec(
            inst="data_fragments_q", kind='FollyMPMCQueue', capacity=100),
    ] + [
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=1000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(inst=f"{FRONTEND_TYPE}_link_{idx}",
                      kind='FollySPSCQueue',
                      capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"tp_link_{idx}", kind='FollySPSCQueue', capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS, NUMBER_OF_DATA_PRODUCERS +
                         NUMBER_OF_TP_PRODUCERS)
    ] + [
        app.QueueSpec(inst=f"{FRONTEND_TYPE}_recording_link_{idx}",
                      kind='FollySPSCQueue',
                      capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"tp_queue_{idx}", kind='FollySPSCQueue', capacity=100000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        app.QueueSpec(
            inst=f"tp_data_requests", kind='FollySPSCQueue', capacity=1000)
    ] + [
        app.QueueSpec(
            inst="tp_recording_link", kind='FollySPSCQueue', capacity=1000)
    ] + [
        app.QueueSpec(
            inst=f"tpset_link_{idx}", kind='FollySPSCQueue', capacity=10000)
        for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    mod_specs = [
        mspec("fake_source", "FakeCardReader", [
            app.QueueInfo(name=f"output_{idx}",
                          inst=f"{FRONTEND_TYPE}_link_{idx}",
                          dir="output")
            for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]),
    ] + [
        mspec(f"datahandler_{idx}", "DataLinkHandler", [
            app.QueueInfo(name="raw_input",
                          inst=f"{FRONTEND_TYPE}_link_{idx}",
                          dir="input"),
            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
            app.QueueInfo(
                name="requests", inst=f"data_requests_{idx}", dir="input"),
            app.QueueInfo(
                name="fragments", inst="data_fragments_q", dir="output"),
            app.QueueInfo(name="raw_recording",
                          inst=f"{FRONTEND_TYPE}_recording_link_{idx}",
                          dir="output"),
            app.QueueInfo(name="tp_out", inst=f"tp_queue_{idx}", dir="output"),
            app.QueueInfo(
                name="tpset_out", inst=f"tpset_link_{idx}", dir="output")
        ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(f"data_recorder_{idx}", "DataRecorder", [
            app.QueueInfo(name="raw_recording",
                          inst=f"{FRONTEND_TYPE}_recording_link_{idx}",
                          dir="input")
        ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(f"timesync_consumer", "TimeSyncConsumer", [
            app.QueueInfo(name="input_queue", inst=f"time_sync_q", dir="input")
        ])
    ] + [
        mspec(f"fragment_consumer", "FragmentConsumer", [
            app.QueueInfo(
                name="input_queue", inst=f"data_fragments_q", dir="input")
        ])
    ] + [
        mspec(f"tp_handler_{idx}", "DataLinkHandler", [
            app.QueueInfo(
                name="raw_input", inst=f"tp_queue_{idx}", dir="input"),
            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
            app.QueueInfo(
                name="requests", inst="tp_data_requests", dir="input"),
            app.QueueInfo(
                name="fragments", inst="data_fragments_q", dir="output"),
            app.QueueInfo(
                name="raw_recording", inst="tp_recording_link", dir="output")
        ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ] + [
        mspec(f"tpset_publisher_{idx}", "QueueToNetwork", [
            app.QueueInfo(name="input", inst=f"tpset_link_{idx}", dir="input")
        ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
    ]

    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs)

    cmd_data['conf'] = acmd([
        (
            "fake_source",
            fcr.Conf(
                link_confs=[
                    fcr.LinkConfiguration(
                        geoid=fcr.GeoID(system="TPC", region=0, element=idx),
                        slowdown=DATA_RATE_SLOWDOWN_FACTOR,
                        queue_name=f"output_{idx}",
                        data_filename=DATA_FILE,
                        input_limit=10000000000,
                    ) for idx in range(NUMBER_OF_DATA_PRODUCERS)
                ],
                # input_limit=10485100, # default
                queue_timeout_ms=QUEUE_POP_WAIT_MS,
                set_t0_to=0)),
    ] + [(f"datahandler_{idx}",
          dlh.Conf(source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                   fake_trigger_flag=1,
                   latency_buffer_size=3 * CLOCK_SPEED_HZ /
                   (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                   pop_limit_pct=0.8,
                   pop_size_pct=0.1,
                   apa_number=0,
                   link_number=idx))
         for idx in range(NUMBER_OF_DATA_PRODUCERS)] +
                            [(f"data_recorder_{idx}",
                              bfs.Conf(output_file=f"output_{idx}.out",
                                       stream_buffer_size=8388608))
                             for idx in range(NUMBER_OF_DATA_PRODUCERS)] +
                            [(f"tp_handler_{idx}",
                              dlh.Conf(
                                  source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                                  fake_trigger_flag=1,
                                  latency_buffer_size=3 * CLOCK_SPEED_HZ /
                                  (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR),
                                  pop_limit_pct=0.8,
                                  pop_size_pct=0.1,
                                  apa_number=0,
                                  link_number=0))
                             for idx in range(NUMBER_OF_DATA_PRODUCERS)] +
                            [(f"tpset_publisher_{idx}",
                              qton.Conf(msg_type="dunedaq::trigger::TPSet",
                                        msg_module_name="TPSetNQ",
                                        sender_config=nos.Conf(
                                            ipm_plugin_type="ZmqPublisher",
                                            address='tcp://127.0.0.1:' +
                                            str(5000 + idx),
                                            topic="foo",
                                            stype="msgpack")))
                             for idx in range(NUMBER_OF_DATA_PRODUCERS)])

    startpars = rccmd.StartParams(run=RUN_NUMBER)
    cmd_data['start'] = acmd([("datahandler_.*", startpars),
                              ("fake_source", startpars),
                              ("data_recorder_.*", startpars),
                              ("timesync_consumer", startpars),
                              ("fragment_consumer", startpars),
                              ("tp_handler_.*", startpars),
                              ("tpset_publisher_.*", startpars)])

    cmd_data['pause'] = acmd([])

    cmd_data['resume'] = acmd([])

    cmd_data['stop'] = acmd([("fake_source", None), ("datahandler_.*", None),
                             ("data_recorder_.*", None),
                             ("timesync_consumer", None),
                             ("fragment_consumer", None),
                             ("tp_handler_.*", None),
                             ("tpset_publisher_.*", None)])

    cmd_data['scrap'] = acmd([("fake_source", None), ("datahandler_.*", None),
                              ("data_recorder_.*", None),
                              ("timesync_consumer", None),
                              ("fragment_consumer", None),
                              ("tp_handler_.*", None),
                              ("tpset_publisher_.*", None)])

    return cmd_data
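A note on the start/stop lists above: selectors such as "datahandler_.*" and the empty string behave like regular expressions matched against module instance names (inferred from their usage throughout these examples, not shown explicitly here). A quick illustration with Python's re module:

import re

modules = ["fake_source", "datahandler_0", "datahandler_1", "tp_handler_0"]
print([m for m in modules if re.match("datahandler_.*", m)])
# -> ['datahandler_0', 'datahandler_1']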
Example n. 22
def generate(RUN_NUMBER: int,
             NW_SPECS: list,
             TIMING_CMD_NETWORK_ENDPOINTS: set,
             GATHER_INTERVAL=1e6,
             GATHER_INTERVAL_DEBUG=10e6,
             HSI_DEVICE_NAME="",
             CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
             UHAL_LOG_LEVEL="notice",
             PARTITION="UNKNOWN"):
    """
    { item_description }
    """
    cmd_data = {}

    required_eps = TIMING_CMD_NETWORK_ENDPOINTS
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}")

    # Define modules and queues
    queue_bare_specs = [app.QueueSpec(inst="ntoq_timing_cmds", kind='FollyMPMCQueue', capacity=100),]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))

    thi_init_data = thi.InitParams(
                                   qinfos=app.QueueInfos([app.QueueInfo(name="hardware_commands_in", inst="ntoq_timing_cmds", dir="input")]),
                                   connections_file=CONNECTIONS_FILE,
                                   gather_interval=GATHER_INTERVAL,
                                   gather_interval_debug=GATHER_INTERVAL_DEBUG,
                                   monitored_device_name_master="",
                                   monitored_device_names_fanout=[],
                                   monitored_device_name_endpoint="",
                                   monitored_device_name_hsi=HSI_DEVICE_NAME,
                                   uhal_log_level=UHAL_LOG_LEVEL)

    mod_specs = [app.ModSpec(inst="thi", plugin="TimingHardwareManagerPDI", data=thi_init_data),]
    for cmd_nw_endpoint in TIMING_CMD_NETWORK_ENDPOINTS:
        nq_mod_name_suffix=cmd_nw_endpoint.split('.')[-1]
        mod_specs.extend([mspec(f'ntoq_{nq_mod_name_suffix}', "NetworkToQueue", [app.QueueInfo(name="output", inst="ntoq_timing_cmds", dir="output")]),])
            
    cmd_data['init'] = app.Init(queues=queue_specs, modules=mod_specs, nwconnections=NW_SPECS)
    

    conf_cmds = []
    for cmd_nw_endpoint in TIMING_CMD_NETWORK_ENDPOINTS:
        nq_mod_name_suffix=cmd_nw_endpoint.split('.')[-1]
        conf_cmds.extend([(f'ntoq_{nq_mod_name_suffix}', ntoq.Conf(msg_type="dunedaq::timinglibs::timingcmd::TimingHwCmd",
                                               msg_module_name="TimingHwCmdNQ",
                                               receiver_config=nor.Conf(name=cmd_nw_endpoint))),])
    cmd_data['conf'] = acmd(conf_cmds)
 
    startpars = rccmd.StartParams(run=RUN_NUMBER)

    cmd_data['start'] = acmd([("", startpars),])

    cmd_data['stop'] = acmd([("", None),])

    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
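A sketch of how this generator might be invoked; the endpoint name is made up for illustration, and the NW_SPECS entries must carry matching .name fields (nwmgr.Connection, as in Example n. 20):

nw_specs = [
    nwmgr.Connection(name="UNKNOWN.timing_cmds",
                     topics=[],
                     address="tcp://127.0.0.1:12345")
]
cmd_data = generate(RUN_NUMBER=1,
                    NW_SPECS=nw_specs,
                    TIMING_CMD_NETWORK_ENDPOINTS={"UNKNOWN.timing_cmds"})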
Example n. 23
def generate_trigemu(
        network_endpoints,
        NUMBER_OF_DATA_PRODUCERS=2,          
        DATA_RATE_SLOWDOWN_FACTOR = 1,
        RUN_NUMBER = 333, 
        TRIGGER_RATE_HZ = 1.0,
        DATA_FILE="./frames.bin",
        OUTPUT_PATH=".",
        TOKEN_COUNT=10
    ):
    """Generate the json config for the TriggerDecisionEmulator process"""
    
    trg_interval_ticks = math.floor((1/TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR)

    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="time_sync_from_netq", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="token_from_netq", kind='FollySPSCQueue', capacity=20),
            app.QueueSpec(inst="trigger_decision_to_netq", kind='FollySPSCQueue', capacity=20),
        ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))


    mod_specs = [
        mspec("qton_trigdec", "QueueToNetwork", [
                        app.QueueInfo(name="input", inst="trigger_decision_to_netq", dir="input")
                    ]),

        mspec("ntoq_token", "NetworkToQueue", [
                        app.QueueInfo(name="output", inst="token_from_netq", dir="output")
                    ]),

        mspec("ntoq_timesync", "NetworkToQueue", [
                        app.QueueInfo(name="output", inst="time_sync_from_netq", dir="output")
                    ]),

        mspec("tde", "TriggerDecisionEmulator", [
                        app.QueueInfo(name="time_sync_source", inst="time_sync_from_netq", dir="input"),
                        app.QueueInfo(name="token_source", inst="token_from_netq", dir="input"),
                        app.QueueInfo(name="trigger_decision_sink", inst="trigger_decision_to_netq", dir="output"),
                    ]),
        ]

    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    initcmd = rccmd.RCCommand(
        id=basecmd.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED",[
                ("qton_trigdec", qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                                           msg_module_name="TriggerDecisionNQ",
                                           sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                                                  address=network_endpoints["trigdec"],
                                                                  stype="msgpack")
                                           )
                 ),

                 ("ntoq_token", ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                                            msg_module_name="TriggerDecisionTokenNQ",
                                            receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                                                     address=network_endpoints["triginh"])
                                            )
                 ),

                ("ntoq_timesync", ntoq.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                                           msg_module_name="TimeSyncNQ",
                                           receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                                                    address=network_endpoints["timesync"])
                                           )
                ),

                ("tde", tde.ConfParams(
                        links=[idx for idx in range(NUMBER_OF_DATA_PRODUCERS)],
                        min_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                        max_links_in_request=NUMBER_OF_DATA_PRODUCERS,
                        min_readout_window_ticks=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*1000)),
                        max_readout_window_ticks=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*1000)),
                        trigger_window_offset=math.floor(CLOCK_SPEED_HZ/(DATA_RATE_SLOWDOWN_FACTOR*2000)),
                        # The delay is set to put the trigger well within the latency buff
                        trigger_delay_ticks=math.floor(CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR),
                        # We divide the trigger interval by
                        # DATA_RATE_SLOWDOWN_FACTOR so the triggers are still
                        # emitted per (wall-clock) second, rather than being
                        # spaced out further
                        trigger_interval_ticks=trg_interval_ticks,
                        clock_frequency_hz=CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR,
                        initial_token_count=TOKEN_COUNT                    
                        )),
            ])

    startpars = rccmd.StartParams(run=RUN_NUMBER, disable_data_storage=False)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("qton_trigdec", startpars),
            ("ntoq_token", startpars),
            ("ntoq_timesync", startpars),
            ("tde", startpars),
        ])

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("qton_trigdec", None),
            ("ntoq_timesync", None),
            ("ntoq_token", None),
            ("tde", None),
        ])

    pausecmd = mrccmd("pause", "RUNNING", "RUNNING", [
            ("", None)
        ])

    resumecmd = mrccmd("resume", "RUNNING", "RUNNING", [
            ("tde", tde.ResumeParams(
                            trigger_interval_ticks=trg_interval_ticks
                        ))
        ])

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [
            ("", None)
        ])

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, pausecmd, resumecmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
Example n. 24
def generate_df(
        network_endpoints,
        NUMBER_OF_DATA_PRODUCERS=2,
        EMULATOR_MODE=False,
        DATA_RATE_SLOWDOWN_FACTOR = 1,
        RUN_NUMBER = 333, 
        TRIGGER_RATE_HZ = 1.0,
        DATA_FILE="./frames.bin",
        OUTPUT_PATH=".",
        DISABLE_OUTPUT=False,
        FLX_INPUT=True,
        TOKEN_COUNT=0
    ):
    """Generate the json configuration for the readout and DF process"""
   
    trg_interval_ticks = math.floor((1/TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR)

    # Define modules and queues
    queue_bare_specs = [
            app.QueueSpec(inst="time_sync_q", kind='FollyMPMCQueue', capacity=100),
            app.QueueSpec(inst="token_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_from_netq", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_decision_copy_for_bookkeeping", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="trigger_record_q", kind='FollySPSCQueue', capacity=100),
            app.QueueSpec(inst="data_fragments_q", kind='FollyMPMCQueue', capacity=1000),
        ] + [
            app.QueueSpec(inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=100)
                for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ] + [

            app.QueueSpec(inst=f"wib_link_{idx}", kind='FollySPSCQueue', capacity=100000)
                for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]
    

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs, key=lambda x: x.inst))


    mod_specs = [
        mspec("ntoq_trigdec", "NetworkToQueue", [
                        app.QueueInfo(name="output", inst="trigger_decision_from_netq", dir="output")
                    ]),

        mspec("qton_token", "QueueToNetwork", [
                        app.QueueInfo(name="input", inst="token_q", dir="input")
                    ]),

        mspec("qton_timesync", "QueueToNetwork", [
                        app.QueueInfo(name="input", inst="time_sync_q", dir="input")
                    ]),

        mspec("rqg", "RequestGenerator", [
                        app.QueueInfo(name="trigger_decision_input_queue", inst="trigger_decision_from_netq", dir="input"),
                        app.QueueInfo(name="trigger_decision_for_event_building", inst="trigger_decision_copy_for_bookkeeping", dir="output"),
                    ] + [
                        app.QueueInfo(name=f"data_request_{idx}_output_queue", inst=f"data_requests_{idx}", dir="output")
                            for idx in range(NUMBER_OF_DATA_PRODUCERS)
                    ]),

        mspec("ffr", "FragmentReceiver", [
                        app.QueueInfo(name="trigger_decision_input_queue", inst="trigger_decision_copy_for_bookkeeping", dir="input"),
                        app.QueueInfo(name="trigger_record_output_queue", inst="trigger_record_q", dir="output"),
                        app.QueueInfo(name="data_fragment_input_queue", inst="data_fragments_q", dir="input"),
                    ]),

        mspec("datawriter", "DataWriter", [
                        app.QueueInfo(name="trigger_record_input_queue", inst="trigger_record_q", dir="input"),
                        app.QueueInfo(name="token_output_queue", inst="token_q", dir="output"),
                    ]),

        ] + [
                mspec(f"datahandler_{idx}", "DataLinkHandler", [

                            app.QueueInfo(name="raw_input", inst=f"wib_link_{idx}", dir="input"),
                            app.QueueInfo(name="timesync", inst="time_sync_q", dir="output"),
                            app.QueueInfo(name="requests", inst=f"data_requests_{idx}", dir="input"),
                            app.QueueInfo(name="fragments", inst="data_fragments_q", dir="output"),
                            ]) for idx in range(NUMBER_OF_DATA_PRODUCERS)
        ]

    if FLX_INPUT:
        mod_specs.append(mspec("flxcard_0", "FelixCardReader", [
                        app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                            for idx in range(0,min(5, NUMBER_OF_DATA_PRODUCERS))
                        ]))
        if NUMBER_OF_DATA_PRODUCERS>5 :
            mod_specs.append(mspec("flxcard_1", "FelixCardReader", [
                            app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                                for idx in range(5, NUMBER_OF_DATA_PRODUCERS)
                            ]))
    else:
        mod_specs.append(mspec("fake_source", "FakeCardReader", [
                        app.QueueInfo(name=f"output_{idx}", inst=f"wib_link_{idx}", dir="output")
                            for idx in range(NUMBER_OF_DATA_PRODUCERS)
                        ]))

    


    init_specs = app.Init(queues=queue_specs, modules=mod_specs)

    initcmd = rccmd.RCCommand(
        id=basecmd.CmdId("init"),
        entry_state="NONE",
        exit_state="INITIAL",
        data=init_specs
    )

    confcmd = mrccmd("conf", "INITIAL", "CONFIGURED",[
                ("ntoq_trigdec", ntoq.Conf(msg_type="dunedaq::dfmessages::TriggerDecision",
                                           msg_module_name="TriggerDecisionNQ",
                                           receiver_config=nor.Conf(ipm_plugin_type="ZmqReceiver",
                                                                    address=network_endpoints["trigdec"])
                                           )
                 ),

                ("qton_token", qton.Conf(msg_type="dunedaq::dfmessages::TriggerDecisionToken",
                                           msg_module_name="TriggerDecisionTokenNQ",
                                           sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                                                  address=network_endpoints["triginh"],
                                                                  stype="msgpack")
                                           )
                 ),

                ("qton_timesync", qton.Conf(msg_type="dunedaq::dfmessages::TimeSync",
                                            msg_module_name="TimeSyncNQ",
                                            sender_config=nos.Conf(ipm_plugin_type="ZmqSender",
                                                                   address=network_endpoints["timesync"],
                                                                   stype="msgpack")
                                           )
                ),
        
                ("rqg", rqg.ConfParams(
                        map=rqg.mapgeoidqueue([
                                rqg.geoidinst(apa=0, link=idx, queueinstance=f"data_requests_{idx}") for idx in range(NUMBER_OF_DATA_PRODUCERS)
                            ])  
                        )),
                ("ffr", ffr.ConfParams(
                            general_queue_timeout=QUEUE_POP_WAIT_MS
                        )),
                ("datawriter", dw.ConfParams(
                            initial_token_count=TOKEN_COUNT,
                            data_store_parameters=hdf5ds.ConfParams(
                                name="data_store",
                                # type = "HDF5DataStore", # default
                                directory_path = OUTPUT_PATH, # default
                                # mode = "all-per-file", # default
                                max_file_size_bytes = 1073741824,
                                disable_unique_filename_suffix = False,
                                filename_parameters = hdf5ds.HDF5DataStoreFileNameParams(
                                    overall_prefix = "swtest",
                                    digits_for_run_number = 6,
                                    file_index_prefix = "",
                                    digits_for_file_index = 4,
                                ),
                                file_layout_parameters = hdf5ds.HDF5DataStoreFileLayoutParams(
                                    trigger_record_name_prefix= "TriggerRecord",
                                    digits_for_trigger_number = 5,
                                    digits_for_apa_number = 3,
                                    digits_for_link_number = 2,
                                )
                            )
                        )),
                ("fake_source",fakecr.Conf(
                            link_ids=list(range(NUMBER_OF_DATA_PRODUCERS)),
                            # input_limit=10485100, # default
                            rate_khz = CLOCK_SPEED_HZ/(25*12*DATA_RATE_SLOWDOWN_FACTOR*1000),
                            raw_type = "wib",
                            data_filename = DATA_FILE,
                            queue_timeout_ms = QUEUE_POP_WAIT_MS
                        )),
                ("flxcard_0",flxcr.Conf(
                            card_id=0,
                            logical_unit=0,
                            dma_id=0,
                            chunk_trailer_size= 32,
                            dma_block_size_kb= 4,
                            dma_memory_size_gb= 4,
                            numa_id=0,
                            num_links=min(5,NUMBER_OF_DATA_PRODUCERS)
                        )),
                ("flxcard_1",flxcr.Conf(
                            card_id=0,
                            logical_unit=1,
                            dma_id=0,
                            chunk_trailer_size= 32,
                            dma_block_size_kb= 4,
                            dma_memory_size_gb= 4,
                            numa_id=0,
                            num_links=max(0, NUMBER_OF_DATA_PRODUCERS-5)
                        )),
            ] + [
                (f"datahandler_{idx}", dlh.Conf(
                        raw_type = "wib",
                        emulator_mode = EMULATOR_MODE,
                        # fake_trigger_flag=0, # default
                        source_queue_timeout_ms= QUEUE_POP_WAIT_MS,
                        latency_buffer_size = 3*CLOCK_SPEED_HZ/(25*12*DATA_RATE_SLOWDOWN_FACTOR),
                        pop_limit_pct = 0.8,
                        pop_size_pct = 0.1,
                        apa_number = 0,
                        link_number = idx
                        )) for idx in range(NUMBER_OF_DATA_PRODUCERS)
            ])

    startpars = rccmd.StartParams(run=RUN_NUMBER, trigger_interval_ticks=trg_interval_ticks, disable_data_storage=DISABLE_OUTPUT)
    startcmd = mrccmd("start", "CONFIGURED", "RUNNING", [
            ("qton_token", startpars),
            ("datawriter", startpars),
            ("ffr", startpars),
            ("qton_timesync", startpars),
            ("datahandler_.*", startpars),
            ("fake_source", startpars),
            ("flxcard.*", startpars),
            ("rqg", startpars),
            ("ntoq_trigdec", startpars),
        ])

    stopcmd = mrccmd("stop", "RUNNING", "CONFIGURED", [
            ("ntoq_trigdec", None),
            ("rqg", None),
            ("flxcard.*", None),
            ("fake_source", None),
            ("datahandler_.*", None),
            ("qton_timesync", None),
            ("ffr", None),
            ("datawriter", None),
            ("qton_token", None),
        ])

    pausecmd = mrccmd("pause", "RUNNING", "RUNNING", [
            ("", None)
        ])

    resumecmd = mrccmd("resume", "RUNNING", "RUNNING", [
            ("tde", tde.ResumeParams(
                            trigger_interval_ticks=trg_interval_ticks
                        ))
        ])

    scrapcmd = mrccmd("scrap", "CONFIGURED", "INITIAL", [
            ("", None)
        ])

    # Create a list of commands
    cmd_seq = [initcmd, confcmd, startcmd, stopcmd, pausecmd, resumecmd, scrapcmd]

    # Print them as json (to be improved/moved out)
    jstr = json.dumps([c.pod() for c in cmd_seq], indent=4, sort_keys=True)
    return jstr
Example n. 25
def generate(NW_SPECS,
             RU_CONFIG=[],
             EMULATOR_MODE=False,
             DATA_RATE_SLOWDOWN_FACTOR=1,
             RUN_NUMBER=333,
             DATA_FILE="./frames.bin",
             FLX_INPUT=False,
             SSP_INPUT=True,
             CLOCK_SPEED_HZ=50000000,
             RUIDX=0,
             RAW_RECORDING_ENABLED=False,
             RAW_RECORDING_OUTPUT_DIR=".",
             FRONTEND_TYPE='wib',
             SYSTEM_TYPE='TPC',
             SOFTWARE_TPG_ENABLED=False,
             USE_FAKE_DATA_PRODUCERS=False,
             PARTITION="UNKNOWN",
             LATENCY_BUFFER_SIZE=499968):
    """Generate the json configuration for the readout and DF process"""

    cmd_data = {}

    required_eps = {f'{PARTITION}.timesync_{RUIDX}'}
    if not required_eps.issubset([nw.name for nw in NW_SPECS]):
        raise RuntimeError(
            f"ERROR: not all the required endpoints ({', '.join(required_eps)}) found in list of endpoints {' '.join([nw.name for nw in NW_SPECS])}"
        )

    RATE_KHZ = CLOCK_SPEED_HZ / (25 * 12 * DATA_RATE_SLOWDOWN_FACTOR * 1000)

    MIN_LINK = RU_CONFIG[RUIDX]["start_channel"]
    MAX_LINK = MIN_LINK + RU_CONFIG[RUIDX]["channel_count"]
    # Define modules and queues
    queue_bare_specs = [
        app.QueueSpec(
            inst=f"data_requests_{idx}", kind='FollySPSCQueue', capacity=100)
        for idx in range(MIN_LINK, MAX_LINK)
    ] + [
        app.QueueSpec(inst="fragment_q", kind="FollyMPMCQueue", capacity=100)
    ]

    if not USE_FAKE_DATA_PRODUCERS:
        queue_bare_specs += [
            app.QueueSpec(inst=f"{FRONTEND_TYPE}_link_{idx}",
                          kind='FollySPSCQueue',
                          capacity=100000)
            for idx in range(MIN_LINK, MAX_LINK)
        ]
    if SOFTWARE_TPG_ENABLED:
        queue_bare_specs += [
            app.QueueSpec(inst=f"sw_tp_link_{idx}",
                          kind='FollySPSCQueue',
                          capacity=100000)
            for idx in range(MIN_LINK, MAX_LINK)
        ] + [
            app.QueueSpec(
                inst=f"tpset_queue", kind='FollyMPMCQueue', capacity=10000)
        ] + [
            app.QueueSpec(
                inst=f"tp_requests_{idx}", kind='FollySPSCQueue', capacity=100)
            for idx in range(MIN_LINK, MAX_LINK)
        ]

    if FRONTEND_TYPE == 'wib':
        queue_bare_specs += [
            app.QueueSpec(inst="errored_frames_q",
                          kind="FollyMPMCQueue",
                          capacity=10000)
        ]

    # Only needed to reproduce the same order as when using jsonnet
    queue_specs = app.QueueSpecs(sorted(queue_bare_specs,
                                        key=lambda x: x.inst))

    mod_specs = [
        mspec(f"fragment_sender", "FragmentSender", [
            app.QueueInfo(name="input_queue", inst="fragment_q", dir="input")
        ])
    ]

    if SOFTWARE_TPG_ENABLED:
        mod_specs += [
            mspec(f"request_receiver", "RequestReceiver", [
                app.QueueInfo(
                    name="output", inst=f"data_requests_{idx}", dir="output")
                for idx in range(MIN_LINK, MAX_LINK)
            ] + [
                app.QueueInfo(
                    name="output", inst=f"tp_requests_{idx}", dir="output")
                for idx in range(MIN_LINK, MAX_LINK)
            ])
        ] + [
            mspec(f"tp_datahandler_{idx}", "DataLinkHandler", [
                app.QueueInfo(
                    name="raw_input", inst=f"sw_tp_link_{idx}", dir="input"),
                app.QueueInfo(name="data_requests_0",
                              inst=f"tp_requests_{idx}",
                              dir="input"),
                app.QueueInfo(
                    name="fragment_queue", inst="fragment_q", dir="output")
            ]) for idx in range(MIN_LINK, MAX_LINK)
        ] + [
            mspec(f"tpset_publisher", "QueueToNetwork", [
                app.QueueInfo(name="input", inst=f"tpset_queue", dir="input")
            ])
        ]
    else:
        mod_specs += [
            mspec(f"request_receiver", "RequestReceiver", [
                app.QueueInfo(
                    name="output", inst=f"data_requests_{idx}", dir="output")
                for idx in range(MIN_LINK, MAX_LINK)
            ])
        ]

    if FRONTEND_TYPE == 'wib':
        mod_specs += [
            mspec("errored_frame_consumer", "ErroredFrameConsumer", [
                app.QueueInfo(
                    name="input_queue", inst="errored_frames_q", dir="input")
            ])
        ]

    # There are two flags to be checked so I think a for loop
    # is the closest way to the blocks that are being used here

    for idx in range(MIN_LINK, MAX_LINK):
        if USE_FAKE_DATA_PRODUCERS:
            mod_specs = mod_specs + [
                mspec(f"fakedataprod_{idx}", "FakeDataProd", [
                    app.QueueInfo(name="data_request_input_queue",
                                  inst=f"data_requests_{idx}",
                                  dir="input"),
                ])
            ]
        else:
            ls = [
                app.QueueInfo(name="raw_input",
                              inst=f"{FRONTEND_TYPE}_link_{idx}",
                              dir="input"),
                app.QueueInfo(name="data_requests_0",
                              inst=f"data_requests_{idx}",
                              dir="input"),
                app.QueueInfo(name="fragment_queue",
                              inst="fragment_q",
                              dir="output")
            ]
            if SOFTWARE_TPG_ENABLED:
                ls.extend([
                    app.QueueInfo(name="tp_out",
                                  inst=f"sw_tp_link_{idx}",
                                  dir="output"),
                    app.QueueInfo(name="tpset_out",
                                  inst=f"tpset_queue",
                                  dir="output")
                ])

            if FRONTEND_TYPE == 'wib':
                ls.extend([
                    app.QueueInfo(name="errored_frames",
                                  inst="errored_frames_q",
                                  dir="output")
                ])

            mod_specs += [mspec(f"datahandler_{idx}", "DataLinkHandler", ls)]

    if not USE_FAKE_DATA_PRODUCERS:
        if FLX_INPUT:
            mod_specs.append(
                mspec("flxcard_0", "FelixCardReader", [
                    app.QueueInfo(name=f"output_{idx}",
                                  inst=f"{FRONTEND_TYPE}_link_{idx}",
                                  dir="output")
                    for idx in range(
                        MIN_LINK, MIN_LINK +
                        min(5, RU_CONFIG[RUIDX]["channel_count"]))
                ]))
            if RU_CONFIG[RUIDX]["channel_count"] > 5:
                mod_specs.append(
                    mspec("flxcard_1", "FelixCardReader", [
                        app.QueueInfo(name=f"output_{idx}",
                                      inst=f"{FRONTEND_TYPE}_link_{idx}",
                                      dir="output")
                        for idx in range(MIN_LINK + 5, MAX_LINK)
                    ]))
        elif SSP_INPUT:
            mod_specs.append(
                mspec("ssp_0", "SSPCardReader", [
                    app.QueueInfo(name=f"output_{idx}",
                                  inst=f"{FRONTEND_TYPE}_link_{idx}",
                                  dir="output")
                    for idx in range(MIN_LINK, MAX_LINK)
                ]))

        else:
            fake_source = "fake_source"
            card_reader = "FakeCardReader"
            if FRONTEND_TYPE == 'pacman':
                fake_source = "pacman_source"
                card_reader = "PacmanCardReader"

            mod_specs.append(
                mspec(fake_source, card_reader, [
                    app.QueueInfo(name=f"output_{idx}",
                                  inst=f"{FRONTEND_TYPE}_link_{idx}",
                                  dir="output")
                    for idx in range(MIN_LINK, MAX_LINK)
                ]))

    cmd_data['init'] = app.Init(queues=queue_specs,
                                modules=mod_specs,
                                nwconnections=NW_SPECS)

    total_link_count = sum(ru["channel_count"] for ru in RU_CONFIG)

    conf_list = [
        (
            "fake_source",
            sec.Conf(
                link_confs=[
                    sec.LinkConfiguration(
                        geoid=sec.GeoID(system=SYSTEM_TYPE,
                                        region=RU_CONFIG[RUIDX]["region_id"],
                                        element=idx),
                        slowdown=DATA_RATE_SLOWDOWN_FACTOR,
                        queue_name=f"output_{idx}",
                        data_filename=DATA_FILE,
                        emu_frame_error_rate=0,
                    ) for idx in range(MIN_LINK, MAX_LINK)
                ],
                # input_limit=10485100, # default
                queue_timeout_ms=QUEUE_POP_WAIT_MS)),
        ("pacman_source",
         pcr.Conf(link_confs=[
             pcr.LinkConfiguration(geoid=pcr.GeoID(
                 system=SYSTEM_TYPE,
                 region=RU_CONFIG[RUIDX]["region_id"],
                 element=idx), ) for idx in range(MIN_LINK, MAX_LINK)
         ],
                  zmq_receiver_timeout=10000)),
        ("flxcard_0",
         flxcr.Conf(card_id=RU_CONFIG[RUIDX]["card_id"],
                    logical_unit=0,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=min(5, RU_CONFIG[RUIDX]["channel_count"]))),
        ("flxcard_1",
         flxcr.Conf(card_id=RU_CONFIG[RUIDX]["card_id"],
                    logical_unit=1,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=max(0, RU_CONFIG[RUIDX]["channel_count"] - 5))),
        ("ssp_0",
         flxcr.Conf(card_id=RU_CONFIG[RUIDX]["card_id"],
                    logical_unit=0,
                    dma_id=0,
                    chunk_trailer_size=32,
                    dma_block_size_kb=4,
                    dma_memory_size_gb=4,
                    numa_id=0,
                    num_links=RU_CONFIG[RUIDX]["channel_count"])),
    ] + [
        ("request_receiver",
         rrcv.ConfParams(map=[
             rrcv.geoidinst(region=RU_CONFIG[RUIDX]["region_id"],
                            element=idx,
                            system=SYSTEM_TYPE,
                            queueinstance=f"data_requests_{idx}")
             for idx in range(MIN_LINK, MAX_LINK)
         ] + [
             rrcv.geoidinst(region=RU_CONFIG[RUIDX]["region_id"],
                            element=idx + total_link_count,
                            system=SYSTEM_TYPE,
                            queueinstance=f"tp_requests_{idx}")
             for idx in range(MIN_LINK, MAX_LINK) if SOFTWARE_TPG_ENABLED
         ],
                         general_queue_timeout=QUEUE_POP_WAIT_MS,
                         connection_name=f"{PARTITION}.datareq_{RUIDX}"))
    ] + [
        (
            f"datahandler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    # fake_trigger_flag=0, # default
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=idx,
                    timesync_connection_name=f"{PARTITION}.timesync_{RUIDX}",
                    timesync_topic_name="Timesync",
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_alignment_size=4096,
                    latency_buffer_size=LATENCY_BUFFER_SIZE,
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=idx,
                    enable_software_tpg=SOFTWARE_TPG_ENABLED,
                    emulator_mode=EMULATOR_MODE,
                    error_counter_threshold=100,
                    error_reset_freq=10000),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=LATENCY_BUFFER_SIZE,
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=idx,
                    output_file=path.join(RAW_RECORDING_OUTPUT_DIR,
                                          f"output_{RUIDX}_{idx}.out"),
                    stream_buffer_size=8388608,
                    enable_raw_recording=RAW_RECORDING_ENABLED,
                ))) for idx in range(MIN_LINK, MAX_LINK)
    ] + [
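        # Software-TPG data handlers: element IDs are offset by
        # total_link_count so they do not clash with the raw links above.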
        (
            f"tp_datahandler_{idx}",
            rconf.Conf(
                readoutmodelconf=rconf.ReadoutModelConf(
                    source_queue_timeout_ms=QUEUE_POP_WAIT_MS,
                    # fake_trigger_flag=0, # default
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=total_link_count + idx,
                ),
                latencybufferconf=rconf.LatencyBufferConf(
                    latency_buffer_size=LATENCY_BUFFER_SIZE,
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=total_link_count + idx,
                ),
                rawdataprocessorconf=rconf.RawDataProcessorConf(
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=total_link_count + idx,
                    enable_software_tpg=False,
                ),
                requesthandlerconf=rconf.RequestHandlerConf(
                    latency_buffer_size=LATENCY_BUFFER_SIZE,
                    pop_limit_pct=0.8,
                    pop_size_pct=0.1,
                    region_id=RU_CONFIG[RUIDX]["region_id"],
                    element_id=total_link_count + idx,
                    # output_file = f"output_{idx + MIN_LINK}.out",
                    stream_buffer_size=(100 if FRONTEND_TYPE == 'pacman'
                                        else 8388608),
                    enable_raw_recording=False,
                ))) for idx in range(MIN_LINK, MAX_LINK)
    ]

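    # When software TPG is enabled, a QueueToNetwork publisher forwards the
    # generated TPSets over the "TPSets" topic.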
    if SOFTWARE_TPG_ENABLED:

        conf_list.extend([(f"tpset_publisher",
                           qton.Conf(msg_type="dunedaq::trigger::TPSet",
                                     msg_module_name="TPSetNQ",
                                     sender_config=nos.Conf(
                                         name=f"{PARTITION}.tpsets_{RUIDX}",
                                         topic="TPSets",
                                         stype="msgpack")))])

    if USE_FAKE_DATA_PRODUCERS:
        conf_list.extend([
            (f"fakedataprod_{idx}",
             fdp.ConfParams(
                 system_type=SYSTEM_TYPE,
                 apa_number=RU_CONFIG[RUIDX]["region_id"],
                 link_number=idx,
                 time_tick_diff=25,
                 frame_size=464,
                 response_delay=0,
                 timesync_connection_name=f"{PARTITION}.timesync_{RUIDX}",
                 timesync_topic_name="Timesync",
                 fragment_type="FakeData"))
            for idx in range(MIN_LINK, MAX_LINK)
        ])

    conf_list.extend([("fragment_sender", None)])

    cmd_data['conf'] = acmd(conf_list)

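    # 'start' order matters: the downstream data handlers are listed before
    # the data sources so that consumers are ready before data flows.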
    startpars = rccmd.StartParams(run=RUN_NUMBER)
    cmd_data['start'] = acmd([("datahandler_.*", startpars),
                              ("fake_source", startpars),
                              ("pacman_source", startpars),
                              ("flxcard.*", startpars),
                              ("request_receiver", startpars),
                              ("ssp.*", startpars),
                              ("ntoq_trigdec", startpars),
                              (f"tp_datahandler_.*", startpars),
                              (f"tpset_publisher", startpars),
                              ("fakedataprod_.*", startpars),
                              ("fragment_sender", startpars),
                              ("errored_frame_consumer", startpars)])

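    # 'stop' runs the other way round: sources are stopped before the
    # downstream handlers so that in-flight data can drain.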
    cmd_data['stop'] = acmd([("request_receiver", None), ("flxcard.*", None),
                             ("ssp.*", None), ("fake_source", None),
                             ("pacman_source", None), ("datahandler_.*", None),
                             (f"tp_datahandler_.*", None),
                             (f"tpset_publisher", None),
                             ("fakedataprod_.*", None),
                             ("fragment_sender", None),
                             ("errored_frame_consumer", None)])

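    # The remaining transitions (pause/resume/scrap/record) carry no
    # module-specific payloads in this example.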
    cmd_data['pause'] = acmd([("", None)])

    cmd_data['resume'] = acmd([("", None)])

    cmd_data['scrap'] = acmd([("", None)])

    cmd_data['record'] = acmd([("", None)])

    return cmd_data
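

# --- Usage sketch (not part of the original example) ---
# A minimal illustration of how the cmd_data returned above might be written
# out as per-command JSON files for the DAQ tooling. The helper name, the
# file-naming scheme and the app_name default are assumptions made for this
# sketch; moo-generated payload objects expose a pod() method, used here to
# convert them to plain dicts before dumping.
import json
from os import path


def write_cmd_files(cmd_data: dict, app_name: str = "readout_app", outdir: str = "."):
    """Write each command payload to <outdir>/<app_name>_<cmd>.json."""
    for cmd, payload in cmd_data.items():
        # Fall back to the payload itself if it is already a plain object.
        body = payload.pod() if hasattr(payload, "pod") else payload
        with open(path.join(outdir, f"{app_name}_{cmd}.json"), "w") as f:
            json.dump(body, f, indent=4)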