def test_union(self):
    union_mix = dl.Union([
        dl.Int(dl.Size(16)),
        dl.Bool(),
        dl.Str(dl.Size(10)),
        dl.Tuple([
            dl.Int(dl.Size(8)),
            dl.UInt(dl.Size(16)),
            dl.Float(dl.Size(64)),
            dl.Complex(dl.Size(128)),
            dl.Bool(),
            dl.Str(dl.Size(10))
        ]),
        dl.Record(RecATI)
    ])

    @dl.Interactive([("ack_union_mix", bool)],
                    [("out_union_mix", union_mix)])
    def testbench(node: dl.PythonNode):
        node.send(out_union_mix=5)
        assert node.receive("ack_union_mix")

        node.send(out_union_mix=False)
        assert node.receive("ack_union_mix")

        node.send(out_union_mix='abcd')
        assert node.receive("ack_union_mix")

        node.send(out_union_mix=(-5, 10, -1.5, (1.5 + 2.5j), False, 'hello'))
        assert node.receive("ack_union_mix")

        node.send(out_union_mix=RecATI([1, 2], (3.0, 4), 5))
        assert node.receive("ack_union_mix")

        raise DeltaRuntimeExit

    s_union_mix = dl.lib.StateSaver(union_mix, verbose=True)

    with dl.DeltaGraph() as graph:
        p = dl.placeholder_node_factory()
        p.specify_by_node(
            testbench.call(s_union_mix.save_and_ack(p.out_union_mix)))

    self.check_executes_graph(
        graph,
        """\
saving 5
saving False
saving abcd
saving (-5, 10, -1.5, (1.5+2.5j), False, 'hello')
saving RecATI(x=[1, 2], y=(3.0, 4), z=5)
""")
def test_compound(self):
    tuple_mix = dl.Tuple([
        dl.Int(dl.Size(8)),
        dl.UInt(dl.Size(16)),
        dl.Float(dl.Size(64)),
        dl.Complex(dl.Size(128)),
        dl.Bool(),
        dl.Str(dl.Size(10))
    ])
    array_float = dl.Array(dl.Float(dl.Size(64)), dl.Size(3))
    record_mix = dl.Record(RecATI)

    @dl.Interactive([("ack_tuple_mix", bool),
                     ("ack_array_float", bool),
                     ("ack_record_mix", bool)],
                    [("out_tuple_mix", tuple_mix),
                     ("out_array_float", array_float),
                     ("out_record_mix", record_mix)])
    def testbench(node: dl.PythonNode):
        node.send(out_tuple_mix=(-5, 1000, -100.5, (1.5 + 2.5j),
                                 False, '0123456789'))
        assert node.receive("ack_tuple_mix")

        node.send(out_array_float=[0.5, -0.25, 0.125])
        assert node.receive("ack_array_float")

        node.send(out_record_mix=RecATI([1, 2], (3.0, 4), 5))
        assert node.receive("ack_record_mix")

        raise DeltaRuntimeExit

    s_tuple_mix = dl.lib.StateSaver(tuple_mix, verbose=True)
    s_array_float = dl.lib.StateSaver(array_float, verbose=True)
    s_record_mix = dl.lib.StateSaver(record_mix, verbose=True)

    with dl.DeltaGraph() as graph:
        p = dl.placeholder_node_factory()
        p.specify_by_node(
            testbench.call(s_tuple_mix.save_and_ack(p.out_tuple_mix),
                           s_array_float.save_and_ack(p.out_array_float),
                           s_record_mix.save_and_ack(p.out_record_mix)))

    self.check_executes_graph(
        graph,
        """\
saving (-5, 1000, -100.5, (1.5+2.5j), False, '0123456789')
saving [0.5, -0.25, 0.125]
saving RecATI(x=[1, 2], y=(3.0, 4), z=5)
""")
def migen_body(self, template):
    """Body of the migen node connecting the pulser and the timestamper
    as two submodules.
    """
    # Node inputs
    self.reset = template.add_pa_in_port('reset', dl.Optional(int))
    self.photon = template.add_pa_in_port('photon', dl.Optional(int))

    # Node outputs
    self.time = template.add_pa_out_port('time', dl.UInt())
    self.error = template.add_pa_out_port('error', dl.Int())

    self.rf_trigger = Signal(1)
    self.pmt_trigger = Signal(1)
    self.hit_channels = Signal(2)
    self.clock = Signal(TIME_RES)

    ###

    self.comb += [
        # encode which trigger fired: bit 0 = PMT, bit 1 = RF
        self.hit_channels.eq(self.pmt_trigger + 2 * self.rf_trigger),
        self.photon.ready.eq(1),
    ]

    # error management (if photon is outside the valid range)
    self.sync += [
        If(
            self.photon.valid
            & ((self.photon.data < 1) | (self.photon.data > TIME_RES - 1)),
            self.error.data.eq(1),
            self.error.valid.eq(1)
        ).Elif(
            self.photon.valid,
            self.error.data.eq(0),
            self.error.valid.eq(1)
        ).Else(
            self.error.data.eq(self.error.data),
            self.error.valid.eq(0)
        )
    ]

    self.pulser_inst = TimestamperModel.Pulser(self.reset,
                                               self.pmt_trigger,
                                               self.rf_trigger,
                                               self.photon,
                                               self.clock)

    self.timestamper_inst = TimestamperModel.Timestamper(
        self.hit_channels, self.time, self.reset, self.clock)

    self.submodules += [self.timestamper_inst, self.pulser_inst]
def testbench(node):
    data_array = generate_data_vector(C_N_BITS, C_N_INPUTS)

    # Temporary - needs df.Array => migen.Array support
    data_vector = 0
    logging.debug(f'data sent to DUT {data_array}')
    for i in range(C_N_INPUTS):
        data_vector += data_array[i] << C_N_BITS * i
    data_vector = dl.Int(dl.Size(C_VECTOR_LEN)).from_numpy_object(data_vector)

    for cmd in range(0x01, 0x06):
        node.send(data=data_vector, cmd=cmd)
        result = node.receive('result')
        error = node.receive('error')
        logging.debug(f'cmd: {cmd}')

        exp_err = 0
        if cmd == Commands.MIN:
            exp_res = np.min(data_array)
            logging.debug(f'result: {result}, expected: {exp_res}')
            assert result == exp_res
        elif cmd == Commands.MAX:
            exp_res = np.max(data_array)
            logging.debug(f'result: {result}, expected: {exp_res}')
            assert result == exp_res
        elif cmd == Commands.SUM:
            exp_res = np.sum(data_array)
            logging.debug(f'result: {result}, expected: {exp_res}')
            assert result == exp_res
        elif cmd == Commands.AVG:
            # the hardware mean is an integer approximation, so accept +/- 1
            exp_res_low = trunc(np.mean(data_array)) - 1
            exp_res_high = int(np.mean(data_array)) + 1
            exp_res = np.mean(data_array)
            logging.debug(f'result: {result}, expected: {exp_res}')
            assert result >= exp_res_low
            assert result <= exp_res_high
        else:
            exp_err = 1
            result = -1
            exp_res = -1

        assert error == exp_err

    raise dl.DeltaRuntimeExit
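# Worked example (illustration only, not part of the test file): the testbench
# above packs the numpy array into one wide integer, element i occupying bits
# [C_N_BITS*i, C_N_BITS*(i+1)). The helpers below are hypothetical and simply
# show the packing and the matching unpacking, e.g. for 8-bit elements.
def pack_vector(values, n_bits):
    """Pack a list of unsigned ints into a single integer, LSB-first."""
    packed = 0
    for i, v in enumerate(values):
        assert 0 <= v < (1 << n_bits)
        packed += v << (n_bits * i)
    return packed


def unpack_vector(packed, n_bits, n_inputs):
    """Recover the original list from the packed integer."""
    mask = (1 << n_bits) - 1
    return [(packed >> (n_bits * i)) & mask for i in range(n_inputs)]


assert unpack_vector(pack_vector([3, 0, 255], 8), 8, 3) == [3, 0, 255]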
@dl.Interactive(inputs=[('time', dl.UInt()), ('error', dl.Int())],
                outputs=[('reset', dl.Int()), ('photon', dl.Int())])
def testbench(node):
    """Testbench for the timestamper model node.

    Starts with random testing and ends with corner cases.
    """
    for i in range(TEST_LENGTH):
        photon = random.randint(1, TIME_RES - 2)
        do_test(i, photon, node)

    # corner cases
    # 1: photon arrival time 0 - error expected
    do_test(-1, 0, node)
    # 2: photon arrival time TIME_RES - 1 - no error expected
    do_test(-2, TIME_RES - 1, node)
def test_primitives(self):
    tuple_int = dl.Tuple([
        dl.Int(dl.Size(8)),
        dl.Int(dl.Size(16)),
        dl.Int(dl.Size(32)),
        dl.Int(dl.Size(64))
    ])
    tuple_uint = dl.Tuple([
        dl.UInt(dl.Size(8)),
        dl.UInt(dl.Size(16)),
        dl.UInt(dl.Size(32)),
        dl.UInt(dl.Size(64))
    ])
    tuple_float = dl.Tuple([dl.Float(dl.Size(32)), dl.Float(dl.Size(64))])
    tuple_complex = dl.Tuple(
        [dl.Complex(dl.Size(64)), dl.Complex(dl.Size(128))])
    tuple_bool_char = dl.Tuple([dl.Bool(), dl.Str(dl.Size(1))])

    @dl.Interactive([("ack_int", bool),
                     ("ack_uint", bool),
                     ("ack_float", bool),
                     ("ack_complex", bool),
                     ("ack_bool_char", bool)],
                    [("out_int", tuple_int),
                     ("out_uint", tuple_uint),
                     ("out_float", tuple_float),
                     ("out_complex", tuple_complex),
                     ("out_bool_char", tuple_bool_char)])
    def testbench(node: dl.PythonNode):
        node.send(out_int=(-128, -32768, -2147483648, -9223372036854775808))
        assert node.receive("ack_int")
        node.send(out_int=(127, 32767, 2147483647, 9223372036854775807))
        assert node.receive("ack_int")

        node.send(out_uint=(0, 0, 0, 0))
        assert node.receive("ack_uint")
        node.send(out_uint=(255, 65535, 4294967295, 18446744073709551615))
        assert node.receive("ack_uint")

        # this is just a rough estimate
        node.send(out_float=(1.0000001, 1.000000000000001))
        assert node.receive("ack_float")

        node.send(out_complex=((1.0000001 + 1.0000001j),
                               (1.000000000000001 + 1.000000000000001j)))
        assert node.receive("ack_complex")

        node.send(out_bool_char=(True, 'a'))
        assert node.receive("ack_bool_char")

        raise DeltaRuntimeExit

    s_int = dl.lib.StateSaver(tuple_int, verbose=True)
    s_uint = dl.lib.StateSaver(tuple_uint, verbose=True)
    s_float = dl.lib.StateSaver(tuple_float, verbose=True)
    s_complex = dl.lib.StateSaver(tuple_complex, verbose=True)
    s_bool_char = dl.lib.StateSaver(tuple_bool_char, verbose=True)

    with dl.DeltaGraph() as graph:
        p = dl.placeholder_node_factory()
        p.specify_by_node(
            testbench.call(s_int.save_and_ack(p.out_int),
                           s_uint.save_and_ack(p.out_uint),
                           s_float.save_and_ack(p.out_float),
                           s_complex.save_and_ack(p.out_complex),
                           s_bool_char.save_and_ack(p.out_bool_char)))

    self.check_executes_graph(
        graph,
        f"""\
saving (-128, -32768, -2147483648, -9223372036854775808)
saving (127, 32767, 2147483647, 9223372036854775807)
saving (0, 0, 0, 0)
saving (255, 65535, 4294967295, 18446744073709551615)
saving ({np.float32(1.0000001)}, 1.000000000000001)
saving (({np.float32(1.0000001)}+{np.float32(1.0000001)}j), (1.000000000000001+1.000000000000001j))
saving (True, 'a')
""")
def forked_return_output(x: dl.Int(dl.Size(8)),
                         y: dl.Int(dl.Size(8))
                         ) -> Tuple[int, bool, int, int]:
    return 0, 1, 1, 0
def tb_deltaqueue_to_pa(pa):
    def transfer_queue_to_pa(queue, pa):
        """Emulates the movement of data from the DeltaQueue to a PA."""
        if (yield pa.almost_full) != 1:
            # write to the PA FIFO
            yield
            yield pa.wr_valid_in.eq(1)
            yield pa.wr_data_in.eq(queue.get().msg)
            yield
            yield pa.wr_valid_in.eq(0)
            yield

    # First create a DeltaQueue object
    # Hack: OutPort needs a "node" object with full_name attribute
    mock_parent_node = SimpleNamespace()
    mock_parent_node.full_name = "parent_node"
    in_port = InPort("in", dl.Int(), None, self.tb_buf_width)
    in_queue = DeltaQueue(OutPort("out", dl.Int(), in_port, mock_parent_node),
                          maxsize=self.tb_buf_depth + 1)

    # fill up the DeltaQueue so it has more items than the PA FIFO
    data = []
    for i in range(self.tb_buf_depth + 1):
        # generate a random entry that fits in the buffer width
        d = random.randint(0, pow(2, self.tb_buf_width) - 1)
        data.append(d)  # add entry to reference data for checking
        in_queue.put(QueueMessage(d))

    # check size of DeltaQueue
    self.assertEqual(in_queue.qsize(), self.tb_buf_depth + 1)

    # ensure PA is ready for consuming
    yield
    self.assertEqual((yield pa.rd_valid_out), 0)
    self.assertEqual((yield pa.wr_ready_out), 1)
    self.assertEqual((yield pa.rd_data_out), 0)
    yield pa.rd_ready_in.eq(0)

    # fill up the PA FIFO from the DeltaQueue until it is full
    for i in range(in_queue.qsize()):
        yield from transfer_queue_to_pa(in_queue, pa)

    # check PA FIFO is full
    self.assertEqual((yield pa.fifo.dout), data[0])
    self.assertEqual((yield pa.fifo.we), 0)
    self.assertEqual((yield pa.almost_full), 1)
    self.assertEqual((yield pa.num_fifo_elements), self.tb_buf_depth)

    # check size of DeltaQueue
    self.assertEqual(in_queue.qsize(), 1)

    # read a single entry from the PA FIFO (the first entry)
    yield pa.rd_ready_in.eq(1)
    yield
    self.assertEqual((yield pa.rd_valid_out), 1)
    self.assertEqual((yield pa.rd_data_out), data[0])
    yield pa.rd_ready_in.eq(0)
    yield
    data.pop(0)  # remove first entry from reference data

    # try and add remaining DeltaQueue data to the PA FIFO
    for i in range(in_queue.qsize()):
        if (yield pa.almost_full) != 1:
            yield from transfer_queue_to_pa(in_queue, pa)

    # check PA FIFO count hasn't changed
    self.assertEqual((yield pa.almost_full), 1)
    self.assertEqual((yield pa.num_fifo_elements), self.tb_buf_depth)

    # check size of DeltaQueue
    self.assertEqual(in_queue.qsize(), 0)

    # check original first entry in PA FIFO is gone and all others remain
    for d in data:
        yield pa.rd_ready_in.eq(1)
        yield
        self.assertEqual((yield pa.rd_data_out), d)
        yield pa.rd_ready_in.eq(0)
        yield
def experiment_stopper(completed: dl.Int(dl.Size(8))) -> dl.Void:
    raise dl.DeltaRuntimeExit
def generate_data_vector(N_BITS, N_INPUTS):
    return np.random.randint(0, pow(2, N_BITS), size=N_INPUTS)


@dl.Interactive(inputs=[('result', dl.Int()), ('error', dl.Int())],
                outputs=[('cmd', dl.Int()),
                         ('data', dl.Int(dl.Size(C_VECTOR_LEN)))])
def testbench(node):
    data_array = generate_data_vector(C_N_BITS, C_N_INPUTS)

    # Temporary - needs df.Array => migen.Array support
    data_vector = 0
    logging.debug(f'data sent to DUT {data_array}')
    for i in range(C_N_INPUTS):
        data_vector += data_array[i] << C_N_BITS * i
    data_vector = dl.Int(dl.Size(C_VECTOR_LEN)).from_numpy_object(data_vector)

    for cmd in range(0x01, 0x06):
        node.send(data=data_vector, cmd=cmd)
def migen_body(self, template):
    template.add_pa_in_port('i', dl.Optional(dl.Int(dl.Size(8))))
def method_func_no_output(self, i: dl.Int(dl.Size(8))) -> dl.Void:
    print(i + 1)
def multi_body_no_output(i: dl.Int(dl.Size(8))) -> dl.Void:
    print(i)
@dl.Interactive(inputs=[('time_out', dl.UInt()), ('reset', dl.Int())],
                outputs=[('output', dl.UInt())])
def testbench(node):
    """Testbench for the timestamper interface node, driving it with
    random PMT/RF arrival times.
    """
    _ITER = 10
    for i in range(_ITER):
        logging.debug(f'---Testbench iter {i}---')
        time_pmt = random.randint(0, 100)
        time_rf = random.randint(0, 100)
        do_test(node, time_pmt, time_rf)

    raise dl.DeltaRuntimeExit
def migen_body(self, template):
    # generics
    N_BITS = template.generics["N_BITS"]  # 1-64
    N_INPUTS = template.generics["N_INPUTS"]
    TREE_DEPTH = int(ceil(log2(N_INPUTS)))

    # inputs
    self.d_in = template.add_pa_in_port(
        'd_in', dl.Optional(dl.Int(dl.Size(N_BITS * N_INPUTS))))
    self.cmd = template.add_pa_in_port('cmd', dl.Optional(dl.Int()))

    # outputs
    self.d_out = template.add_pa_out_port('d_out', dl.Int())
    self.err = template.add_pa_out_port('error', dl.Int())

    # input length correction [need a power of 2 sized tree]
    N_INPUTS_CORR = pow(2, TREE_DEPTH)

    # internals

    # correct the size of the input tree to be a power of 2
    # and register the inputs
    self.d_in_full_reg = Signal(N_INPUTS_CORR * N_BITS)
    self.d_in_valid_reg = Signal(1)
    self.cmd_data_reg = Signal(8)
    self.cmd_valid_reg = Signal(1)

    # register outputs
    self.d_out_data_reg = Signal(N_BITS + TREE_DEPTH)
    self.d_out_valid_reg = Signal(1)
    self.err_data_reg = Signal(1)
    self.err_valid_reg = Signal(1)

    # create the 2D array of data [INPUTS x TREE_DEPTH] to route all the
    # core units in an iterative way. The number of bits is incremented
    # at each stage to account for the carry in additions.
    self.d_pipe = Array(
        Array(Signal(N_BITS + b) for a in range(N_INPUTS_CORR))
        for b in range(TREE_DEPTH + 1))

    # create the 2D array of error signals.
    self.e_pipe = Array(
        Array(Signal(N_BITS) for a in range(N_INPUTS_CORR))
        for b in range(TREE_DEPTH))

    ###

    # correct input vector length to match a power of 2.
    # fill non-provided inputs with 0's (affects mean and minimum)
    self.sync += [
        self.d_in_full_reg.eq(self.d_in.data),
        self.d_in_valid_reg.eq(self.d_in.valid),
        self.cmd_data_reg.eq(self.cmd.data),
        self.cmd_valid_reg.eq(self.cmd.valid)
    ]

    # wiring inputs to the first stage of the tree
    for i in range(N_INPUTS_CORR):
        self.comb += [
            self.d_pipe[0][i].eq(
                self.d_in_full_reg[N_BITS * i:N_BITS * (i + 1)])
        ]

    # instantiation of the core units.
    for j in range(TREE_DEPTH):
        for i in range(int(N_INPUTS_CORR / (pow(2, j + 1)))):
            self.submodules += CoreUnit(self.d_pipe[j][2 * i],
                                        self.d_pipe[j][2 * i + 1],
                                        self.d_pipe[j + 1][i],
                                        self.cmd_data_reg,
                                        self.e_pipe[j][i],
                                        N_BITS)

            # error signal propagation. If any of the single units has a
            # high error signal, the error is propagated to the node's
            # output.
            self.comb += [
                If(self.e_pipe[j][i] == 1, self.err_data_reg.eq(1))
            ]

    self.comb += [
        self.d_in.ready.eq(1),
        self.cmd.ready.eq(1),
        self.d_out_data_reg.eq(self.d_pipe[TREE_DEPTH][0]),
        If(self.d_in_valid_reg,
           self.err_valid_reg.eq(1),
           self.d_out_valid_reg.eq(1)).Else(self.err_valid_reg.eq(0))
    ]

    self.sync += [
        self.d_out.data.eq(self.d_out_data_reg),
        self.d_out.valid.eq(self.d_out_valid_reg),
        self.err.data.eq(self.err_data_reg),
        self.err.valid.eq(self.err_valid_reg)
    ]
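# Reference model (an illustration, not part of the node above): the migen body
# builds a binary tree of CoreUnit instances, padding the input count up to the
# next power of two with zeros (which, as the comment in the node notes, affects
# mean and minimum) and widening each stage by one bit. A pure-Python sketch of
# the same reduction, for a hypothetical pairwise `combine` such as addition:
from math import ceil, log2


def tree_reduce(values, combine, identity=0):
    """Pairwise reduction over an identity-padded power-of-two array."""
    depth = int(ceil(log2(len(values))))
    stage = list(values) + [identity] * (pow(2, depth) - len(values))
    for _ in range(depth):
        stage = [combine(stage[2 * i], stage[2 * i + 1])
                 for i in range(len(stage) // 2)]
    return stage[0]


assert tree_reduce([1, 2, 3, 4, 5], lambda a, b: a + b) == 15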
def migen_body(self, template):
    _TIME_RES = 32

    # Node inputs
    self.time_in = template.add_pa_in_port('time_in',
                                           dl.Optional(dl.UInt()))

    # Node outputs
    self.time_out = template.add_pa_out_port('time_out', dl.UInt())
    self.counter_reset = template.add_pa_out_port('counter_reset',
                                                  dl.Int())

    # Internal signals
    self.pmt_reg = Signal(_TIME_RES)
    self.rf_reg = Signal(_TIME_RES)
    self.pmt_trig = Signal(1)
    self.rf_trig = Signal(1)

    self.submodules.fsm = FSM(reset_state="RESET_COUNTER")

    self.sync += [
        If(
            self.pmt_trig,
            self.pmt_reg.eq(self.time_in.data),
        ).Elif(
            self.fsm.ongoing("RESET_COUNTER"),
            self.pmt_reg.eq(0)
        ).Else(
            self.pmt_reg.eq(self.pmt_reg)
        ),
        If(
            self.rf_trig,
            self.rf_reg.eq(self.time_in.data),
        ).Elif(
            self.fsm.ongoing("RESET_COUNTER"),
            self.rf_reg.eq(0)
        ).Else(
            self.rf_reg.eq(self.rf_reg)
        )
    ]

    """FSM

    The FSM controls the readouts from the HPTDC chip and generates a time
    signal for the accumulator.

    RESET_COUNTER
        The initial state of the FSM at the start of the experiment. It
        resets the "coarse counter" of the HPTDC chip to establish a T0
        time reference.
    WAIT_FOR_PMT
        Holds until the PMT timestamp is available at the HPTDC chip
        readout (first data_ready sync pulse).
    WAIT_FOR_RF
        Holds until the RF timestamp is available at the HPTDC chip
        readout (second data_ready sync pulse).
    SEND_TIME
        The difference between t_PMT and t_RF is derived and sent to the
        accumulator.
    WAIT_ACC_LATENCY
        Waits for any delays on inter-node communication.
    """

    self.fsm.act(
        "RESET_COUNTER",
        self.pmt_trig.eq(0),
        self.rf_trig.eq(0),
        self.time_in.ready.eq(1),
        self.counter_reset.data.eq(1),  # reset counters
        self.counter_reset.valid.eq(1),
        NextState("WAIT_FOR_PMT"))

    self.fsm.act(
        "WAIT_FOR_PMT",
        self.counter_reset.data.eq(0),
        self.time_in.ready.eq(1),
        If(self.time_in.valid,
           self.pmt_trig.eq(1),
           NextState("WAIT_FOR_RF")))

    self.fsm.act(
        "WAIT_FOR_RF",
        self.time_in.ready.eq(1),
        If(self.time_in.valid,
           self.rf_trig.eq(1),
           NextState("SEND_TIME")))

    self.fsm.act(
        "SEND_TIME",
        self.time_in.ready.eq(1),
        self.time_out.data.eq(self.rf_reg - self.pmt_reg),
        self.time_out.valid.eq(1),
        NextState("WAIT_ACC_LATENCY"))

    self.fsm.act(
        "WAIT_ACC_LATENCY",
        If(self.time_in.valid == 0,
           NextState("RESET_COUNTER")))
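# Behavioural sketch (an assumption for illustration, not taken from the
# project): over one RESET_COUNTER -> ... -> SEND_TIME pass, the FSM above
# latches the first valid time_in as the PMT timestamp, the second as the RF
# timestamp, and emits their difference on time_out.
def timestamper_interface_reference(time_pmt: int, time_rf: int) -> int:
    """Expected time_out for one readout cycle (assuming time_rf >= time_pmt)."""
    return time_rf - time_pmt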
def num_cycles() -> dl.Int():
    return len(Permutation(2, 1, 4, 5, 3).to_cycles())
def tb_pa_to_deltaqueue(pa):
    def transfer_pa_to_queue(pa, queue):
        """Emulates the movement of data from the PA to a DeltaQueue."""
        if not queue.full():
            # read a single entry from the PA FIFO (the first entry)
            yield pa.rd_ready_in.eq(1)
            yield
            self.assertEqual((yield pa.rd_valid_out), 1)
            # write to the queue
            queue.put(QueueMessage((yield pa.rd_data_out)))
            yield pa.rd_ready_in.eq(0)
            yield

    # First create a DeltaQueue object
    # Hack: OutPort needs a "node" object with full_name attribute
    mock_parent_node = SimpleNamespace()
    mock_parent_node.full_name = "parent_node"
    in_port = InPort("in", dl.Int(), None, self.tb_buf_width)
    out_queue = DeltaQueue(OutPort("out", dl.Int(), in_port,
                                   mock_parent_node),
                           maxsize=self.tb_buf_depth - 1)

    yield
    self.assertEqual((yield pa.rd_valid_out), 0)
    self.assertEqual((yield pa.wr_ready_out), 1)
    self.assertEqual((yield pa.rd_data_out), 0)
    yield pa.rd_ready_in.eq(0)

    # fill up PA FIFO so it has more items than max size of the DeltaQueue
    data = []
    for i in range(self.tb_buf_depth):
        # generate a random entry that fits in the buffer width
        d = random.randint(0, pow(2, self.tb_buf_width) - 1)
        data.append(d)  # add entry to reference data for checking

        # write to the PA FIFO
        yield
        yield pa.wr_valid_in.eq(1)
        yield pa.wr_data_in.eq(d)
        yield
        yield pa.wr_valid_in.eq(0)
        yield

    # check PA FIFO is full
    self.assertEqual((yield pa.fifo.dout), data[0])
    self.assertEqual((yield pa.fifo.we), 0)
    self.assertEqual((yield pa.almost_full), 1)
    self.assertEqual((yield pa.num_fifo_elements), self.tb_buf_depth)

    # fill up the DeltaQueue from the PA FIFO until it is full
    for i in range(self.tb_buf_depth):
        yield from transfer_pa_to_queue(pa, out_queue)

    # check PA FIFO has been emptied except for 1
    self.assertEqual((yield pa.almost_full), 0)
    self.assertEqual((yield pa.num_fifo_elements), 1)

    # check size of DeltaQueue
    self.assertEqual(out_queue.qsize(), self.tb_buf_depth - 1)

    # get a single item from the DeltaQueue
    self.assertEqual(out_queue.get().msg, data[0])
    data.pop(0)  # remove first entry from reference data

    # try and add remaining PA FIFO data to the DeltaQueue
    for i in range((yield pa.num_fifo_elements)):
        yield from transfer_pa_to_queue(pa, out_queue)

    # check PA FIFO has been emptied
    self.assertEqual((yield pa.almost_full), 0)
    self.assertEqual((yield pa.num_fifo_elements), 0)

    # check size of DeltaQueue
    self.assertEqual(out_queue.qsize(), self.tb_buf_depth - 1)

    # check the remaining data was moved into the DeltaQueue
    for i in range(out_queue.qsize()):
        self.assertEqual(out_queue.get().msg, data[i])
class Foo:

    def __init__(self):
        pass

    @dl.DeltaMethodBlock()
    def method_func_no_output(self, i: dl.Int(dl.Size(8))) -> dl.Void:
        print(i + 1)


class MigenFoo(dl.MigenNodeTemplate):
    def migen_body(self, template):
        template.add_pa_in_port('i', dl.Optional(dl.Int(dl.Size(8))))


@dl.Interactive([('i', dl.Int(dl.Size(8)))])
def interactive_func_no_output(node: dl.RealNode):
    node.receive('i')


template_no_output_no_body = dl.NodeTemplate(
    name="template_no_output_no_body",
    inputs=[('i', dl.Int(dl.Size(8)))]
)


@dl.DeltaBlock()
def experiment_stopper(completed: dl.Int(dl.Size(8))) -> dl.Void:
    raise dl.DeltaRuntimeExit
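# Minimal usage sketch (an assumption, not taken from the test file): it relies
# on the pattern shown in the StateSaver/Interactive tests above, where calling
# a decorated block or bound method inside a DeltaGraph context creates a node.
# The `const_source` block is hypothetical and only exists to feed the 8-bit
# inputs; the migen template is left out of this sketch.
@dl.DeltaBlock()
def const_source() -> dl.Int(dl.Size(8)):
    return 1


with dl.DeltaGraph() as graph:
    foo = Foo()
    foo.method_func_no_output(const_source())        # method-block node
    interactive_func_no_output.call(const_source())  # interactive node
    experiment_stopper(const_source())               # exits on first message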