def __init__(self, dut, period=CLK_PERIOD):
    self.dut = dut
    self.dut.log.warning("Setup Sata")
    cocotb.fork(Clock(dut.clk, period).start())
    self.dut.rst = 0
    self.dut.prim_scrambler_en = 1
    self.dut.data_scrambler_en = 1
def E_find_mk_test(dut):
    """Finds MK successfully"""
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end = '1000000300'  # Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    print_process_vars(dut)
    yield load_file(dut, filename)
    print_process_vars(dut)
    yield load_mk(dut, start)
    print_process_vars(dut)
    yield load_mk(dut, end)
    print_process_vars(dut)

    # This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    yield wait_process(dut)
    print_process_vars(dut)

    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("MK search failed")
    else:
        log.info("Master key found!")
def F_exhaust_mk_test(dut):
    """Hits end of MK list before matching"""
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end = '1000000020'  # Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    yield load_file(dut, filename)
    yield load_mk(dut, start)
    yield load_mk(dut, end)

    # This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    yield wait_process(dut)

    if int(str(dut.pmk_valid), 2) != 0:
        raise TestFailure("Master key found, not good!")
    else:
        log.info("List done")
def setup_dut(dut):
    cocotb.fork(Clock(dut.in_clk, CLK_PERIOD).start())
    dut.request_interrupt = 0
    global interrupt_called
    interrupt_called = False
    global data_is_ready
    data_is_ready = False
def __init__(self, dut, sim_config, period=CLK_PERIOD, user_paths=[]):
    self.status = Status()
    self.status.set_level('verbose')
    self.user_paths = user_paths
    self.comm_lock = cocotb.triggers.Lock('comm')
    self.dut = dut
    dev_dict = json.load(open(sim_config), object_pairs_hook=OrderedDict)
    super(NysaSim, self).__init__(dev_dict, self.status)
    self.timeout = 1000
    self.response = Array('B')

    self.dut.rst <= 0
    self.dut.ih_reset <= 0
    self.dut.in_ready <= 0
    self.dut.in_command <= 0
    self.dut.in_address <= 0
    self.dut.in_data <= 0
    self.dut.in_data_count <= 0

    gd = GenSDB()
    self.callbacks = {}
    self.rom = gd.gen_rom(self.dev_dict, user_paths=self.user_paths, debug=False)

    cocotb.fork(Clock(dut.clk, period).start())
    cocotb.fork(self.interrupt_interface())
def test1(dut):
    dut.log.info("Cocotb test boot")
    random.seed(0)

    cocotb.fork(ClockDomainAsyncReset(dut.clk, dut.reset))
    cocotb.fork(simulationSpeedPrinter(dut.clk))

    drivers = []
    checkers = []
    for i in range(3):
        readQueue = Queue()
        ahb = Bundle(dut, "ahbMasters_" + str(i))
        drivers.append(AhbLite3MasterDriver(ahb, AhbLite3TraficGeneratorWithMemory(12, 32, readQueue, i), dut.clk, dut.reset))
        checkers.append(AhbLite3MasterReadChecker(ahb, readQueue, dut.clk, dut.reset))

    # AhbLite3MasterIdle(Bundle(dut, "ahbMasters_1"))
    # AhbLite3MasterIdle(Bundle(dut, "ahbMasters_2"))

    AhbLite3SlaveMemory(Bundle(dut, "ahbSlaves_0"), 0x000, 0x400, dut.clk, dut.reset)
    AhbLite3SlaveMemory(Bundle(dut, "ahbSlaves_1"), 0x400, 0x400, dut.clk, dut.reset)
    AhbLite3SlaveMemory(Bundle(dut, "ahbSlaves_2"), 0x800, 0x400, dut.clk, dut.reset)
    AhbLite3SlaveMemory(Bundle(dut, "ahbSlaves_3"), 0xC00, 0x400, dut.clk, dut.reset)

    while True:
        yield RisingEdge(dut.clk)
        done = True
        for checker in checkers:
            if checker.counter < 1000:
                done = False
        if done:
            break

    dut.log.info("Cocotb test done")
def __init__(self, entity, name, clock,
             wr_fifo_name="WR", wr_fifo_clk=None, wr_fifo_clk_period=10,
             rd_fifo_name="RD", rd_fifo_clk=None, rd_fifo_clk_period=10):
    BusDriver.__init__(self, entity, name, clock)
    if wr_fifo_clk is None:
        wr_fifo_clk = entity.WR_CLK
    if rd_fifo_clk is None:
        rd_fifo_clk = entity.RD_CLK

    self.bus.EN.setimmediatevalue(0)
    self.bus.ADR.setimmediatevalue(0)
    self.bus.ADR_FIXED.setimmediatevalue(0)
    self.bus.ADR_WRAP.setimmediatevalue(0)
    self.bus.WR_RD.setimmediatevalue(0)
    self.bus.COUNT.setimmediatevalue(0)

    # Mutex for each channel that we master to prevent contention
    self.command_busy = Lock("%s_wabusy" % name)

    cocotb.fork(Clock(wr_fifo_clk, wr_fifo_clk_period).start())
    cocotb.fork(Clock(rd_fifo_clk, rd_fifo_clk_period).start())

    self.write_fifo = PPFIFOWritePath(entity, wr_fifo_name, wr_fifo_clk)
    self.read_fifo = PPFIFOReadPath(entity, rd_fifo_name, rd_fifo_clk)
def write(self, address, value, byte_enable=0xf, address_latency=0, data_latency=0):
    """Write a value to an address.

    The *_latency kwargs allow control over the delta between the address
    and data phases.
    """
    c_addr = cocotb.fork(self._send_write_address(address, delay=address_latency))
    c_data = cocotb.fork(self._send_write_data(value, byte_enable=byte_enable, delay=data_latency))

    if c_addr:
        yield c_addr.join()
    if c_data:
        yield c_data.join()

    # Wait for the response
    while True:
        yield ReadOnly()
        if self.bus.BVALID.value and self.bus.BREADY.value:
            result = self.bus.BRESP.value
            break
        yield RisingEdge(self.clock)

    yield RisingEdge(self.clock)

    if int(result):
        raise AXIReadError("Write to address 0x%08x failed with BRESP: %d" % (
            address, int(result)))

    raise ReturnValue(result)
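A hypothetical caller of the write() coroutine above, assuming the surrounding class is an AXI4-Lite master driver in the style of cocotb.drivers.amba.AXI4LiteMaster; the driver class, bus prefix "AXIL", and register offset are illustrative assumptions, not taken from the source.

import cocotb
from cocotb.drivers.amba import AXI4LiteMaster   # assumed driver location (older cocotb releases)

@cocotb.test()
def single_write(dut):
    axim = AXI4LiteMaster(dut, "AXIL", dut.clk)  # hypothetical bus prefix
    # one-cycle gap between the address and data phases
    result = yield axim.write(0x0004, 0xDEADBEEF, byte_enable=0xF,
                              address_latency=0, data_latency=1)
    dut.log.info("BRESP = %d" % int(result))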
def run_test(dut, data_in=None, config_coroutine=None, idle_inserter=None, backpressure_inserter=None):
    cocotb.fork(clock_gen(dut.clk))
    tb = EndianSwapperTB(dut)
    yield tb.reset()
    dut.stream_out_ready <= 1

    # Start off any optional coroutines
    if config_coroutine is not None:
        cocotb.fork(config_coroutine(tb.csr))
    if idle_inserter is not None:
        tb.stream_in.set_valid_generator(idle_inserter())
    if backpressure_inserter is not None:
        tb.backpressure.start(backpressure_inserter())

    # Send in the packets
    for transaction in data_in():
        yield tb.stream_in.send(transaction)

    # Wait at least 2 cycles where output ready is low before ending the test
    for i in xrange(2):
        yield RisingEdge(dut.clk)
        while not dut.stream_out_ready.value:
            yield RisingEdge(dut.clk)

    pkt_count = yield tb.csr.read(1)

    if pkt_count.integer != tb.pkts_sent:
        raise TestFailure("DUT recorded %d packets but tb counted %d" % (
            pkt_count.integer, tb.pkts_sent))
    else:
        dut.log.info("DUT correctly counted %d packets" % pkt_count.integer)

    raise tb.scoreboard.result
def write_1_word_test(dut):
    """
    Description:
        * Test ID: 0

    Expected Results:
        *
    """
    CLK_WR_PERIOD = 10
    CLK_RD_PERIOD = 10
    dut.rst <= 1
    dut.test_id <= 0
    cocotb.fork(Clock(dut.WR_CLK, CLK_WR_PERIOD).start())
    cocotb.fork(Clock(dut.RD_CLK, CLK_RD_PERIOD).start())
    writer = PPFIFOWritePath(dut, "WR", dut.WR_CLK)
    reader = PPFIFOReadPath(dut, "RD", dut.RD_CLK)

    yield Timer(CLK_WR_PERIOD * 10)
    dut.rst <= 1
    yield Timer(CLK_WR_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_WR_PERIOD * 10)

    yield writer.write([0x00, 0x01, 0x02, 0x03])
    yield Timer(CLK_WR_PERIOD * 20)
    data = yield reader.read(4)
    yield Timer(CLK_WR_PERIOD * 20)

    print "Data:"
    for d in data:
        print "0x%08X" % d
def E_process_second_input_round_test(dut):
    """Test input processing with 32 word input"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())

    mockObject = Sha1Model()

    yield reset(dut)
    #yield load_data(dut, log, mockObject, 16)
    #mockObject.processInput()
    #mockObject.displayAll()
    yield load_data(dut, log, mockObject, 16)
    mockObject.processInput()
    yield load_data(dut, log, mockObject, 66)

    mockOut = "{:08x}".format(mockObject.W[16])

    compare1 = convert_hex(dut.pinput1.test_word_1.value).rjust(8, '0')
    compare2 = convert_hex(dut.pinput1.test_word_5.value).rjust(8, '0')

    if compare1 != mockOut:
        raise TestFailure(
            "First load incorrect: {0} != {1}".format(compare1, mockOut))
    elif compare2 != "{:08x}".format(mockObject.W[79]):
        raise TestFailure(
            "First load incorrect: {0} != {1}".format(compare2, "{:08x}".format(mockObject.W[79])))
    else:
        log.info("First load ok!")
def value_test(dut, num):
    """Test n*num/n = num"""
    data_width = int(dut.DATA_WIDTH.value)
    bus_width = int(dut.BUS_WIDTH.value)
    dut.log.info('Detected DATA_WIDTH = %d, BUS_WIDTH = %d' % (data_width, bus_width))

    cocotb.fork(clock_gen(dut.clk, period=clock_period))

    dut.rst <= 1
    for i in range(bus_width):
        dut.i_data[i] <= 0
    dut.i_valid <= 0
    yield RisingEdge(dut.clk)
    yield RisingEdge(dut.clk)
    dut.rst <= 0

    for i in range(bus_width):
        dut.i_data[i] <= num
    dut.i_valid <= 1
    yield RisingEdge(dut.clk)
    dut.i_valid <= 0
    yield RisingEdge(dut.clk)

    got = int(dut.o_data.value)

    if got != num:
        raise TestFailure('Mismatch detected: got %d, exp %d!' % (got, num))
def test_ram(dut):
    """Try writing values into the RAM and reading back"""
    RAM = {}

    # Read the parameters back from the DUT to set up our model
    width = dut.D_WIDTH.value.integer
    depth = 2**dut.A_WIDTH.value.integer
    dut.log.info("Found %d entry RAM by %d bits wide" % (depth, width))

    # Set up independent read/write clocks
    cocotb.fork(Clock(dut.clk_write, 3200).start())
    cocotb.fork(Clock(dut.clk_read, 5000).start())

    dut.log.info("Writing in random values")
    for i in xrange(depth):
        RAM[i] = int(random.getrandbits(width))
        yield write_ram(dut, i, RAM[i])

    dut.log.info("Reading back values and checking")
    for i in xrange(depth):
        value = yield read_ram(dut, i)
        if value != RAM[i]:
            dut.log.error("RAM[%d] expected %d but got %d" % (i, RAM[i], value))
            raise TestFailure("RAM contents incorrect")

    dut.log.info("RAM contents OK")
def test_fork_and_monitor(dut, period=1000, clocks=6):
    cocotb.fork(Clock(dut.clk, period).start())

    # Ensure the clock has started
    yield RisingEdge(dut.clk)

    timer = Timer(period + 10)
    task = cocotb.fork(count_edges_cycles(dut.clk, clocks))
    count = 0
    expect = clocks - 1

    while True:
        result = yield [timer, task.join()]
        if count > expect:
            raise TestFailure("Task didn't complete in expected time")
        if result is timer:
            dut.log.info("Count %d: Task still running" % count)
            count += 1
        else:
            break

    if count != expect:
        raise TestFailure("Expected to monitor the task %d times but got %d" % (expect, count))
    if result != clocks:
        raise TestFailure("Expected task to return %d but got %s" % (clocks, repr(result)))
def test_yield_list_stale(dut):
    """Test that a trigger yielded as part of a list can't cause a spurious wakeup"""
    # gh-843
    events = [Event() for i in range(3)]

    waiters = [e.wait() for e in events]

    @cocotb.coroutine
    def wait_for_lists():
        ret_i = waiters.index((yield [waiters[0], waiters[1]]))
        assert ret_i == 0, "Expected event 0 to fire, not {}".format(ret_i)

        ret_i = waiters.index((yield [waiters[2]]))
        assert ret_i == 2, "Expected event 2 to fire, not {}".format(ret_i)

    @cocotb.coroutine
    def wait_for_e1():
        """ wait on the event that didn't wake `wait_for_lists` """
        ret_i = waiters.index((yield waiters[1]))
        assert ret_i == 1, "Expected event 1 to fire, not {}".format(ret_i)

    @cocotb.coroutine
    def fire_events():
        """ fire the events in order """
        for e in events:
            yield Timer(1)
            e.set()

    fire_task = cocotb.fork(fire_events())
    e1_task = cocotb.fork(wait_for_e1())
    yield wait_for_lists()

    # make sure the other tasks finish
    yield fire_task.join()
    yield e1_task.join()
def process_image(dut, filename="", debug=False, threshold=0.22):
    """Run an image file through the jpeg encoder and compare the result"""
    cocotb.fork(Clock(dut.clk, 100).start())

    # Overwriting debug (original) with the one from env
    debug = os.getenv('COCOTB_DEBUG')   # None/1

    driver = ImageDriver(dut)
    monitor = JpegMonitor(dut)

    if debug:                           # pragma: no cover
        driver.log.setLevel(logging.DEBUG)
        monitor.log.setLevel(logging.DEBUG)

    stimulus = Image.open(filename)
    yield driver.send(stimulus)
    output = yield monitor.wait_for_recv()

    if debug:                           # pragma: no cover
        output.save(filename + "_process.jpg")

    difference = compare(stimulus, output)

    dut.log.info("Compressed image differs to original by %f%%" % (difference))

    if difference > threshold:          # pragma: no cover
        raise TestFailure("Resulting image file was too different (%f > %f)" %
                          (difference, threshold))
def test1(dut):
    dut.log.info("Cocotb test boot")
    random.seed(0)

    cocotb.fork(ClockDomainAsyncReset(dut.clk, None))

    for i in range(0, 1000):
        randSignal(dut.io_conds_0)
        randSignal(dut.io_conds_1)
        randSignal(dut.io_conds_2)
        randSignal(dut.io_conds_3)
        randSignal(dut.io_conds_4)
        randSignal(dut.io_conds_5)
        randSignal(dut.io_conds_6)
        randSignal(dut.io_conds_7)
        randSignal(dut.io_data_0)
        randSignal(dut.io_data_1)
        randSignal(dut.io_data_2)
        randSignal(dut.io_data_3)
        randSignal(dut.io_data_4)
        randSignal(dut.io_data_5)
        randSignal(dut.io_data_6)
        randSignal(dut.io_data_7)
        randSignal(dut.io_data_8)
        randSignal(dut.io_data_9)
        randSignal(dut.io_data_10)
        randSignal(dut.io_data_11)
        yield RisingEdge(dut.clk)
        ref = Ref(dut)
        assertEquals(ref.io_outDefault, dut.io_outDefault, "io_outDefault")
        assertEquals(ref.io_outComplex, dut.io_outComplex, "io_outComplex")
        yield Timer(1)
        assertEquals(ref.io_outComplex, dut.io_outRegComplex, "io_outRegComplex")

    dut.log.info("Cocotb test done")
def _ad9361_tx_to_rx_loopback(self):
    cocotb.fork(self._tx_data_from_ad9361())
    i_bin_val = BinaryValue(bits=12, bigEndian=False)
    q_bin_val = BinaryValue(bits=12, bigEndian=False)
    while True:
        yield RisingEdge(self.dut.rx_clk_in_p)
        if self.rx_frame_asserted:
            self.dut.rx_data_in_p <= i_bin_val[5:0]
            self.dut.rx_data_in_n <= ~i_bin_val[5:0]
            self.rx_frame_asserted = False
            self.dut.rx_frame_in_p <= 0
            self.dut.rx_frame_in_n <= 1
        else:
            if len(self.lbqi) > 0:
                i_bin_val = self.lbqi.popleft()
            else:
                i_bin_val.set_value(0)
            if len(self.lbqq) > 0:
                q_bin_val = self.lbqq.popleft()
            else:
                q_bin_val.set_value(0)
            self.dut.rx_data_in_p <= i_bin_val[11:6]
            self.dut.rx_data_in_n <= ~i_bin_val[11:6]
            self.rx_frame_asserted = True
            self.dut.rx_frame_in_p <= 1
            self.dut.rx_frame_in_n <= 0
        yield RisingEdge(self.dut.rx_clk_in_n)
        if self.rx_frame_asserted:
            self.dut.rx_data_in_p <= q_bin_val[11:6]
            self.dut.rx_data_in_n <= ~q_bin_val[11:6]
        else:
            self.dut.rx_data_in_p <= q_bin_val[5:0]
            self.dut.rx_data_in_n <= ~q_bin_val[5:0]
def test_external_from_fork(dut):
    clk_gen = cocotb.fork(Clock(dut.clk, 100).start())
    coro = cocotb.fork(run_external(dut))
    yield coro.join()

    dut._log.info("Back from join")
def run_test(dut, data_generator=random_data, delay_cycles=2):
    """Send data through the DUT and check it comes out sorted"""
    cocotb.fork(Clock(dut.clk, 100).start())

    # Don't check until valid output
    expected = [None] * delay_cycles

    for index, values in enumerate(data_generator(bits=len(dut.in1))):
        expected.append(sorted(values))

        yield RisingEdge(dut.clk)
        dut.in1 = values[0]
        dut.in2 = values[1]
        dut.in3 = values[2]
        dut.in4 = values[3]
        dut.in5 = values[4]

        yield ReadOnly()
        expect = expected.pop(0)

        if expect is None:
            continue

        got = [int(dut.out5), int(dut.out4), int(dut.out3),
               int(dut.out2), int(dut.out1)]

        if got != expect:
            dut.log.error('Expected %s' % expect)
            dut.log.error('Got %s' % got)
            raise TestFailure("Output didn't match")

    dut.log.info('Successfully sent %d cycles of data' % (index + 1))
def test_failure_from_system_task(dut):
    """Allow the dut to call $fail_test() from verilog"""
    clock = Clock(dut.clk, 100)
    cocotb.fork(clock.start())
    coro = cocotb.fork(clock_mon(dut))
    extern = cocotb.fork(run_external(dut))
    yield Timer(10000000)
def A_gen_data_test(dut):
    """Tests that gen_tenhex generates sane values"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())

    outStr = ''

    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    for x in xrange(0xff):
        complete = int(dut.main1.gen1.complete_o.value)
        if complete != 0:
            raise TestFailure("Premature completion")

        outStr = '{:x}'.format(int(dut.main1.gen1.mk_test9.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test8.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test7.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test6.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test5.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test4.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test3.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test2.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test1.value)) + \
                 '{:x}'.format(int(dut.main1.gen1.mk_test0.value))

        yield RisingEdge(dut.clk_i)

    if outStr != "00000000fe":
        raise TestFailure("Wrong loaded values!")
    else:
        log.info("Ok!")
def run(self):
    cocotb.fork(StreamRandomizer("dispatcherInOrderInput", self.onInput, None, self.dut, self.dut.clk))

    for idx in range(0, 3):
        cocotb.fork(StreamReader("dispatcherInOrderOutputs_" + str(idx), self.onOutput, idx, self.dut, self.dut.clk))

    while self.counter < 1000:
        yield RisingEdge(self.dut.clk)
def write_257_word_test(dut):
    """
    Description:
        * Test ID: 2

    Expected Results:
        *
    """
    CLK_WR_PERIOD = 10
    CLK_RD_PERIOD = 10
    COUNT = 0x101
    dut.rst <= 1
    dut.test_id <= 2
    cocotb.fork(Clock(dut.WR_CLK, CLK_WR_PERIOD).start())
    cocotb.fork(Clock(dut.RD_CLK, CLK_RD_PERIOD).start())
    writer = BlockFIFOWritePath(dut, "WR", dut.WR_CLK)
    reader = BlockFIFOReadPath(dut, "RD", dut.RD_CLK)

    yield Timer(CLK_WR_PERIOD * 10)
    dut.rst <= 1
    yield Timer(CLK_WR_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_WR_PERIOD * 10)

    data_out = []
    for i in range(COUNT):
        data_out.append(i)

    yield writer.write(data_out)
    yield Timer(CLK_WR_PERIOD * 100)
    data = yield reader.read(COUNT)
    yield Timer(CLK_WR_PERIOD * 200)
def start(self):
    self.fork_falling = cocotb.fork(self._FallingEdgeDetection())
    self.fork_rising = cocotb.fork(self._RisingEdgeDetection())
    self.fork_start = cocotb.fork(self._startDetection())
    self.fork_stop = cocotb.fork(self._stopDetection())
    yield self._analyser()
def F_process_first_buffer_test(dut):
    """Test data after processing the first message buffer"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())

    mockObject = Sha1Model()

    yield reset(dut)
    #yield load_data(dut, log, mockObject, 16)
    #mockObject.processInput()
    #mockObject.displayAll()
    yield load_data(dut, log, mockObject, 16)
    mockObject.processInput()
    mockObject.processBuffer()
    yield load_data(dut, log, mockObject, 65)
    yield load_data(dut, log, mockObject, 85)

    mockOut = "{:08x}".format(mockObject.H0)

    compare1 = convert_hex(dut.pbuffer1.test_word_4.value).rjust(8, '0')

    if compare1 != mockOut:
        raise TestFailure(
            "First buffer incorrect: {0} != {1}".format(compare1, mockOut))
    else:
        log.info("First buffer ok!")
def run(self):
    cocotb.fork(StreamRandomizer("forkInput", self.onInput, None, self.dut, self.dut.clk))

    for idx in range(0, 3):
        cocotb.fork(StreamReader("forkOutputs_" + str(idx), self.onOutput, idx, self.dut, self.dut.clk))

    while not reduce(lambda x, y: x and y, map(lambda x: x > 1000, self.counters)):
        yield RisingEdge(self.dut.clk)
def test_fifo_singleclock_standard(dut):
    """
    Test the module fifo_singleclock_standard, a synchronous FIFO with
    standard read characteristics
    """
    # Read the parameters back from the DUT to set up our model
    width = dut.WIDTH.value.integer  # [bit]
    depth = dut.DEPTH.value.integer  # [entries]
    dut._log.info("%d bit wide FIFO with %d entries." % (width, depth))

    # setup clock
    cocotb.fork(Clock(dut.clk, 1000).start())

    # reset
    dut._log.debug("Resetting DUT")
    dut.rst <= 1
    dut.din <= 0
    dut.wr_en <= 0
    dut.rd_en <= 0
    for _ in range(2):
        yield RisingEdge(dut.clk)
    dut.rst <= 0

    # start read and write processes
    write_thread = cocotb.fork(write_fifo(dut, dut.clk))
    read_thread = cocotb.fork(read_fifo_standard(dut, dut.clk))

    # wait for read/write to finish. Read only finishes if all required data
    # has been obtained, i.e. it implicitly waits for write as well.
    yield read_thread.join()

    dut._log.info("All tests done")
def A_cache_data_test(dut):
    """Tests that initial data cache gets built and latched properly"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())

    mockSha1 = wpa2slow.sha1.Sha1Model()
    mockObject = wpa2slow.hmac.HmacModel(mockSha1)

    yield reset(dut)

    size = random.randint(8, 64)
    print "Length: {:d}".format(size)

    yield load_random_data(dut, log, mockObject, size)
    #mockObject.displayAll()

    mockOut = "{}".format(mockObject.shaBo)

    print convert_hex(dut.test_word_1) + " " + convert_hex(dut.test_word_2) + " " + \
        convert_hex(dut.test_word_3) + " " + convert_hex(dut.test_word_4) + " " + \
        convert_hex(dut.test_word_5)

    if convert_hex(dut.test_word_1).zfill(8) != mockOut:
        raise TestFailure(
            "Load data is incorrect: {0} != {1}".format(convert_hex(dut.test_word_1), mockOut))
    else:
        log.info("Ok!")
async def test_fc_mem(dut):
    # Create a 10us period clock on port clk
    clock = Clock(dut.clk_i, 10, units="us")
    cocotb.fork(clock.start())

    # Reset system
    await FallingEdge(dut.clk_i)
    dut.rst_n_i <= 0
    dut.cycle_en_i <= 0
    dut.wr_en_i <= 0
    dut.rd_en_i <= 0
    dut.rd_wr_bank_i <= 0
    dut.rd_wr_addr_i <= 0
    dut.wr_data_i <= 0
    await FallingEdge(dut.clk_i)
    dut.rst_n_i <= 1
    dut.cycle_en_i <= 0
    dut.wr_en_i <= 0
    dut.rd_en_i <= 0
    dut.rd_wr_bank_i <= 0
    dut.rd_wr_addr_i <= 0
    dut.wr_data_i <= 0
    await FallingEdge(dut.clk_i)

    # Sequential Write Weights
    for i in range(208 * 2):
        dut.cycle_en_i <= 0
        dut.wr_en_i <= 1
        dut.rd_en_i <= 0
        dut.rd_wr_bank_i <= i // 208
        dut.rd_wr_addr_i <= i % 208
        dut.wr_data_i <= i % 208
        await FallingEdge(dut.clk_i)

    # Sequential Write Bias
    for i in range(2):
        dut.cycle_en_i <= 0
        dut.wr_en_i <= 1
        dut.rd_en_i <= 0
        dut.rd_wr_bank_i <= i + 2
        dut.rd_wr_addr_i <= 0
        dut.wr_data_i <= i
        await FallingEdge(dut.clk_i)

    # Sequential Read Weight
    for i in range(208 * 2):
        dut.cycle_en_i <= 0
        dut.wr_en_i <= 0
        dut.rd_en_i <= 1
        dut.rd_wr_bank_i <= i // 208
        dut.rd_wr_addr_i <= i % 208
        dut.wr_data_i <= 0
        await FallingEdge(dut.clk_i)

    # Sequential Read Bias
    for i in range(2):
        dut.cycle_en_i <= 0
        dut.wr_en_i <= 0
        dut.rd_en_i <= 1
        dut.rd_wr_bank_i <= i + 2
        dut.rd_wr_addr_i <= 0
        dut.wr_data_i <= 0
        await FallingEdge(dut.clk_i)

    await FallingEdge(dut.clk_i)

    # Check Cycling
    for i in range(208):
        dut.cycle_en_i <= 1
        dut.wr_en_i <= 0
        dut.rd_en_i <= 0
        dut.rd_wr_bank_i <= 0
        dut.rd_wr_addr_i <= 0
        dut.wr_data_i <= 0
        await FallingEdge(dut.clk_i)
async def initial_reset(vif_spi, dut):
    await Timer(0, "ns")
    vif_spi.i_reset <= 1
    await Timer(52, "ns")
    vif_spi.i_reset <= 0
    cocotb.fork(initial_run_test(dut, vif_spi))
def nt_recv_capture_top_test(dut):
    """Test bench main function."""
    # start the clock
    cocotb.fork(clk_gen(dut.clk, CLK_FREQ_MHZ))

    # no software reset
    dut.rst_sw <= 0

    # reset DuT
    yield rstn(dut.clk, dut.rstn)

    # create AXI4-Lite writer, connect and reset it
    axilite_writer = AXI_Lite_Writer()
    axilite_writer.connect(dut, dut.clk, AXI_CTRL_BIT_WIDTH, "ctrl")
    yield axilite_writer.rst()

    # create AXI4-Lite reader, connect and reset it
    axilite_reader = AXI_Lite_Reader()
    axilite_reader.connect(dut, dut.clk, AXI_CTRL_BIT_WIDTH, "ctrl")
    yield axilite_reader.rst()

    # create AXI4-Stream writer, connect and reset it
    axis_writer = AXIS_Writer()
    axis_writer.connect(dut, dut.clk, AXIS_BIT_WIDTH)
    yield axis_writer.rst()

    # create a ring buffer memory (initially of size 0) and connect it to the
    # DuT
    ring_buff = Mem(0)
    ring_buff.connect(dut, "ddr3")

    # generate a couple of random Ethernet packets. For each packet, generate
    # a 16 bit latency value and a 26 bit inter-packet time value
    pkts = []
    latencies = []
    inter_packet_times = []
    for _ in range(N_PACKETS):
        pkts.append(gen_packet())
        latencies.append(random.randint(0, 2**24 - 1))
        inter_packet_times.append(random.randint(0, 2**28 - 1))

    # start the ring buffer memory main routine
    cocotb.fork(ring_buff.main())

    # wait some more clock cycles
    yield wait_n_cycles(dut.clk, 5)

    # iterate over all ring buffer sizes
    for i, ring_buff_size in enumerate(RING_BUFF_SIZES):
        # set ring buffer size
        ring_buff.set_size(ring_buff_size)

        # iterate over all addresses where ring buffer shall be located in
        # memory
        for j, ring_buff_addr in enumerate(RING_BUFF_ADDRS):
            # print status
            print("Test %d/%d (this will take a while)" %
                  (i * len(RING_BUFF_ADDRS) + j + 1,
                   len(RING_BUFF_ADDRS) * len(RING_BUFF_SIZES)))

            # we have a total of 8 GByte of memory. Make sure the ring buffer
            # fits at the desired address
            if ring_buff_addr + ring_buff_size > 0x1FFFFFFFF:
                raise cocotb.result.TestFailure("ring buffer is too large")

            # to reduce the simulation memory footprint, provide the memory
            # module the first memory address that we actually care about
            ring_buff.set_offset(ring_buff_addr)

            # write ring buffer memory location and address range
            yield axilite_writer.write(CPUREG_OFFSET_CTRL_MEM_ADDR_HI,
                                       ring_buff_addr >> 32)
            yield axilite_writer.write(CPUREG_OFFSET_CTRL_MEM_ADDR_LO,
                                       ring_buff_addr & 0xFFFFFFFF)
            yield axilite_writer.write(CPUREG_OFFSET_CTRL_MEM_RANGE,
                                       ring_buff_size - 1)

            # iterate over all capture lengths
            for max_len_capture in MAX_CAPTURE_LENS:
                # reset read address pointer
                yield axilite_writer.write(CPUREG_OFFSET_CTRL_ADDR_RD, 0x0)

                # set max capture length
                yield axilite_writer.write(CPUREG_OFFSET_CTRL_MAX_LEN_CAPTURE,
                                           max_len_capture)

                # start coroutine that applies packets at input
                cocotb.fork(
                    packets_write(dut, axis_writer, axilite_writer,
                                  axilite_reader, pkts, latencies,
                                  inter_packet_times))

                # wait a bit
                yield wait_n_cycles(dut.clk, 50)

                # start the ring buffer read coroutine and wait until it
                # completes
                yield ring_buff_read(dut, axilite_writer, axilite_reader,
                                     ring_buff, ring_buff_addr,
                                     max_len_capture, pkts, latencies,
                                     inter_packet_times)

                # make sure no error occurred
                errs = yield axilite_reader.read(CPUREG_OFFSET_STATUS_ERRS)
                assert errs == 0x0

                # make sure packet count is correct
                pkt_cnt = \
                    yield axilite_reader.read(CPUREG_OFFSET_STATUS_PKT_CNT)
                assert pkt_cnt == len(pkts)

                # make sure module is deactivated now
                active = yield axilite_reader.read(CPUREG_OFFSET_STATUS_ACTIVE)
                assert active == 0

            # clear the ring buffer contents
            ring_buff.clear()
def __init__(self, data, header, clock, enable=None, slip=None,
             scramble=True, reverse=False, *args, **kwargs):
    self.log = logging.getLogger(f"cocotb.{data._path}")
    self.data = data
    self.header = header
    self.clock = clock
    self.enable = enable
    self.slip = slip
    self.scramble = scramble
    self.reverse = reverse

    self.log.info("BASE-R serdes source")
    self.log.info("Copyright (c) 2021 Alex Forencich")
    self.log.info("https://github.com/alexforencich/verilog-ethernet")

    super().__init__(*args, **kwargs)

    self.active = False
    self.queue = Queue()
    self.dequeue_event = Event()
    self.current_frame = None
    self.idle_event = Event()
    self.idle_event.set()

    self.enable_dic = True
    self.ifg = 12
    self.force_offset_start = False

    self.bit_offset = 0

    self.queue_occupancy_bytes = 0
    self.queue_occupancy_frames = 0

    self.queue_occupancy_limit_bytes = -1
    self.queue_occupancy_limit_frames = -1

    self.width = len(self.data)
    self.byte_size = 8
    self.byte_lanes = 8

    assert self.width == self.byte_lanes * self.byte_size

    self.log.info("BASE-R serdes source model configuration")
    self.log.info("  Byte size: %d bits", self.byte_size)
    self.log.info("  Data width: %d bits (%d bytes)", self.width, self.byte_lanes)
    self.log.info("  Enable scrambler: %s", self.scramble)
    self.log.info("  Bit reverse: %s", self.reverse)

    self.data.setimmediatevalue(0)
    self.header.setimmediatevalue(0)

    self._run_cr = cocotb.fork(self._run())
def setup_function(dut, key, enc_dec, text_input):
    cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())
    dut.key = key
    dut.enc_dec = enc_dec
    dut.text_input = text_input
    dut.rst = 1
def setup_dut(dut):
    cocotb.fork(Clock(dut.S_AXI_ACLK, CLK_PERIOD).start())
async def run_test(dut, config_clk="NoC_slwT_AXI", idle_inserter=None,
                   backpressure_inserter=None):
    noc_flavor = os.getenv("FLAVOR")
    noc_cfg = noc_const.NOC_CFG[noc_flavor]

    # Setup testbench
    idle = "no_idle" if idle_inserter is None else "w_idle"
    backp = "no_backpressure" if backpressure_inserter is None else "w_backpressure"
    tb = Tb(dut, f"sim_{config_clk}_{idle}_{backp}", noc_cfg)
    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)
    await tb.setup_clks(config_clk)
    await tb.arst(config_clk)

    csr = noc_const.NOC_CSRs

    # RaveNoC Version
    router = randrange(0, noc_cfg['max_nodes'])
    resp = await tb.read(sel=router, address=csr['RAVENOC_VERSION'], length=4, size=0x2)
    assert resp.resp == AxiResp.OKAY, "AXI bus should not have raised an error here!"
    # get object data, convert from bytearray to string with decode method and invert it
    version = resp.data.decode()[::-1]
    assert version == noc_const.NOC_VERSION, "NoC version not matching with expected!"
    tb.log.info("NoC Version = %s", version)

    # Router X,Y coordinates check
    router = randrange(0, noc_cfg['max_nodes'])
    # pkt not used, only to compare in the assertion
    ref_pkt = RaveNoC_pkt(cfg=noc_cfg, src_dest=(router, 0 if router != 0 else 1))
    resp_row = await tb.read(sel=router, address=csr['ROUTER_ROW_X_ID'], length=4, size=0x2)
    resp_col = await tb.read(sel=router, address=csr['ROUTER_COL_Y_ID'], length=4, size=0x2)
    assert resp_row.resp == AxiResp.OKAY, "AXI bus should not have raised an error here!"
    assert resp_col.resp == AxiResp.OKAY, "AXI bus should not have raised an error here!"
    resp_row = int.from_bytes(resp_row.data, byteorder='little', signed=False)
    resp_col = int.from_bytes(resp_col.data, byteorder='little', signed=False)
    assert resp_row == ref_pkt.src[1], "NoC CSR - Coordinate ROW not matching"
    assert resp_col == ref_pkt.src[2], "NoC CSR - Coordinate COL not matching"

    # IRQ registers
    router = randrange(0, noc_cfg['max_nodes'])
    resp = await tb.read(sel=router, address=csr['IRQ_RD_STATUS'], length=4, size=0x2)
    assert resp.resp == AxiResp.OKAY, "AXI bus should not have raised an error here!"
    irq = resp.data.decode()[::-1]

    router = randrange(0, noc_cfg['max_nodes'])
    rand_data = bytearray(tb._get_random_string(length=4), 'utf-8')
    req = await tb.write(sel=router, address=csr['IRQ_RD_MUX'], data=rand_data, size=0x2)
    assert req.resp == AxiResp.OKAY, "AXI bus should not have raised an error here!"
    resp = await tb.read(sel=router, address=csr['IRQ_RD_MUX'], length=4, size=0x2)
    assert resp.resp == AxiResp.OKAY, "AXI bus should not have raised an error here!"
    data_in = int.from_bytes(rand_data, byteorder='little', signed=False)
    data_out = int.from_bytes(resp.data, byteorder='little', signed=False)
    assert data_in == data_out, "NoC CSR, mismatch on IRQ_RD_MUX - Write/Read back"

    router = randrange(0, noc_cfg['max_nodes'])
    rand_data = bytearray(tb._get_random_string(length=4), 'utf-8')
    req = await tb.write(sel=router, address=csr['IRQ_RD_MASK'], data=rand_data, size=0x2)
    assert req.resp == AxiResp.OKAY, "AXI bus should not have raised an error here!"
    resp = await tb.read(sel=router, address=csr['IRQ_RD_MASK'], length=4, size=0x2)
    assert resp.resp == AxiResp.OKAY, "AXI bus should not have raised an error here!"
    data_in = int.from_bytes(rand_data, byteorder='little', signed=False)
    data_out = int.from_bytes(resp.data, byteorder='little', signed=False)
    assert data_in == data_out, "NoC CSR, mismatch on IRQ_RD_MASK - Write/Read back"

    # Illegal operations
    not_writable = [csr['RAVENOC_VERSION'], csr['ROUTER_ROW_X_ID'],
                    csr['ROUTER_COL_Y_ID'], csr['IRQ_RD_STATUS']]
    not_writable.extend([(csr['IRQ_RD_MASK'] + 4 + 4 * x)
                         for x in range(noc_cfg['n_virt_chn'])])
    router = randrange(0, noc_cfg['max_nodes'])
    rand_data = bytearray(tb._get_random_string(length=4), 'utf-8')
    for not_wr in not_writable:
        req = await tb.write(sel=router, address=not_wr, data=rand_data, size=0x2)
        assert req.resp == AxiResp.SLVERR, \
            "AXI bus should have raised an error here! ILLEGAL WR CSR:" + hex(not_wr)

    router = randrange(0, noc_cfg['max_nodes'])
    if noc_cfg['flit_data_width'] == 64:
        for i in csr:
            req = await tb.read(sel=router, address=csr[i], size=0x3)
            assert req.resp == AxiResp.SLVERR, "AXI bus should have raised an error here!"
            req = await tb.write(sel=router, address=csr[i], data=rand_data, size=0x3)
            assert req.resp == AxiResp.SLVERR, "AXI bus should have raised an error here!"

    # Testing RD_SIZE_VC[0,1,2...]_PKT
    for vc in range(noc_cfg['n_virt_chn']):
        await tb.arst(config_clk)
        msg_size = randrange(5, noc_cfg['max_sz_pkt'])
        msg = tb._get_random_string(length=msg_size)
        pkt = RaveNoC_pkt(cfg=noc_cfg, msg=msg, virt_chn_id=vc)
        write = cocotb.fork(tb.write_pkt(pkt, timeout=noc_const.TIMEOUT_AXI_EXT))
        await tb.wait_irq()
        resp_csr = await tb.read(sel=pkt.dest[0],
                                 address=(csr['RD_SIZE_VC_START'] + 4 * vc),
                                 length=4, size=0x2)
        resp_pkt_size = int.from_bytes(resp_csr.data, byteorder='little', signed=False)
        assert resp_pkt_size == pkt.length_beats, "Mismatch on CSR pkt size vs pkt sent!"
        resp = await tb.read_pkt(pkt, timeout=noc_const.TIMEOUT_AXI_EXT)
        tb.check_pkt(resp.data, pkt.msg)
def __init__(self, dut): self.dut = dut self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE")) self.log = SimLog("cocotb.tb") self.log.setLevel(logging.DEBUG) # PCIe self.rc = RootComplex() self.rc.max_payload_size = 0x1 # 256 bytes self.rc.max_read_request_size = 0x2 # 512 bytes self.dev = UltraScalePlusPcieDevice( # configuration options pcie_generation=3, pcie_link_width=8, user_clk_frequency=250e6, alignment="dword", cq_cc_straddle=False, rq_rc_straddle=False, rc_4tlp_straddle=False, enable_pf1=False, enable_client_tag=True, enable_extended_tag=True, enable_parity=False, enable_rx_msg_interface=False, enable_sriov=False, enable_extended_configuration=False, enable_pf0_msi=True, enable_pf1_msi=False, # signals # Clock and Reset Interface user_clk=dut.clk_250mhz, user_reset=dut.rst_250mhz, # user_lnk_up # sys_clk # sys_clk_gt # sys_reset # phy_rdy_out # Requester reQuest Interface rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"), pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0, pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0, pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1, pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1, # pcie_rq_tag0 # pcie_rq_tag1 # pcie_rq_tag_av # pcie_rq_tag_vld0 # pcie_rq_tag_vld1 # Requester Completion Interface rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"), # Completer reQuest Interface cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"), # pcie_cq_np_req # pcie_cq_np_req_count # Completer Completion Interface cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"), # Transmit Flow Control Interface # pcie_tfc_nph_av=dut.pcie_tfc_nph_av, # pcie_tfc_npd_av=dut.pcie_tfc_npd_av, # Configuration Management Interface cfg_mgmt_addr=dut.cfg_mgmt_addr, cfg_mgmt_function_number=dut.cfg_mgmt_function_number, cfg_mgmt_write=dut.cfg_mgmt_write, cfg_mgmt_write_data=dut.cfg_mgmt_write_data, cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable, cfg_mgmt_read=dut.cfg_mgmt_read, cfg_mgmt_read_data=dut.cfg_mgmt_read_data, cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done, # cfg_mgmt_debug_access # Configuration Status Interface # cfg_phy_link_down # cfg_phy_link_status # cfg_negotiated_width # cfg_current_speed cfg_max_payload=dut.cfg_max_payload, cfg_max_read_req=dut.cfg_max_read_req, # cfg_function_status # cfg_vf_status # cfg_function_power_state # cfg_vf_power_state # cfg_link_power_state # cfg_err_cor_out # cfg_err_nonfatal_out # cfg_err_fatal_out # cfg_local_error_out # cfg_local_error_valid # cfg_rx_pm_state # cfg_tx_pm_state # cfg_ltssm_state # cfg_rcb_status # cfg_obff_enable # cfg_pl_status_change # cfg_tph_requester_enable # cfg_tph_st_mode # cfg_vf_tph_requester_enable # cfg_vf_tph_st_mode # Configuration Received Message Interface # cfg_msg_received # cfg_msg_received_data # cfg_msg_received_type # Configuration Transmit Message Interface # cfg_msg_transmit # cfg_msg_transmit_type # cfg_msg_transmit_data # cfg_msg_transmit_done # Configuration Flow Control Interface cfg_fc_ph=dut.cfg_fc_ph, cfg_fc_pd=dut.cfg_fc_pd, cfg_fc_nph=dut.cfg_fc_nph, cfg_fc_npd=dut.cfg_fc_npd, cfg_fc_cplh=dut.cfg_fc_cplh, cfg_fc_cpld=dut.cfg_fc_cpld, cfg_fc_sel=dut.cfg_fc_sel, # Configuration Control Interface # cfg_hot_reset_in # cfg_hot_reset_out # cfg_config_space_enable # cfg_dsn # cfg_bus_number # cfg_ds_port_number # cfg_ds_bus_number # cfg_ds_device_number # cfg_ds_function_number # cfg_power_state_change_ack # cfg_power_state_change_interrupt cfg_err_cor_in=dut.status_error_cor, cfg_err_uncor_in=dut.status_error_uncor, # cfg_flr_in_process # cfg_flr_done # cfg_vf_flr_in_process # 
cfg_vf_flr_func_num # cfg_vf_flr_done # cfg_pm_aspm_l1_entry_reject # cfg_pm_aspm_tx_l0s_entry_disable # cfg_req_pm_transition_l23_ready # cfg_link_training_enable # Configuration Interrupt Controller Interface # cfg_interrupt_int # cfg_interrupt_sent # cfg_interrupt_pending cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable, cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable, cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update, cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data, # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select, cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int, cfg_interrupt_msi_pending_status=dut. cfg_interrupt_msi_pending_status, cfg_interrupt_msi_pending_status_data_enable=dut. cfg_interrupt_msi_pending_status_data_enable, # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num, cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent, cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail, # cfg_interrupt_msix_enable # cfg_interrupt_msix_mask # cfg_interrupt_msix_vf_enable # cfg_interrupt_msix_vf_mask # cfg_interrupt_msix_address # cfg_interrupt_msix_data # cfg_interrupt_msix_int # cfg_interrupt_msix_vec_pending # cfg_interrupt_msix_vec_pending_status cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr, cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present, cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type, # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag, # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number, # Configuration Extend Interface # cfg_ext_read_received # cfg_ext_write_received # cfg_ext_register_number # cfg_ext_function_number # cfg_ext_write_data # cfg_ext_write_byte_enable # cfg_ext_read_data # cfg_ext_read_data_valid ) # self.dev.log.setLevel(logging.DEBUG) self.rc.make_port().connect(self.dev) self.driver = mqnic.Driver(self.rc) self.dev.functions[0].msi_multiple_message_capable = 5 self.dev.functions[0].configure_bar(0, 2**self.BAR0_APERTURE, ext=True, prefetch=True) # Ethernet cocotb.fork(Clock(dut.sfp_1_rx_clk, 6.4, units="ns").start()) self.sfp_1_source = XgmiiSource(dut.sfp_1_rxd, dut.sfp_1_rxc, dut.sfp_1_rx_clk, dut.sfp_1_rx_rst) cocotb.fork(Clock(dut.sfp_1_tx_clk, 6.4, units="ns").start()) self.sfp_1_sink = XgmiiSink(dut.sfp_1_txd, dut.sfp_1_txc, dut.sfp_1_tx_clk, dut.sfp_1_tx_rst) cocotb.fork(Clock(dut.sfp_2_rx_clk, 6.4, units="ns").start()) self.sfp_2_source = XgmiiSource(dut.sfp_2_rxd, dut.sfp_2_rxc, dut.sfp_2_rx_clk, dut.sfp_2_rx_rst) cocotb.fork(Clock(dut.sfp_2_tx_clk, 6.4, units="ns").start()) self.sfp_2_sink = XgmiiSink(dut.sfp_2_txd, dut.sfp_2_txc, dut.sfp_2_tx_clk, dut.sfp_2_tx_rst) dut.sfp_1_npres.setimmediatevalue(0) dut.sfp_2_npres.setimmediatevalue(0) dut.sfp_1_los.setimmediatevalue(0) dut.sfp_2_los.setimmediatevalue(0) dut.sma_in.setimmediatevalue(0) dut.sfp_i2c_scl_i.setimmediatevalue(1) dut.sfp_1_i2c_sda_i.setimmediatevalue(1) dut.sfp_2_i2c_sda_i.setimmediatevalue(1) dut.eeprom_i2c_scl_i.setimmediatevalue(1) dut.eeprom_i2c_sda_i.setimmediatevalue(1) dut.flash_dq_i.setimmediatevalue(0) self.loopback_enable = False cocotb.fork(self._run_loopback())
def transaction_setup(self, addr, data, epnum=0):
    epaddr_out = EndpointType.epaddr(0, EndpointType.OUT)

    xmit = cocotb.fork(self.host_setup(addr, epnum, data))
    yield self.expect_setup(epaddr_out, data)
    yield xmit.join()
def setup_dut(dut):
    cocotb.fork(Clock(dut.clock, CLOCK_PERIOD, units="ns").start())
async def test_fork_deprecated(_):
    with pytest.warns(DeprecationWarning):
        cocotb.fork(example_coro())
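For contrast, a minimal sketch of the non-deprecated form, assuming cocotb 1.6+ where cocotb.start_soon replaces cocotb.fork; the coroutine and test names below are illustrative, not from the source.

import cocotb
from cocotb.triggers import Timer

async def example_coro():
    await Timer(1, units="ns")

@cocotb.test()
async def test_start_soon(_):
    task = cocotb.start_soon(example_coro())  # scheduled without a DeprecationWarning
    await task.join()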
def test1(dut):
    random.seed(0)
    from cocotblib.misc import cocotbXHack
    cocotbXHack()

    cocotb.fork(ClockDomainAsyncResetCustom(3300, dut.clk0, dut.serdesClk0, dut.serdesClk90, dut.rst0))
    cocotb.fork(simulationSpeedPrinter(dut.clk0))

    bmbs = [Bmb(dut, "system_io_ports_" + str(x)) for x in range(1)]
    tester = BmbMemoryTester(bmbs, 64 * 1024, 8, 32, dut.clk0, dut.rst0, True)

    @cocotb.coroutine
    def delay():
        yield RisingEdge(dut.clk0)

    ctrlApb = Apb3(dut, "system_io_ctrlApb", dut.clk0)
    phyApb = Apb3(dut, "system_io_phyApb", dut.clk0)

    @cocotb.coroutine
    def apbWrite(address, data):
        yield ctrlApb.write(address, data)

    yield apbWrite(0x0, 0x20001)
    yield apbWrite(0x4, 0x2)
    yield apbWrite(0x10, 0x491)
    yield apbWrite(0x20, 0x1170205)
    yield apbWrite(0x24, 0x2)
    yield apbWrite(0x28, 0x2030102)
    yield apbWrite(0x30, 0x5)
    yield apbWrite(0x34, 0x103)

    yield apbWrite(0x110, 0x0)
    yield apbWrite(0x110, 0x1)
    yield apbWrite(0x110, 0x3)

    yield apbWrite(0x10c, 0x2)
    yield apbWrite(0x108, 0x200)
    yield apbWrite(0x104, 0x0)
    yield apbWrite(0x100, 0x0)

    yield apbWrite(0x10c, 0x3)
    yield apbWrite(0x108, 0x0)
    yield apbWrite(0x104, 0x0)
    yield apbWrite(0x100, 0x0)

    yield apbWrite(0x10c, 0x1)
    yield apbWrite(0x108, 0x44)
    yield apbWrite(0x104, 0x0)
    yield apbWrite(0x100, 0x0)

    yield apbWrite(0x10c, 0x0)
    yield apbWrite(0x108, 0x310)
    yield apbWrite(0x104, 0x0)
    yield apbWrite(0x100, 0x0)

    yield apbWrite(0x10c, 0x0)
    yield apbWrite(0x108, 0x400)
    yield apbWrite(0x104, 0xc)
    yield apbWrite(0x100, 0x0)

    # yield ctrlApb.write(0x000, 0x00)  # phase command = 0
    # yield ctrlApb.write(0x110, 0x00)  # reset
    # yield ctrlApb.delay(10)
    # yield ctrlApb.write(0x110, 0x01)  # !reset
    # yield ctrlApb.delay(10)
    # yield ctrlApb.write(0x110, 0x03)  # cke
    # yield ctrlApb.delay(10)
    #
    # @cocotb.coroutine
    # def command(cmd, bank, address):
    #     yield ctrlApb.write(0x10C, bank)
    #     yield ctrlApb.write(0x108, address)
    #     yield ctrlApb.write(0x104, cmd)
    #     yield ctrlApb.write(0x100, 0)
    #     yield ctrlApb.delay(10)
    #
    # CKE = 1 << 0
    # CSn = 1 << 1
    # RASn = 1 << 2
    # CASn = 1 << 3
    # WEn = 1 << 4
    #
    # PRE = CKE | CASn
    # REF = CKE | WEn
    # MOD = CKE
    # ZQCL = CKE | RASn | CASn
    #
    # CL = 2  # 5
    #
    # yield command(MOD, 2, 0)
    # yield command(MOD, 3, 0)
    # yield command(MOD, 1, 0)
    # yield command(MOD, 0, (1 << 9) | 0x100 | ((CL & 1) << 2) | ((CL & 0xE) << 3))  # DLL reset
    # yield command(ZQCL, 0, 0x400)
    # yield ctrlApb.delay(1000)

    delay()

    tester.run = True
    while True:
        yield Timer(0x1000000)
def __init__(self, dut):
    self.dut = dut

    self.log = logging.getLogger("cocotb.tb")
    self.log.setLevel(logging.DEBUG)

    # PCIe
    self.rc = RootComplex()

    self.dev = UltraScalePlusPcieDevice(
        # configuration options
        pcie_generation=3,
        # pcie_link_width=2,
        # user_clk_frequency=250e6,
        alignment="dword",
        cq_cc_straddle=False,
        rq_rc_straddle=False,
        rc_4tlp_straddle=False,
        enable_pf1=False,
        enable_client_tag=True,
        enable_extended_tag=False,
        enable_parity=False,
        enable_rx_msg_interface=False,
        enable_sriov=False,
        enable_extended_configuration=False,
        enable_pf0_msi=True,
        enable_pf1_msi=False,

        # signals
        user_clk=dut.clk,
        user_reset=dut.rst,

        rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
        pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
        pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
        pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
        pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,

        rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

        cfg_max_payload=dut.max_payload_size,
        cfg_max_read_req=dut.max_read_request_size,

        cfg_fc_sel=0b100,
        cfg_fc_ph=dut.pcie_tx_fc_ph_av,
        cfg_fc_pd=dut.pcie_tx_fc_pd_av,
        cfg_fc_nph=dut.pcie_tx_fc_nph_av,
    )

    self.dev.log.setLevel(logging.DEBUG)

    self.rc.make_port().connect(self.dev)

    # AXI
    self.axi_ram = AxiRam(AxiBus.from_prefix(dut, "m_axi"), dut.clk, dut.rst, size=2**16)

    # Control
    self.read_desc_source = DescSource(DescBus.from_prefix(dut, "s_axis_read_desc"), dut.clk, dut.rst)
    self.read_desc_status_sink = DescStatusSink(DescStatusBus.from_prefix(dut, "m_axis_read_desc_status"), dut.clk, dut.rst)
    self.write_desc_source = DescSource(DescBus.from_prefix(dut, "s_axis_write_desc"), dut.clk, dut.rst)
    self.write_desc_status_sink = DescStatusSink(DescStatusBus.from_prefix(dut, "m_axis_write_desc_status"), dut.clk, dut.rst)

    dut.requester_id.setimmediatevalue(0)
    dut.requester_id_enable.setimmediatevalue(0)

    dut.ext_tag_enable.setimmediatevalue(0)
    dut.read_enable.setimmediatevalue(0)
    dut.write_enable.setimmediatevalue(0)

    # monitor error outputs
    self.status_error_cor_asserted = False
    self.status_error_uncor_asserted = False
    cocotb.fork(self._run_monitor_status_error_cor())
    cocotb.fork(self._run_monitor_status_error_uncor())
def __init__(self, dut):
    self.dut = dut

    self.log = logging.getLogger("cocotb.tb")
    self.log.setLevel(logging.DEBUG)

    if len(dut.xgmii_txd) == 64:
        cocotb.fork(Clock(dut.logic_clk, 6.4, units="ns").start())
        cocotb.fork(Clock(dut.rx_clk, 6.4, units="ns").start())
        cocotb.fork(Clock(dut.tx_clk, 6.4, units="ns").start())
    else:
        cocotb.fork(Clock(dut.logic_clk, 3.2, units="ns").start())
        cocotb.fork(Clock(dut.rx_clk, 3.2, units="ns").start())
        cocotb.fork(Clock(dut.tx_clk, 3.2, units="ns").start())

    self.xgmii_source = XgmiiSource(dut.xgmii_rxd, dut.xgmii_rxc, dut.rx_clk, dut.rx_rst)
    self.xgmii_sink = XgmiiSink(dut.xgmii_txd, dut.xgmii_txc, dut.tx_clk, dut.tx_rst)

    self.axis_source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "tx_axis"), dut.logic_clk, dut.logic_rst)
    self.axis_sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "rx_axis"), dut.logic_clk, dut.logic_rst)

    dut.ptp_sample_clk.setimmediatevalue(0)
    dut.ptp_ts_96.setimmediatevalue(0)
    dut.ptp_ts_step.setimmediatevalue(0)
def test_fork_syntax_error(dut):
    """Syntax error in a coroutine that we fork"""
    yield clock_gen(dut.clk)
    cocotb.fork(syntax_error())
    yield clock_gen(dut.clk)
def test_function_not_a_coroutine_fork(dut):
    """Example of trying to fork a coroutine that isn't a coroutine"""
    yield Timer(500)
    cocotb.fork(function_not_a_coroutine())
    yield Timer(500)
def dual_iteration(dut):
    loop_one = cocotb.fork(iteration_loop(dut))
    loop_two = cocotb.fork(iteration_loop(dut))

    yield [loop_one.join(), loop_two.join()]
def __init__(self, dut): self.dut = dut self.log = logging.getLogger("cocotb.tb") self.log.setLevel(logging.DEBUG) # PCIe self.rc = RootComplex() self.dev = UltraScalePlusPcieDevice( # configuration options pcie_generation=3, # pcie_link_width=2, # user_clk_frequency=250e6, alignment="dword", cq_cc_straddle=False, rq_rc_straddle=False, rc_4tlp_straddle=False, enable_pf1=False, enable_client_tag=True, enable_extended_tag=False, enable_parity=False, enable_rx_msg_interface=False, enable_sriov=False, enable_extended_configuration=False, enable_pf0_msi=True, enable_pf1_msi=False, # signals user_clk=dut.user_clk, user_reset=dut.user_reset, user_lnk_up=dut.user_lnk_up, sys_clk=dut.sys_clk, sys_clk_gt=dut.sys_clk_gt, sys_reset=dut.sys_reset, phy_rdy_out=dut.phy_rdy_out, rq_bus=AxiStreamBus.from_prefix(dut, "s_axis_rq"), pcie_rq_seq_num0=dut.pcie_rq_seq_num0, pcie_rq_seq_num_vld0=dut.pcie_rq_seq_num_vld0, pcie_rq_seq_num1=dut.pcie_rq_seq_num1, pcie_rq_seq_num_vld1=dut.pcie_rq_seq_num_vld1, pcie_rq_tag0=dut.pcie_rq_tag0, pcie_rq_tag1=dut.pcie_rq_tag1, pcie_rq_tag_av=dut.pcie_rq_tag_av, pcie_rq_tag_vld0=dut.pcie_rq_tag_vld0, pcie_rq_tag_vld1=dut.pcie_rq_tag_vld1, rc_bus=AxiStreamBus.from_prefix(dut, "m_axis_rc"), cq_bus=AxiStreamBus.from_prefix(dut, "m_axis_cq"), pcie_cq_np_req=dut.pcie_cq_np_req, pcie_cq_np_req_count=dut.pcie_cq_np_req_count, cc_bus=AxiStreamBus.from_prefix(dut, "s_axis_cc"), pcie_tfc_nph_av=dut.pcie_tfc_nph_av, pcie_tfc_npd_av=dut.pcie_tfc_npd_av, cfg_phy_link_down=dut.cfg_phy_link_down, cfg_phy_link_status=dut.cfg_phy_link_status, cfg_negotiated_width=dut.cfg_negotiated_width, cfg_current_speed=dut.cfg_current_speed, cfg_max_payload=dut.cfg_max_payload, cfg_max_read_req=dut.cfg_max_read_req, cfg_function_status=dut.cfg_function_status, cfg_function_power_state=dut.cfg_function_power_state, cfg_vf_status=dut.cfg_vf_status, cfg_vf_power_state=dut.cfg_vf_power_state, cfg_link_power_state=dut.cfg_link_power_state, cfg_mgmt_addr=dut.cfg_mgmt_addr, cfg_mgmt_function_number=dut.cfg_mgmt_function_number, cfg_mgmt_write=dut.cfg_mgmt_write, cfg_mgmt_write_data=dut.cfg_mgmt_write_data, cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable, cfg_mgmt_read=dut.cfg_mgmt_read, cfg_mgmt_read_data=dut.cfg_mgmt_read_data, cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done, cfg_mgmt_debug_access=dut.cfg_mgmt_debug_access, cfg_err_cor_out=dut.cfg_err_cor_out, cfg_err_nonfatal_out=dut.cfg_err_nonfatal_out, cfg_err_fatal_out=dut.cfg_err_fatal_out, cfg_local_error_valid=dut.cfg_local_error_valid, cfg_local_error_out=dut.cfg_local_error_out, cfg_ltssm_state=dut.cfg_ltssm_state, cfg_rx_pm_state=dut.cfg_rx_pm_state, cfg_tx_pm_state=dut.cfg_tx_pm_state, cfg_rcb_status=dut.cfg_rcb_status, cfg_obff_enable=dut.cfg_obff_enable, cfg_pl_status_change=dut.cfg_pl_status_change, cfg_tph_requester_enable=dut.cfg_tph_requester_enable, cfg_tph_st_mode=dut.cfg_tph_st_mode, cfg_vf_tph_requester_enable=dut.cfg_vf_tph_requester_enable, cfg_vf_tph_st_mode=dut.cfg_vf_tph_st_mode, cfg_msg_received=dut.cfg_msg_received, cfg_msg_received_data=dut.cfg_msg_received_data, cfg_msg_received_type=dut.cfg_msg_received_type, cfg_msg_transmit=dut.cfg_msg_transmit, cfg_msg_transmit_type=dut.cfg_msg_transmit_type, cfg_msg_transmit_data=dut.cfg_msg_transmit_data, cfg_msg_transmit_done=dut.cfg_msg_transmit_done, cfg_fc_ph=dut.cfg_fc_ph, cfg_fc_pd=dut.cfg_fc_pd, cfg_fc_nph=dut.cfg_fc_nph, cfg_fc_npd=dut.cfg_fc_npd, cfg_fc_cplh=dut.cfg_fc_cplh, cfg_fc_cpld=dut.cfg_fc_cpld, cfg_fc_sel=dut.cfg_fc_sel, cfg_dsn=dut.cfg_dsn, 
cfg_bus_number=dut.cfg_bus_number, cfg_power_state_change_ack=dut.cfg_power_state_change_ack, cfg_power_state_change_interrupt=dut.cfg_power_state_change_interrupt, cfg_err_cor_in=dut.cfg_err_cor_in, cfg_err_uncor_in=dut.cfg_err_uncor_in, cfg_flr_in_process=dut.cfg_flr_in_process, cfg_flr_done=dut.cfg_flr_done, cfg_vf_flr_in_process=dut.cfg_vf_flr_in_process, cfg_vf_flr_func_num=dut.cfg_vf_flr_func_num, cfg_vf_flr_done=dut.cfg_vf_flr_done, cfg_link_training_enable=dut.cfg_link_training_enable, cfg_interrupt_int=dut.cfg_interrupt_int, cfg_interrupt_pending=dut.cfg_interrupt_pending, cfg_interrupt_sent=dut.cfg_interrupt_sent, cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable, cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable, cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update, cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data, cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select, cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int, cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status, cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable, cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num, cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent, cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail, cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr, cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present, cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type, cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag, cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number, cfg_pm_aspm_l1_entry_reject=dut.cfg_pm_aspm_l1_entry_reject, cfg_pm_aspm_tx_l0s_entry_disable=dut.cfg_pm_aspm_tx_l0s_entry_disable, cfg_hot_reset_out=dut.cfg_hot_reset_out, cfg_config_space_enable=dut.cfg_config_space_enable, cfg_req_pm_transition_l23_ready=dut.cfg_req_pm_transition_l23_ready, cfg_hot_reset_in=dut.cfg_hot_reset_in, cfg_ds_port_number=dut.cfg_ds_port_number, cfg_ds_bus_number=dut.cfg_ds_bus_number, cfg_ds_device_number=dut.cfg_ds_device_number, ) self.dev.log.setLevel(logging.DEBUG) dut.pcie_cq_np_req.setimmediatevalue(1) dut.cfg_mgmt_addr.setimmediatevalue(0) dut.cfg_mgmt_function_number.setimmediatevalue(0) dut.cfg_mgmt_write.setimmediatevalue(0) dut.cfg_mgmt_write_data.setimmediatevalue(0) dut.cfg_mgmt_byte_enable.setimmediatevalue(0) dut.cfg_mgmt_read.setimmediatevalue(0) dut.cfg_mgmt_debug_access.setimmediatevalue(0) dut.cfg_msg_transmit.setimmediatevalue(0) dut.cfg_msg_transmit_type.setimmediatevalue(0) dut.cfg_msg_transmit_data.setimmediatevalue(0) dut.cfg_fc_sel.setimmediatevalue(0) dut.cfg_dsn.setimmediatevalue(0) dut.cfg_power_state_change_ack.setimmediatevalue(0) dut.cfg_err_cor_in.setimmediatevalue(0) dut.cfg_err_uncor_in.setimmediatevalue(0) dut.cfg_flr_done.setimmediatevalue(0) dut.cfg_vf_flr_func_num.setimmediatevalue(0) dut.cfg_vf_flr_done.setimmediatevalue(0) dut.cfg_link_training_enable.setimmediatevalue(1) dut.cfg_interrupt_int.setimmediatevalue(0) dut.cfg_interrupt_pending.setimmediatevalue(0) dut.cfg_interrupt_msi_select.setimmediatevalue(0) dut.cfg_interrupt_msi_int.setimmediatevalue(0) dut.cfg_interrupt_msi_pending_status.setimmediatevalue(0) dut.cfg_interrupt_msi_pending_status_data_enable.setimmediatevalue(0) dut.cfg_interrupt_msi_pending_status_function_num.setimmediatevalue(0) dut.cfg_interrupt_msi_attr.setimmediatevalue(0) dut.cfg_interrupt_msi_tph_present.setimmediatevalue(0) dut.cfg_interrupt_msi_tph_type.setimmediatevalue(0) 
dut.cfg_interrupt_msi_tph_st_tag.setimmediatevalue(0) dut.cfg_interrupt_msi_function_number.setimmediatevalue(0) dut.cfg_pm_aspm_l1_entry_reject.setimmediatevalue(0) dut.cfg_pm_aspm_tx_l0s_entry_disable.setimmediatevalue(0) dut.cfg_config_space_enable.setimmediatevalue(1) dut.cfg_req_pm_transition_l23_ready.setimmediatevalue(0) dut.cfg_hot_reset_in.setimmediatevalue(0) dut.cfg_ds_port_number.setimmediatevalue(0) dut.cfg_ds_bus_number.setimmediatevalue(0) dut.cfg_ds_device_number.setimmediatevalue(0) dut.sys_clk.setimmediatevalue(0) dut.sys_clk_gt.setimmediatevalue(0) dut.sys_reset.setimmediatevalue(1) self.rc.make_port().connect(self.dev) # user logic self.rq_source = RqSource(AxiStreamBus.from_prefix(dut, "s_axis_rq"), dut.user_clk, dut.user_reset) self.rc_sink = RcSink(AxiStreamBus.from_prefix(dut, "m_axis_rc"), dut.user_clk, dut.user_reset) self.cq_sink = CqSink(AxiStreamBus.from_prefix(dut, "m_axis_cq"), dut.user_clk, dut.user_reset) self.cc_source = CcSource(AxiStreamBus.from_prefix(dut, "s_axis_cc"), dut.user_clk, dut.user_reset) self.regions = [None]*6 self.regions[0] = mmap.mmap(-1, 1024*1024) self.regions[1] = mmap.mmap(-1, 1024*1024) self.regions[3] = mmap.mmap(-1, 1024) self.current_tag = 0 self.tag_count = 32 self.tag_active = [False]*256 self.tag_release = Event() self.dev.functions[0].msi_multiple_message_capable = 5 self.dev.functions[0].configure_bar(0, len(self.regions[0])) self.dev.functions[0].configure_bar(1, len(self.regions[1]), True, True) self.dev.functions[0].configure_bar(3, len(self.regions[3]), False, False, True) cocotb.fork(self._run_cq())
def test(dut):
    dut.num_samples <= 24     # number of samples
    dut.pre_trigger <= 8      # number of samples before trigger
    dut.trigger_value <= 170  # trigger value

    ch1 = dut.input_sample
    ch2 = 0
    ram = RAM_Controller(dut.clk, dut.write_enable, ch1, ch2)
    adc = ADC(dut.clk, dut.input_rdy, ch1, ch2)
    tx_protocol = TX_PROTOCOL(dut.clk, dut.rqst_trigger_status, dut.trigger_status_data,
                              dut.trigger_status_rdy, dut.trigger_status_eof,
                              dut.trigger_status_ack)

    cocotb.fork(Clock(dut.clk, 10, units='ns').start())
    yield Reset(dut)

    # Testing triggered
    for t in range(100):
        x1 = 128 + int(100 * sin(2 * pi * 1e6 * t * 20e-9))
        x2 = 128 - int(50 * sin(2 * pi * 1e6 * t * 20e-9))
        adc.write(x1, x2)

    cocotb.fork(ram.run())
    cocotb.fork(adc.sampling())

    yield Start(dut)

    for i in range(10):
        yield RisingEdge(dut.clk)

    yield tx_protocol.request_data()

    while (dut.triggered_o.value.integer == 0 or dut.buffer_full_o.value.integer == 0):
        yield RisingEdge(dut.clk)

    for i in range(10):
        yield RisingEdge(dut.clk)

    print "-----------------------------------------"
    print "Fifo LEN = " + repr(len(tx_protocol.fifo))
    for i in range(len(tx_protocol.fifo)):
        print "Fifo Read: " + repr(tx_protocol.fifo.pop(0))

    print "triggered , full = " + repr(dut.triggered_o.value.integer) + " ; " + repr(dut.buffer_full_o.value.integer)

    yield tx_protocol.request_data()

    print "triggered , full = " + repr(dut.triggered_o.value.integer) + " ; " + repr(dut.buffer_full_o.value.integer)

    for i in range(50):
        yield RisingEdge(dut.clk)

    print "-----------------------------------------"
    print "Fifo LEN = " + repr(len(tx_protocol.fifo))
    for i in range(len(tx_protocol.fifo)):
        print "Fifo Read: " + repr(tx_protocol.fifo.pop(0))
    print "-----------------------------------------"

    # Testing trigger
    adc.clear()
    for t in range(100):
        x1 = 128 + int(10 * sin(2 * pi * 1e6 * t * 20e-9))
        x2 = 128 - int(5 * sin(2 * pi * 1e6 * t * 20e-9))
        adc.write(x1, x2)

    yield Start(dut)

    while (dut.buffer_full_o.value.integer == 0):
        yield RisingEdge(dut.clk)

    for i in range(20):
        yield RisingEdge(dut.clk)
def init_posedge_clk(dut_clk):
    # Start clock running in background
    cocotb.fork(Clock(dut_clk, 10, 'us').start(start_high=False))
    return RisingEdge(dut_clk)
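A hypothetical caller of the helper above; the test name and the dut.clk signal are assumptions for illustration.

import cocotb

@cocotb.test()
async def count_edges(dut):
    clkedge = init_posedge_clk(dut.clk)   # returns a reusable RisingEdge trigger
    for _ in range(10):
        await clkedge
    dut._log.info("Saw 10 rising edges")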
def transaction_status_out(self, addr, ep):
    epnum = EndpointType.epnum(ep)
    assert EndpointType.epdir(ep) == EndpointType.OUT

    xmit = cocotb.fork(self.host_send(PID.DATA1, addr, epnum, []))
    yield xmit.join()
def setup_function(dut, salt, count, user_password):
    cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())
    dut.rst = 0
    dut.salt = salt
    dut.count = count
    dut.user_password = user_password
def create_clock(dut):
    cocotb.fork(Clock(dut.clk, 10, 'ns').start())
def __init__(self, entity, name, clock, readlatency_min=1,
             readlatency_max=1, memory=None, avl_properties={}, **kwargs):
    BusDriver.__init__(self, entity, name, clock, **kwargs)

    if avl_properties != {}:
        for key, value in self._avalon_properties.items():
            self._avalon_properties[key] = avl_properties.get(key, value)

    if self._avalon_properties["burstCountUnits"] != "symbols":
        self.log.error("Only symbols burstCountUnits is supported")

    if self._avalon_properties["addressUnits"] != "symbols":
        self.log.error("Only symbols addressUnits is supported")

    self._burstread = False
    self._burstwrite = False
    self._readable = False
    self._writeable = False
    self._width = None

    if hasattr(self.bus, "readdata"):
        self._width = len(self.bus.readdata)
        self.dataByteSize = int(self._width / 8)
        self._readable = True

    if hasattr(self.bus, "writedata"):
        width = len(self.bus.writedata)
        if (self._width is not None) and self._width != width:
            self.log.error("readdata and writedata bus are not the same size")
        self._width = width
        self.dataByteSize = int(self._width / 8)
        self._writeable = True

    if not self._readable and not self._writeable:
        raise TestError("Attempt to instantiate useless memory")

    # Allow dual port RAMs by referencing the same dictionary
    if memory is None:
        self._mem = {}
    else:
        self._mem = memory

    self._val = BinaryValue(n_bits=self._width, bigEndian=False)
    self._readlatency_min = readlatency_min
    self._readlatency_max = readlatency_max
    self._responses = []
    self._coro = cocotb.fork(self._respond())

    if hasattr(self.bus, "readdatavalid"):
        self.bus.readdatavalid.setimmediatevalue(0)

    if hasattr(self.bus, "waitrequest"):
        self.bus.waitrequest.setimmediatevalue(0)

    if hasattr(self.bus, "burstcount"):
        if hasattr(self.bus, "readdatavalid"):
            self._burstread = True
        self._burstwrite = True
        if self._avalon_properties.get("WriteBurstWaitReq", True):
            self.bus.waitrequest <= 1
        else:
            self.bus.waitrequest <= 0

    if hasattr(self.bus, "readdatavalid"):
        self.bus.readdatavalid.setimmediatevalue(0)
async def test_task_repr(dut):
    """Test RunningTask.__repr__."""
    log = logging.getLogger("cocotb.test")
    gen_e = Event('generator_coro_inner')

    def generator_coro_inner():
        gen_e.set()
        yield Timer(1, units='ns')
        raise ValueError("inner")

    @cocotb.coroutine  # testing debug with legacy coroutine syntax
    def generator_coro_outer():
        yield from generator_coro_inner()

    gen_task = generator_coro_outer()

    log.info(repr(gen_task))
    assert re.match(r"<Task \d+ created coro=generator_coro_outer\(\)>", repr(gen_task))

    cocotb.fork(gen_task)
    await gen_e.wait()

    log.info(repr(gen_task))
    assert re.match(
        r"<Task \d+ pending coro=generator_coro_inner\(\) trigger=<Timer of 1000.00ps at \w+>>",
        repr(gen_task))

    try:
        await Join(gen_task)
    except ValueError:
        pass

    log.info(repr(gen_task))
    assert re.match(
        r"<Task \d+ finished coro=generator_coro_outer\(\) outcome=Error\(ValueError\('inner',?\)\)>",
        repr(gen_task))

    coro_e = Event('coroutine_inner')

    async def coroutine_forked(task):
        log.info(repr(task))
        assert re.match(r"<Task \d+ adding coro=coroutine_outer\(\)>", repr(task))

    @cocotb.coroutine  # Combine requires use of cocotb.coroutine
    async def coroutine_wait():
        await Timer(1, units='ns')

    async def coroutine_inner():
        await coro_e.wait()
        this_task = coro_e.data
        # cr_await is None while the coroutine is running, so we can't get the stack...
        log.info(repr(this_task))
        assert re.match(r"<Task \d+ running coro=coroutine_outer\(\)>", repr(this_task))

        cocotb.fork(coroutine_forked(this_task))
        await Combine(*(coroutine_wait() for _ in range(2)))

        return "Combine done"

    async def coroutine_middle():
        return await coroutine_inner()

    async def coroutine_outer():
        return await coroutine_middle()

    coro_task = cocotb.fork(coroutine_outer())
    coro_e.set(coro_task)
    await NullTrigger()

    log.info(repr(coro_task))
    assert re.match(
        r"<Task \d+ pending coro=coroutine_inner\(\) trigger=Combine\(Join\(<Task \d+>\), Join\(<Task \d+>\)\)>",
        repr(coro_task))

    await Timer(2, units='ns')

    log.info(repr(coro_task))
    assert re.match(
        r"<Task \d+ finished coro=coroutine_outer\(\) outcome=Value\('Combine done'\)",
        repr(coro_task))

    async def coroutine_first():
        await First(coroutine_wait(), Timer(2, units='ns'))

    coro_task = cocotb.fork(coroutine_first())

    log.info(repr(coro_task))
    assert re.match(
        r"<Task \d+ pending coro=coroutine_first\(\) trigger=First\(Join\(<Task \d+>\), <Timer of 2000.00ps at \w+>\)>",
        repr(coro_task))

    async def coroutine_timer():
        await Timer(1, units='ns')

    coro_task = cocotb.fork(coroutine_timer())

    # Trigger.__await__ should be popped from the coroutine stack
    log.info(repr(coro_task))
    assert re.match(
        r"<Task \d+ pending coro=coroutine_timer\(\) trigger=<Timer of 1000.00ps at \w+>>",
        repr(coro_task))
def run_test(self, test_name=""):
    uvm_debug(self, 'run_test', 'Called with testname |' + test_name + '|')
    from .uvm_coreservice import UVMCoreService
    cs = UVMCoreService.get()
    factory = cs.get_factory()

    testname_plusarg = False
    test_names = []
    msg = ""
    uvm_test_top = None  # uvm_component
    ##process phase_runner_proc; // store thread forked below for final cleanup

    # Set up the process that decouples the thread that drops objections from
    # the process that processes drop/all_dropped objections. Thus, if the
    # original calling thread (the "dropper") gets killed, it does not affect
    # drain-time and propagation of the drop up the hierarchy.
    # Needs to be done in run_test since it needs to be in an
    # initial block to fork a process.
    uvm_debug(self, 'run_test', 'Calling m_init_objections')
    yield UVMObjection().m_init_objections()

    # Retrieve the test names provided on the command line. Command line
    # overrides the argument.
    test_name_count = self.clp.get_arg_values("+UVM_TESTNAME=", test_names)
    uvm_debug(self, 'run_test', 'Found testnames from cmdline: ' + str(test_names))

    # If at least one, use first in queue.
    if test_name_count > 0:
        test_name = test_names[0]
        uvm_debug(self, 'run_test', 'Found test name %s' % (test_name))
        testname_plusarg = True

    # If multiple, provide the warning giving the number, which one will be
    # used and the complete list.
    if test_name_count > 1:
        test_list = ""
        sep = ""
        for i in range(0, len(test_names)):
            if i != 0:
                sep = ", "
            test_list = test_list + sep + test_names[i]
        self.uvm_report_warning("MULTTST",
            MULTI_TESTS.format(test_name_count, test_name, test_list), UVM_NONE)

    # if test now defined, create it using common factory
    uvm_debug(self, 'run_test', 'Running now test ' + test_name)
    if test_name != "":
        if "uvm_test_top" in self.m_children:
            uvm_fatal("TTINST",
                "An uvm_test_top already exists via a previous call to run_test",
                UVM_NONE)
            #0; // forces shutdown because $finish is forked
            yield Timer(0, "NS")

        uvm_debug(self, 'run_test', "factory.create in UVMRoot testname " + test_name)
        uvm_test_top = factory.create_component_by_name(test_name, "",
            "uvm_test_top", None)

        if uvm_test_top is None:
            if testname_plusarg:
                msg = "command line +UVM_TESTNAME=" + test_name
            else:
                msg = "call to run_test(" + test_name + ")"
            uvm_report_fatal("INVTST",
                "Requested test from " + msg + " not found.", UVM_NONE)
            yield Timer(0, "NS")

    if len(self.m_children) == 0:
        self.uvm_report_fatal("NOCOMP",
            ("No components instantiated. You must either instantiate"
             + " at least one component before calling run_test or use"
             + " run_test to do so. To run a test using run_test,"
             + " use +UVM_TESTNAME or supply the test name in"
             + " the argument to run_test(). Exiting simulation."), UVM_NONE)
        return

    self.running_test_msg(test_name, uvm_test_top)

    # phase runner, isolated from calling process
    #fork begin  # spawn the phase runner task
    #phase_runner_proc = process::self()
    uvm_debug(self, 'run_test', 'Forking now phases')
    cocotb.fork(UVMPhase.m_run_phases())
    uvm_debug(self, 'run_test', 'After phase-fork executing')
    #end
    #join_none
    yield Timer(0)  ##0; // let the phase runner start

    uvm_debug(self, 'run_test', 'Waiting all phases to complete JKJK')
    yield self.wait_all_phases_done()
    uvm_debug(self, 'run_test', 'All phases are done now JKJK')

    #// clean up after ourselves
    #phase_runner_proc.kill()
    l_rs = get_report_server()
    l_rs.report_summarize()
    if self.finish_on_completion:
        self.uvm_report_info('FINISH', '$finish was reached in run_test()', UVM_NONE)
async def test_zero_pad(dut):
    """ Test Zero Padder """

    # Create a 10us period clock on port clk
    clock = Clock(dut.clk_i, 10, units="us")
    cocotb.fork(clock.start())

    # Reset system
    await FallingEdge(dut.clk_i)
    dut.rst_n_i <= 0
    dut.data_i <= 0
    dut.last_i <= 0
    dut.valid_i <= 0
    dut.ready_i <= 0
    await FallingEdge(dut.clk_i)
    dut.rst_n_i <= 1
    dut.ready_i <= 1

    # Sequential data emission - even number of entries
    vals = [i for i in range(FRAME_LENGTH)]
    expected_vals = [
        max(vals[i * 2], vals[i * 2 + 1]) for i in range(FRAME_LENGTH // 2)
    ]

    await FallingEdge(dut.clk_i)
    for i in range(FRAME_LENGTH + 2):
        if i < FRAME_LENGTH:
            val = vals[i]
            dut.data_i <= val
            dut.valid_i <= 1
            dut.last_i <= 0
        else:
            dut.last_i <= 0
            dut.valid_i <= 0
        if i == FRAME_LENGTH - 1:
            dut.last_i <= 1
        await FallingEdge(dut.clk_i)
        observed = dut.data_o.value
        if dut.valid_o.value == 1:
            expected = expected_vals[(i - 1) // 2]
            assert observed == expected,\
                "expected = %x, observed = %x" % (expected, observed)

    # Sequential data emission - odd number of entries
    vals = [i for i in range(FRAME_LENGTH)]
    vals.append(10)
    expected_vals = [
        max(vals[i * 2], vals[i * 2 + 1]) for i in range(FRAME_LENGTH // 2)
    ]
    expected_vals.append(10)
    print(expected_vals)

    await FallingEdge(dut.clk_i)
    for i in range(FRAME_LENGTH + 3):
        if i < FRAME_LENGTH + 1:
            val = vals[i]
            dut.data_i <= val
            dut.valid_i <= 1
            dut.last_i <= 0
        else:
            dut.last_i <= 0
            dut.valid_i <= 0
        if i == FRAME_LENGTH:
            dut.last_i <= 1
        await FallingEdge(dut.clk_i)
        observed = dut.data_o.value
        if dut.valid_o.value == 1:
            expected = expected_vals[(i - 1) // 2]
            assert observed == expected,\
                "expected = %x, observed = %x" % (expected, observed)

    for i in range(5):
        await FallingEdge(dut.clk_i)
def start(self, generator=None):
    self._cr = cocotb.fork(self._cr_twiddler(generator=generator))
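# Hypothetical counterpart to start() above (an assumption, not part of the
# original class): tasks returned by cocotb.fork() can be cancelled with kill(),
# so a stop() method might simply terminate the forked twiddler.
def stop(self):
    if self._cr is not None:
        self._cr.kill()  # cancel the background twiddler task
        self._cr = None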
def run_phase(self, phase):
    # fork
    forked_proc = cocotb.fork(self.collect_transactions())
    # join
    yield forked_proc
def setup_dut(dut):
    cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())
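# Hedged usage sketch (the reset signal name, reset polarity and the duration in
# clock periods are assumptions, not from the original): call the helper once to
# start the clock, then drive a short reset before interacting with the DUT.
@cocotb.test()
def reset_then_idle(dut):
    setup_dut(dut)
    dut.rst <= 1
    yield Timer(2 * CLK_PERIOD)
    dut.rst <= 0
    for _ in range(5):
        yield RisingEdge(dut.clk)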
def __init__(self, dut, debug=False):
    self.dut = dut
    self.stream_in = AvalonSTDriver(dut, "stream_in", dut.clk)
    self.backpressure = BitDriver(self.dut.stream_out_ready, self.dut.clk)
    self.stream_out = AvalonSTMonitor(dut, "stream_out", dut.clk,
                                      config={'firstSymbolInHighOrderBits': True})
    self.csr = AvalonMaster(dut, "csr", dut.clk)
    cocotb.fork(stream_out_config_setter(dut, self.stream_out, self.stream_in))

    # Create a scoreboard on the stream_out bus
    self.pkts_sent = 0
    self.expected_output = []
    self.scoreboard = Scoreboard(dut)
    self.scoreboard.add_interface(self.stream_out, self.expected_output)

    # Reconstruct the input transactions from the pins
    # and send them to our 'model'
    self.stream_in_recovered = AvalonSTMonitor(dut, "stream_in", dut.clk,
                                               callback=self.model)

    # Set verbosity on our various interfaces
    level = logging.DEBUG if debug else logging.WARNING
    self.stream_in.log.setLevel(level)
    self.stream_in_recovered.log.setLevel(level)