def test_drain_pifo(dut): """Testing the simple_tm module """ # start HW sim clock cocotb.fork(Clock(dut.axis_aclk, PERIOD).start()) # Reset the DUT dut._log.debug("Resetting DUT") dut.axis_resetn <= 0 yield ClockCycles(dut.axis_aclk, 10) dut.axis_resetn <= 1 dut._log.debug("Out of reset") # wait for the pifo to finish resetting yield FallingEdge(dut.axis_aclk) while dut.simple_tm_inst.pifo_busy.value: yield RisingEdge(dut.axis_aclk) yield FallingEdge(dut.axis_aclk) yield RisingEdge(dut.axis_aclk) yield ClockCycles(dut.axis_aclk, 100) dut.m_axis_tready <= 0 # build the list of pkts and metadata to insert pkts_meta_in = [] for i in range(NUM_PKTS): # pkt_len = random.randint(50, 1000) # build a packet pkt = Ether(dst='aa:aa:aa:aa:aa:aa', src='bb:bb:bb:bb:bb:bb') pkt = pkt / ('\x11' * 18 + '\x22' * 32) # pkt = pkt / ('\x11'*18 + '\x22'*32 + '\x33'*32 + '\x44'*32 + '\x55'*16) # pkt = pkt / ('\x11'*18 + '\x22'*32 + '\x33'*32 + '\x44'*32 + '\x55'*32 + '\x66'*32 + '\x77'*32 + '\x88'*32 + '\x99'*16) # pkt = pkt / ('\x11'*(pkt_len - 14)) rank = random.randint(0, 100) # build the metadata meta = Metadata(pkt_len=len(pkt), src_port=0b00000001, dst_port=0b00000100, rank=rank) tuser = BinaryValue(bits=len(meta) * 8, bigEndian=False) tuser.set_buff(str(meta)) pkts_meta_in.append((rank, pkt, tuser)) ranks_in = [tup[0] for tup in pkts_meta_in] pkts_in = [tup[1] for tup in pkts_meta_in] meta_in = [tup[2] for tup in pkts_meta_in] # Attach an AXI4Stream Master to the input pkt interface pkt_master = AXI4StreamMaster(dut, 's_axis', dut.axis_aclk) pkt_in_stats = AXI4StreamStats(dut, 's_axis', dut.axis_aclk, idle_timeout=IDLE_TIMEOUT) pkt_in_stats_thread = cocotb.fork( pkt_in_stats.record_n_delays(len(pkts_in))) # Send pkts and metadata in the HW sim yield pkt_master.write_pkts(pkts_in, meta_in) # start recording queue_sizes reg_stats = RegStats(dut) reg_stats_thread = cocotb.fork(reg_stats.start()) # delay between writing pkts and reading them out yield ClockCycles(dut.axis_aclk, 25) 
# wait for the pifo to finish the final enqueue yield FallingEdge(dut.axis_aclk) while dut.simple_tm_inst.pifo_busy.value: yield RisingEdge(dut.axis_aclk) yield FallingEdge(dut.axis_aclk) yield RisingEdge(dut.axis_aclk) # Attach an AXI4StreamSlave to the output pkt interface tready_delay = 256 / (EGRESS_LINK_RATE * 5) - 1 pkt_slave = AXI4StreamSlave(dut, 'm_axis', dut.axis_aclk, idle_timeout=IDLE_TIMEOUT, tready_delay=tready_delay) pkt_out_stats = AXI4StreamStats(dut, 'm_axis', dut.axis_aclk, idle_timeout=IDLE_TIMEOUT) pkt_out_stats_thread = cocotb.fork( pkt_out_stats.record_n_delays(len(pkts_in))) # Read pkts out yield pkt_slave.read_n_pkts(len(pkts_in)) # # wait for stats threads to finish # yield pkt_in_stats_thread.join() # yield pkt_out_stats_thread.join() # stop the reg_stats reg_stats.stop() yield reg_stats_thread.join() sorted_pkts_meta = sorted(pkts_meta_in, key=lambda x: x[0]) expected_ranks = [tup[0] for tup in sorted_pkts_meta] expected_pkts = [tup[1] for tup in sorted_pkts_meta] expected_meta = [tup[2] for tup in sorted_pkts_meta] pkts_out = pkt_slave.pkts meta_out = pkt_slave.metadata actual_ranks = [Metadata(m.get_buff()).rank for m in meta_out] print 'input ranks = {}'.format(ranks_in) print 'expected output ranks = {}'.format(expected_ranks) print 'actual output ranks = {}'.format(actual_ranks) print '' print 'pkt_in_delays = {}'.format(pkt_in_stats.delays) print 'pkt_out_delays = {}'.format(pkt_out_stats.delays) print '\tmax = {}'.format(max(pkt_out_stats.delays)) print '\tavg = {}'.format( sum(pkt_out_stats.delays) / float(len(pkt_out_stats.delays))) print '\tmin = {}'.format(min(pkt_out_stats.delays)) results = {} results['enq_delays'] = pkt_in_stats.delays results['deq_delays'] = pkt_out_stats.delays with open(RESULTS_FILE, 'w') as f: json.dump(results, f) error = False for (exp_pkt, pkt, exp_meta, meta, i) in zip(expected_pkts, pkts_out, expected_meta, meta_out, range(len(expected_pkts))): if str(exp_pkt) != str(pkt): print 'ERROR: exp_pkt != 
pkt_out for pkt {}'.format(i) error = True if exp_meta.get_buff() != meta.get_buff(): print 'ERROR: exp_meta != meta_out for pkt {}'.format(i) exp_meta = Metadata(exp_meta.get_buff()) meta = Metadata(meta.get_buff()) print 'exp_meta = {}'.format(exp_meta.summary()) print 'meta = {}'.format(meta.summary()) error = True yield ClockCycles(dut.axis_aclk, 20) plot_reg_size(reg_stats.reg_size) font = {'family': 'normal', 'weight': 'bold', 'size': 22} matplotlib.rc('font', **font) plt.show() if error: print 'ERROR: Test Failed' raise (TestFailure)
def test_axi_stream_pipeline(dut):
    """Testing axi_stream_pipeline.

    Pass-through test: streams 20 random-length packets (plus metadata)
    into the pipeline and checks that they come out unmodified and in
    order. The slave reader is forked BEFORE the master writes so both
    run concurrently.
    """
    # start HW sim clock
    cocotb.fork(Clock(dut.axis_aclk, PERIOD).start())
    # Reset the DUT
    dut._log.debug("Resetting DUT")
    dut.axis_resetn <= 0
    yield ClockCycles(dut.axis_aclk, 10)
    dut.axis_resetn <= 1
    dut._log.debug("Out of reset")
    # Attach an AXI4Stream Master to the input pkt interface
    pkt_master = AXI4StreamMaster(dut, 's_axis', dut.axis_aclk)
    # Attach and AXI4StreamSlave to the output pkt interfaces
    pkt_slave = AXI4StreamSlave(dut, 'm_axis', dut.axis_aclk)
    # build the list of pkts and metadata to insert
    pkts_meta_in = []
    for i in range(20):
        # random total frame length; payload below subtracts the 14B
        # Ethernet header
        pkt_len = random.randint(50, 1000)
        # build a packet
        pkt = Ether(dst='aa:aa:aa:aa:aa:aa', src='bb:bb:bb:bb:bb:bb')
        # pkt = pkt / ('\x11'*18 + '\x22'*32)
        # pkt = pkt / ('\x11'*18 + '\x22'*32 + '\x33'*32 + '\x44'*32 + '\x55'*16)
        pkt = pkt / ('\x11' * (pkt_len - 14))
        rank = random.randint(0, 100)
        # build the metadata (serialized into the AXI4Stream tuser field)
        meta = Metadata(pkt_len=len(pkt), src_port=0b00000001, dst_port=0b00000100, rank=rank)
        tuser = BinaryValue(bits=len(meta) * 8, bigEndian=False)
        tuser.set_buff(str(meta))
        pkts_meta_in.append((pkt, tuser))
    pkts_in = [tup[0] for tup in pkts_meta_in]
    meta_in = [tup[1] for tup in pkts_meta_in]
    # Read pkts out (forked so it runs concurrently with the writes below)
    slave_thread = cocotb.fork(pkt_slave.read_n_pkts(len(pkts_in)))
    # Send pkts and metadata in the HW sim
    yield pkt_master.write_pkts(pkts_in, meta_in)
    yield slave_thread.join()
    # pass-through pipeline: output must equal input, same order
    expected_pkts = pkts_in
    expected_meta = meta_in
    pkts_out = pkt_slave.pkts
    meta_out = pkt_slave.metadata
    error = False
    for (exp_pkt, pkt, exp_meta, meta, i) in zip(expected_pkts, pkts_out, expected_meta, meta_out, range(len(expected_pkts))):
        if str(exp_pkt) != str(pkt):
            print 'ERROR: exp_pkt != pkt_out for pkt {}'.format(i)
            error = True
        if exp_meta.get_buff() != meta.get_buff():
            print 'ERROR: exp_meta != meta_out for pkt {}'.format(i)
            # re-parse buffers so the summaries below are human readable
            exp_meta = Metadata(exp_meta.get_buff())
            meta = Metadata(meta.get_buff())
            print 'exp_meta = {}'.format(exp_meta.summary())
            print 'meta = {}'.format(meta.summary())
            error = True
    yield ClockCycles(dut.axis_aclk, 20)
    if error:
        print 'ERROR: Test Failed'
        raise (TestFailure)
def test_simple_tm(dut): """Testing the simple_tm module """ # start HW sim clock cocotb.fork(Clock(dut.axis_aclk, PERIOD).start()) # Reset the DUT dut._log.debug("Resetting DUT") dut.axis_resetn <= 0 yield ClockCycles(dut.axis_aclk, 10) dut.axis_resetn <= 1 dut._log.debug("Out of reset") # wait for the pifo to finish resetting yield wait_pifo_busy(dut) yield ClockCycles(dut.axis_aclk, 100) dut.m_axis_tready <= 0 # build the list of pkts and metadata to insert pkts_meta_in = [] for i in range(NUM_PKTS): # pkt_len = random.randint(50, 1000) # build a packet pkt = Ether(dst='aa:aa:aa:aa:aa:aa', src='bb:bb:bb:bb:bb:bb') pkt = pkt / ('\x11' * 18 + '\x22' * 32) rank = random.randint(0, 100) # build the metadata meta = Metadata(pkt_len=len(pkt), src_port=0b00000001, dst_port=0b00000100, rank=rank) tuser = BinaryValue(bits=len(meta) * 8, bigEndian=False) tuser.set_buff(str(meta)) pkts_meta_in.append((rank, pkt, tuser)) ranks_in = [tup[0] for tup in pkts_meta_in] pkts_in = [tup[1] for tup in pkts_meta_in] meta_in = [tup[2] for tup in pkts_meta_in] # Attach an AXI4Stream Master to the input pkt interface pkt_master = AXI4StreamMaster(dut, 's_axis', dut.axis_aclk) pkt_in_stats = AXI4StreamStats(dut, 's_axis', dut.axis_aclk, idle_timeout=IDLE_TIMEOUT) pkt_in_stats_thread = cocotb.fork( pkt_in_stats.record_n_delays(len(pkts_in))) # Send pkts and metadata in the HW sim yield pkt_master.write_pkts(pkts_in, meta_in) # wait for pifo to no longer be busy yield wait_pifo_busy(dut) # delay between writing pkts and reading them out yield ClockCycles(dut.axis_aclk, 25) # wait for the pifo to finish the final enqueue yield FallingEdge(dut.axis_aclk) while dut.simple_tm_inst.pifo_busy.value: yield RisingEdge(dut.axis_aclk) yield FallingEdge(dut.axis_aclk) yield RisingEdge(dut.axis_aclk) # Attach an AXI4StreamSlave to the output pkt interface pkt_slave = AXI4StreamSlave(dut, 'm_axis', dut.axis_aclk, idle_timeout=IDLE_TIMEOUT) pkt_out_stats = AXI4StreamStats(dut, 'm_axis', dut.axis_aclk, 
idle_timeout=IDLE_TIMEOUT) pkt_out_stats_thread = cocotb.fork( pkt_out_stats.record_n_delays(len(pkts_in))) # Read pkts out yield pkt_slave.read_n_pkts(len(pkts_in)) # # wait for stats threads to finish # yield pkt_in_stats_thread.join() # yield pkt_out_stats_thread.join() sorted_pkts_meta = sorted(pkts_meta_in, key=lambda x: x[0]) expected_ranks = [tup[0] for tup in sorted_pkts_meta] expected_pkts = [tup[1] for tup in sorted_pkts_meta] expected_meta = [tup[2] for tup in sorted_pkts_meta] pkts_out = pkt_slave.pkts meta_out = pkt_slave.metadata actual_ranks = [Metadata(m.get_buff()).rank for m in meta_out] print 'input ranks = {}'.format(ranks_in) print 'expected output ranks = {}'.format(expected_ranks) print 'actual output ranks = {}'.format(actual_ranks) print '' print 'pkt_in_delays = {}'.format(pkt_in_stats.delays) print 'pkt_out_delays = {}'.format(pkt_out_stats.delays) print '\tmax = {}'.format(max(pkt_out_stats.delays)) print '\tavg = {}'.format( sum(pkt_out_stats.delays) / float(len(pkt_out_stats.delays))) print '\tmin = {}'.format(min(pkt_out_stats.delays)) results = {} results['enq_delays'] = pkt_in_stats.delays results['deq_delays'] = pkt_out_stats.delays with open(RESULTS_FILE, 'w') as f: json.dump(results, f) error = False for (exp_pkt, pkt, exp_meta, meta, i) in zip(expected_pkts, pkts_out, expected_meta, meta_out, range(len(expected_pkts))): if str(exp_pkt) != str(pkt): print 'ERROR: exp_pkt != pkt_out for pkt {}'.format(i) error = True if exp_meta.get_buff() != meta.get_buff(): print 'ERROR: exp_meta != meta_out for pkt {}'.format(i) exp_meta = Metadata(exp_meta.get_buff()) meta = Metadata(meta.get_buff()) print 'exp_meta = {}'.format(exp_meta.summary()) print 'meta = {}'.format(meta.summary()) error = True print "******************" print "Checking for duplicates:" print "******************" for r, i in zip(actual_ranks, range(len(actual_ranks))): try: ranks_in.remove(r) except ValueError as e: print 'ERROR: output rank ({}) not in input 
set'.format(r) print e error = True if len(ranks_in) > 0: print 'ERROR: not all ranks removed: {}'.format(ranks_in) error = True yield ClockCycles(dut.axis_aclk, 20) if error: print 'ERROR: Test Failed' raise (TestFailure)
def test_port_tm(dut): """Testing port_tm """ # start HW sim clock cocotb.fork(Clock(dut.axis_aclk, PERIOD).start()) # Reset the DUT dut._log.debug("Resetting DUT") dut.axis_resetn <= 0 yield ClockCycles(dut.axis_aclk, 10) dut.axis_resetn <= 1 dut._log.debug("Out of reset") print "FINISHED RESET..." dut.m_axis_tready <= 0 dst_port = 0b00000100 # build the list of pkts and metadata to insert pkts_meta_in = [] for i in range(10): # pkt_len = random.randint(50, 1000) # build a packet pkt = Ether(dst='aa:aa:aa:aa:aa:aa', src='bb:bb:bb:bb:bb:bb') pkt = pkt / ('\x11'*18 + '\x22'*32) # pkt = pkt / ('\x11'*18 + '\x22'*32 + '\x33'*32 + '\x44'*32 + '\x55'*16) # pkt = pkt / ('\x11'*(pkt_len - 14)) rank = random.randint(0, 100) # build the metadata meta = Metadata(pkt_len=len(pkt), src_port=0b00000001, dst_port=dst_port, rank=rank) tuser = BinaryValue(bits=len(meta)*8, bigEndian=False) tuser.set_buff(str(meta)) pkts_meta_in.append((rank, pkt, tuser)) ranks_in = [tup[0] for tup in pkts_meta_in] pkts_in = [tup[1] for tup in pkts_meta_in] meta_in = [tup[2] for tup in pkts_meta_in] # Attach an AXI4Stream Master to the input pkt interface pkt_master = AXI4StreamMaster(dut, 's_axis', dut.axis_aclk) # Attach ReqMaster to each input interface nf0_req_master = ReqMaster(dut, 'nf0_sel', dut.axis_aclk) nf1_req_master = ReqMaster(dut, 'nf1_sel', dut.axis_aclk) nf2_req_master = ReqMaster(dut, 'nf2_sel', dut.axis_aclk) nf3_req_master = ReqMaster(dut, 'nf3_sel', dut.axis_aclk) # Send pkts and metadata in the HW sim yield pkt_master.write_pkts(pkts_in, meta_in) # delay between writing pkts and reading them out yield ClockCycles(dut.axis_aclk, 10) # Attach and AXI4StreamSlave to the output pkt interface pkt_slave = AXI4StreamSlave(dut, 'm_axis', dut.axis_aclk) # Read output pkts pkt_slave_thread = cocotb.fork(pkt_slave.read_n_pkts(len(pkts_in))) # start submitting read requests delay = 20 #nf0_req_thread = cocotb.fork(nf0_req_master.write_reqs(requests, delay)) nf1_req_thread = 
cocotb.fork(nf1_req_master.write_reqs(len(ranks_in), delay)) #nf2_req_thread = cocotb.fork(nf2_req_master.write_reqs(requests, delay)) #nf3_req_thread = cocotb.fork(nf3_req_master.write_reqs(requests, delay)) # wait for all pkts yield pkt_slave_thread.join() sorted_pkts_meta = sorted(pkts_meta_in, key=lambda x: x[0]) expected_ranks = [tup[0] for tup in sorted_pkts_meta] expected_pkts = [tup[1] for tup in sorted_pkts_meta] expected_meta = [tup[2] for tup in sorted_pkts_meta] pkts_out = pkt_slave.pkts meta_out = pkt_slave.metadata actual_ranks = [Metadata(m.get_buff()).rank for m in meta_out] print 'input ranks = {}'.format(ranks_in) print 'expected output ranks = {}'.format(expected_ranks) print 'actual output ranks = {}'.format(actual_ranks) error = False for (exp_pkt, pkt, exp_meta, meta, i) in zip(expected_pkts, pkts_out, expected_meta, meta_out, range(len(expected_pkts))): if str(exp_pkt) != str(pkt): print 'ERROR: exp_pkt != pkt_out for pkt {}'.format(i) error = True if exp_meta.get_buff() != meta.get_buff(): print 'ERROR: exp_meta != meta_out for pkt {}'.format(i) exp_meta = Metadata(exp_meta.get_buff()) meta = Metadata(meta.get_buff()) print 'exp_meta = {}'.format(exp_meta.summary()) print 'meta = {}'.format(meta.summary()) error = True yield ClockCycles(dut.axis_aclk, 20) if error: print 'ERROR: Test Failed' raise(TestFailure)
def test_both_enqdeq(dut): """Testing the simple_tm module with a constant fill level """ # start HW sim clock cocotb.fork(Clock(dut.axis_aclk, PERIOD).start()) # Reset the DUT dut._log.debug("Resetting DUT") dut.axis_resetn <= 0 yield ClockCycles(dut.axis_aclk, 10) dut.axis_resetn <= 1 dut._log.debug("Out of reset") # wait for the pifo to finish resetting yield FallingEdge(dut.axis_aclk) while dut.simple_tm_inst.pifo_busy.value: yield RisingEdge(dut.axis_aclk) yield FallingEdge(dut.axis_aclk) yield RisingEdge(dut.axis_aclk) yield ClockCycles(dut.axis_aclk, 100) dut.m_axis_tready <= 0 # build the list of pkts and metadata to insert pkts_meta_in = make_pkts_and_meta(FILL_LEVEL - 1) ranks_in = [tup[0] for tup in pkts_meta_in] pkts_in = [tup[1] for tup in pkts_meta_in] meta_in = [tup[2] for tup in pkts_meta_in] # Attach an AXI4Stream Master to the input pkt interface pkt_master = AXI4StreamMaster(dut, 's_axis', dut.axis_aclk) # Send pkts and metadata in the HW sim yield pkt_master.write_pkts(pkts_in, meta_in) # wait a few cycles before begining measurements yield ClockCycles(dut.axis_aclk, 25) # Attach and AXI4StreamSlave to the output pkt interface pkt_slave = AXI4StreamSlave(dut, 'm_axis', dut.axis_aclk, idle_timeout=IDLE_TIMEOUT) # connect stats tools pkt_in_stats = AXI4StreamStats(dut, 's_axis', dut.axis_aclk, idle_timeout=IDLE_TIMEOUT) pkt_out_stats = AXI4StreamStats(dut, 'm_axis', dut.axis_aclk, idle_timeout=IDLE_TIMEOUT) expected_outputs = [] enq_delays = [] deq_delays = [] for i in range(NUM_SAMPLES): data = make_pkts_and_meta(1) pkts_in = [tup[1] for tup in data] meta_in = [tup[2] for tup in data] # compute expected outputs expected_outputs.append(min(pkts_meta_in)) pkts_meta_in.remove(min(pkts_meta_in)) # The removal happens after the insertion pkts_meta_in += data # # start recording stats # pkt_in_stats_thread = cocotb.fork(pkt_in_stats.record_n_delays(1)) # pkt_out_stats_thread = cocotb.fork(pkt_out_stats.record_n_delays(1)) # send in packet 
pkt_master_thread = cocotb.fork(pkt_master.write_pkts( pkts_in, meta_in)) # Read out packet pkt_slave_thread = cocotb.fork(pkt_slave.read_n_pkts(1)) yield pkt_master_thread.join() yield pkt_slave_thread.join() # # record results # enq_delays += pkt_in_stats.delays # deq_delays += pkt_out_stats.delays # wait a few cycles between samples yield ClockCycles(dut.axis_aclk, 30) if pkt_slave.error: print "ERROR: pkt_slave timed out" break sorted_pkts_meta = sorted(pkts_meta_in, key=lambda x: x[0]) expected_ranks = [tup[0] for tup in expected_outputs] expected_pkts = [tup[1] for tup in expected_outputs] expected_meta = [tup[2] for tup in expected_outputs] pkts_out = pkt_slave.pkts meta_out = pkt_slave.metadata actual_ranks = [Metadata(m.get_buff()).rank for m in meta_out] print 'input_ranks = {}'.format(input_ranks) print 'expected output ranks = {}'.format(expected_ranks) print 'actual output ranks = {}'.format(actual_ranks) print '' print 'pkt_in_delays = {}'.format(enq_delays) print 'pkt_out_delays = {}'.format(deq_delays) results = {} results['enq_delays'] = enq_delays results['deq_delays'] = deq_delays with open(RESULTS_FILE, 'w') as f: json.dump(results, f) error = False for (exp_pkt, pkt, exp_meta, meta, i) in zip(expected_pkts, pkts_out, expected_meta, meta_out, range(len(expected_pkts))): if str(exp_pkt) != str(pkt): print 'ERROR: exp_pkt != pkt_out for pkt {}'.format(i) error = True if exp_meta.get_buff() != meta.get_buff(): print 'ERROR: exp_meta != meta_out for pkt {}'.format(i) exp_meta = Metadata(exp_meta.get_buff()) meta = Metadata(meta.get_buff()) print 'exp_meta = {}'.format(exp_meta.summary()) print 'meta = {}'.format(meta.summary()) error = True yield ClockCycles(dut.axis_aclk, 20) if error: print 'ERROR: Test Failed' raise (TestFailure)