def worker_clear_all(self, mgid):
    """Remove every worker node from multicast group ``mgid``.

    For each (dev_port, rid) registered under the group, this removes
    the node from the MGID table entry, deletes the node table entry,
    and drops it from local bookkeeping. Both switch operations are
    best-effort: RPC failures are logged and ignored.
    """
    # Iterate over a snapshot: the original iterated self.rids[mgid]
    # directly while deleting its keys, which raises RuntimeError
    # ("dictionary changed size during iteration") on Python 3.
    for dev_port, rid in list(self.rids[mgid].items()):
        try:
            # remove group entry
            self.mgid_table.entry_mod_inc(
                self.target,
                [self.mgid_table.make_key([gc.KeyTuple('$MGID', mgid)])], [
                    self.mgid_table.make_data([
                        gc.DataTuple('$MULTICAST_NODE_ID', int_arr_val=[rid]),
                        gc.DataTuple('$MULTICAST_NODE_L1_XID_VALID',
                                     bool_arr_val=[False]),
                        gc.DataTuple('$MULTICAST_NODE_L1_XID', int_arr_val=[0])
                    ])
                ], bfruntime_pb2.TableModIncFlag.MOD_INC_DELETE)
        except gc.BfruntimeReadWriteRpcException as e:
            self.logger.info(
                "Multicast node ID {} remove from group {} failed; maybe it's already deleted?"
                .format(rid, mgid))
        try:
            # remove node entry
            self.node_table.entry_del(self.target, [
                self.node_table.make_key(
                    [gc.KeyTuple('$MULTICAST_NODE_ID', rid)])
            ])
        except gc.BfruntimeReadWriteRpcException as e:
            self.logger.info(
                "Multicast node ID {} delete failed; maybe it's already deleted?"
                .format(rid))
        del self.rids[mgid][dev_port]
def add_send_worker(self, rid, mac, ip, qpn, initial_psn, rkey=None):
    # Program the egress RDMA sender tables for one worker: first the
    # entry that fills in RoCE header fields (dest MAC/IP), then the
    # entry that attaches the queue pair number (QPN).
    # NOTE(review): ``initial_psn`` and ``rkey`` are currently unused --
    # the PSN data tuple is commented out below; confirm whether the PSN
    # is now seeded elsewhere.
    # first, add entry to fill in headers for RoCE packet
    self.create_roce_packet.entry_add(self.target, [
        self.create_roce_packet.make_key(
            [gc.KeyTuple('eg_md.switchml_md.worker_id', rid)])
    ], [
        self.create_roce_packet.make_data(
            [gc.DataTuple('dest_mac', mac),
             gc.DataTuple('dest_ip', ip)],
            'Egress.rdma_sender.fill_in_roce_fields')
    ])
    # now, add entry to add QPN and PSN to packet
    self.fill_in_qpn_and_psn.entry_add(
        self.target,
        [
            self.fill_in_qpn_and_psn.make_key([
                gc.KeyTuple('eg_md.switchml_md.worker_id', rid),
                # ternary key: value 0 with mask 0 wildcards the pool index
                gc.KeyTuple('eg_md.switchml_md.pool_index', 0x00000, 0x00000)
            ])
        ],
        [
            self.fill_in_qpn_and_psn.make_data(
                [
                    gc.DataTuple('qpn', qpn),
                    #gc.DataTuple('Egress.rdma_sender.psn_register.f1', initial_psn)],
                ],
                'Egress.rdma_sender.add_qpn_and_psn')
        ])
def CfgPortTableClearTest(self, port1):
    """Verify that a wildcard entry_del clears the port cfg table.

    Adds one port entry, clears the whole table, then confirms that a
    subsequent entry_get for that port fails.
    """
    target = client.Target(device_id=0, pipe_id=0xffff)
    self.port_table.entry_del(target, key_list=None)
    self.port_table.entry_add(
        target,
        [self.port_table.make_key([client.KeyTuple('$DEV_PORT', port1)])], [
            self.port_table.make_data([
                client.DataTuple('$SPEED', str_val="BF_SPEED_100G"),
                client.DataTuple('$FEC', str_val="BF_FEC_TYP_NONE")
            ])
        ])
    self.port_table.entry_del(target, key_list=None)
    try:
        get_data_list = self.port_table.make_data([client.DataTuple("$SPEED")])
        resp = self.port_table.entry_get(
            target,
            [self.port_table.make_key([client.KeyTuple('$DEV_PORT', port1)])],
            {"from_hw": False}, get_data_list)
        data_dict = next(resp)[0].to_dict()
    except Exception:
        # Expected path: all ports were deleted, so the read fails.
        logger.info("Cleared port cfg table successfully")
    else:
        # The original asserted inside the try with a bare ``except:``,
        # which swallowed the AssertionError and logged success even on
        # failure. The ``else`` clause makes a successful read actually
        # fail the test.
        logger.error("Unable to clear port cfg table")
        assert 0
def timing_loop(self):
    """Benchmark entry_mod throughput: install one forwarding entry,
    modify it one million times, and print the modification rate."""
    # Prefer the stdlib perf_counter (Python >= 3.3); the original
    # unconditionally imported the backports package, which is only
    # needed on older interpreters.
    try:
        from time import perf_counter
    except ImportError:
        from backports.time_perf_counter import perf_counter
    # initialize
    self.table.entry_add(self.target, [
        self.table.make_key(
            [gc.KeyTuple('hdr.ethernet.dst_addr', "00:11:22:33:44:55")])
    ], [
        self.table.make_data(
            [gc.DataTuple('egress_port', 0)],
            'Ingress.non_switchml_forward.set_egress_port')
    ])
    count = 1000000
    start = perf_counter()
    for i in range(count):
        self.table.entry_mod(self.target, [
            self.table.make_key([
                gc.KeyTuple('hdr.ethernet.dst_addr', "00:11:22:33:44:55")
            ])
        ], [
            self.table.make_data(
                [gc.DataTuple('egress_port', 0)],
                'Ingress.non_switchml_forward.set_egress_port')
        ])
    end = perf_counter()
    rate = count / (end - start)
    print("{} mods in {:02f} seconds: {:02f} m/s".format(
        count, end - start, rate))
def runTest(self):
    """Bind to tna_32q_multiprogram_b, retrying while the device is
    locked, then best-effort write ten entries to the egress forward
    table."""
    self.p4_name = "tna_32q_multiprogram_b"
    # Keep BINDing to the program till success
    while True:
        try:
            time.sleep(1)
            logger.info("Client W2: Binding to %s", self.p4_name)
            self.interface.bind_pipeline_config("tna_32q_multiprogram_b")
            self.bfrt_info = self.interface.bfrt_info_get(self.p4_name)
            break
        except gc.BfruntimeRpcException as e:
            # Errors tolerated ->
            # 1. Device is locked, try again
            # 2. Already_exists because this client is already bound
            #    to the same P4, break
            if e.grpc_error_get().code() == grpc.StatusCode.UNAVAILABLE:
                logger.info("Failed to BIND because device is locked %s",
                            e.grpc_error_get().code())
                continue
            elif e.grpc_error_get().code() == grpc.StatusCode.ALREADY_EXISTS:
                logger.info("This client is already connected to this P4 %s",
                            e.grpc_error_get().code())
                break
            logger.error("Failed to BIND %s", e.grpc_error_get().code())
            # Bare ``raise`` preserves the original traceback; the
            # original used ``raise e`` plus a redundant
            # ``except Exception as e: raise e`` handler (removed).
            raise

    # Write a few entries
    # NOTE(review): log says SwitchIngress_b.forward but the table used
    # below is SwitchEgress_b.forward -- message kept as-is, confirm.
    logger.info("Writing to Table: SwitchIngress_b.forward")
    target = gc.Target(device_id=0, pipe_id=0xffff)
    dip = "5.6.7.8"
    self.e_forward_table = self.bfrt_info.table_get("SwitchEgress_b.forward")
    self.e_forward_table.info.key_field_annotation_add(
        "hdr.ipv4.dst_addr", "ipv4")
    for i in range(10):
        try:
            self.e_forward_table.entry_add(target, [
                self.e_forward_table.make_key([
                    gc.KeyTuple('hdr.ipv4.dst_addr', dip, prefix_len=31),
                    gc.KeyTuple('hdr.ipv4.ttl', i),
                    gc.KeyTuple('hdr.custom_metadata.custom_tag', i)
                ])
            ], [self.e_forward_table.make_data([], 'SwitchEgress_b.hit')])
        except gc.BfruntimeRpcException as e:
            # All errors tolerated here since we just want to try and
            # write entries
            logger.info("%s", e)
def modify_grp(self, sel_table, members, member_status):
    """Modify a selector group's member list and verify the readback."""
    target = gc.Target(device_id=dev_id, pipe_id=0xffff)
    grp_key = [
        sel_table.make_key(
            [gc.KeyTuple('$SELECTOR_GROUP_ID', self.group_id)])
    ]
    grp_data = [
        sel_table.make_data([
            gc.DataTuple('$ACTION_MEMBER_ID', int_arr_val=members),
            gc.DataTuple('$ACTION_MEMBER_STATUS',
                         bool_arr_val=member_status)
        ])
    ]
    # Push the new membership to the selector group.
    sel_table.entry_mod(target, grp_key, grp_data)

    # Read the group back and check that member -> status pairs match
    # exactly what we programmed.
    expected = dict(zip(members, member_status))
    get_resp = sel_table.entry_get(target, grp_key, {"from_hw": False})
    entry = next(get_resp)[0].to_dict()
    received = dict(
        zip(entry["$ACTION_MEMBER_ID"], entry["$ACTION_MEMBER_STATUS"]))
    assert expected == received
def runTest(self):
    """Insert 100 random phase0 entries for unique random ingress ports,
    read them back from hardware, verify, then clean up the table."""
    # Get bfrt_info and set it as part of the test
    target = client.Target(device_id=0, pipe_id=0xffff)
    # Insert 100 entries
    num_entries = 100
    phase0_igr_ports = {}
    try:
        logger.info("Inserting %d entries in the phase0 table", num_entries)
        for i in range(num_entries):
            igr_port = 0
            phase0data = 0
            # Draw random ports until we find one not already used.
            while True:
                igr_pipe = random.randint(0, 3)
                igr_local_port = random.randint(0, 63)
                igr_port = (igr_pipe << 7) | (igr_local_port)
                if igr_port not in phase0_igr_ports:
                    field1 = random.randint(1, 0xffff)  # 16 bit
                    field2 = random.randint(1, 0xffffff)  # 24 bits
                    field3 = random.randint(1, 0xffff)  # 16 bits
                    field4 = random.randint(1, 0xff)  # 8 bits
                    phase0data = make_phase0_data(field1, field2, field3,
                                                  field4)
                    phase0_igr_ports[igr_port] = phase0data
                    break
            self.port_metadata_table.entry_add(
                target,
                [self.port_metadata_table.make_key(
                    [client.KeyTuple('ig_intr_md.ingress_port', igr_port)])],
                [self.port_metadata_table.make_data(
                    [client.DataTuple('$DEFAULT_FIELD', phase0data)])])
        # Read back the entries
        logger.info("Reading back %d entries", num_entries)
        # Enumerate so the log reports the index of the entry being
        # verified; the original reused the stale loop variable ``i``
        # here, which was always num_entries - 1.
        for idx, (igr_port, phase0data) in enumerate(
                list(phase0_igr_ports.items())):
            resp = self.port_metadata_table.entry_get(
                target,
                [self.port_metadata_table.make_key(
                    [client.KeyTuple('ig_intr_md.ingress_port', igr_port)])],
                {"from_hw": True})
            fields = next(resp)[0].to_dict()
            logger.info("Verifying %d entry for igr port %d", idx, igr_port)
            recv_data = fields["$DEFAULT_FIELD"]
            if recv_data != phase0data:
                logger.info("Exp data : %s : Rcv data : %s", phase0data,
                            recv_data)
                assert 0
    finally:
        logger.info("Cleaning up entries")
        self.port_metadata_table.entry_del(target)
def get_dev_port(self, front_panel_port, lane):
    """Translate a front-panel port/lane pair into its device port."""
    lookup_key = self.port_hdl_info_table.make_key([
        gc.KeyTuple('$CONN_ID', front_panel_port),
        gc.KeyTuple('$CHNL_ID', lane)
    ])
    resp = self.port_hdl_info_table.entry_get(self.target, [lookup_key],
                                              {"from_hw": False})
    entry = next(resp)[0].to_dict()
    return entry["$DEV_PORT"]
def runTest(self):
    """Dynamic key mask test: mask out the low nibble of the dst MAC and
    the entire ingress port, then verify that packets matching only
    under the mask still hit the installed entry from every pipe."""
    eg_port = swports[2]
    dmac = '22:22:22:22:22:22'
    dkey = '22:22:22:22:22:23'  # differs from dmac only in masked-out bits
    dmask = 'ff:ff:ff:ff:ff:f0'
    port_mask = 0  # ingress port fully masked out
    pkt = testutils.simple_tcp_packet(eth_dst=dmac)
    pkt2 = testutils.simple_tcp_packet(eth_dst=dkey)
    exp_pkt = pkt
    exp_pkt2 = pkt2
    # Get bfrt_info and set it as part of the test
    bfrt_info = self.interface.bfrt_info_get("tna_dkm")
    # Set the scope of the table to ALL_PIPES
    logger.info("=============== Testing Dyn Key Mask ===============")
    target = gc.Target(device_id=0, pipe_id=0xffff)
    logger.info("set dyn key mask")
    forward_table = bfrt_info.table_get("SwitchIngress.forward")
    forward_table.info.key_field_annotation_add("hdr.ethernet.dst_addr",
                                                "mac")
    key_mask = forward_table.make_key(
        [gc.KeyTuple('hdr.ethernet.dst_addr', dmask),
         gc.KeyTuple('ig_intr_md.ingress_port', port_mask)])
    forward_table.attribute_dyn_key_mask_set(target, key_mask)
    # Read the attribute back and confirm both mask fields took effect.
    resp = forward_table.attribute_get(target, "DynamicKeyMask")
    for d in resp:
        assert d["fields"].to_dict()["ig_intr_md.ingress_port"]["value"] == port_mask
        assert d["fields"].to_dict()["hdr.ethernet.dst_addr"]["value"] == dmask
    logger.info("Add entry")
    key_list = [forward_table.make_key(
        [gc.KeyTuple('hdr.ethernet.dst_addr', dmac),
         gc.KeyTuple('ig_intr_md.ingress_port', swports_0[0])])]
    data_list = [forward_table.make_data([gc.DataTuple('port', eg_port)],
                                         "SwitchIngress.hit")]
    forward_table.entry_add(target, key_list, data_list)
    # pkt2's dst MAC differs only in bits the mask removes, so it should
    # hit the entry regardless of the ingress pipe.
    self.send_and_verify_packet(swports_0[0], eg_port, pkt2, exp_pkt2)
    self.send_and_verify_packet(swports_1[0], eg_port, pkt2, exp_pkt2)
    if int(testutils.test_param_get('num_pipes')) > 2:
        self.send_and_verify_packet(swports_2[0], eg_port, pkt2, exp_pkt2)
    if int(testutils.test_param_get('num_pipes')) > 3:
        self.send_and_verify_packet(swports_3[0], eg_port, pkt2, exp_pkt2)
    # The exact-match packet must hit as well.
    self.send_and_verify_packet(swports_0[0], eg_port, pkt, exp_pkt)
    self.send_and_verify_packet(swports_1[0], eg_port, pkt, exp_pkt)
    if int(testutils.test_param_get('num_pipes')) > 2:
        self.send_and_verify_packet(swports_2[0], eg_port, pkt, exp_pkt)
    if int(testutils.test_param_get('num_pipes')) > 3:
        self.send_and_verify_packet(swports_3[0], eg_port, pkt, exp_pkt)
    testutils.verify_no_other_packets(self, timeout=2)
    logger.info("Delete the entry")
    forward_table.entry_del(target, key_list)
def runTest(self):
    """WRED match-table test: pair random unique source IPs with
    shuffled WRED indices, insert, read back, verify, and delete."""
    target = client.Target(device_id=0, pipe_id=0xffff)
    num_entries = random.randint(1, 100)
    wred_indices = [x + 1 for x in range(num_entries)]
    random.shuffle(wred_indices)
    # The original tracked used IPs in a dict with dummy True values;
    # a set is the right structure for membership tests.
    seen_ips = set()
    ip_addrs = []
    # Get bfrt_info and set it as part of the test
    bfrt_info = self.interface.bfrt_info_get("tna_meter_lpf_wred")
    wred_match_table = bfrt_info.table_get("SwitchIngress.wred_match_tbl")
    wred_match_table.info.key_field_annotation_add("hdr.ipv4.src_addr",
                                                   "ipv4")

    def _random_ip():
        # First octet starts at 1 so we never generate 0.x.x.x.
        return "%d.%d.%d.%d" % (random.randint(1, 255),
                                random.randint(0, 255),
                                random.randint(0, 255),
                                random.randint(0, 255))

    for i in range(num_entries):
        ip_addr = _random_ip()
        while ip_addr in seen_ips:
            ip_addr = _random_ip()
        ip_addrs.append(ip_addr)
        seen_ips.add(ip_addr)

    logger.info(
        "Inserting %d entries to the match table with random WRED indices",
        num_entries)
    for x in range(num_entries):
        wred_match_table.entry_add(target, [
            wred_match_table.make_key(
                [client.KeyTuple('hdr.ipv4.src_addr', ip_addrs[x])])
        ], [
            wred_match_table.make_data(
                [client.DataTuple('wred_idx', wred_indices[x])],
                'SwitchIngress.mark_wred')
        ])
    # Read each entry back and verify the programmed index.
    for x in range(num_entries):
        resp = wred_match_table.entry_get(target, [
            wred_match_table.make_key(
                [client.KeyTuple('hdr.ipv4.src_addr', ip_addrs[x])])
        ], {"from_hw": False})
        data_dict = next(resp)[0].to_dict()
        assert data_dict["wred_idx"] == wred_indices[x]
    # Clean up.
    for x in range(num_entries):
        wred_match_table.entry_del(target, [
            wred_match_table.make_key(
                [client.KeyTuple('hdr.ipv4.src_addr', ip_addrs[x])])
        ])
def enable_alternate_recirc_port(self):
    """Enable recirculation on the alternate pktgen port(s), printing
    the port configuration before and after for manual inspection."""
    # pipes 0 and 1 (TODO: 0 doesn't work)
    #ports = [64, 192]
    # # pipes 1 and 3
    # ports = [192, 448]
    # pipe 1
    ports = [192]

    def _dump_port_state(label):
        # Shared before/after readback; the original duplicated this
        # entry_get + pprint code verbatim.
        print(label)
        resp = self.pktgen_port_cfg_table.entry_get(self.target, [
            self.pktgen_port_cfg_table.make_key(
                [gc.KeyTuple('dev_port', port)]) for port in ports
        ], {'from_hw': False})
        for v, k in resp:
            v = v.to_dict()
            k = k.to_dict()
            pprint((k, v))

    _dump_port_state("Checking port state before enabling recirculation mode:")
    print("Enabling recirculation.")
    try:
        self.pktgen_port_cfg_table.entry_add(self.target, [
            self.pktgen_port_cfg_table.make_key(
                [gc.KeyTuple('dev_port', port)]) for port in ports
        ], [
            self.pktgen_port_cfg_table.make_data(
                [gc.DataTuple('recirculation_enable', bool_val=True)])
        ] * len(ports))
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are not intercepted; the error is still re-raised
        # after printing the operator hint.
        print("""
NOTE:
You must "remove" the ports in the CLI before this code will work right now.

bf-sde.port_mgr> bf_port_rmv 0 1 64
""")
        raise
    _dump_port_state("Verifying port state after enabling recirculation:")
def runTest(self):
    """Install an exact-match IPv4 entry, check that a matching UDP
    packet is forwarded to the programmed port, then clean up."""
    target = gc.Target(device_id=0, pipe_id=0xffff)
    bfrt_info = self.interface.bfrt_info_get(p4_program_name)

    match_tbl = bfrt_info.table_get("SwitchIngress.ipv4_match_regular")
    # Annotate key fields so MAC/IP strings are accepted as key values.
    for field_name, fmt in (('hdr.ethernet.dst_addr', 'mac'),
                            ('hdr.ethernet.src_addr', 'mac'),
                            ('hdr.ipv4.dst_addr', 'ipv4'),
                            ('hdr.ipv4.src_addr', 'ipv4')):
        match_tbl.info.key_field_annotation_add(field_name, fmt)

    entry_key = match_tbl.make_key([
        gc.KeyTuple(name='hdr.ethernet.dst_addr', value='11:11:11:11:11:11'),
        gc.KeyTuple(name='hdr.ethernet.src_addr', value='22:22:22:22:22:22'),
        gc.KeyTuple(name='hdr.ipv4.dst_addr', value='100.99.98.97'),
        gc.KeyTuple(name='hdr.ipv4.src_addr', value='1.2.3.4'),
    ])
    entry_data = match_tbl.make_data(
        [gc.DataTuple(name='port_id', val=swports[3])],
        'SwitchIngress.set_output_port')
    match_tbl.entry_add(target, [entry_key], [entry_data])

    try:
        # A UDP packet matching all four key fields must egress on
        # swports[3] unmodified.
        ipkt = testutils.simple_udp_packet(eth_dst='11:11:11:11:11:11',
                                           eth_src='22:22:22:22:22:22',
                                           ip_src='1.2.3.4',
                                           ip_dst='100.99.98.97',
                                           ip_id=101,
                                           ip_ttl=64,
                                           udp_sport=0x1234,
                                           udp_dport=0xabcd)
        testutils.send_packet(self, swports[0], ipkt)
        testutils.verify_packet(self, ipkt, swports[3])
    finally:
        # Reset the default entry and remove the installed entry even if
        # verification failed.
        match_tbl.default_entry_reset(target)
        match_tbl.entry_del(target, [entry_key])
def clear(self):
    """Delete the SwitchML and all-workers multicast groups and set up
    the CopyToCPU port.

    A wildcard entry_del over the whole MGID table is not supported, so
    each group is deleted individually; "not found" RPC errors are
    expected and only logged.
    """
    for mgid in (self.switchml_mgid, self.all_mgid):
        try:
            self.mgid_table.entry_del(self.target, [
                self.mgid_table.make_key([gc.KeyTuple('$MGID', mgid)])
            ])
        except gc.BfruntimeReadWriteRpcException as e:
            self.logger.info(
                "Multicast group ID {} not found in switch already during delete; this is expected."
                .format(mgid))

    # set CPU port
    self.logger.info("Setting port {} as CopyToCPU port.".format(
        self.cpu_port))
    self.port_table.entry_add(self.target, [
        self.port_table.make_key([gc.KeyTuple('$DEV_PORT', self.cpu_port)])
    ], [
        self.port_table.make_data(
            [gc.DataTuple('$COPY_TO_CPU_PORT_ENABLE', bool_val=True)])
    ])
def runTest(self):
    # Exercise a meter entry via P4Runtime, then multicast node/MGID
    # programming and readback via the BfRuntime interface.
    # NOTE(review): swports is *called* here but indexed (swports[...])
    # elsewhere in this file -- confirm self.swports is a method.
    ig_port = self.swports(1)
    eg_port = ig_port
    meter_config = get_one_meter_config()
    entity = self.add_to_t_mtr_0(ig_port, eg_port, meter_config)
    # read table entry over p4runtime
    rentity = self.read_one(entity)
    self.assertProtoEqual(rentity, entity)
    # Set up Bfruntime interface
    bri = self.BriInterface("$SHARED")
    target = client.Target(device_id=0, pipe_id=0xffff)
    # Add an entry to the node table
    bri.node_table.entry_add(target, [
        bri.node_table.make_key([client.KeyTuple('$MULTICAST_NODE_ID', 1)])
    ], [
        bri.node_table.make_data([
            client.DataTuple('$MULTICAST_RID', 2),
            client.DataTuple('$MULTICAST_LAG_ID', int_arr_val=[10]),
            client.DataTuple('$DEV_PORT', int_arr_val=[20])
        ])
    ])
    # Add an entry to the MGID table
    key = bri.mgid_table.make_key([client.KeyTuple('$MGID', 1)])
    data = bri.mgid_table.make_data([
        client.DataTuple('$MULTICAST_NODE_ID', int_arr_val=[1]),
        client.DataTuple('$MULTICAST_NODE_L1_XID_VALID', bool_arr_val=[True]),
        client.DataTuple('$MULTICAST_NODE_L1_XID', int_arr_val=[2])
    ])
    bri.mgid_table.entry_add(target, [key], [data])
    # Read the entry back and ensure that it was the same
    for data_ret, key_ret in bri.mgid_table.entry_get(target, [key]):
        data_ret_dict = data_ret.to_dict()
        data_dict = data.to_dict()
        assert data_ret_dict["$MULTICAST_NODE_ID"] == data_dict[
            "$MULTICAST_NODE_ID"]
        assert data_ret_dict["$MULTICAST_NODE_L1_XID_VALID"] == data_dict[
            "$MULTICAST_NODE_L1_XID_VALID"]
        assert data_ret_dict["$MULTICAST_NODE_L1_XID"] == data_dict[
            "$MULTICAST_NODE_L1_XID"]
    # Delete the entries
    bri.mgid_table.entry_del(target, [key])
    bri.node_table.entry_del(target, [
        bri.node_table.make_key([client.KeyTuple('$MULTICAST_NODE_ID', 1)])
    ])
    bri.tearDown()
def _forward_table_add(table, target, smac, smac_mask, priority, port,
                       c_bytes, c_pkts):
    """Add a ternary src-MAC entry with initial direct-counter values.

    The original passed ``c_pkts`` into $COUNTER_SPEC_BYTES and
    ``c_bytes`` into $COUNTER_SPEC_PKTS -- the two arguments were
    swapped; this version wires each to its matching field.
    """
    table.entry_add(target, [
        table.make_key([
            gc.KeyTuple('hdr.ethernet.src_addr', smac, smac_mask),
            gc.KeyTuple('$MATCH_PRIORITY', priority)
        ])
    ], [
        table.make_data([
            gc.DataTuple('port', port),
            gc.DataTuple('$COUNTER_SPEC_BYTES', c_bytes),
            gc.DataTuple('$COUNTER_SPEC_PKTS', c_pkts)
        ], 'SwitchIngress.hit')
    ])
def runTest(self):
    """Action-profile forwarding test: traffic matching the forward
    table resolves through action member 1 and egresses on eg_port;
    after the entries are removed the same packet is dropped."""
    ig_port = swports[1]
    eg_port = swports[2]
    dmac = '22:22:22:22:22:22'
    target = client.Target(device_id=0, pipe_id=0xffff)
    # Get bfrt_info and set it as part of the test
    bfrt_info = self.interface.bfrt_info_get("tna_action_profile")
    forward_table = bfrt_info.table_get("SwitchIngress.forward")
    action_profile = bfrt_info.table_get("SwitchIngress.action_profile")
    # Action member 1 forwards matching packets to eg_port.
    ap_key = action_profile.make_key(
        [client.KeyTuple('$ACTION_MEMBER_ID', 1)])
    action_profile.entry_add(target, [ap_key], [
        action_profile.make_data([client.DataTuple('port', eg_port)],
                                 'SwitchIngress.set_port')
    ])
    # Match on ingress port + vid 0, referencing action member 1.
    fwd_key = forward_table.make_key([
        client.KeyTuple('ig_intr_md.ingress_port', ig_port),
        client.KeyTuple('vid', 0)
    ])
    forward_table.entry_add(target, [fwd_key], [
        forward_table.make_data([client.DataTuple('$ACTION_MEMBER_ID', 1)])
    ])
    try:
        pkt = testutils.simple_tcp_packet(eth_dst=dmac)
        logger.info("Sending packet on port %d", ig_port)
        testutils.send_packet(self, ig_port, pkt)
        exp_pkt = pkt
        logger.info("Expecting packet on port %d", eg_port)
        testutils.verify_packets(self, exp_pkt, [eg_port])
    finally:
        # Entries are removed even if verification failed above.
        forward_table.entry_del(target, [fwd_key])
        action_profile.entry_del(target, [ap_key])
    # With the entries gone, the same packet should now be dropped.
    try:
        logger.info("Sending packet on port %d", ig_port)
        testutils.send_packet(self, ig_port, pkt)
        logger.info("Packet is expected to get dropped.")
        testutils.verify_no_other_packets(self)
    finally:
        pass
def readEntryHelper(self, target, eg_port, num_read, from_hw,
                    init_pkts_val, init_bytes_val):
    """Read one counter entry ``num_read`` times, verify the values
    match the initialized packet/byte counts, and return the average
    read latency in milliseconds."""
    from timeit import default_timer as timer
    total_ms = 0
    for _ in range(0, num_read):
        started = timer()
        resp = self.counter_table.entry_get(target, [
            self.counter_table.make_key(
                [gc.KeyTuple('$COUNTER_INDEX', eg_port)])
        ], {"from_hw": from_hw})
        data_dict = next(resp)[0].to_dict()
        recv_pkts = data_dict["$COUNTER_SPEC_PKTS"]
        recv_bytes = data_dict["$COUNTER_SPEC_BYTES"]
        # Per-read latency in milliseconds.
        elapsed_ms = (timer() - started) * 1000
        logger.info("Time to read hw counter is %s", str(elapsed_ms))
        total_ms = total_ms + elapsed_ms
        if init_pkts_val != recv_pkts:
            logger.error("Error! inited pkts val = %s received val = %s",
                         str(init_pkts_val), str(recv_pkts))
            assert 0
        if init_bytes_val != recv_bytes:
            logger.error("Error! inited bytes val = %s received val = %s",
                         str(init_bytes_val), str(recv_bytes))
            assert 0
    return total_ms / num_read
def worker_clear_all(self):
    """Remove every forwarding entry we added and forget the MACs.

    The original called ``entry_add`` with only a key list, which would
    attempt to (re-)insert entries rather than remove them; ``entry_del``
    matches the function's purpose and the subsequent bookkeeping clear.
    """
    self.table.entry_del(self.target, [
        self.table.make_key(
            [gc.KeyTuple('hdr.ethernet.dst_addr', mac_address)])
        for mac_address in self.mac_addresses
    ])
    self.mac_addresses.clear()
def runTest(self):
    """Add a direct-meter entry and verify the meter spec values the
    driver reports for the current chip family."""
    target = client.Target(device_id=0, pipe_id=0xffff)
    bfrt_info = self.interface.bfrt_info_get("tna_meter_lpf_wred")
    table = bfrt_info.table_get("SwitchIngress.direct_meter_color")
    table.info.key_field_annotation_add("hdr.ethernet.src_addr", "mac")
    entry_key = table.make_key(
        [client.KeyTuple('hdr.ethernet.src_addr', '00:11:22:33:44:55')])
    entry_data = table.make_data([], "SwitchIngress.set_color_direct")
    table.entry_add(target, [entry_key], [entry_data])
    for data, key in table.entry_get(target, [entry_key],
                                     {"from_hw": False}):
        data_dict = data.to_dict()
        # ``!=`` instead of the original ``is not``: identity comparison
        # against a string literal is implementation-dependent and a
        # SyntaxWarning on modern CPython.
        if g_is_tofino and testutils.test_param_get("target") != "hw":
            self.assertEqual(data_dict["$METER_SPEC_CIR_KBPS"], 5195848221)
            self.assertEqual(data_dict["$METER_SPEC_PIR_KBPS"], 5195848221)
        elif g_is_tofino2:
            self.assertEqual(data_dict["$METER_SPEC_CIR_KBPS"], 4087999890)
            self.assertEqual(data_dict["$METER_SPEC_PIR_KBPS"], 4087999890)
            self.assertEqual(data_dict["$METER_SPEC_CBS_KBITS"], 4380866642)
            self.assertEqual(data_dict["$METER_SPEC_PBS_KBITS"], 4380866642)
    table.entry_del(target, [entry_key])
def _mk_key(self, keys):
    """Build a one-element key list from ``keys`` (a list of KeyTuple
    keyword-argument dicts), or return None when ``keys`` is None.
    """
    if keys is None:
        return None
    # List comprehension instead of the original map+lambda: clearer,
    # and produces a real list so the key fields can be iterated more
    # than once by make_key.
    return [self.table.make_key([gc.KeyTuple(**key) for key in keys])]
def clear_counters(self): self.logger.info("Clearing next_step counters...") # this should work, but it doesn't. #self.consume_counter.entry_del(self.target) #self.harvest_counter.entry_del(self.target) self.recirculate_counter.entry_del(self.target) self.broadcast_counter.entry_del(self.target) self.retransmit_counter.entry_del(self.target) self.drop_counter.entry_del(self.target) # so we'll clear them manually for counter in [ #self.consume_counter, #self.harvest_counter, self.recirculate_counter, self.broadcast_counter, self.retransmit_counter, self.drop_counter ]: count = counter.info.size self.logger.info("Clearing {} keys for counter table {}".format( count, counter.info.name_get())) counter.entry_mod(self.target, [ counter.make_key([gc.KeyTuple('$COUNTER_INDEX', i)]) for i in range(count) ], [counter.make_data([gc.DataTuple('$COUNTER_SPEC_PKTS', 0)])] * count)
def runTest(self):
    """Write random hi/lo values into a random register index and verify
    the hardware readback across all pipes."""
    seed = random.randint(1, 65535)
    logger.info("Seed used for RegisterTest is %d", seed)
    random.seed(seed)

    # Get bfrt_info and set it as part of the test
    bfrt_info = self.interface.bfrt_info_get("tna_register")

    register_idx = random.randint(0, 500)
    register_value_hi = random.randint(0, 1000)
    register_value_lo = random.randint(0, 1000)
    logger.info("Register value hi %s", str(register_value_hi))
    logger.info("Register value lo %s", str(register_value_lo))

    # Expected per-pipe readback: the same hi/lo pair in every pipe.
    num_pipes = int(testutils.test_param_get('num_pipes'))
    register_value_hi_arr = {p: register_value_hi for p in range(num_pipes)}
    register_value_lo_arr = {p: register_value_lo for p in range(num_pipes)}

    target = gc.Target(device_id=0, pipe_id=0xffff)
    register_table = bfrt_info.table_get("SwitchIngress.test_reg")
    reg_key = [
        register_table.make_key(
            [gc.KeyTuple('$REGISTER_INDEX', register_idx)])
    ]
    register_table.entry_add(target, reg_key, [
        register_table.make_data([
            gc.DataTuple('SwitchIngress.test_reg.first', register_value_lo),
            gc.DataTuple('SwitchIngress.test_reg.second', register_value_hi)
        ])
    ])
    resp = register_table.entry_get(target, reg_key, {"from_hw": True})
    data, _ = next(resp)
    VerifyReadRegisters(self, "SwitchIngress.test_reg.first",
                        "SwitchIngress.test_reg.second",
                        register_value_lo_arr, register_value_hi_arr,
                        data.to_dict())
def getCntr(self, index):
    """Return the packet count at ``index`` of the 'cntr' table (read
    from hardware)."""
    cntr_table = self.bfrt_info.table_get('cntr')
    target = gc.Target(device_id=dev_id, pipe_id=0xffff)
    cntr_key = cntr_table.make_key([gc.KeyTuple('$COUNTER_INDEX', index)])
    resp = cntr_table.entry_get(target, [cntr_key], {"from_hw": True}, None)
    entry = next(resp)[0].to_dict()
    return entry['$COUNTER_SPEC_PKTS']
def modifyEntries(self):
    """Rewrite the phase0 (port_metadata) entry for every ingress port
    with fresh random field values and install matching exact-match
    entries keyed on those fields."""
    # To make sure that the phase0 table modify has taken effect delete
    # the old entries from the exact match table
    self.port_md_exm_match_table.entry_del(self.target)
    for key, value in list(self.igr_to_egr_port_map.items()):
        igr_port = key
        egr_port = value
        # For each igr port add a entry in the port_metadata (phase0) table
        # Form data to be programmed in the phase0 table for this ingress port
        phase0data = 0
        field1 = 0
        field2 = 0
        field3 = 0
        field4 = 0
        # NOTE(review): old_phase0data is captured but never used after
        # the loop below -- possibly left over from an earlier check.
        old_phase0data = 0
        # Keep drawing random fields until the packed value actually
        # differs from the currently-programmed one.
        while True:
            field1 = random.randint(256, 0xffff)  # 16 bit
            field2 = random.randint(1, 0xffffff)  # 24 bits
            field3 = random.randint(1, 0xffff)  # 16 bits
            field4 = random.randint(1, 0xff)  # 8 bits

            phase0data = make_phase0_data(field1, field2, field3, field4)

            if self.phase0_data_map[igr_port] != phase0data:
                old_phase0data = self.phase0_data_map[igr_port]
                self.phase0_data_map[igr_port] = phase0data
                break

        # Modify the existing phase0 entry in place for this port.
        self.port_metadata_table.entry_mod(
            self.target,
            [self.port_metadata_table.make_key([client.KeyTuple('ig_intr_md.ingress_port', igr_port)])],
            [self.port_metadata_table.make_data([client.DataTuple('$DEFAULT_FIELD', phase0data)])])

        # Add the new entry for the igr port in the exact match table
        self.port_md_exm_match_table.entry_add(
            self.target,
            [self.port_md_exm_match_table.make_key(
                [client.KeyTuple('ig_md.port_md.field1', field1),
                 client.KeyTuple('ig_md.port_md.field2', field2),
                 client.KeyTuple('ig_md.port_md.field3', field3),
                 client.KeyTuple('ig_md.port_md.field4', field4)])],
            [self.port_md_exm_match_table.make_data(
                [client.DataTuple('port', egr_port)],
                'SwitchIngress.hit')]
        )
def clear(self):
    """Delete every mirror session this object created.

    A wildcard entry_del over the whole table is not yet supported, so
    sessions are removed one at a time in creation (FIFO) order.
    """
    # Truthiness check instead of ``while len(self.sessions):``;
    # pop(0) preserves the original FIFO deletion order.
    while self.sessions:
        sid = self.sessions.pop(0)
        self.table.entry_del(
            self.target,
            [self.table.make_key([gc.KeyTuple('$sid', sid)])])
def table_print(target, table, keys):
    # Look up one entry and return its '$PORT_UP' field.
    # ``keys`` arrives as a list of KeyTuple positional-argument tuples
    # and is rebuilt into a single-key list below.
    keys = [table.make_key([gc.KeyTuple(*f) for f in keys])]
    for data, key in table.entry_get(target, keys):
        key_fields = key.to_dict()  # unused; kept for debugging parity
        data_fields = data.to_dict()
        # NOTE(review): returns on the first yielded entry only, and
        # indexes with a bytes key (b'$PORT_UP') while similar code in
        # this file uses str keys -- confirm which key type to_dict()
        # produces here. Implicitly returns None when entry_get yields
        # nothing.
        return data_fields[b'$PORT_UP']
def add_default_entries(self):
    """Install the default broadcast rule: frames addressed to the
    all-ones MAC are flooded via the configured multicast group."""
    bcast_key = self.table.make_key(
        [gc.KeyTuple('hdr.ethernet.dst_addr', "ff:ff:ff:ff:ff:ff")])
    bcast_data = self.table.make_data(
        [gc.DataTuple('flood_mgid', self.mgid)],
        'Ingress.non_switchml_forward.flood')
    self.table.entry_add(self.target, [bcast_key], [bcast_data])
def worker_add(self, mgid, rid, port, lane):
    """Register a worker (front-panel port/lane) in multicast group
    ``mgid``: add its replication ID to the node table and extend the
    multicast group to include it."""
    # get dev port for this worker
    dev_port = self.ports.get_dev_port(port, lane)

    # self.rids[mgid] is keyed by dev_port (see the assignment below and
    # worker_clear_all's .items() unpack), so the duplicate check must
    # test dev_port. The original tested ``rid in self.rids[mgid]``,
    # comparing a replication ID against dev_port keys, so duplicates
    # were never detected.
    if dev_port in self.rids[mgid]:
        print("Port {}/{} already added to multicast group {}; skipping.".
              format(port, lane, mgid))
        return

    # add to rid table for this group
    self.rids[mgid][dev_port] = rid

    # erase any existing entry
    try:
        self.node_table.entry_del(self.target, [
            self.node_table.make_key(
                [gc.KeyTuple('$MULTICAST_NODE_ID', rid)])
        ])
    except gc.BfruntimeReadWriteRpcException as e:
        self.logger.info(
            "Multicast node ID {} not found in switch already during delete; this is expected."
            .format(rid))

    # add to node table
    self.node_table.entry_add(self.target, [
        self.node_table.make_key([gc.KeyTuple('$MULTICAST_NODE_ID', rid)])
    ], [
        self.node_table.make_data([
            gc.DataTuple('$MULTICAST_RID', rid),
            gc.DataTuple('$DEV_PORT', int_arr_val=[dev_port])
        ])
    ])

    # now that node is added, extend multicast group
    self.mgid_table.entry_mod_inc(
        self.target,
        [self.mgid_table.make_key([gc.KeyTuple('$MGID', mgid)])], [
            self.mgid_table.make_data([
                gc.DataTuple('$MULTICAST_NODE_ID', int_arr_val=[rid]),
                gc.DataTuple('$MULTICAST_NODE_L1_XID_VALID',
                             bool_arr_val=[True]),
                gc.DataTuple('$MULTICAST_NODE_L1_XID', int_arr_val=[rid])
            ])
        ], bfruntime_pb2.TableModIncFlag.MOD_INC_ADD)
def enc_init(ver=0b00, is_mod=False): key = b'\x00' * 16 # fix the key for debugging # key = os.urandom(16) try: # using os.urandom(16) or other methods k1 = bytearray(siphash24(key, str(0))) k2 = bytearray(siphash24(key, str(1))) k3 = bytearray(siphash24(key, str(2))) key_list1 = [key1_table.make_key([gc.KeyTuple('ig_md.cur_ver', ver)])] data_list1 = [ key1_table.make_data([ gc.DataTuple('k1', k1[4:8]), gc.DataTuple('k2', k1[0:4]), gc.DataTuple('otp1', k2), gc.DataTuple('otp2', k3), ], "SwitchIngress.get_key_1") ] key_list2 = [key2_table.make_key([gc.KeyTuple('ig_md.cur_ver', ver)])] data_list2 = [ key2_table.make_data([ gc.DataTuple('k1', k3[4:8]), gc.DataTuple('k2', k3[0:4]), gc.DataTuple('otp1', k2), gc.DataTuple('otp2', k1), ], "SwitchIngress.get_key_2") ] if is_mod: key1_table.entry_mod(target, key_list1, data_list1) key2_table.entry_mod(target, key_list2, data_list2) else: key1_table.entry_add(target, key_list1, data_list1) key2_table.entry_add(target, key_list2, data_list2) except Exception as e: try: flush_table(key1_table, target) flush_table(key2_table, target) except Exception as e: pass
def insertEntry(self, target, register_idx, register_val):
    """Write ``register_val`` into the bool register table at index
    ``register_idx``."""
    reg_key = self.register_bool_table.make_key(
        [gc.KeyTuple('$REGISTER_INDEX', register_idx)])
    reg_data = self.register_bool_table.make_data([
        gc.DataTuple('SwitchIngress.bool_register_table.f1', register_val)
    ])
    self.register_bool_table.entry_add(target, [reg_key], [reg_data])