def __init__(self, vlan):
    """Normalize *vlan* into canonical form.

    Accepts None (default VLAN), another VLAN object (copy constructor),
    a single int tag, or a list/tuple of up to two int tags in 1-4095.
    """
    # no VLAN supplied - mark it as a default value
    if vlan is None:
        self.is_def = True
        self.tags = ()
        return

    # copy constructor
    if isinstance(vlan, VLAN):
        self.tags = vlan.tags
        self.is_def = vlan.is_def
        return

    # normalize a scalar tag to a one-element sequence
    tags = (vlan,) if isinstance(vlan, int) else vlan

    if len(tags) > 2:
        raise STLError("only up to two VLAN tags are supported")

    for tag in tags:
        if type(tag) is not int:
            raise STLError(
                "invalid VLAN tag: '{0}'(int value expected)".format(tag))
        if tag not in range(1, 4096):
            raise STLError(
                "invalid VLAN tag: '{0}'(valid range: 1 - 4095)".format(tag))

    self.tags = tuple(tags)
    self.is_def = False
def read_all(self, ipg_usec, min_ipg_usec, speedup, split_mode=None):
    """Read every packet from the pcap file and compute TX timestamps.

    :parameters:
        ipg_usec: fixed inter-packet gap in usec, or None to take gaps
            from the pcap's own timestamps
        min_ipg_usec: lower bound for the gap (guards tiny/zero IPGs)
        speedup: divide timestamp-derived gaps by this factor
        split_mode: None for a flat list, or 'MAC' / 'IP' to split the
            packets into two groups for dual-port profiles
    :return: list of [pkt_data, ts_usec] pairs, or the two packet groups
    :raises STLError: empty pcap, bad gap arguments or unknown split mode
    """
    # get the packets
    if split_mode is None:
        pkts = RawPcapReader(self.pcap_file).read_all()
    else:
        pkts = rdpcap(self.pcap_file)

    if not pkts:
        raise STLError("'%s' does not contain any packets." % self.pcap_file)

    self.pkts_arr = []
    last_ts = 0
    # timestamp of the previous packet when deriving gaps from the pcap
    # (explicit sentinel replaces the old fragile locals() probe)
    prev_time = None

    # fix times
    for pkt in pkts:
        if split_mode is None:
            pkt_data, meta = pkt
            ts_usec = meta[0] * 1e6 + meta[1]
        else:
            pkt_data = pkt
            ts_usec = pkt.time * 1e6

        if ipg_usec is None:
            # gap comes from the pcap timestamps
            if prev_time is not None:
                delta_usec = (ts_usec - prev_time) / float(speedup)
            else:
                delta_usec = 0
            if min_ipg_usec and delta_usec < min_ipg_usec:
                delta_usec = min_ipg_usec
            prev_time = ts_usec
            last_ts += delta_usec
        else:  # user specified ipg
            if min_ipg_usec:
                last_ts += min_ipg_usec
            elif ipg_usec:
                last_ts += ipg_usec / float(speedup)
            else:
                # reached when both gaps are zero/None - old message
                # wrongly said "not both"
                raise STLError(
                    'Please specify non-zero ipg_usec or min_ipg_usec.')

        self.pkts_arr.append([pkt_data, last_ts])

    if split_mode is None:
        return self.pkts_arr

    # we need to split
    self.graph = Graph()
    self.pkt_groups = [[], []]

    if split_mode == 'MAC':
        self.generate_mac_groups()
    elif split_mode == 'IP':
        self.generate_ip_groups()
    else:
        raise STLError('unknown split mode for PCAP')

    return self.pkt_groups
def test_send_traffic_on_tg_error(self):
    """send_traffic_on_tg with a client whose stats/wait calls raise STLError.

    get_stats and wait_on_traffic are rigged to raise; since the
    assertRaises check below is commented out, the call is apparently
    expected not to propagate the error (TODO confirm intended contract).
    """
    mock_setup_helper = mock.Mock()
    vpp_rfc = tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper)
    vpp_rfc.vnfd_helper = base.VnfdHelper(TestTrexTrafficGenVpp.VNFD_0)
    # client mock: warnings succeed, stats/wait raise
    vpp_rfc.client = mock.Mock()
    vpp_rfc.client.get_warnings.return_value = 'get_warnings'
    vpp_rfc.client.get_stats.side_effect = STLError('get_stats')
    vpp_rfc.client.wait_on_traffic.side_effect = STLError(
        'wait_on_traffic')
    # two ports, each allocated its own pg id
    port_pg_id = rfc2544.PortPgIDMap()
    port_pg_id.add_port(1)
    port_pg_id.increase_pg_id()
    port_pg_id.add_port(0)
    port_pg_id.increase_pg_id()
    # with self.assertRaises(RuntimeError) as raised:
    vpp_rfc.send_traffic_on_tg([0, 1], port_pg_id, 30, 10000, True)
def __init__(self,
             ctx,
             dst_ip,
             src_ip=None,
             pkt_size=64,
             timeout_sec=3,
             verbose_level=STLService.ERROR,
             vlan=None):
    """ICMP (ping) service.

    When no source IP is given, it is resolved from the port context;
    a port without an IPv4 address is an error.
    """
    # init the base object
    super(STLServiceICMP, self).__init__(verbose_level)

    if src_ip is None:
        src_ip = ctx.get_src_ipv4()
        if not src_ip:
            raise STLError(
                'PING: port {} does not have an IPv4 address. please manually provide source IPv4'
                .format(ctx.get_port_id()))

    # endpoints and encapsulation
    self.src_ip = src_ip
    self.dst_ip = dst_ip
    self.vlan = VLAN(vlan)

    # packet parameters
    self.pkt_size = pkt_size
    self.timeout_sec = timeout_sec

    # ICMP echo identifier / sequence bookkeeping
    self.id = random.getrandbits(16)
    self.seq = 0
    self.record = None
def load(filename, direction=0, port_id=0, **kwargs):
    """ Load a profile by its type. Supported types are:
       * py
       * yaml
       * pcap file that converted to profile automaticly

       :Parameters:
          filename  : string as filename
          direction : profile's direction(if supported by the profile)
          port_id   : which port ID this profile is being loaded to
          kwargs    : forward those key-value pairs to the profile
    """
    # use splitext so filenames with extra dots (e.g. 'my.profile.py')
    # still resolve their real suffix; the old split('.') returned None
    # for anything that was not exactly 'name.ext'
    ext = os.path.splitext(os.path.basename(filename))[1]
    suffix = ext[1:] if ext else None

    if suffix == 'py':
        profile = STLProfile.load_py(filename, direction, port_id, **kwargs)
    elif suffix == 'yaml':
        profile = STLProfile.load_yaml(filename)
    elif suffix in ['cap', 'pcap']:
        profile = STLProfile.load_pcap(filename, speedup=1, ipg_usec=1e6)
    else:
        raise STLError("unknown profile file type: '{0}'".format(suffix))

    profile.meta['stream_count'] = len(
        profile.get_streams()) if isinstance(profile.get_streams(),
                                             list) else 1
    return profile
def validate(self, cmd_name, ports, states=None, allow_empty=False):
    ''' main validator '''
    # listify and remove duplicates
    if not isinstance(ports, (list, tuple)):
        ports = [ports]
    ports = list_remove_dup(ports)

    if not ports and not allow_empty:
        raise STLError(
            '{0} - action requires at least one port'.format(cmd_name))

    # default checks for every command; states_map maps each mandatory
    # state to its (optional) custom error message
    states_map = {_PSV_ALL: None}

    if isinstance(states, int):
        states_map[states] = None
    elif isinstance(states, (list, tuple)):
        states_map.update((s, None) for s in states)
    elif isinstance(states, dict):
        states_map.update(states)

    for state, err_msg in list(states_map.items()):
        self.validators[state].validate(self.client, cmd_name, ports,
                                        err_msg)

    # return the listified ports
    return ports
def execute_bp_sim(self, json_filename):
    """Run the bp-sim binary on the generated JSON command file.

    :parameters:
        json_filename: path of the JSON-RPC commands file ('-f' argument)
    :raises STLError: when the executable is missing or exits non-zero
    """
    if self.is_debug:
        exe = os.path.join(self.bp_sim_path, 'bp-sim-64-debug')
    else:
        exe = os.path.join(self.bp_sim_path, 'bp-sim-64')

    if not os.path.exists(exe):
        raise STLError(
            "'{0}' does not exists, please build it before calling the simulation"
            .format(exe))

    cmd = [
        exe, '--pcap', '--sl', '--cores',
        str(self.dp_core_count), '--limit',
        str(self.pkt_limit), '-f', json_filename
    ]

    # out or dry
    if not self.outfile:
        cmd += ['--dry']
        cmd += ['-o', '/dev/null']
    else:
        cmd += ['-o', self.outfile]

    if self.dp_core_index is not None:
        cmd += ['--core_index', str(self.dp_core_index)]

    # optional debugging wrappers
    if self.mode == 'valgrind':
        cmd = ['valgrind', '--leak-check=full', '--error-exitcode=1'] + cmd
    elif self.mode == 'gdb':
        cmd = ['/usr/bin/gdb', '--args'] + cmd

    print(("executing command: '{0}'".format(" ".join(cmd))))

    if self.silent:
        # fix: the devnull handle used to be opened and never closed
        with open(os.devnull, 'wb') as devnull:
            rc = subprocess.call(cmd, stdout=devnull)
    else:
        rc = subprocess.call(cmd)

    if rc != 0:
        raise STLError(
            'simulation has failed with error code {0}'.format(rc))

    self.merge_results()
def validate(self, client, cmd_name, ports, custom_err_msg=None):
    """Raise STLError if any of *ports* is not valid for this validator."""
    bad_ports = list_difference(ports, self.get_valid_ports(client))
    if not bad_ports:
        return

    msg = self.def_err_msg()
    if custom_err_msg:
        msg = '{0} - {1}'.format(msg, custom_err_msg)

    raise STLError('{0} - port(s) {1}: {2}'.format(cmd_name, bad_ports, msg))
def __init__(self, bp_sim_path, handler=0, port_id=0, api_h="dummy"):
    """Store the absolute simulator path (must exist) and dummy RPC ids."""
    path = os.path.abspath(bp_sim_path)
    if not os.path.exists(path):
        raise STLError('BP sim path %s does not exist' % path)
    self.bp_sim_path = path

    # dummies
    self.handler = handler
    self.api_h = api_h
    self.port_id = port_id
def __parse_flow_stats(self, flow_stats_obj):
    """Build STLFlowStats from the YAML section; None when absent/disabled."""
    # no such object
    if not flow_stats_obj:
        return None
    if flow_stats_obj.get('enabled') is False:
        return None

    pg_id = flow_stats_obj.get('stream_id')
    if pg_id is not None:
        return STLFlowStats(pg_id=pg_id)

    raise STLError("Enabled RX stats section must contain 'stream_id' field")
def load_py(python_file, direction=0, port_id=0, **kwargs): """ Load from Python profile """ # check filename if not os.path.isfile(python_file): raise STLError("File '{0}' does not exist".format(python_file)) basedir = os.path.dirname(python_file) sys.path.insert(0, basedir) try: file = os.path.basename(python_file).split('.')[0] module = __import__(file, globals(), locals(), [], 0) imp.reload(module) # reload the update t = STLProfile.get_module_tunables(module) # for arg in kwargs: # if not arg in t: # raise STLError("Profile {0} does not support tunable '{1}' # - supported tunables are: '{2}'".format(python_file, arg, t)) streams = module.register().get_streams(direction=direction, port_id=port_id, **kwargs) profile = STLProfile(streams) profile.meta = {'type': 'python', 'tunables': t} return profile except Exception: a, b, tb = sys.exc_info() x = ''.join(traceback.format_list(traceback.extract_tb( tb)[1:])) + a.__name__ + ": " + str(b) + "\n" summary = "\nPython Traceback follows:\n\n" + x raise STLError(summary) finally: sys.path.remove(basedir)
def __parse_mode(self, mode_obj):
    """Build an STLTX* mode object from the YAML 'mode' section.

    :return: STLTXCont / STLTXSingleBurst / STLTXMultiBurst, or None
        when the section is absent
    :raises STLError: on a missing/ambiguous rate key or unknown type
    """
    if not mode_obj:
        return None

    # exactly one rate key must be present
    rate_parser = set(mode_obj).intersection(
        ['pps', 'bps_L1', 'bps_L2', 'percentage'])
    if len(rate_parser) != 1:
        raise STLError(
            "'rate' must contain exactly one from 'pps', 'bps_L1', 'bps_L2', 'percentage'"
        )

    rate_type = rate_parser.pop()
    rate = {rate_type: mode_obj[rate_type]}

    mode_type = mode_obj.get('type')
    if mode_type == 'continuous':
        mode = STLTXCont(**rate)
    elif mode_type == 'single_burst':
        defaults = STLTXSingleBurst()
        mode = STLTXSingleBurst(total_pkts=mode_obj.get(
            'total_pkts', defaults.fields['total_pkts']), **rate)
    elif mode_type == 'multi_burst':
        defaults = STLTXMultiBurst()
        mode = STLTXMultiBurst(
            pkts_per_burst=mode_obj.get('pkts_per_burst',
                                        defaults.fields['pkts_per_burst']),
            ibg=mode_obj.get('ibg', defaults.fields['ibg']),
            count=mode_obj.get('count', defaults.fields['count']),
            **rate)
    else:
        # fixed: the closing quote after multi_burst was missing
        raise STLError(
            "mode type can be 'continuous', 'single_burst' or 'multi_burst'"
        )

    return mode
def parse(self):
    """Parse the YAML file into a list of stream objects.

    :raises STLError: when the YAML is malformed
    """
    with open(self.yaml_file, 'r') as f:
        # read YAML and pass it down to stream object
        yaml_str = f.read()

        try:
            objects = yaml.safe_load(yaml_str)
        except yaml.parser.ParserError as e:
            raise STLError(str(e))

        # 'obj' - the old loop variable shadowed the builtin 'object'
        streams = [self.__parse_stream(obj) for obj in objects]

        return streams
def generate_mac_groups(self):
    """Split packets into two groups by their L2 (MAC) conversations."""
    # feed every src->dst MAC pair into the conversation graph
    for idx, (pkt, _) in enumerate(self.pkts_arr):
        if not isinstance(pkt, (Ether, Dot3)):
            raise STLError(
                "Packet #{0} has an unknown L2 format: {1}".format(
                    idx, type(pkt)))
        self.graph.add(pkt.src, pkt.dst)

    # split the graph to two groups
    mac_groups = self.graph.split()

    for pkt, ts in self.pkts_arr:
        group = 1 if pkt.src in mac_groups[1] else 0
        self.pkt_groups[group].append((bytes(pkt), ts))
def load_yaml(yaml_file):
    """ Load(from YAML file) a profile with a number of streams"""
    # check filename
    if not os.path.isfile(yaml_file):
        # grammar fixed ('does not exists') to match the other loaders
        raise STLError("file '{0}' does not exist".format(yaml_file))

    yaml_loader = YAMLLoader(yaml_file)
    streams = yaml_loader.parse()

    profile = STLProfile(streams)
    profile.meta = {'type': 'yaml'}
    return profile
def __parse_packet(self, packet_dict):
    """Build an STLPktBuilder from a 'binary' (base64) or 'pcap' section.

    :raises STLError: neither/both keys present, bad base64, missing pcap
    """
    packet_type = set(packet_dict).intersection(['binary', 'pcap'])
    if len(packet_type) != 1:
        raise STLError(
            "Packet section must contain either 'binary' or 'pcap'")

    if 'binary' in packet_type:
        try:
            pkt_str = base64.b64decode(packet_dict['binary'])
        except (TypeError, ValueError):
            # Python 3 raises binascii.Error (a ValueError subclass) on
            # invalid base64; the old TypeError-only clause missed it
            raise STLError("'binary' field is not a valid packet format")

        builder = STLPktBuilder(pkt_buffer=pkt_str)

    elif 'pcap' in packet_type:
        pcap = os.path.join(self.yaml_path, packet_dict['pcap'])
        if not os.path.exists(pcap):
            raise STLError("'pcap' - cannot find '{0}'".format(pcap))
        builder = STLPktBuilder(pkt=pcap)

    return builder
def _add(self, services):
    ''' Add a service to the context '''
    if isinstance(services, STLService):
        self._add_single_service(services)
        return

    # otherwise it must be a homogeneous list/tuple of services
    is_service_seq = (isinstance(services, (list, tuple))
                      and all([isinstance(s, STLService) for s in services]))
    if not is_service_seq:
        raise STLError(
            "'services' should be STLService subtype or list/tuple of it")

    for service in services:
        self._add_single_service(service)
def validate_type(arg_name, arg, valid_types):
    """Validate that *arg* is an instance of *valid_types*.

    :parameters:
        arg_name: name used in the raised error
        arg: the value to check
        valid_types: a type, or a list/tuple of types
    :raises STLTypeError: when *arg* is not of a valid type
    :raises STLError: when *valid_types* itself is malformed
    """
    # normalize: a bare int means "any int"; the old code built the
    # redundant (int, int) tuple and carried a dead py2 'long' branch
    # (int in valid_types and int not in valid_types - always False)
    if valid_types is int:
        valid_types = (int,)

    if type(valid_types) is list:
        valid_types = tuple(valid_types)

    # single type or a tuple of types
    if type(valid_types) is type or type(valid_types) is tuple:
        if isinstance(arg, valid_types):
            return
        raise STLTypeError(arg_name, type(arg), valid_types)
    else:
        raise STLError('validate_type: valid_types should ' +
                       'be type or list or tuple of types')
def get_module_tunables(module):
    """Return {tunable_name: default_value} for module.register().get_streams()."""
    func = module.register().get_streams
    code = func.__code__

    # skip the first argument (self) of get_streams
    arg_names = code.co_varnames[1:code.co_argcount]

    # fetch defaults
    defaults = func.__defaults__
    if defaults is None:
        return {}

    if len(defaults) != (code.co_argcount - 1):
        raise STLError(
            "Module should provide default values for all arguments on get_streams()"
        )

    return dict(zip(arg_names, defaults))
def __parse_stream(self, yaml_object):
    """Build an STLStream from one parsed YAML stream object.

    :raises STLError: when the mandatory 'packet' field is missing
    """
    s_obj = yaml_object['stream']

    # parse packet
    packet = s_obj.get('packet')
    if not packet:
        raise STLError("YAML file must contain 'packet' field")
    builder = self.__parse_packet(packet)

    # mode
    mode = self.__parse_mode(s_obj.get('mode'))

    # rx stats
    flow_stats = self.__parse_flow_stats(s_obj.get('flow_stats'))

    defaults = default_STLStream
    # create the stream
    stream = STLStream(
        name=yaml_object.get('name'),
        packet=builder,
        mode=mode,
        flow_stats=flow_stats,
        enabled=s_obj.get('enabled', defaults.fields['enabled']),
        self_start=s_obj.get('self_start', defaults.fields['self_start']),
        isg=s_obj.get('isg', defaults.fields['isg']),
        next=yaml_object.get('next'),
        action_count=s_obj.get('action_count',
                               defaults.fields['action_count']),
        mac_src_override_by_pkt=s_obj.get('mac_src_override_by_pkt', 0),
        # fixed copy-paste: this used to read 'mac_src_override_by_pkt'
        mac_dst_override_mode=s_obj.get('mac_dst_override_mode', 0))

    # hack the VM fields for now
    if 'vm' in s_obj:
        stream.fields['vm'].update(s_obj['vm'])

    return stream
def split(self):
    """Two-color the conversation graph into groups A and B.

    :raises STLError: when a node and one of its peers end up in the
        same group (the graph is not bipartite)
    """
    color_a = set()
    color_b = set()

    # start with all nodes
    pending = list(self.db.keys())

    # process one by one
    while pending:
        node = pending.pop(0)
        friends = self.db[node]

        # node has never been seen - move to color_a
        if node not in color_a and node not in color_b:
            self.log("<NEW> {0} --> A".format(node))
            color_a.add(node)

        # determine this node's color and the opposite one
        if node in color_a:
            node_color, other_color = color_a, color_b
        else:
            node_color, other_color = color_b, color_a

        # check that the coloring is possible
        bad_friends = friends.intersection(node_color)
        if bad_friends:
            raise STLError(
                "ERROR: failed to split PCAP file - {0} and {1} are in the same group"
                .format(node, bad_friends))

        # add all the friends to the other color
        for friend in friends:
            self.log("<FRIEND> {0} --> {1}".format(
                friend, 'A' if other_color is color_a else 'B'))
            other_color.add(friend)

    return color_a, color_b
def generate_ip_groups(self):
    """Split packets into two groups by their L3 (IP/IPv6) conversations."""
    # feed every src->dst address pair into the conversation graph
    for idx, (pkt, _) in enumerate(self.pkts_arr):
        if not isinstance(pkt, (Ether, Dot3)):
            raise STLError(
                "Packet #{0} has an unknown L2 format: {1}".format(
                    idx, type(pkt)))
        net = pkt.getlayer('IP') or pkt.getlayer('IPv6')
        if not net:
            # non-IP packet - not part of any conversation
            continue
        self.graph.add(net.src, net.dst)

    # split the graph to two groups
    ip_groups = self.graph.split()

    for pkt, ts in self.pkts_arr:
        net = pkt.getlayer('IP') or pkt.getlayer('IPv6')
        group = 1 if net and net.src in ip_groups[1] else 0
        self.pkt_groups[group].append((bytes(pkt), ts))
def run(self,
        input_list,
        outfile=None,
        dp_core_count=1,
        dp_core_index=None,
        is_debug=True,
        pkt_limit=5000,
        mult="1",
        duration=-1,
        mode='none',
        silent=False,
        tunables=None):
    """Load streams/profiles, build the JSON-RPC command list and simulate.

    :parameters:
        input_list: an STLStream / profile filename, or a list of them
        mode: 'none' / 'gdb' / 'valgrind' run the simulator;
            'json' / 'yaml' / 'pkt' / 'native' only dump and return
        tunables: key-value pairs forwarded to loaded profiles
    :raises STLArgumentError, STLError
    """
    if mode not in [
            'none', 'gdb', 'valgrind', 'json', 'yaml', 'pkt', 'native'
    ]:
        raise STLArgumentError('mode', mode)

    # listify
    input_list = input_list if isinstance(input_list,
                                          list) else [input_list]

    # check streams arguments
    if not all([isinstance(i, (STLStream, str)) for i in input_list]):
        raise STLArgumentError('input_list', input_list)

    # split to two types
    input_files = [x for x in input_list if isinstance(x, str)]
    stream_list = [x for x in input_list if isinstance(x, STLStream)]

    # handle YAMLs
    if tunables is None:
        tunables = {}

    for input_file in input_files:
        try:
            if 'direction' not in tunables:
                tunables['direction'] = self.port_id % 2
            profile = STLProfile.load(input_file, **tunables)
        except STLError as e:
            s = format_text(
                "\nError while loading profile '{0}'\n".format(input_file),
                'bold')
            s += "\n" + e.brief()
            raise STLError(s)

        stream_list += profile.get_streams()

    # load streams
    cmds_json = []
    id_counter = 1
    lookup = {}

    # allocate IDs
    for stream in stream_list:
        if stream.get_id() is not None:
            stream_id = stream.get_id()
        else:
            stream_id = id_counter
            id_counter += 1

        name = stream.get_name() if stream.get_name() is not None else id(
            stream)
        if name in lookup:
            raise STLError(
                "multiple streams with name: '{0}'".format(name))
        lookup[name] = stream_id

    # resolve names
    for stream in stream_list:
        name = stream.get_name() if stream.get_name() is not None else id(
            stream)
        stream_id = lookup[name]

        next_id = -1
        next = stream.get_next()
        if next:
            if next not in lookup:
                raise STLError(
                    "stream dependency error - unable to find '{0}'".
                    format(next))
            next_id = lookup[next]

        stream_json = stream.to_json()
        stream_json['next_stream_id'] = next_id

        cmd = {
            "id": 1,
            "jsonrpc": "2.0",
            "method": "add_stream",
            "params": {
                "handler": self.handler,
                "api_h": self.api_h,
                "port_id": self.port_id,
                "stream_id": stream_id,
                "stream": stream_json
            }
        }
        cmds_json.append(cmd)

    # generate start command
    cmds_json.append(
        self.generate_start_cmd(mult=mult, force=True, duration=duration))

    if mode == 'json':
        print((json.dumps(cmds_json,
                          indent=4,
                          separators=(',', ': '),
                          sort_keys=True)))
        return
    elif mode == 'yaml':
        print((STLProfile(stream_list).dump_to_yaml()))
        return
    elif mode == 'pkt':
        print((STLProfile(stream_list).dump_as_pkt()))
        return
    elif mode == 'native':
        print((STLProfile(stream_list).dump_to_code()))
        return

    # start simulation
    self.outfile = outfile
    self.dp_core_count = dp_core_count
    self.dp_core_index = dp_core_index
    self.is_debug = is_debug
    self.pkt_limit = pkt_limit
    self.mult = mult
    # fix: a stray trailing comma used to store (duration,) as a 1-tuple
    self.duration = duration
    self.mode = mode
    self.silent = silent

    self.__run(cmds_json)
def validate_choice(arg_name, arg, choices):
    """Raise STLError unless *arg* is None or one of *choices*."""
    if arg is not None and arg not in choices:
        # parenthesized so .format() applies to the whole message - the
        # original formatted only the second literal, leaving '{0}' raw
        raise STLError(("validate_choice: argument '{0}' " +
                        "can only be one of '{1}'").format(arg_name, choices))
def stl_map_ports(client, ports=None):
    """Discover which TRex port is connected to which.

    Sends a small latency flow-stats burst from every port and inspects
    on which port the packets arrived.

    :return: dict with keys:
        'map'     - tx_port -> rx_port (or None when undetermined)
        'bi'      - list of bi-directional (port_a, port_b) pairs
        'unknown' - ports whose peer could not be determined
    """
    # by default use all ports
    if ports is None:
        ports = client.get_all_ports()

    client.acquire(ports, force=True, sync_streams=False)

    unresolved_ports = list_difference(ports, client.get_resolved_ports())
    if unresolved_ports:
        raise STLError(
            "Port(s) {0} have unresolved destination addresses".format(
                unresolved_ports))

    stl_send_3_pkts(client, ports)

    PKTS_SENT = 5
    pgid_per_port = {}

    # flatten the active pg ids so we can pick unused ones
    active_pgids_tmp = client.get_active_pgids()
    active_pgids = []
    for key in list(active_pgids_tmp.keys()):
        active_pgids += active_pgids_tmp[key]

    base_pkt = Ether() / IP() / UDP() / ('x' * 18)
    test_pgid = 10000000

    # add latency packet per checked port
    for port in ports:
        for i in range(3):  # up to 3 attempts to grab a free pg id
            while test_pgid in active_pgids:
                test_pgid += 1
            stream = STLStream(packet=STLPktBuilder(pkt=base_pkt),
                               flow_stats=STLFlowLatencyStats(
                                   pg_id=test_pgid),
                               mode=STLTXSingleBurst(pps=1e4,
                                                     total_pkts=PKTS_SENT))
            try:
                client.add_streams(stream, [port])
            except STLError:
                # pg id may have been taken meanwhile - retry with another
                continue
            pgid_per_port[port] = test_pgid
            test_pgid += 1
            break

    if len(pgid_per_port) != len(ports):
        raise STLError('Could not add flow stats streams per port.')

    # inject
    client.clear_stats(ports,
                       clear_global=False,
                       clear_flow_stats=True,
                       clear_latency_stats=False,
                       clear_xstats=False)
    client.start(ports, mult="5%")
    client.wait_on_traffic(ports)

    stats = client.get_pgid_stats(list(
        pgid_per_port.values()))['flow_stats']

    # cleanup
    client.reset(ports)

    table = {'map': {}, 'bi': [], 'unknown': []}

    # actual mapping: a port that received more than half of the burst
    # is considered the peer
    for tx_port in ports:
        table['map'][tx_port] = None
        for rx_port in ports:
            if stats[pgid_per_port[tx_port]]['rx_pkts'][
                    rx_port] * 2 > PKTS_SENT:
                table['map'][tx_port] = rx_port

    unmapped = list(ports)
    while len(unmapped) > 0:
        port_a = unmapped.pop(0)
        port_b = table['map'][port_a]

        # if unknown - add to the unknown list
        if port_b is None:
            table['unknown'].append(port_a)
        # self-loop, due to bug?
        elif port_a == port_b:
            continue
        # bi-directional ports
        elif (table['map'][port_b] == port_a):
            unmapped.remove(port_b)
            table['bi'].append((port_a, port_b))

    return table
def verify_exclusive_arg(args_list):
    """Ensure exactly one element of *args_list* is not None."""
    provided = [x for x in args_list if x is not None]
    if len(provided) != 1:
        raise STLError(
            'exactly one parameter from {0} should be provided'.format(
                args_list))
def load_pcap(pcap_file,
              ipg_usec=None,
              speedup=1.0,
              loop_count=1,
              vm=None,
              packet_hook=None,
              split_mode=None,
              min_ipg_usec=None):
    """ Convert a pcap file with a number of packets to a list of connected streams.

    packet1->packet2->packet3 etc

    :parameters:
       pcap_file : string
            Name of the pcap file

       ipg_usec : float
            Inter packet gap in usec. If IPG is None, IPG is taken from pcap file

       speedup : float
            When reading the pcap file, divide IPG by this "speedup" factor.
            Resulting IPG is sped up by this factor.

       loop_count : uint16_t
            Number of loops to repeat the pcap file

       vm : list
            List of Field engine instructions

       packet_hook : Callable or function
            will be applied to every packet

       split_mode : str
            should this PCAP be split to two profiles based on IPs / MACs
            used for dual mode. can be 'MAC' or 'IP'

       min_ipg_usec : float
            Minumum inter packet gap in usec. Used to guard from too small IPGs.

    :return: STLProfile (or a pair of profiles when split_mode is set)
    """
    if speedup <= 0:
        raise STLError('Speedup should be positive.')
    if min_ipg_usec and min_ipg_usec < 0:
        raise STLError('min_ipg_usec should not be negative.')

    # make sure IPG is not less than 0.001 usec
    if (ipg_usec is not None and (ipg_usec < 0.001 * speedup)
            and (min_ipg_usec is None or min_ipg_usec < 0.001)):
        raise STLError(
            "ipg_usec cannot be less than 0.001 usec: '{0}'".format(
                ipg_usec))

    if loop_count < 0:
        raise STLError("'loop_count' cannot be negative")

    try:
        if split_mode is None:
            pkts = PCAPReader(pcap_file).read_all(ipg_usec, min_ipg_usec,
                                                  speedup)
            if len(pkts) == 0:
                raise STLError(
                    "'{0}' does not contain any packets".format(pcap_file))

            return STLProfile.__pkts_to_streams(pkts, loop_count, vm,
                                                packet_hook)
        else:
            pkts_a, pkts_b = PCAPReader(pcap_file).read_all(
                ipg_usec, min_ipg_usec, speedup, split_mode=split_mode)
            if not (pkts_a or pkts_b):
                raise STLError("'%s' does not contain any packets." %
                               pcap_file)
            elif not (pkts_a and pkts_b):
                raise STLError("'%s' contains only one direction." %
                               pcap_file)

            # swap if ts of first packet in b is earlier
            start_time_a = pkts_a[0][1]
            start_time_b = pkts_b[0][1]
            if start_time_b < start_time_a:
                pkts_a, pkts_b = pkts_b, pkts_a

            # get last ts
            end_time_a = pkts_a[-1][1]
            end_time_b = pkts_b[-1][1]

            # delay before the first packet of each side
            start_delay_usec = 1000
            if ipg_usec:
                start_delay_usec = ipg_usec / speedup
            if min_ipg_usec and min_ipg_usec > start_delay_usec:
                start_delay_usec = min_ipg_usec

            # pad the shorter side so both profiles end together
            end_time = max(end_time_a, end_time_b)

            profile_a = STLProfile.__pkts_to_streams(
                pkts_a,
                loop_count,
                vm,
                packet_hook,
                start_delay_usec,
                end_delay_usec=end_time - end_time_a)
            profile_b = STLProfile.__pkts_to_streams(
                pkts_b,
                loop_count,
                vm,
                packet_hook,
                start_delay_usec,
                end_delay_usec=end_time - end_time_b)

            return profile_a, profile_b

    except Scapy_Exception as e:
        raise STLError("failed to open PCAP file {0}: '{1}'".format(
            pcap_file, str(e)))
def __init__(self, pcap_file):
    """Validate that the pcap path exists and remember it for reading."""
    if not os.path.isfile(pcap_file):
        raise STLError("File '{0}' does not exist.".format(pcap_file))
    self.pcap_file = pcap_file
def __init__(
        self,
        name=None,
        packet=None,
        mode=STLTXCont(pps=1),
        enabled=True,
        self_start=True,
        isg=0.0,
        flow_stats=None,
        next=None,
        stream_id=None,
        action_count=0,
        random_seed=0,
        mac_src_override_by_pkt=None,
        mac_dst_override_mode=None,  # see STLStreamDstMAC_xx
        dummy_stream=False):
    """ Stream object

    :parameters:
        name : string
            Name of the stream. Required if this stream is dependent on
            another stream, and another stream needs to refer to this
            stream by name.

        packet : STLPktBuilder
            see :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLPktBuilder`
            Template packet and field engine program. Example:
            packet = STLPktBuilder(pkt = base_pkt/pad)

        mode : :class:`trex_stl_lib.trex_stl_streams.STLTXCont`
            or :class:`trex_stl_lib.trex_stl_streams.STLTXSingleBurst`
            or :class:`trex_stl_lib.trex_stl_streams.STLTXMultiBurst`

        enabled : bool
            Indicates whether the stream is enabled.

        self_start : bool
            If False, another stream activates it.

        isg : float
            Inter-stream gap in usec. Time to wait until the stream sends
            the first packet.

        flow_stats : :class:`trex_stl_lib.trex_stl_streams.STLFlowStats`
            Per stream statistic object. See: STLFlowStats

        next : string
            Name of the stream to activate.

        stream_id :
            For use by HLTAPI.

        action_count : uint16_t
            If there is a next stream, number of loops before stopping.
            Default: 0(unlimited).

        random_seed : uint16_t
            If given, the seed for this stream will be this value.
            Useful if you need a deterministic random value.

        mac_src_override_by_pkt : bool
            Template packet sets src MAC.

        mac_dst_override_mode=None : STLStreamDstMAC_xx
            Template packet sets dst MAC.

        dummy_stream : bool
            For delay purposes, will not be sent.
    """
    # type checking
    validate_type('mode', mode, STLTXMode)
    validate_type('packet', packet, (type(None), CTrexPktBuilderInterface))
    validate_type('flow_stats', flow_stats,
                  (type(None), STLFlowStatsInterface))
    validate_type('enabled', enabled, bool)
    validate_type('self_start', self_start, bool)
    validate_type('isg', isg, (int, float))
    validate_type('stream_id', stream_id, (type(None), int))
    validate_type('random_seed', random_seed, int)
    validate_type('dummy_stream', dummy_stream, bool)

    if (type(mode) == STLTXCont) and (next is not None):
        raise STLError("Continuous stream cannot have a next stream ID")

    # tag for the stream and next - can be anything
    self.name = name
    self.next = next

    # save for easy construct code from stream object
    self.mac_src_override_by_pkt = mac_src_override_by_pkt
    self.mac_dst_override_mode = mac_dst_override_mode
    self.id = stream_id

    self.fields = {}

    int_mac_src_override_by_pkt = 0
    int_mac_dst_override_mode = 0

    # when not given explicitly, derive the src-MAC override from
    # whether the template packet carries a non-default src MAC
    if mac_src_override_by_pkt is None:
        int_mac_src_override_by_pkt = 0
        if packet:
            if packet.is_default_src_mac() is False:
                int_mac_src_override_by_pkt = 1
    else:
        int_mac_src_override_by_pkt = int(mac_src_override_by_pkt)

    # same derivation for the dst-MAC override mode
    if mac_dst_override_mode is None:
        int_mac_dst_override_mode = 0
        if packet:
            if packet.is_default_dst_mac() is False:
                int_mac_dst_override_mode = STLStreamDstMAC_PKT
    else:
        int_mac_dst_override_mode = int(mac_dst_override_mode)

    self.is_default_mac = not (int_mac_src_override_by_pkt
                               or int_mac_dst_override_mode)

    # pack src-override bit, dst-override mode and the dummy flag
    self.fields['flags'] = (int_mac_src_override_by_pkt & 1) + \
        ((int_mac_dst_override_mode & 3) << 1) + (int(dummy_stream) << 3)

    self.fields['action_count'] = action_count

    # basic fields
    self.fields['enabled'] = enabled
    self.fields['self_start'] = self_start
    self.fields['isg'] = isg

    if random_seed != 0:
        self.fields['random_seed'] = random_seed  # optional

    # mode
    self.fields['mode'] = mode.to_json()
    self.mode_desc = str(mode)

    # packet
    self.fields['packet'] = {}
    self.fields['vm'] = {}

    if not packet:
        packet = STLPktBuilder(pkt=Ether() / IP())
        if dummy_stream:
            self.packet_desc = 'Dummy'

    self.scapy_pkt_builder = packet
    # packet builder
    packet.compile()

    # packet and VM
    self.fields['packet'] = packet.dump_pkt()
    self.fields['vm'] = packet.get_vm_data()

    self.pkt = base64.b64decode(self.fields['packet']['binary'])

    # this is heavy, calculate lazy
    self.packet_desc = None

    if not flow_stats:
        self.fields['flow_stats'] = STLFlowStats.defaults()
    else:
        self.fields['flow_stats'] = flow_stats.to_json()
def to_code(self):
    """ Convert to Python code as profile

    Rebuilds an equivalent Python source snippet for this stream:
    extra scapy imports, the packet expression, the field-engine VM
    and the STLStream constructor call.
    """
    layer = Ether(self.pkt)
    layer.hide_defaults()  # remove fields with default values
    imports_arr = []
    layers_commands = []

    # remove checksums, add imports if needed
    while layer:
        layer_class = layer.__class__.__name__
        if layer_class not in vars(scapy.layers.all):
            # custom import
            found_import = False
            for module_path, module in list(sys.modules.items()):
                if not module_path.startswith(
                        ('scapy.layers', 'scapy.contrib')):
                    continue
                import_string = 'from %s import %s' % (module_path,
                                                       layer_class)
                if import_string in imports_arr:
                    # already present in extra imports
                    found_import = True
                    break
                if hasattr(module, layer_class):
                    # add as extra import
                    imports_arr.append(import_string)
                    found_import = True
                    break
            if not found_import:
                raise STLError('Could not determine import of layer %s' %
                               layer.name)

        # remove checksums
        for chksum_name in ('cksum', 'chksum'):
            if chksum_name in layer.fields:
                del layer.fields[chksum_name]

        # remove Paddings(FCS etc.)
        # fix: isinstance(layer, 'Padding') raised TypeError (arg 2 must
        # be a type) - compare the class name instead
        if layer_class == 'Padding':
            break

        payload = layer.payload
        layer.remove_payload()

        if isinstance(layer, Raw):
            payload_data = bytes(layer)
            # compact form Raw('x' * 100) etc.
            if payload_data == payload_data[0:1] * len(payload_data):
                layer_command = '%s * %s)' % (Raw(
                    payload_data[0:1]).command().rstrip(')'),
                    len(payload_data))
            else:
                layer_command = layer.command()
            layers_commands.append(layer_command)
            break

        layers_commands.append(layer.command())
        layer = payload

    imports = '\n'.join(imports_arr)
    packet_code = 'packet =(' + \
        (' / \n ').join(layers_commands) + ')'

    vm_list = []
    for inst in self.fields['vm']['instructions']:
        # NOTE: the concatenated literals below are parenthesized so
        # .format() applies to the whole string - the original applied
        # it only to the last literal, emitting raw '{name}' text
        if inst['type'] == 'flow_var':
            vm_list.append(
                ("STLVmFlowVar(name='{name}', size={size}, op='{op}', init_value={init_value},"
                 +
                 "min_value={min_value}, max_value={max_value}, step={step})"
                 ).format(**inst))
        elif inst['type'] == 'write_flow_var':
            vm_list.append(
                ("STLVmWrFlowVar(fv_name='{name}', pkt_offset={pkt_offset}, "
                 + "add_val={add_value}, is_big={is_big_endian})"
                 ).format(**inst))
        elif inst['type'] == 'write_mask_flow_var':
            inst = copy.copy(inst)
            inst['mask'] = hex(inst['mask'])
            vm_list.append((
                "STLVmWrMaskFlowVar(fv_name='{name}', pkt_offset={pkt_offset}, "
                + "pkt_cast_size={pkt_cast_size}," +
                "mask={mask}, shift={shift}, add_value={add_value}, " +
                "is_big={is_big_endian})").format(**inst))
        elif inst['type'] == 'fix_checksum_ipv4':
            vm_list.append(
                "STLVmFixIpv4(offset={pkt_offset})".format(**inst))
        elif inst['type'] == 'trim_pkt_size':
            vm_list.append(
                "STLVmTrimPktSize(fv_name='{name}')".format(**inst))
        elif inst['type'] == 'tuple_flow_var':
            inst = copy.copy(inst)
            inst['ip_min'] = ltoa(inst['ip_min'])
            inst['ip_max'] = ltoa(inst['ip_max'])
            vm_list.append(
                ("STLVmTupleGen(name='{name}', ip_min='{ip_min}', " +
                 "ip_max='{ip_max}', port_min={port_min}, " +
                 "port_max={port_max}, limit_flows={limit_flows}, " +
                 "flags={flags})").format(**inst))
        elif inst['type'] == 'flow_var_rand_limit':
            vm_list.append(
                ("STLVmFlowVarRepetableRandom(name='{name}', size={size}, limit={limit}, "
                 +
                 "seed={seed}, min_value={min_value}, max_value={max_value})"
                 ).format(**inst))

    vm_code = 'vm = STLScVmRaw([' + \
        ',\n '.join(vm_list) + '])'

    # emit only the constructor arguments that differ from the defaults
    stream_params_list = []
    stream_params_list.append(
        'packet = STLPktBuilder(pkt = packet, vm = vm)')
    if default_STLStream.name != self.name:
        stream_params_list.append('name = %s' %
                                  STLStream.__add_quotes(self.name))
    if default_STLStream.fields['enabled'] != self.fields['enabled']:
        stream_params_list.append('enabled = %s' % self.fields['enabled'])
    if default_STLStream.fields['self_start'] != self.fields['self_start']:
        stream_params_list.append('self_start = %s' %
                                  self.fields['self_start'])
    if default_STLStream.fields['isg'] != self.fields['isg']:
        stream_params_list.append('isg = %s' % self.fields['isg'])
    if default_STLStream.fields['flow_stats'] != self.fields['flow_stats']:
        stream_params_list.append('flow_stats = STLFlowStats(%s)' %
                                  self.fields['flow_stats']['stream_id'])
    # fix: '__next__' was a 2to3 artifact - the attribute set in
    # __init__ is 'next' and '__next__' raised AttributeError here
    if default_STLStream.next != self.next:
        stream_params_list.append('next = %s' %
                                  STLStream.__add_quotes(self.next))
    if default_STLStream.id != self.id:
        stream_params_list.append('stream_id = %s' % self.id)
    if default_STLStream.fields['action_count'] != self.fields[
            'action_count']:
        stream_params_list.append('action_count = %s' %
                                  self.fields['action_count'])
    if 'random_seed' in self.fields:
        stream_params_list.append('random_seed = %s' %
                                  self.fields.get('random_seed', 0))
    if default_STLStream.mac_src_override_by_pkt != self.mac_src_override_by_pkt:
        stream_params_list.append('mac_src_override_by_pkt = %s' %
                                  self.mac_src_override_by_pkt)
    if default_STLStream.mac_dst_override_mode != self.mac_dst_override_mode:
        stream_params_list.append('mac_dst_override_mode = %s' %
                                  self.mac_dst_override_mode)

    mode_args = ''
    for key, value in list(self.fields['mode'].items()):
        if key not in ('rate', 'type'):
            mode_args += '%s = %s, ' % (key, value)
    mode_args += '%s = %s' % (self.fields['mode']['rate']['type'],
                              self.fields['mode']['rate']['value'])

    if self.mode_desc == STLTXCont.__str__():
        stream_params_list.append('mode = STLTXCont(%s)' % mode_args)
    elif self.mode_desc == STLTXSingleBurst().__str__():
        stream_params_list.append('mode = STLTXSingleBurst(%s)' %
                                  mode_args)
    elif self.mode_desc == STLTXMultiBurst().__str__():
        stream_params_list.append('mode = STLTXMultiBurst(%s)' % mode_args)
    else:
        raise STLError('Could not determine mode: %s' % self.mode_desc)

    stream = "stream = STLStream(" + \
        ',\n '.join(stream_params_list) + ')'
    return '\n'.join([imports, packet_code, vm_code, stream])