def __launch_ss(self, dur, log_path):
    """Run ss command and append log to file.

    Args:
        dur: The duration of the experiment.
        log_path: The path of log file.
    """
    t0 = time.time()
    t = t0
    port_cnt = sum([c.num for c in self.__conns])
    with open(log_path, 'w') as f:
        f.truncate()
    ss_ip = '[%s]' if self.__ip_mode == socket.AF_INET6 else '%s'
    ss_ip %= self.__recv
    ss_cmd = 'ss -tin "dport >= :%d and dport < :%d and dst %s" >> %s' % (
        self.__first_port, self.__first_port + port_cnt, ss_ip, log_path,)
    while t < t0 + dur:
        with open(log_path, 'a') as f:
            f.write('# %f\n' % (time.time(),))
        shell.run(ss_cmd)
        t += self.__ss_interval_second
        to_sleep = t - time.time()
        if to_sleep > 0:
            time.sleep(to_sleep)
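# A minimal, self-contained sketch (not part of transperf) of the drift-free
# sampling pattern used in __launch_ss() above: each iteration is scheduled at
# t0 + k * interval, so the time spent collecting a sample does not accumulate
# into the sampling period. The sample() callable and the values in the
# commented-out example call are hypothetical.
import time


def sample_at_fixed_interval(sample, dur, interval):
    t0 = time.time()
    t = t0
    while t < t0 + dur:
        sample()
        t += interval
        to_sleep = t - time.time()
        if to_sleep > 0:
            time.sleep(to_sleep)

# Example: collect a timestamp every 0.1 seconds for 5 seconds.
# samples = []
# sample_at_fixed_interval(lambda: samples.append(time.time()), 5, 0.1)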
def set_receive_offload(self, lro=False, gro=False):
    """Sets LRO and GRO.

    Args:
        lro: Whether to enable LRO.
        gro: Whether to enable GRO.
    """
    lro_str = 'on' if lro else 'off'
    gro_str = 'on' if gro else 'off'
    for iface in self.get_all_ifaces():
        LOG.debug('set_receive_offload: ethtool -K %s gro %s lro %s',
                  iface, gro_str, lro_str)
        shell.run('ethtool -K %s gro %s' % (iface, gro_str))
        shell.run('ethtool -K %s lro %s' % (iface, lro_str))
def end(self):
    """Writes the klog compressed file.

    See the outparser.Visitor interface.
    """
    klog_paths = []
    for _, klogf in self._klog_files.iteritems():
        # Should drop the directory prefix to have a flat tarball.
        klog_paths.append(os.path.basename(klogf.name))
        klogf.close()
    shell.run('tar -C %s -cvjf %s %s' % (
        self._exp_dir,
        os.path.join(self._exp_dir, 'kern-debug.tbz2'),
        ' '.join(klog_paths)))
def reset(self, cmd):
    """Resets the sender.

    Stops the sender thread, resets all the output directories, and kills
    netperf and tcpdump processes.

    Args:
        cmd: The command to run before starting an experiment.
    """
    self.maybe_join()
    if os.path.exists(path.get_tmp_dir()):
        shutil.rmtree(path.get_tmp_dir())
    os.makedirs(path.get_tmp_dir())
    for tool in transperf.TOOLS.values():
        for binary in tool.binaries:
            shell.run('pkill %s' % binary)
    shell.run('killall -q tcpdump')
    for iface in self.get_all_ifaces():
        shell.run('%s qdisc del dev %s root' % (path.tc(), iface))
    if cmd:
        shell.run(cmd)
def set_conns(self, conn_list, first_port):
    """Sets the connections of this sender.

    This method is called by RPC.

    Args:
        conn_list: List of connections.
        first_port: The first port this sender should use. Next ports are
            allocated contiguously.
    """
    tuple_to_burst = lambda t: transperf.burst(wait=t[0], rounds=t[1],
                                               repeat=t[2], req=t[3],
                                               res=t[4]) if t else None
    self.__conns = [transperf.conn(cc=c[0], num=c[1], start=c[2], dur=c[3],
                                   size=c[4], burst=tuple_to_burst(c[5]),
                                   params=c[6], upload=c[7], tool=c[8])
                    for c in conn_list]
    self.__conns.sort(key=lambda c: c.start)
    LOG.debug('sender on %s: connections are: %s', socket.gethostname(),
              ','.join([str(c) for c in self.__conns]))
    self.__first_port = first_port
    probed = {'cubic': True, 'reno': True}
    for c in self.__conns:
        if probed.get(c.cc):
            continue
        probed[c.cc] = True
        shell.run('rmmod tcp_%s' % c.cc)
        shell.run('modprobe tcp_%s' % c.cc)
        if not c.params:
            continue
        for p in c.params.split(','):
            p = p.strip()
            if not p:
                continue
            k, v = p.split('=')
            params_dir = self.__cc_parameters_path(c.cc)
            try:
                f = open('%s/%s' % (params_dir, k), 'w')
                f.write(v)
                f.close()
                LOG.info('set parameter %s:%s to %s', c.cc, k, v)
            except IOError:
                # Rather than crashing, warn the user in the log, ignore
                # the invalid parameter, and continue.
                LOG.warn('attempting to set invalid parameter %s:%s to %s',
                         c.cc, k, v)
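# A self-contained sketch (not part of transperf) of the tuple-to-burst
# mapping used by set_conns() above. It assumes only what the call site shows:
# a burst is either None or a 5-tuple of (wait, rounds, repeat, req, res).
# The Burst namedtuple and the sample values are hypothetical stand-ins for
# transperf.burst().
import collections

Burst = collections.namedtuple('Burst', ['wait', 'rounds', 'repeat', 'req',
                                         'res'])


def tuple_to_burst(t):
    # A falsy value (None or an empty tuple) means the connection is not
    # bursty.
    if not t:
        return None
    return Burst(wait=t[0], rounds=t[1], repeat=t[2], req=t[3], res=t[4])

# tuple_to_burst((1, 10, 2, 64, 1024))
#   -> Burst(wait=1, rounds=10, repeat=2, req=64, res=1024)
# tuple_to_burst(None) -> None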
def _stage_ifacecfg(ifacecfg, ifacecfg_dir):
    """Stages the validated interface configuration file.

    Args:
        ifacecfg: The interface config file.
        ifacecfg_dir: The staging directory.

    Returns:
        The name of the config file.

    Raises:
        RuntimeError: If the config is invalid.
    """
    # Create the staging directory before copying the config into it.
    shell.run('mkdir -p {stage}'.format(stage=ifacecfg_dir))
    shell.run('cp {cfg} {stage}'.format(cfg=ifacecfg, stage=ifacecfg_dir))
    cfgname = os.path.basename(os.path.normpath(ifacecfg))
    return cfgname
def run(cmd):
    """Wraps transperf::shell::run() with more descriptive debug info."""
    rewind = 2
    fmt = ' Shell:\t%s:%d:\t%s: Executing [%s]'
    LOG.debug(fmt, *([Utils.__file__(rewind), Utils.__line__(rewind),
                      Utils.__func__(rewind), cmd]))
    return shell.run(cmd)
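# Hedged sketch (not from this codebase) of what the Utils.__file__,
# Utils.__line__, and Utils.__func__ helpers referenced above plausibly do:
# walk the call stack with the standard inspect module so that the debug line
# reports the caller of Utils.run() (rewind=2) rather than run() itself. The
# helper names below are hypothetical.
import inspect
import os


def caller_file(rewind):
    return os.path.basename(inspect.stack()[rewind][1])


def caller_line(rewind):
    return inspect.stack()[rewind][2]


def caller_func(rewind):
    return inspect.stack()[rewind][3]

# With rewind=2, frame 0 is the helper itself, frame 1 is the wrapper that
# called it (e.g. run()), and frame 2 is the wrapper's caller.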
def run(self, cmd, use_rootns=True, nohup=False):
    """Runs the command, possibly using ssh.

    Args:
        cmd: The command.
        use_rootns: For ssh wrappers normally targeted at a container,
            specify that we must execute in the root namespace instead.
        nohup: Whether to run the command under nohup.

    Returns:
        The (stdout, stderr, returncode) tuple from shell.run().
    """
    return shell.run(self._build_executor_cmd(cmd, use_rootns, nohup))
def setup_ifb(self, do_modprobe):
    """Sets up the ifb interfaces.

    Args:
        do_modprobe: Whether we should unload/reload the ifb module. We only
            do so if we're running in baremetal/normal mode. In singleserver
            mode, virtsetup.py handles it.
    """
    ifaces = self.get_all_ifaces()
    if do_modprobe:
        shell.run('rmmod ifb')
        shell.run('modprobe ifb numifbs=%s' % len(ifaces))
    for iface in ifaces:
        iface_ifb = self.get_ifb_for_iface(iface)
        shell.run('ip link set dev %s up' % iface_ifb)
        shell.run('ifconfig %s txqueuelen 128000' % iface_ifb)
def _validate_netperf(exp):
    """Validates the netperf binary to make sure it has all the options we need.

    Args:
        exp: The experiment object.

    Returns:
        The error message, if netperf cannot run the experiment.
    """
    has_burst = [c for c in exp.conn.conn_list if c.burst_tuple()]
    if not has_burst:
        return None
    out, _, _ = shell.run(path.netperf() + ' -b 1 -w 1 -H 1 -p 1')
    if out.find('not compiled in') == -1:
        return None
    return 'netperf is not compiled with interval support'
def __init__(self, iface_cfg, singlesrv_mode, ip_mode, save_pcap,
             save_kern_debug, hosts):
    self.__singlesrv_mode = singlesrv_mode
    self.__done = True
    self.__conns = []
    self.__cmds = []
    self.__run_thread = None
    self.__ip_mode = ip_mode
    hostname = socket.gethostname()
    if hosts is not None:
        self.__ip_map = parse_ip_map(hosts)
    else:
        self.__ip_map = {}
        LOG.info('No hosts file provided, skip parsing ip map.')
    if singlesrv_mode:
        assert self.__ip_map
        assert hostname in self.__ip_map
    LOG.info('IP Address map is: %s', str(self.__ip_map))
    self.__ip_addr = (self.__ip_map[hostname]
                      if hostname in self.__ip_map
                      else socket.getaddrinfo(socket.gethostname(), 0,
                                              self.__ip_mode,
                                              socket.SOCK_STREAM,
                                              socket.IPPROTO_TCP)[0][4][0])
    LOG.info('IPAddr: %s', self.__ip_addr)
    self.__first_port = -1
    self.__recv = None
    self.__phys_ifaces = []
    self.__set_ifaces(iface_cfg)
    self.__ss_interval_second = 0.1
    self.__save_pcap = save_pcap
    self.__save_kern_debug = save_kern_debug
    if path.tc():
        shell.run('chmod a+x %s' % path.tc())
    if path.netperf():
        shell.run('chmod a+x %s' % path.netperf())
    if path.netserver():
        shell.run('chmod a+x %s' % path.netserver())
def __init__(self, iface_cfg, singlesrv_mode, ip_mode, hosts):
    self.__singlesrv_mode = singlesrv_mode
    self.__senders = []
    self.__done = True
    self.__run_thread = None
    self.__bws = None
    self.__slot = None
    self.__policer = None
    self.__buf = 0
    self.__loss = 0
    self.__oloss = 0
    self.__port_range = None
    self.__port_to_addr = dict()
    self.__cmds = []
    if hosts is not None:
        self.__ip_map = parse_ip_map(hosts)
    else:
        self.__ip_map = {}
        LOG.info('No hosts file provided, skip parsing ip map.')
    if singlesrv_mode:
        assert self.__ip_map
    self.__ip_mode = ip_mode
    self.__proto = Receiver.tc_protocol_map[ip_mode]
    self.__match = Receiver.tc_match_map[ip_mode]
    self.__prev_lro = None
    self.__prev_gro = None
    self.__bond_iface = None
    self.__phys_ifaces = []
    self.__set_ifaces(iface_cfg)
    self.setup_ifb(not self.__singlesrv_mode)
    if path.tc():
        shell.run('chmod a+x %s' % path.tc())
    if path.netperf():
        shell.run('chmod a+x %s' % path.netperf())
    if path.netserver():
        shell.run('chmod a+x %s' % path.netserver())
def __do_run(self, tools, start_ts, dur, nsenders, out_dir):
    """Runs the experiment."""
    self.__servers = []
    till_start_sec = start_ts - calendar.timegm(
        datetime.datetime.utcnow().utctimetuple())
    # Build a set of unique tools and their associated ports.
    tool_to_ports = {}
    for tool, port in zip(tools, self.__port_to_addr.keys()):
        existing = tool_to_ports.setdefault(tool, [])
        existing.append((port, self.__port_to_addr[port]))
    # Have each tool add receiver commands to support the senders.
    for tool, ports in tool_to_ports.iteritems():
        toolobj = transperf.TOOLS[tool]
        toolobj.options_dict['ip_mode'] = (
            '-6' if self.__ip_mode == socket.AF_INET6 else '-4')
        for cmd in transperf.TOOLS[tool].receiver_cmds(ports, till_start_sec):
            proc = shell.bg(cmd)
            self.__servers.append(proc)
            if proc.poll():
                raise RuntimeError('cannot run ' + cmd)
    if not self.__servers:
        raise RuntimeError('no server to run')

    LOG.debug('creating commands')
    if self.__qdisc_noop(nsenders):
        # If there is no RTT, BW, nor policer, don't install any qdisc.
        cmds = []
    else:
        # Set up the root qdiscs.
        for iface in self.get_all_ifaces():
            # Skip setting up eth0 and ifb0 if bandwidth is a noop.
            if iface == self.get_bond_iface() and self.__bw_qdisc_noop():
                continue
            iface_ifb = self.get_ifb_for_iface(iface)
            _, err, _ = shell.run('''
                %(tc)s qdisc replace dev %(iface)s handle 1: root htb
                %(tc)s qdisc replace dev %(iface)s handle ffff: ingress
                %(tc)s class replace dev %(iface)s parent 1: classid 1:1 \
                    htb rate 100Gbit
            ''' % {
                'tc': path.tc(),
                'iface': iface,
            })
            # Some tc versions print 'Success' to stderr.
            if any(l and l != 'RTNETLINK answers: Success'
                   for l in err.split('\n')):
                raise RuntimeError('Error in setting up %s: %s' % (iface,
                                                                   err))
            _, err, _ = shell.run('''
                %(tc)s qdisc replace dev %(iface)s handle 1: root htb
                %(tc)s class replace dev %(iface)s parent 1: classid 1:1 \
                    htb rate 100Gbit
            ''' % {
                'tc': path.tc(),
                'iface': iface_ifb,
            })
            if any(l and l != 'RTNETLINK answers: Success'
                   for l in err.split('\n')):
                raise RuntimeError('Error setting up %s: %s' % (iface_ifb,
                                                                err))
        # Generate the commands and their wait times before starting the
        # loop.
        cmds = self.__cmds
        cmds += self.__bw_cmds()
        cmds += self.__rtt_cmds(nsenders)
        cmds += self.__filter_cmds(nsenders)
        cmds += self.__policer_cmds()
        cmds.sort(key=lambda c: c[1])

    for cmd in cmds:
        LOG.debug('at %s will run %s', cmd[1], cmd[0])
    cmds_at_zero = [cmd for cmd in cmds if not cmd[1]]
    cmds_rest = [cmd for cmd in cmds if cmd[1]]
    # Run all the commands that should be run at time 0.
    for cmd in cmds_at_zero:
        shell.run(cmd[0])

    now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
    sdur = start_ts - now
    LOG.debug('sleeping for %s seconds', sdur)
    if start_ts > now:
        time.sleep(start_ts - now)

    now = 0.0
    # Run the commands that have a later deadline.
    for cmd in cmds_rest:
        if cmd[1] < now:
            LOG.warning('command %s was run after its deadline', cmd)
        if cmd[1] > now:
            LOG.debug('sleeping from %s til %s', now, cmd[1])
            time.sleep(cmd[1] - now)
            now = cmd[1]
        shell.run(cmd[0])

    end_time = datetime.datetime.utcnow().utctimetuple()
    delta = calendar.timegm(end_time) - start_ts
    if delta < dur:
        time.sleep(dur - delta)

    LOG.info('saving qdisc state in %s', out_dir)
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)
    # Save qdisc stats.
    tcs = '\n'.join([shell.run(path.tc() + ' -d -s -p qdisc show')[0],
                     shell.run(path.tc() + ' -d -s -p class show')[0],
                     shell.run(path.tc() + ' -d -s -p filter show')[0]])
    tcf = open(os.path.join(out_dir, 'tc.out'), 'w')
    tcf.write(tcs)
    tcf.close()

    hostname = socket.gethostname()
    if self.__singlesrv_mode:
        assert hostname in self.__ip_map
    if hostname in self.__ip_map:
        rcv_ip = self.__ip_map[hostname]
    else:
        rcv_ip = socket.getaddrinfo(hostname, 0, self.__ip_mode,
                                    socket.SOCK_STREAM,
                                    socket.IPPROTO_TCP)[0][4][0]
    ipf = open(os.path.join(out_dir, 'recv.info'), 'w')
    ipf.write(rcv_ip)
    ipf.close()
def reset(self, cmd):
    """Cleans all the settings and reinitializes the receiver.

    Args:
        cmd: The command to run before starting an experiment.
    """
    for tool in transperf.TOOLS.values():
        for binary in tool.binaries:
            shell.run('pkill %s' % binary)
    shell.run('killall -q tcpdump')
    shell.run(path.tc() + ' qdisc show')
    for iface in self.get_all_ifaces():
        iface_ifb = self.get_ifb_for_iface(iface)
        for dev in [iface, iface_ifb]:
            shell.run(path.tc() + ' filter del dev ' + dev +
                      ' pref 10 parent ffff:')
            shell.run(path.tc() + ' filter del dev ' + dev + ' pref 10')
            shell.run(path.tc() + ' qdisc del dev ' + dev + ' ingress')
            shell.run(path.tc() + ' qdisc del dev ' + dev + ' clsact')
            shell.run(path.tc() + ' qdisc del dev ' + dev + ' root')
    if cmd:
        shell.run(cmd)
    self.__bws = None
    self.__policer = None
    self.__buf = 0
    self.__loss = 0
    self.__oloss = 0
    self.__port_range = None
    self.__port_to_addr = dict()
def get_init_pid_from_unshare(unshare_pid):
    """Returns the PID of the init process forked by the given unshare."""
    unshare_pid = str(unshare_pid)
    output = shell.run('pgrep -P %s' % unshare_pid)[0].splitlines()
    return output[0]
def __do_run(self, start_ts, dur, out_dir):
    """Runs the experiment.

    Args:
        start_ts: When to start the experiment.
        dur: The duration of the experiment.
        out_dir: The output directory.
    """
    # We wait for 1 second in netperf to establish the control channel.
    dur += 1
    tcpdump_procs, pcap_files = self.__launch_tcpdump()
    self.__truncate_log()
    now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
    if now < start_ts:
        LOG.debug('sleeping for %s seconds', start_ts - now)
        time.sleep(start_ts - now)
    LOG.info('starting at %s', datetime.datetime.now())

    ss_thread, ss_log_path = self.launch_ss(dur)
    wait = 0
    live_conns = []
    port = self.__first_port
    tasks = self.__conns + self.__cmds
    tasks.sort(key=lambda t: t.start)
    for t in tasks:
        if t.start > wait:
            delta = t.start - wait
            # TODO(soheil): This may drift. Use an absolute TS instead?
            LOG.info('sleeping til the next connection for %s seconds',
                     delta)
            time.sleep(delta)
            wait += delta
        if isinstance(t, transperf.Conn):
            LOG.info('starting connection %s', t)
            n = t.num
            while n:
                # Make sure the duration of netperf is always 1+ seconds.
                cmd = t.tool.sender_cmd(t, self.__recv, port,
                                        max(1, dur - wait), self.__ip_addr)
                LOG.info('running %s', cmd)
                np_proc = shell.bg(cmd)
                live_conns.append((t, np_proc, port, t.tool))
                port += 1
                n -= 1
        elif isinstance(t, transperf.MachineCommand):
            shell.bg(t.cmd)

    # Wait until the end of the experiment.
    if wait < dur:
        time.sleep(dur - wait)
    ss_thread.join()

    # Collect results.
    LOG.info('saving results in %s', out_dir)
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    # Kill all the tool processes and collect their outputs.
    conn_infos = ''
    tool_stats = ''
    for conn, np_proc, port, tool in live_conns:
        out, err, _ = shell.wait(np_proc)
        if err:
            # TODO(soheil): Clean up the output directories.
            LOG.error('error in netperf of %s: %s', conn, err)
        throughput = tool.throughput(out)
        tool_stats += '%s of %s:\n%s\n' % (tool, conn, out)
        conn_infos += '%d=%s,%s,%s,%s,%s,%s,%s\n' % (port, self.__ip_addr,
                                                     conn.tool.name(),
                                                     conn.cc, conn.start,
                                                     conn.dur, throughput,
                                                     conn.params)
    LOG.debug('experiment successfully concluded')

    npf = open(os.path.join(out_dir, 'tool.out'), 'w')
    npf.write(tool_stats)
    npf.close()

    cif = open(os.path.join(out_dir, 'conn.info'), 'w')
    cif.write(conn_infos)
    cif.close()

    if ss_log_path:
        shutil.move(ss_log_path, out_dir)

    # Save tcpdump.
    time.sleep(1)
    for proc in tcpdump_procs:
        shell.terminate(proc)
    for f in pcap_files:
        shutil.move(f, out_dir)

    # Save module parameters and sysctl.
    mod_params = ''
    for cc in set([c.cc for c in self.__conns]):
        params_dir = self.__cc_parameters_path(cc)
        mod_params += shell.run('grep . %s/*' % params_dir)[0]
        mod_params += '\n'
    modf = open(os.path.join(out_dir, 'mod.out'), 'w')
    modf.write(mod_params)
    modf.close()

    sysctl = shell.run('sysctl -a')[0]
    sysf = open(os.path.join(out_dir, 'sys.out'), 'w')
    sysf.write(sysctl)
    sysf.close()

    # Save kernel debug logs if commanded to do so.
    if self.__save_kern_debug:
        self.__save_kernel_debug_logs(out_dir)
    else:
        LOG.info('Not saving kernel debug log per user request.')
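# A hedged sketch (not part of transperf) of how a line in the conn.info file
# written by __do_run() above could be parsed back. Per the write site, each
# line has the form:
#   <port>=<ip>,<tool>,<cc>,<start>,<dur>,<throughput>,<params>
# where <params> may itself contain commas (see set_conns()), so only the
# first six commas are treated as separators. The sample line in the comment
# below is hypothetical.
def parse_conn_info_line(line):
    port, rest = line.strip().split('=', 1)
    ip, tool, cc, start, dur, throughput, params = rest.split(',', 6)
    return int(port), ip, tool, cc, start, dur, throughput, params

# parse_conn_info_line('6300=10.0.0.2,netperf,bbr,0,30,940.1,')
#   -> (6300, '10.0.0.2', 'netperf', 'bbr', '0', '30', '940.1', '')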
def main():
    invocation_dir = os.getcwd()
    source_dir = os.path.dirname(os.path.realpath(__file__))
    out_dir_rel = _timestamp_dirname()
    out_dir = os.path.join(invocation_dir, out_dir_rel)
    opts, args = getopt.getopt(sys.argv[1:], 'vo:b:c:s:nt:xyhq:V', [
        'rport=', 'sport=', 'help', 'skip_pcap_scan', 'sslog_interval=',
        'ifacecfg=', 'bridge=', 'ssrv=', 'ssrv_local', 'save=', 'genonly',
        'gen_from_file=', 'virtcleanup', 'ip_mode=', 'no_pcap',
        'no_kern_debug',
    ])
    # Set up logging early.
    log.setup_logging(opts)

    # These are arbitrary ports that must be open between test machines.
    ip_mode = 4
    rport, sport = 6200, 6300
    sync = True
    has_xplot = False
    binary_dirs = [invocation_dir]
    if invocation_dir != source_dir:
        binary_dirs.append(source_dir)
    data_search_path = []
    open_page = True
    skip_pcap_scan = False
    sslog_interval = 0.1
    singlesrv_cfg = {
        'enabled': False,
        'bridge': virtsetup.Constants.DEFAULT_BRIDGE,
        'host': None,
        'local': False,
        'nodes': [],
        'cleanup': False,
    }
    ifacecfg = None
    save_dir = None
    gen_only = False
    gen_from_file = None
    save_pcap = True
    save_kern_debug = True
    # cfgs includes all configs, both from files and from the command line.
    cfgs = []
    for opt, val in opts:
        if opt == '-V':
            print("transperf {}".format(transperf.__version__))
            return
        if opt == '-v':
            continue
        elif opt == '--ip_mode':
            ip_mode = int(val)
            assert ip_mode in [4, 6], '--ip_mode must be in [4 (default), 6]'
        elif opt == '--virtcleanup':
            singlesrv_cfg['cleanup'] = True
        elif opt == '-o':
            out_dir_rel = val
            out_dir = abspath(out_dir_rel)
        elif opt == '-b':
            binary_dirs.extend([abspath(path) for path in val.split(',')])
            data_search_path.extend(binary_dirs)
        elif opt == '-c':
            cfg_paths = [abspath(path) for path in val.split(',')]
            for path in cfg_paths:
                if os.path.isfile(path):
                    data_search_path.append(os.path.dirname(path))
                    cfgs.append(_read_file(path))
                    continue
                data_search_path.append(path)
                cfgs += [
                    _read_file(cfg_file)
                    for cfg_file in transperf.path.list_files(path)
                ]
        elif opt == '-s':
            cfgs.append(val)
        elif opt == '-n':
            sync = False
        elif opt == '--rport':
            rport = int(val)
        elif opt == '--sport':
            sport = int(val)
        elif opt == '-x':
            has_xplot = True
        elif opt == '-q':
            open_page = False
        elif opt == '--skip_pcap_scan':
            skip_pcap_scan = True
        elif opt == '--no_pcap':
            save_pcap = False
            skip_pcap_scan = True  # Since we have no other way to get metrics.
        elif opt == '--no_kern_debug':
            save_kern_debug = False
        elif opt == '--genonly':
            gen_only = True
        elif opt == '--gen_from_file':
            gen_from_file = '-' if val == '-' else (
                os.path.abspath(os.path.expanduser(val)))
        elif opt == '--sslog_interval':
            sslog_interval = float(val)
        elif opt == '--ifacecfg':
            ifacecfg = abspath(val)
        elif opt == '--ssrv':
            if '_' in val:
                assert False, 'Cannot have underscore in hostname for --ssrv.'
            if singlesrv_cfg['local']:
                assert False, ('Cannot set both --ssrv and --ssrv_local '
                               'at once.')
            singlesrv_cfg['enabled'] = True
            singlesrv_cfg['host'] = val
        elif opt == '--ssrv_local':
            if singlesrv_cfg['host']:
                assert False, ('Cannot set both --ssrv and --ssrv_local '
                               'at once.')
            singlesrv_cfg['enabled'] = True
            singlesrv_cfg['local'] = True
        elif opt == '--save':
            save_dir = abspath(val)
        elif opt == '--bridge':
            curr_val = singlesrv_cfg['bridge']
            singlesrv_cfg['bridge'] = val if val is not None else curr_val
        elif opt == '-h' or opt == '--help':
            print_usage()
            return -1
        else:
            # Catch-all for unexpected flags.
            print_usage()
            return -1

    # After processing the input paths, we change directory so we can
    # stage/invoke other source files within transperf. Special case though:
    # we may be invoking from a zip file. In that case, it's hard to know
    # what directory contains the unzipped source, so we just don't bother.
    if os.path.isdir(source_dir):
        os.chdir(source_dir)

    # Short circuit if we're only generating an output webpage for a
    # previous test run.
    if gen_only:
        _mkdir_if_not_exists(out_dir)
        return _process_output(out_dir, has_xplot, open_page, skip_pcap_scan,
                               gen_from_file)
    else:
        assert gen_from_file is None, ('--gen_from_file only meaningful '
                                       'if --genonly specified.')

    if not args:
        print_usage()
        sys.exit(-1)

    if not cfgs:
        raise RuntimeError('no configuration found')
    LOG.debug('%d config(s) loaded: %s', len(cfgs), cfgs)

    _mkdir_if_not_exists(out_dir)
    _dump_cfgs(cfgs, data_search_path, out_dir)
    ifacecfg_dir = os.path.join(out_dir, transperf.path.IFACE_CFG_DIR)
    _mkdir_if_not_exists(ifacecfg_dir)
    ifacecfg_rel = _validate_and_stage_ifacecfg(ifacecfg, ifacecfg_dir)

    # Grab receiver and sender hostnames.
    recvh, _, recvh_internal = args[0].partition('/')
    sendhs, _, sendhs_internal = [
        list(t) for t in zip(*[_.partition('/') for _ in args[1:]])
    ]

    # Check for duplicates.
    nodeset = set([recvh] + sendhs)
    if len(nodeset) != len(sendhs) + 1:
        # There was repetition, which we do not support.
        raise RuntimeError('There are repeated nodes in the arguments!')

    # Are we using ssh or are we local? For debug statements.
    session_type = 'ssh'
    if singlesrv_cfg['enabled']:
        if singlesrv_cfg['local']:
            session_type = 'local'
        else:
            assert singlesrv_cfg['host']

    if singlesrv_cfg['enabled']:
        # Strip usernames for single-server mode; we must use root.
        recvh = recvh.split('@')[-1]
        sendhs = [sendh.split('@')[-1] for sendh in sendhs]
        nodes = [recvh] + sendhs
        singlesrv_cfg['nodes'] = nodes
        singlesrv_cfg['scratchd'] = os.path.join(
            transperf.path.get_transperf_home(), 'containers')
        singlesrv_cfg['out_dir'] = os.path.join(singlesrv_cfg['scratchd'],
                                                out_dir_rel)
        node_exec_cfgs = get_container_node_exec_cfgs(singlesrv_cfg, nodes)
    else:
        nodes = [recvh] + sendhs
        nodes_internal = [recvh_internal] + sendhs_internal
        node_exec_cfgs = {
            node: {'ssh': node, 'int_ip': ip, 'cfg': None}
            for node, ip in zip(nodes, nodes_internal)
        }

    LOG.info('creating %s session to %s', session_type, recvh)
    r_exec = executor.Executor(
        node_exec_cfgs[recvh]['ssh'],
        internal_ip=node_exec_cfgs[recvh]['int_ip'],
        container_params=node_exec_cfgs[recvh]['cfg'])
    LOG.info('creating %s sessions to %s', session_type, sendhs)
    s_execs = [
        executor.Executor(node_exec_cfgs[sendh]['ssh'],
                          internal_ip=node_exec_cfgs[sendh]['int_ip'],
                          container_params=node_exec_cfgs[sendh]['cfg'])
        for sendh in sendhs
    ]

    _, staged_src = _stage_transperf_src()
    if singlesrv_cfg['enabled']:
        # In this case all executor sessions just point to the same box,
        # so we can just reuse r_exec in use_rootns mode.
        _init_containers(
            r_exec, singlesrv_cfg, nodes, staged_src,
            os.path.join(ifacecfg_dir, ifacecfg_rel)
            if ifacecfg_rel is not None else None,
            ip_mode)
        # Copy the container hosts file over to the test output directory.
        singlesrv_cfg['hosts'] = os.path.join(singlesrv_cfg['out_dir'],
                                              'hosts')

    if singlesrv_cfg['enabled']:
        if singlesrv_cfg['local']:
            # launch.py and the send/recv/orch processes are all on the same
            # node, and out_dir is accessible from all of them.
            out_dir_for_servers = out_dir
        else:
            # launch.py is local but send/recv/orch are remote; point them
            # to their own directories.
            out_dir_for_servers = os.path.join(
                singlesrv_cfg['out_dir'], 'fs', '{node}',
                transperf.path.EXP_OUT_DIR.lstrip('/'))
    else:
        out_dir_for_servers = os.path.join(transperf.path.TRANSPERF_TMP)

    _init_servers(r_exec, s_execs, binary_dirs, out_dir, sync, staged_src,
                  singlesrv_cfg)
    _start_servers(r_exec, s_execs, rport, sport, sslog_interval,
                   ifacecfg_rel, singlesrv_cfg, ip_mode, save_pcap,
                   save_kern_debug, out_dir_for_servers)
    _collect_results(r_exec, s_execs, out_dir, singlesrv_cfg)
    retcode = _process_output(out_dir, has_xplot, open_page, skip_pcap_scan)

    # Save a copy of the results (e.g. for debugging, where out_dir
    # vanishes).
    if save_dir is not None:
        LOG.info('Saving a copy of results to %s', save_dir)
        _mkdir_if_not_exists(save_dir)
        shell.run('cp -r {out} {save}'.format(out=out_dir,
                                              save=save_dir + os.path.sep))
    else:
        LOG.info('Saving a copy of results not requested, skipping.')

    # Clean up the virtual environment if specified/relevant.
    if singlesrv_cfg['enabled'] and singlesrv_cfg['cleanup']:
        tgt_dir = singlesrv_cfg['out_dir']
        cmd = shell.py_cmd([], 'virtcleanup.py', '-v', '-d', tgt_dir)
        exec_ctx = shell if singlesrv_cfg['local'] else r_exec
        out, err, returncode = exec_ctx.run(cmd)
        LOG.info('Cleanup output: [%d] [%s] stderr: [%s]', returncode, out,
                 err)
    return retcode
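# Hedged usage sketch based only on the argument handling in main() above;
# the hostnames, config file, and output directory are hypothetical. The
# first positional argument is the receiver, the remaining arguments are
# senders, and each may optionally carry an internal IP after a '/'
# (e.g. receiver-host/10.0.0.2):
#
#   python launch.py -v -c my_experiment.py -o /tmp/transperf_out \
#       receiver-host sender-host-1 sender-host-2
#
#   python launch.py --ssrv_local -c my_experiment.py node0 node1 node2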
def gen_xplots(data_dir):
    """Generates xplots for all the experiments in the data directory."""
    for _, _, _, _, exp_dir in cfgutil.exps(data_dir):
        xpl_paths = []
        conn_info = outparser.ConnInfo([
            os.path.join(d, f)
            for d, f in all_files(exp_dir, name='conn.info')
        ])
        rcv_ip = outparser.RecvInfo(os.path.join(exp_dir, 'R',
                                                 'recv.info')).ip
        ports = conn_info.ports()
        all_lines = []
        procs = []
        for d, f in all_files(exp_dir, regex=r'.*\.pcap$'):
            if d == exp_dir:
                continue
            procs.append(
                shell.bg('tcptrace -CRSzxy --output_dir="%s" "%s"' %
                         (d, os.path.join(d, f))))
        for p in procs:
            shell.wait(p)
        for d, f in all_files(exp_dir, regex=r'.*\.pcap$'):
            for xd, xf in all_files(d, regex=r'.*\.xpl$'):
                # Only process time sequence graphs.
                if xf.find('_tsg') == -1:
                    continue
                xplf = open(os.path.join(xd, xf))
                lines = xplf.readlines()
                # The first 3 lines in the xplot are the title. The last
                # line is the draw command. The rest (3:-1) is data. We save
                # the rest in all_lines in order to create one xplot that
                # contains the time sequence graphs of all flows.
                all_lines += lines[3:-1]
                # Parse the ip and port from the xplot's title. Note that
                # the addresses may be either IPv4 or IPv6.
                parts = lines[2].split('_==>_')[0].split(':')
                ip_base = ':'.join(parts[:-1])
                port = int(parts[-1])
                try:
                    ip = socket.getaddrinfo(ip_base, 0, socket.AF_INET,
                                            socket.SOCK_STREAM,
                                            socket.IPPROTO_TCP)[0][4][0]
                except socket.gaierror:
                    ip = socket.getaddrinfo(ip_base, 0, socket.AF_INET6,
                                            socket.SOCK_STREAM,
                                            socket.IPPROTO_TCP)[0][4][0]
                # If the ip and port are not from this experiment, ignore
                # this file.
                if ip == rcv_ip or port not in ports:
                    continue
                # Rewrite the title of the xplot as:
                #   ==> CC -- IP:PORT
                addr, _, cc, _, _, _, _ = conn_info.conn_info(port)
                lines[2] = '==>%s -- %s:%s\n' % (cc, addr, port)
                # Save the file.
                xpath = os.path.join(xd, 'out-%s.xpl' % port)
                xpl_paths.append(xpath)
                oxplf = open(xpath, 'w')
                oxplf.writelines(lines)
                oxplf.close()
        # Prepend the title to all_lines and append the draw command (ie,
        # go).
        all_lines = (['dtime signed\n', 'title\n', '===> All flows\n'] +
                     all_lines + ['go'])
        axpath = os.path.join(exp_dir, 'out-all.xpl')
        xpl_paths.append(axpath)
        axplf = open(axpath, 'w')
        axplf.writelines(all_lines)
        axplf.close()
        shell.run('tar -C %s -cvjf %s %s' %
                  (exp_dir, os.path.join(exp_dir, 'xplots.tbz2'),
                   ' '.join([os.path.relpath(p, exp_dir)
                             for p in xpl_paths])))