def _prepare_client(self, flow):
    """Build and schedule an IperfClient job on the flow's generator host.

    Raises:
        RecipeError: on an unsupported flow type, a negative cpupin, or
            cpupin combined with multiple parallel streams.
    """
    generator = flow.generator
    params = {
        "server": ipaddress(flow.receiver_bind),
        "duration": flow.duration,
    }

    # Map recipe flow types onto iperf3 client switches; tcp_stream is
    # iperf3's default mode and needs no extra flag.
    proto_flags = {
        "tcp_stream": None,
        "udp_stream": "udp",
        "sctp_stream": "sctp",
    }
    if flow.type not in proto_flags:
        raise RecipeError("Unsupported flow type '{}'".format(flow.type))
    flag = proto_flags[flow.type]
    if flag is not None:
        params[flag] = True

    if flow.cpupin is not None:
        if flow.cpupin < 0:
            raise RecipeError("Negative perf cpupin value provided.")
        if flow.parallel_streams != 1:
            raise RecipeError("Unsupported combination of single cpupin "
                              "with parallel perf streams.")
        params["cpu_bind"] = flow.cpupin

    if flow.parallel_streams > 1:
        params["parallel"] = flow.parallel_streams
    if flow.msg_size:
        params["blksize"] = flow.msg_size

    return generator.prepare_job(IperfClient(**params),
                                 job_level=ResultLevel.NORMAL)
def _prepare_client(self, flow: Flow) -> Job:
    """Build and schedule a NeperClient job on the flow's generator host.

    Raises:
        RecipeError: on a negative cpupin or cpupin combined with
            multiple parallel streams.
    """
    generator = flow.generator
    params = {
        "workload": flow.type,
        "server": ipaddress(flow.receiver_bind),
        "test_length": flow.duration,
    }

    if flow.cpupin is not None:
        if flow.cpupin < 0:
            raise RecipeError("Negative perf cpupin value provided.")
        if flow.parallel_streams != 1:
            raise RecipeError("Unsupported combination of single cpupin "
                              "with parallel perf streams.")
        params["cpu_bind"] = flow.cpupin

    # TODO: parallel_streams is deliberately not forwarded to neper yet.
    # NeperBase exposes num_threads/num_flows options, but legacy lnst
    # never used perf_parallel_streams for real parallelism either, so
    # no mapping is configured for now.

    if flow.msg_size:
        # neper uses a request/response model; apply the size to both.
        params["request_size"] = flow.msg_size
        params["response_size"] = flow.msg_size

    return generator.prepare_job(NeperClient(**params),
                                 job_level=ResultLevel.NORMAL)
def _set_cpupin_params(self, params, cpupin): if cpupin is not None: for cpu in cpupin: if cpu < 0: raise RecipeError("Negative perf cpupin value provided.") # at the moment iperf does not support pinning to multiple cpus # so pin to the first cpu specified in the list if len(cpupin) > 1: raise RecipeError("Cannot pin neper to the specified list "\ "of cpus due to use not supporting it with neper.") params["cpu_bind"] = cpupin[0]
def _prepare_server(self, flow):
    """Build and schedule a one-off IperfServer job on the receiver host.

    Raises:
        RecipeError: on a negative cpupin or cpupin combined with
            multiple parallel streams.
    """
    receiver = flow.receiver
    params = {
        "bind": ipaddress(flow.receiver_bind),
        "oneoff": True,
    }

    if flow.cpupin is not None:
        if flow.cpupin < 0:
            raise RecipeError("Negative perf cpupin value provided.")
        if flow.parallel_streams != 1:
            raise RecipeError("Unsupported combination of single cpupin "
                              "with parallel perf streams.")
        params["cpu_bind"] = flow.cpupin

    return receiver.prepare_job(IperfServer(**params),
                                job_level=ResultLevel.NORMAL)
def netperf_run(self, netserver, netperf, perfrepo_result=None):
    """Run netserver on host m1 and netperf on host m2, optionally
    filling and saving a perfrepo result.

    Args:
        netserver: server job started in the background on m1.
        netperf: client job run on m2 against the server.
        perfrepo_result: optional perfrepo result template; when given,
            it is filled from the netperf run and saved via perf_api.

    Returns:
        Tuple of (netperf result data, background netserver process).

    Raises:
        RecipeError: if perfrepo_result is given but the recipe has no
            perf_api attribute.
    """
    srv_proc = self.matched.m1.run(netserver, bg=True)
    if perfrepo_result:
        if not hasattr(self, 'perf_api'):
            raise RecipeError("no class variable called perf_api")
        # baseline is currently unused; applying it is still TODO below
        baseline = self.perf_api.get_baseline_of_result(perfrepo_result)
        #TODO:
        #netperf_baseline_template(netperf, baseline)

    # give the netserver a moment to start listening before the client runs
    time.sleep(2)

    # overall timeout covers every configured run plus a per-run reserve
    res_data = self.matched.m2.run(
        netperf,
        timeout=(self.params.netperf_duration + self.params.nperf_reserve)
        * self.params.netperf_runs)

    if perfrepo_result:
        netperf_result_template(perfrepo_result, res_data)
        # NOTE(review): this checks hasattr(self, 'pr_comment') but then
        # reads self.params.pr_comment -- confirm whether the attribute
        # lives on self or on self.params; the mismatch may silently skip
        # the comment or raise at runtime.
        if hasattr(self, 'pr_comment'):
            perfrepo_result.set_comment(self.params.pr_comment)
        self.perf_api.save_result(perfrepo_result)

    srv_proc.kill(2)
    return res_data, srv_proc
def _prepare_client(self, flow):
    """Build and schedule an IperfClient job on the flow's generator host.

    Raises:
        RecipeError: on an unsupported flow type, or (via
            _set_cpupin_params) on an invalid cpupin specification.
    """
    generator = flow.generator
    params = {
        "server": ipaddress(flow.receiver_bind),
        "duration": flow.duration,
    }

    # tcp_stream is iperf3's default mode; the other stream types map
    # onto dedicated client flags.
    stream_flags = {
        "tcp_stream": None,
        "udp_stream": "udp",
        "sctp_stream": "sctp",
        "mptcp_stream": "mptcp",
    }
    if flow.type not in stream_flags:
        raise RecipeError("Unsupported flow type '{}'".format(flow.type))
    flag = stream_flags[flow.type]
    if flag is not None:
        params[flag] = True

    self._set_cpupin_params(params, flow.cpupin)

    if flow.parallel_streams > 1:
        params["parallel"] = flow.parallel_streams
    if flow.msg_size:
        params["blksize"] = flow.msg_size
    if flow.receiver_port is not None:
        params["port"] = flow.receiver_port

    return generator.prepare_job(IperfClient(**params),
                                 job_level=ResultLevel.NORMAL)
def _pin_dev_interrupts(self, dev, cpu):
    """Pin all of *dev*'s IRQs to the given CPU id.

    Args:
        dev: device whose interrupts are repinned; shell commands are
            executed in its network namespace.
        cpu: zero-based CPU id to pin the interrupts to.

    Raises:
        RecipeError: if *cpu* is outside the host's CPU id range.
    """
    netns = dev.netns

    # Determine how many CPUs the host has. Raw string fixes the invalid
    # "\(" escape sequence of the original (non-raw) pattern, which warns
    # on modern Python.
    cpu_info = netns.run("lscpu", job_level=ResultLevel.DEBUG).stdout
    num_cpus = int(re.search(r"CPU\(s\): *([0-9]*)", cpu_info).groups()[0])
    if cpu < 0 or cpu > num_cpus - 1:
        raise RecipeError(
            "Invalid CPU value given: %d. Accepted value %s." % (
                cpu,
                "is: 0" if num_cpus == 1 else "are: 0..%d" % (num_cpus - 1),
            ))

    # Primary source: IRQ numbers listed for the device in /proc/interrupts.
    res = netns.run(
        "grep {} /proc/interrupts | cut -f1 -d: | sed 's/ //'".format(
            dev.name),
        job_level=ResultLevel.DEBUG,
    )
    intrs = res.stdout
    # Fallback for devices that don't show up by name: the per-device
    # msi_irqs directory in sysfs.
    if not intrs:
        res = netns.run(
            "dev_irqs=/sys/class/net/{}/device/msi_irqs; "
            "[ -d $dev_irqs ] && ls -1 $dev_irqs".format(dev.name),
            job_level=ResultLevel.DEBUG,
        )
        intrs = res.stdout

    for intr in intrs.split("\n"):
        try:
            int(intr)  # only numeric lines are IRQ numbers
            netns.run("echo -n {} > /proc/irq/{}/smp_affinity_list".format(
                cpu, intr.strip()))
        except ValueError:
            # non-numeric line (blank or header) -- deliberately skipped
            pass
def _prepare_server(self, flow: Flow) -> Job:
    """Build and schedule a NeperServer job on the flow's receiver host.

    Raises:
        RecipeError: on a negative cpupin or cpupin combined with
            multiple parallel streams.
    """
    receiver = flow.receiver
    params = {
        "workload": flow.type,
        "bind": ipaddress(flow.receiver_bind),
        "test_length": flow.duration,
    }

    if flow.cpupin is not None:
        if flow.cpupin < 0:
            raise RecipeError("Negative perf cpupin value provided.")
        if flow.parallel_streams != 1:
            raise RecipeError("Unsupported combination of single cpupin "
                              "with parallel perf streams.")
        params["cpu_bind"] = flow.cpupin

    if flow.msg_size:
        # neper is request/response based; apply the size to both directions.
        params["request_size"] = flow.msg_size
        params["response_size"] = flow.msg_size

    return receiver.prepare_job(NeperServer(**params),
                                job_level=ResultLevel.NORMAL)
def _check_cpu_validity(self, host, cpus):
    """Verify that every CPU id in *cpus* exists on *host*.

    Args:
        host: host object used to run "lscpu".
        cpus: iterable of zero-based CPU ids to validate.

    Raises:
        RecipeError: if any id is negative or beyond the last CPU.
    """
    cpu_info = host.run("lscpu", job_level=ResultLevel.DEBUG).stdout
    # Raw string fixes the invalid "\(" escape sequence of the original
    # (non-raw) pattern, which warns on modern Python.
    num_cpus = int(re.search(r"CPU\(s\): *([0-9]*)", cpu_info).groups()[0])
    for cpu in cpus:
        if cpu < 0 or cpu > num_cpus - 1:
            raise RecipeError(
                "Invalid CPU value given: %d. Accepted value %s." % (
                    cpu,
                    "is: 0" if num_cpus == 1
                    else "are: 0..%d" % (num_cpus - 1),
                )
            )