def get_sensors(self):
    """Build the sensor list for the QEMU VM-to-VM throughput test.

    Creates the netperf TCP throughput sensor (also stored on
    ``self.netperf``) plus a packet-count / average-packet-size sensor;
    all graphs are written under /tmp.
    """
    throughput_graph = Graph("msg size", "throughput",
                             r"/tmp/throughput_qemu_vm2vm.pdf",
                             r"/tmp/throughput_qemu_vm2vm.txt")
    self.netperf = NetPerfTCP(throughput_graph, runtime=self.netperf_runtime)

    packet_count_graph = Graph("msg size", "packet number",
                               r"/tmp/packet_num_vm2vm.pdf",
                               r"/tmp/packet_num_vm2vm.txt")
    packet_size_graph = Graph("msg size", "average packet size",
                              r"/tmp/packet_size_vm2vm.pdf",
                              r"/tmp/packet_size_vm2vm.txt")
    packet_counter = PacketNumberSensor(packet_count_graph, packet_size_graph)

    return [self.netperf, packet_counter]
def get_all_cpu_sensors(directory, prefix, normalize=None, exits_graph: Graph = None):
    """Create one CPU-utilization sensor per ``CpuUtilizationEnum`` member.

    Each sensor plots "CPU Util <name>" against message size into
    ``<directory>/<prefix>-cpu-<name>``.  When *exits_graph* is supplied, a
    derived graph is appended that estimates the CPU cost per KVM exit as
    (system + user - guest) / exits.

    Returns the list of created sensors.
    """
    sensors = []
    by_name = {}
    for util_type in CpuUtilizationEnum:
        util_graph = Graph(
            "Msg Size",
            "CPU Util {}".format(util_type.name),
            path.join(directory,
                      "{prefix}-cpu-{name}".format(prefix=prefix, name=util_type.name)),
            normalize=normalize)
        sensor = CpuUserSensor(util_type, util_graph)
        sensors.append(sensor)
        by_name[util_type.name] = sensor

    if exits_graph is not None:
        # Derived metric: (system + user - guest) CPU time per KVM exit.
        cost_graph = FuncGraph(
            lambda k, u, g, e: (k + u - g) / e,
            by_name["SYSTEM"].graph,
            by_name["USER"].graph,
            "Message Size [bytes]",
            "cost",
            path.join(directory, "{prefix}-exit-cost".format(prefix=prefix)),
            graph_title="Cost per Exit",
            more_graphs=(by_name["GUEST"].graph, exits_graph),
        )
        sensors.append(DummySensor(cost_graph))

    return sensors
def get_sensors(self):
    """Return the sensors for this test: a single netperf TCP throughput
    sensor (also kept on ``self.netperf``), graphing into /tmp."""
    graph = Graph("msg size", "throughput",
                  r"/tmp/throughput.pdf", r"/tmp/throughput.txt")
    self.netperf = NetPerfTCP(graph, runtime=self.netperf_runtime)
    return [self.netperf]
def get_sensors(self):
    """Sensors for this test: netperf throughput, packet count / average
    size, and interrupt count.  Graphs are written under ../tmp."""
    throughput_graph = Graph("msg size", "throughput",
                             r"../tmp/throughput.pdf", r"../tmp/throughput.txt")
    self.netperf = NetPerfTCP(throughput_graph, runtime=self.netperf_runtime)

    packets = PacketNumberSensor(
        Graph("msg size", "packet number",
              r"../tmp/packet_num.pdf", r"../tmp/packet_num.txt"),
        Graph("msg size", "average packet size",
              r"../tmp/packet_size.pdf", r"../tmp/packet_size.txt"))

    interrupts = InterruptSensor(
        Graph("msg size", "interrupt count",
              r"../tmp/interrupts.pdf", r"../tmp/interrupts.txt"))

    return [self.netperf, packets, interrupts]
def get_all_proc_cpu_sensors(directory, prefix, normalize=None, exits_graph: Graph = None):
    """Create per-process CPU time sensors (Kernel / Guest / User).

    Each sensor plots "Process <name> Time" against message size into
    ``<directory>/<prefix>-proc-cpu-<name>``.  When *exits_graph* is
    supplied, a derived graph estimating the per-exit CPU cost as
    (kernel + user - guest) / exits is appended.

    Returns the list of created sensors.
    """
    sensor_specs = (
        (ProcKernelUsegeSensor, "Kernel"),
        (ProcGuestUsegeSensor, "Guest"),
        (ProcUserUsegeSensor, "User"),
    )
    sensors = []
    by_name = {}
    for sensor_cls, name in sensor_specs:
        sensor = sensor_cls(
            Graph("Msg Size",
                  "Process {name} Time".format(name=name),
                  path.join(directory,
                            "{prefix}-proc-cpu-{name}".format(prefix=prefix, name=name)),
                  normalize=normalize))
        sensors.append(sensor)
        by_name[name] = sensor

    if exits_graph is not None:
        # Derived metric: (kernel + user - guest) process time per KVM exit.
        sensors.append(DummySensor(
            FuncGraph(
                lambda k, u, g, e: (k + u - g) / e,
                by_name["Kernel"].graph,
                by_name["User"].graph,
                "Message Size [bytes]",
                "cost",
                path.join(directory, "{prefix}-exit-cost-proc".format(prefix=prefix)),
                graph_title="Cost per Exit",
                more_graphs=(by_name["Guest"].graph, exits_graph),
            )))

    return sensors
if __name__ == "__main__":
    # Prepare the output directory and log everything to a fresh log file.
    os.makedirs(BASE_DIR, exist_ok=True)
    log_path = os.path.join(BASE_DIR, "log")
    if os.path.exists(log_path):
        os.unlink(log_path)
    logger = logging.getLogger()
    logger.addHandler(logging.FileHandler(log_path))
    logger.setLevel(logging.DEBUG)

    # Throughput comparison, with an extra max-packets-per-second sensor.
    throughput_test = TestCmpThroughput(create_vms(), RUNTIME, RETRIES,
                                        directory=BASE_DIR)
    max_pps_graph = Graph("msg size", "Maximum recv packets per second",
                          path.join(throughput_test.dir, "throughput-max_recv_packets"))
    throughput_test._sensors.append(MaxPacketPerSecondSensor(max_pps_graph))
    throughput_test.pre_run()
    throughput_test.run()
    throughput_test.post_run()

    # Latency comparison, with an extra x tick at 1488 bytes ("1.5K").
    latency_test = TestCmpLatency(create_vms(), RUNTIME, RETRIES,
                                  directory=BASE_DIR,
                                  additional_x=[(1488, "1.5K")])
    latency_test.pre_run()
    latency_test.run()
    latency_test.post_run()
def get_sensors(self):
    """Build the full sensor set for the throughput benchmark.

    Creates the netperf throughput sensor (kept on ``self.netperf``) plus
    packet, interrupt, KVM-exit, QEMU-batch, scheduler and CPU sensors,
    and a number of derived ratio/scatter graphs between them.  All graph
    files go under ``self.dir`` with a ``throughput-`` prefix.
    """
    # Primary throughput graph; NETPERF_CLS is chosen by the subclass.
    netperf_graph = Graph("message size", "",
                          path.join(self.dir, "throughput"),
                          graph_title="throughput (mbps)")
    self.netperf = self.NETPERF_CLS(netperf_graph, runtime=self.netperf_runtime)
    # packet_sensor = PacketNumberSensor(
    #     Graph("message size", "packet number", r"../tmp/packet_num.pdf", r"../tmp/packet_num.txt"),
    #     Graph("message size", "average packet size", r"../tmp/packet_size.pdf", r"../tmp/packet_size.txt")
    # )
    # Throughput normalized to the first data point (log ratio).
    netperf_graph_ratio = DummySensor(
        GraphRatioGnuplot(netperf_graph, "message size",
                          "throughput (log ratio to first)",
                          path.join(self.dir, "throughput-ratio"),
                          graph_title="throughput (%s sec)" % (self.netperf_runtime, )))
    # TX byte counter, normalized to Mb per second over the run time.
    packet_sensor_tx_bytes = PacketRxBytesSensor(
        Graph("message size", "TX size per second (Mb)",
              path.join(self.dir, "throughput-tx_bytes"),
              normalize=self.netperf_runtime * 1000 * 1000 / 8))
    packet_sensor_tx_packets = PacketRxPacketsSensor(
        Graph("message size", "total tx packets",
              path.join(self.dir, "throughput-tx_packets"),
              normalize=self.netperf_runtime))
    # Average TX packet size = bytes / packets (scaled to KB).
    packet_sensor_avg_size = DummySensor(
        RatioGraph(packet_sensor_tx_bytes.graph, packet_sensor_tx_packets.graph,
                   "message size", "tx packet size (KB)",
                   path.join(self.dir, "throughput-tx_packet_size"),
                   normalize=8 * 0.001))
    interrupt_sensor = InterruptSensor(
        Graph("message size", "interrupt count (per sec)",
              path.join(self.dir, "throughput-interrupts"),
              normalize=self.netperf_runtime))
    kvm_exits = KvmExitsSensor(
        Graph("message size", "exits count (per sec)",
              path.join(self.dir, "throughput-kvm_exits"),
              normalize=self.netperf_runtime))
    # Exits and interrupts normalized per transmitted packet.
    kvm_exits_ratio = DummySensor(
        RatioGraph(kvm_exits.graph, packet_sensor_tx_packets.graph,
                   "message size", "exits per Packet",
                   path.join(self.dir, "throughput-kvm_exits-ratio")))
    interrupt_ratio = DummySensor(
        RatioGraph(interrupt_sensor.graph, packet_sensor_tx_packets.graph,
                   "message size", "interrupts per Packet",
                   path.join(self.dir, "throughput-interrupts-ratio")))
    kvm_halt_exits = KvmHaltExitsSensor(
        Graph("message size", "halt exits count (per sec)",
              path.join(self.dir, "throughput-kvm_halt_exits"),
              normalize=self.netperf_runtime))
    # QEMU batching statistics (packets/descriptors per batch, batch rate).
    batch_size = QemuBatchSizeSensor(
        Graph("message size", "average batch size (in packets)",
              path.join(self.dir, "throughput-batch_size")))
    batch_descriptos_size = QemuBatchDescriptorsSizeSensor(
        Graph("message size", "average batch size (in descriptors)",
              path.join(self.dir, "throughput-batch_descriptors_size")))
    batch_count = QemuBatchCountSensor(
        Graph("message size", "average batch Count (per Sec)",
              path.join(self.dir, "throughput-batch_count"),
              normalize=self.netperf_runtime))
    batch_halt_ratio = DummySensor(
        RatioGraph(batch_count.graph, kvm_halt_exits.graph,
                   "message size", "batch count / kvm halt",
                   path.join(self.dir, "throughput-batchCount_kvmHalt")))
    # Plot the batch/halt ratio on a log2 y axis.
    batch_halt_ratio.graph.log_scale_y = 2
    cpu_sensors = get_all_cpu_sensors(self.dir, "throughput",
                                      self.netperf_runtime,
                                      exits_graph=kvm_exits.graph)
    cpu_proc_sensors = get_all_proc_cpu_sensors(
        self.dir, "throughput", self.netperf_runtime,
        exits_graph=kvm_exits.graph)
    # Scatter of throughput vs. sent segment size.
    throughput2segment_size = DummySensor(
        GraphScatter(packet_sensor_avg_size.graph, netperf_graph,
                     "sent segment size (KB)", "Throughput",
                     path.join(self.dir, "throughput-segment_throughput")))
    # interrupt_delay = QemuInterruptDelaySensor(
    #     Graph("message size", "average interrupt delay",
    #           path.join(self.dir, "throughput-interrupt_delay"))
    # )
    # Derived: bytes per batch = batch size (packets) * avg packet size.
    bytes_per_batch = DummySensor(
        FuncGraph(lambda x, y: x * y,
                  batch_size.graph, packet_sensor_avg_size.graph,
                  "message size", "bytes per batch",
                  path.join(self.dir, "throughput-batch_bytes")))
    sched_switch = SchedSwitchSensor(
        Graph("message size", "num of scheduler switch (per sec)",
              path.join(self.dir, "throughput-context_switch"),
              normalize=self.netperf_runtime))
    sched_switch_per_batch = DummySensor(
        RatioGraph(sched_switch.graph, batch_count.graph,
                   "message size", "context switch per batch",
                   path.join(self.dir, "throughput-context_switch-ratio")))
    kvm_exits_batch_ratio = DummySensor(
        RatioGraph(kvm_exits.graph, batch_count.graph,
                   "message size", "exits per batch",
                   path.join(self.dir, "throughput-kvm_exits-batch_ratio")))
    # Derived: average batch duration in usec = 1e6 / batches-per-second.
    batch_time = DummySensor(
        FuncGraph(lambda x: 1e6 / x, batch_count.graph, EmptyGraph(),
                  "message size", "batch duration [usec]",
                  path.join(self.dir, "throughput-batch_time")))
    interrupt_ratio_batch = DummySensor(
        RatioGraph(interrupt_sensor.graph, batch_count.graph,
                   "message size", "interrupts per batch",
                   path.join(self.dir, "throughput-interrupts-batch")))
    # nic_tx_stop = NicTxStopSensor(
    #     Graph("message size", "num of tx queue stops (per sec)",
    #           path.join(self.dir, "throughput-tx_queue_stop"),
    #           normalize=self.netperf_runtime
    #           )
    # )
    # nic_tx_stop_ratio_batch = DummySensor(
    #     RatioGraph(nic_tx_stop.graph, batch_count.graph,
    #                "message size", "queue stops per batch",
    #                path.join(self.dir, "throughput-tx_queue_stop-batch")
    #                )
    # )
    # tcp_total_msgs = TCPTotalMSgs(
    #     Graph("message size", "num of transmited msgs per second",
    #           path.join(self.dir, "throughput-tcp_msgs_total"),
    #           normalize=self.netperf_runtime
    #           )
    # )
    # tcp_first_msgs = TCPFirstMSgs(
    #     Graph("message size", "num of transmited first msgs per second",
    #           path.join(self.dir, "throughput-tcp_msgs_first"),
    #           normalize=self.netperf_runtime
    #           )
    # )
    # tcp_msgs_ratio = DummySensor(
    #     RatioGraph(tcp_first_msgs.graph, tcp_total_msgs.graph,
    #                "message size", "queue stops per batch",
    #                path.join(self.dir, "throughput-tcp_msgs-ratio")
    #                )
    # )
    return [
        self.netperf,
        netperf_graph_ratio,
        packet_sensor_tx_bytes,
        packet_sensor_tx_packets,
        packet_sensor_avg_size,
        interrupt_sensor,
        kvm_exits,
        kvm_exits_ratio,
        kvm_halt_exits,
        interrupt_ratio,
        interrupt_ratio_batch,
        batch_size,
        batch_descriptos_size,
        batch_count,
        batch_halt_ratio,
        bytes_per_batch,
        batch_time,
        throughput2segment_size,
        #interrupt_delay,
        sched_switch,
        sched_switch_per_batch,
        kvm_exits_batch_ratio,
        #nic_tx_stop,
        #nic_tx_stop_ratio_batch,
        #tcp_total_msgs,
        #tcp_first_msgs,
        #tcp_msgs_ratio,
    ] + cpu_sensors + cpu_proc_sensors
def get_sensors(self):
    """Build the sensor set for the latency benchmark.

    Creates the netperf latency sensor (kept on ``self.netperf``) plus
    derived latency-in-usec, interrupt, KVM-exit, packet, QEMU-batch,
    scheduler and CPU sensors.  All graph files go under ``self.dir``
    with a ``latency-`` prefix.
    """
    # NOTE: the original code had `ret = super(LatencyTest, self).get_sensors`
    # here -- an attribute access that never called the method and whose
    # result was never used.  The dead statement has been removed.
    self.netperf = netperf.NetPerfLatency(  # GraphErrorBarsGnuplot
        Graph("message size [bytes]", "transactions/sec",
              path.join(self.dir, "latency"), graph_title="latency"),
        runtime=self.netperf_runtime)
    # self.netperf.graph.script_filename = "gnuplot/plot_columns_latency"
    # Derived: one-way latency in usec = 1e6 / transactions-per-second.
    latency_us = DummySensor(
        FuncGraph(lambda x1: 1 / x1 * 1000 * 1000,
                  self.netperf.graph, EmptyGraph(),
                  "message size [bytes]", "usec",
                  path.join(self.dir, "latency-time"),
                  graph_title="Latency"))
    interrupt_sensor = InterruptSensor(
        Graph("message size", "interrupt count (per sec)",
              path.join(self.dir, "latency-interrupts"),
              normalize=self.netperf_runtime))
    kvm_exits = KvmExitsSensor(
        Graph("message size", "exits count (per sec)",
              path.join(self.dir, "latency-kvm_exits"),
              normalize=self.netperf_runtime))
    kvm_exits_ratio = DummySensor(
        RatioGraph(kvm_exits.graph, self.netperf.graph,
                   "message size", "Exits per transaction",
                   path.join(self.dir, "latency-kvm_exits-ratio"),
                   graph_title="KVM exits per transaction"))
    kvm_halt_exits = KvmHaltExitsSensor(
        GraphErrorBarsGnuplot("message size", "Halt exits count (per sec)",
                              path.join(self.dir, "latency-kvm_halt_exits"),
                              normalize=self.netperf_runtime))
    kvm_halt_exits_ratio = DummySensor(
        RatioGraph(kvm_halt_exits.graph, self.netperf.graph,
                   "message size", "Halt Exits per transaction",
                   path.join(self.dir, "latency-kvm_halt_exits-ratio"),
                   graph_title="KVM Halt exits per transaction"))
    packet_sensor_tx_bytes = PacketRxBytesSensor(
        Graph("message size", "Total TX size(Mb)",
              path.join(self.dir, "latency-tx_bytes"),
              normalize=self.netperf_runtime * 1000 * 1000 / 8))
    packet_sensor_tx_packets = PacketRxPacketsSensor(
        Graph("message size", "Total TX packets",
              path.join(self.dir, "latency-tx_packets"),
              normalize=self.netperf_runtime))
    # Average TX packet size = bytes / packets (scaled to KB).
    packet_sensor_avg_size = DummySensor(
        RatioGraph(packet_sensor_tx_bytes.graph, packet_sensor_tx_packets.graph,
                   "message size", "TX Packet Size (KB)",
                   path.join(self.dir, "latency-tx_packet-size"),
                   normalize=8 * 0.001))
    interrupt_ratio = DummySensor(
        RatioGraph(interrupt_sensor.graph, self.netperf.graph,
                   "message size", "interrupts per transaction",
                   path.join(self.dir, "latency-interrupts-ratio")))
    # QEMU batching statistics.
    batch_size = QemuBatchSizeSensor(
        Graph("message size", "average batch size (in packets)",
              path.join(self.dir, "latency-batch_size")))
    batch_descriptors_size = QemuBatchDescriptorsSizeSensor(
        Graph("message size", "average batch size (in descriptors)",
              path.join(self.dir, "latency-batch_descriptors_size")))
    batch_count = QemuBatchCountSensor(
        Graph("message size", "average batch count (per sec)",
              path.join(self.dir, "latency-batch_count"),
              normalize=self.netperf_runtime))
    sched_switch = SchedSwitchSensor(
        Graph("message size", "num of scheduler switch (per sec)",
              path.join(self.dir, "latency-context_switch"),
              normalize=self.netperf_runtime))
    sched_switch_per_batch = DummySensor(
        RatioGraph(sched_switch.graph, self.netperf.graph,
                   "message size", "context switch per transaction",
                   path.join(self.dir, "latency-context_switch-ratio")))
    cpu_sensors = get_all_cpu_sensors(self.dir, "latency",
                                      self.netperf_runtime,
                                      exits_graph=kvm_exits.graph)
    cpu_proc_sensors = get_all_proc_cpu_sensors(
        self.dir, "latency", self.netperf_runtime,
        exits_graph=kvm_exits.graph)
    return [
        self.netperf,
        latency_us,
        interrupt_sensor,
        kvm_exits,
        kvm_halt_exits,
        kvm_exits_ratio,
        kvm_halt_exits_ratio,
        packet_sensor_tx_bytes,
        packet_sensor_tx_packets,
        packet_sensor_avg_size,
        batch_size,
        batch_descriptors_size,
        batch_count,
        interrupt_ratio,
        sched_switch,
        sched_switch_per_batch,
    ] + cpu_sensors + cpu_proc_sensors
def get_sensors(self):
    """Build the sensor set for the latency benchmark (``self.DIR`` variant).

    Creates the netperf latency sensor (kept on ``self.netperf``) plus
    interrupt, KVM-exit, packet and CPU sensors, writing all graphs under
    ``self.DIR`` with a ``latency-`` prefix.
    """
    self.netperf = netperf.NetPerfLatency(
        Graph("Message size [bytes]", "Transactions/Sec",
              path.join(self.DIR, "latency"), graph_title="Latency"),
        runtime=self.netperf_runtime)
    # self.netperf.graph.script_filename = "gnuplot/plot_columns_latency"
    interrupt_sensor = InterruptSensor(
        Graph("msg size", "interrupt count (per sec)",
              path.join(self.DIR, "latency-interrupts"),
              normalize=self.netperf_runtime))
    kvm_exits = KvmExitsSensor(
        Graph("msg size", "exits count (per sec)",
              path.join(self.DIR, "latency-kvm_exits"),
              normalize=self.netperf_runtime))
    kvm_exits_ratio = DummySensor(
        RatioGraph(kvm_exits.graph, self.netperf.graph,
                   "msg size", "Exits per transaction",
                   path.join(self.DIR, "latency-kvm_exits-ratio"),
                   graph_title="KVM exits per transaction"))
    kvm_halt_exits = KvmHaltExitsSensor(
        GraphErrorBarsGnuplot("msg size", "Halt exits count (per sec)",
                              path.join(self.DIR, "latency-kvm_halt_exits"),
                              normalize=self.netperf_runtime))
    kvm_halt_exits_ratio = DummySensor(
        RatioGraph(kvm_halt_exits.graph, self.netperf.graph,
                   "msg size", "Halt Exits per transaction",
                   path.join(self.DIR, "latency-kvm_halt_exits-ratio"),
                   graph_title="KVM Halt exits per transaction"))
    packet_sensor_tx_bytes = PacketRxBytesSensor(
        Graph("msg size", "Total TX size",
              path.join(self.DIR, "latency-tx_bytes"),
              normalize=self.netperf_runtime))
    packet_sensor_tx_packets = PacketRxPacketsSensor(
        Graph("msg size", "Total TX packets",
              path.join(self.DIR, "latency-tx_packets"),
              normalize=self.netperf_runtime))
    packet_sensor_avg_size = DummySensor(
        RatioGraph(packet_sensor_tx_bytes.graph, packet_sensor_tx_packets.graph,
                   "msg size", "TX Packet Size",
                   path.join(self.DIR, "latency-tx_packet-size")))
    interrupt_ratio = DummySensor(
        RatioGraph(interrupt_sensor.graph, self.netperf.graph,
                   "msg size", "Interrupts per transaction",
                   path.join(self.DIR, "latency-interrupts-ratio")))
    cpu_sensors = get_all_cpu_sensors(self.DIR, "latency", self.netperf_runtime)
    # BUGFIX: the original call passed self.netperf.graph as the `normalize`
    # argument and the runtime as `exits_graph`, mismatching the
    # get_all_proc_cpu_sensors(directory, prefix, normalize, exits_graph)
    # signature.  Pass the runtime as `normalize`, as every other caller does.
    proc_cpu_sensors = get_all_proc_cpu_sensors(self.DIR, "latency",
                                                self.netperf_runtime)
    return [
        self.netperf,
        interrupt_sensor,
        kvm_exits,
        kvm_halt_exits,
        kvm_exits_ratio,
        kvm_halt_exits_ratio,
        packet_sensor_tx_bytes,
        packet_sensor_tx_packets,
        packet_sensor_avg_size,
        interrupt_ratio,
    ] + cpu_sensors + proc_cpu_sensors
def get_sensors(self):
    """Build the sensor set for the throughput benchmark (``self.DIR``
    variant): netperf throughput with error bars, packet, interrupt,
    KVM-exit, QEMU-batch and CPU sensors."""
    throughput_graph = GraphErrorBarsGnuplot("msg size", "throughput",
                                             path.join(self.DIR, "throughput"),
                                             graph_title="Throughput")
    self.netperf = NetPerfTCP(throughput_graph, runtime=self.netperf_runtime)

    tx_bytes = PacketRxBytesSensor(
        Graph("msg size", "Total TX size",
              path.join(self.DIR, "throughput-tx_bytes"),
              normalize=self.netperf_runtime))
    tx_packets = PacketRxPacketsSensor(
        Graph("msg size", "Total TX packets",
              path.join(self.DIR, "throughput-tx_packets"),
              normalize=self.netperf_runtime))
    # Average TX packet size = bytes / packets.
    avg_packet_size = DummySensor(
        RatioGraph(tx_bytes.graph, tx_packets.graph,
                   "msg size", "TX Packet Size",
                   path.join(self.DIR, "throughput-tx_packet_size")))

    interrupts = InterruptSensor(
        Graph("msg size", "interrupt count (per sec)",
              path.join(self.DIR, "throughput-interrupts"),
              normalize=self.netperf_runtime))
    exits = KvmExitsSensor(
        Graph("msg size", "exits count (per sec)",
              path.join(self.DIR, "throughput-kvm_exits"),
              normalize=self.netperf_runtime))
    exits_per_packet = DummySensor(
        RatioGraph(exits.graph, tx_packets.graph,
                   "msg size", "Exits per Packet",
                   path.join(self.DIR, "throughput-kvm_exits-ratio")))
    interrupts_per_packet = DummySensor(
        RatioGraph(interrupts.graph, tx_packets.graph,
                   "msg size", "Interrupts per Packet",
                   path.join(self.DIR, "throughput-interrupts-ratio")))
    halt_exits = KvmHaltExitsSensor(
        GraphErrorBarsGnuplot("msg size", "Halt exits count (per sec)",
                              path.join(self.DIR, "throughput-kvm_halt_exits"),
                              normalize=self.netperf_runtime))

    # QEMU batching statistics.
    packets_per_batch = QemuBatchSizeSensor(
        Graph("msg size", "Average batch size (in packets)",
              path.join(self.DIR, "throughput-batch_size")))
    descriptors_per_batch = QemuBatchDescriptorsSizeSensor(
        Graph("msg size", "Average batch size (in descriptors)",
              path.join(self.DIR, "throughput-batch_descriptors_size")))
    batches_per_sec = QemuBatchCountSensor(
        Graph("msg size", "Average batch Count (per Sec)",
              path.join(self.DIR, "throughput-batch_count"),
              normalize=self.netperf_runtime))
    # NOTE(review): "throughtput" typo is preserved -- it is the on-disk
    # output path and renaming it would change where results are written.
    batches_per_halt = DummySensor(
        RatioGraph(batches_per_sec.graph, halt_exits.graph,
                   "msg size", "batch count / kvm halt",
                   path.join(self.DIR, "throughtput-batchCount_kvmHalt")))

    cpu_sensors = get_all_cpu_sensors(self.DIR, "throughput", self.netperf_runtime)

    return [
        self.netperf,
        tx_bytes,
        tx_packets,
        avg_packet_size,
        interrupts,
        exits,
        exits_per_packet,
        halt_exits,
        interrupts_per_packet,
        packets_per_batch,
        descriptors_per_batch,
        batches_per_sec,
        batches_per_halt,
    ] + cpu_sensors