示例#1
0
 def collect_kpi(self):
     """Return KPIs gathered by the resource helper.

     Verifies first that neither traffic-generator process has died;
     `check_if_process_failed` raises in that case.
     """
     # refuse to report KPIs from a dead traffic generator
     for tg_proc in (self._tg_process, self._traffic_process):
         check_if_process_failed(tg_proc)
     kpis = self.resource_helper.collect_kpi()
     LOG.debug("%s collect KPIs %s", self.APP_NAME, kpis)
     return kpis
示例#2
0
    def collect_kpi(self):
        """Collect per-direction packet-in/drop KPIs from the VNF.

        Queries the VNF CLI for up-stream (index 5) and down-stream
        (index 9) port stats and accumulates the regex-extracted
        counters into the result dict.
        """
        # we can't get KPIs if the VNF is down
        check_if_process_failed(self._vnf_process)
        physical_node = ctx_base.Context.get_physical_node_from_server(
            self.scenario_helper.nodes[self.name])

        result = {
            "physical_node": physical_node,
            'pkt_in_up_stream': 0,
            'pkt_drop_up_stream': 0,
            'pkt_in_down_stream': 0,
            'pkt_drop_down_stream': 0,
            'collect_stats': self.resource_helper.collect_kpi(),
        }

        command = 'p {0} stats port {1} 0'
        for index, direction in ((5, 'up'), (9, 'down')):
            in_key = "pkt_in_{0}_stream".format(direction)
            drop_key = "pkt_drop_{0}_stream".format(direction)
            for mode in ('in', 'out'):
                stats = self.vnf_execute(command.format(index, mode))
                match = re.search(self.COLLECT_KPI, stats, re.MULTILINE)
                if match:
                    # group 1: packets in; groups 2 and 3: drop counters
                    result[in_key] += int(match.group(1))
                    result[drop_key] += (int(match.group(2)) +
                                         int(match.group(3)))

        LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
        return result
示例#3
0
    def collect_kpi(self):
        """Collect UDP Replay KPIs parsed from the stats dump.

        The stats output is whitespace-split; per-port counters come in
        groups of five fields starting at the first '0' token, and each
        KPI is the sum of one column across all ports.
        """
        # we can't get KPIs if the VNF is down
        check_if_process_failed(self._vnf_process)

        port_total = len(self.vnfd_helper.port_pairs.all_ports)

        raw_stats = self.get_stats()
        words = raw_stats.split()
        # keep exactly five fields per matched port, starting at the
        # first '0' token
        per_port = words[words.index('0'):][:port_total * 5]

        def column_sum(column):
            # sum one counter column across every port's row of five
            return sum(int(value) for value in per_port[column::5])

        physical_node = ctx_base.Context.get_physical_node_from_server(
            self.scenario_helper.nodes[self.name])

        result = {
            "physical_node": physical_node,
            "packets_in": column_sum(1),
            "packets_fwd": column_sum(2),
            "packets_dropped": column_sum(3) + column_sum(4),
            'collect_stats': self.resource_helper.collect_kpi(),
        }

        LOG.debug("UDP Replay collect KPIs %s", result)
        return result
示例#4
0
    def collect_kpi(self):
        """Collect packet totals, rates and collectd KPIs from the VNF.

        Returns a dict of packet counters plus per-second rates derived
        from the previous sample, zeroed placeholders when the resource
        helper is not yet available, or {} when port stats cannot be
        parsed.

        Raises RuntimeError for an unsupported port count.
        """
        # we can't get KPIs if the VNF is down
        check_if_process_failed(self._vnf_process)

        if self.resource_helper is None:
            result = {
                "packets_in": 0,
                "packets_dropped": 0,
                "packets_fwd": 0,
                "collect_stats": {
                    "core": {}
                },
            }
            return result

        # use all_ports so we only use ports matched in topology
        port_count = len(self.vnfd_helper.port_pairs.all_ports)
        if port_count not in {1, 2, 4}:
            raise RuntimeError("Failed ..Invalid no of ports .. "
                               "1, 2 or 4 ports only supported at this time")

        self.port_stats = self.vnf_execute('port_stats', range(port_count))
        curr_time = time.time()
        try:
            rx_total = self.port_stats[6]
            tx_total = self.port_stats[7]
        except IndexError:
            LOG.debug("port_stats parse fail ")
            # return empty dict so we don't mess up existing KPIs
            return {}

        result = {
            "packets_in": rx_total,
            "packets_dropped": max((tx_total - rx_total), 0),
            "packets_fwd": tx_total,
            # we share ProxResourceHelper with TG, but we want to collect
            # collectd KPIs here and not TG KPIs, so use a different method name
            "collect_stats": self.resource_helper.collect_collectd_kpi(),
        }

        # BUG FIX: two samples within the clock resolution used to raise
        # an unhandled ZeroDivisionError; guard it the same way the
        # TSC-based variant of this method does and report zero rates.
        try:
            curr_packets_in = int(
                (rx_total - self.prev_packets_in) /
                (curr_time - self.prev_time))
            curr_packets_fwd = int(
                (tx_total - self.prev_packets_sent) /
                (curr_time - self.prev_time))
        except ZeroDivisionError:
            LOG.error("Error.... Divide by Zero")
            curr_packets_in = 0
            curr_packets_fwd = 0

        result["curr_packets_in"] = curr_packets_in
        result["curr_packets_fwd"] = curr_packets_fwd

        self.prev_packets_in = rx_total
        self.prev_packets_sent = tx_total
        self.prev_time = curr_time

        LOG.debug("%s collect KPIs %s %s", self.APP_NAME,
                  datetime.datetime.now(), result)
        return result
示例#5
0
    def collect_kpi(self):
        """Collect TG KPIs together with the physical node identity.

        Verifies first that neither traffic-generator process has died;
        `check_if_process_failed` raises in that case.
        """
        # check if the tg processes have exited
        physical_node = Context.get_physical_node_from_server(
            self.scenario_helper.nodes[self.name])

        result = {
            "physical_node": physical_node,
        }
        for tg_proc in (self._tg_process, self._traffic_process):
            check_if_process_failed(tg_proc)

        result["collect_stats"] = self.resource_helper.collect_kpi()
        LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
        return result
示例#6
0
 def collect_kpi(self):
     """Collect KPIs by regex-matching the VNF stats dump.

     Falls back to zeroed counters when COLLECT_KPI does not match.
     """
     # we can't get KPIs if the VNF is down
     check_if_process_failed(self._vnf_process)
     stats_output = self.get_stats()
     match = re.search(self.COLLECT_KPI, stats_output, re.MULTILINE)
     if not match:
         result = {
             "packets_in": 0,
             "packets_fwd": 0,
             "packets_dropped": 0,
         }
     else:
         # COLLECT_MAP maps KPI names to regex group indices
         result = {key: int(match.group(group))
                   for key, group in self.COLLECT_MAP.items()}
         result["collect_stats"] = self.resource_helper.collect_kpi()
     LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
     return result
示例#7
0
    def collect_kpi(self):
        """Collect KPIs from the stats dump plus the physical node name.

        Falls back to zeroed counters when COLLECT_KPI does not match.
        """
        # we can't get KPIs if the VNF is down
        check_if_process_failed(self._vnf_process, 0.01)
        stats_output = self.get_stats()
        match = re.search(self.COLLECT_KPI, stats_output, re.MULTILINE)
        physical_node = Context.get_physical_node_from_server(
            self.scenario_helper.nodes[self.name])

        result = {"physical_node": physical_node}
        if match is None:
            result.update({"packets_in": 0,
                           "packets_fwd": 0,
                           "packets_dropped": 0})
        else:
            # COLLECT_MAP maps KPI names to regex group indices
            for key, group in self.COLLECT_MAP.items():
                result[key] = int(match.group(group))
            result["collect_stats"] = self.resource_helper.collect_kpi()

        LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
        return result
示例#8
0
    def collect_kpi(self):
        """Collect per-core IRQ latency bucket stats, normalized per second.

        On the first call there is no elapsed interval yet, so the stats
        payload is empty; afterwards each non-zero bucket/overflow counter
        is divided by the elapsed test time and zero-valued counters are
        dropped from the copy.

        Returns the result dict (without stats when no resource helper
        exists yet).
        """
        # check if the tg processes have exited
        physical_node = Context.get_physical_node_from_server(
            self.scenario_helper.nodes[self.name])

        result = {"physical_node": physical_node}
        for proc in (self._tg_process, self._traffic_process):
            check_if_process_failed(proc)

        if self.resource_helper is None:
            return result

        if self.irq_cores is None:
            # lazily build the config and discover the IRQ cores once
            self.setup_helper.build_config_file()
            self.irq_cores = self.get_irq_cores()

        data = self.resource_helper.sut.irq_core_stats(self.irq_cores)
        # deep copy so counters can be deleted while iterating `data`
        new_data = copy.deepcopy(data)

        self.end_test_time = time.time()
        self.resource_helper.sut.reset_stats()

        if self.start_test_time is None:
            # first sample: no interval to normalize against
            new_data = {}
        else:
            test_time = self.end_test_time - self.start_test_time
            for index, item in data.items():
                for counter, value in item.items():
                    if counter.startswith("bucket_") or \
                            counter.startswith("overflow"):
                        # BUG FIX: was `value is 0`, an identity check that
                        # only works via CPython small-int interning and
                        # emits SyntaxWarning on Python 3.8+
                        if value == 0:
                            del new_data[index][counter]
                        else:
                            new_data[index][counter] = float(value) / test_time

        self.start_test_time = time.time()

        result["collect_stats"] = new_data
        LOG.debug("%s collect KPIs %s", self.APP_NAME, result)

        return result
示例#9
0
 def test_check_if_procces_failed_1(self):
     """A process with a non-zero exitcode must raise RuntimeError."""
     failed_proc = mock.MagicMock(exitcode=1, name="debug")
     with self.assertRaises(RuntimeError):
         process.check_if_process_failed(failed_proc)
示例#10
0
 def test_check_if_procces_failed_0(self):
     """A process with exitcode 0 must pass the check without raising."""
     healthy_proc = mock.MagicMock(exitcode=0, name="debug")
     process.check_if_process_failed(healthy_proc)
示例#11
0
    def collect_kpi(self):
        """Collect PROX KPIs: packet totals plus TSC-derived rates.

        Aggregates rx/tx/tsc over all matched ports via
        'multi_port_stats' (retried until success or RETRY_TIMEOUT),
        then derives per-second rates from the TSC delta against the
        previous sample. Returns zeroed placeholders when no resource
        helper exists yet, and {} when stats cannot be obtained or
        parsed.

        Raises RuntimeError for an unsupported port count or when the
        TSC frequency cannot be retrieved.
        """
        # we can't get KPIs if the VNF is down
        check_if_process_failed(self._vnf_process, 0.01)

        physical_node = context_base.Context.get_physical_node_from_server(
            self.scenario_helper.nodes[self.name])

        result = {"physical_node": physical_node}

        if self.resource_helper is None:
            # no helper yet: report a consistent, zeroed KPI shape
            result.update({
                "packets_in": 0,
                "packets_dropped": 0,
                "packets_fwd": 0,
                "curr_packets_in": 0,
                "curr_packets_fwd": 0,
                "collect_stats": {"core": {}},
            })
            return result

        if self.tsc_hz == 0:
            # lazily fetch the TSC frequency from the SUT
            self.tsc_hz = float(self.resource_helper.sut.hz())
            LOG.debug("TSC = %f", self.tsc_hz)
            if self.tsc_hz == 0:
                raise RuntimeError("Unable to retrieve TSC")

        # use all_ports so we only use ports matched in topology
        port_count = len(self.vnfd_helper.port_pairs.all_ports)
        if port_count not in {1, 2, 4}:
            raise RuntimeError("Failed ..Invalid no of ports .. "
                               "1, 2 or 4 ports only supported at this time")

        port_numbers = [
            self.vnfd_helper.port_num(port_name)
            for port_name in self.vnfd_helper.port_pairs.all_ports
        ]

        # retry the stats query until it succeeds or the timeout expires
        deadline = time.time() + constants.RETRY_TIMEOUT
        success = False
        while not success:
            success, all_port_stats = self.vnf_execute(
                'multi_port_stats', port_numbers)
            if time.time() > deadline:
                break

        if not success:
            # return empty dict so we don't mess up existing KPIs
            return {}

        rx_total = tx_total = tsc = 0
        try:
            for port_stats in all_port_stats:
                rx_total += port_stats[1]
                tx_total += port_stats[2]
                tsc += port_stats[5]
        except (TypeError, IndexError):
            LOG.error("Invalid data ...")
            return {}

        # average the TSC reading across the ports
        tsc = tsc / port_count

        result.update({
            "packets_in": rx_total,
            "packets_dropped": max((tx_total - rx_total), 0),
            "packets_fwd": tx_total,
            # we share ProxResourceHelper with TG, but we want to collect
            # collectd KPIs here and not TG KPIs, so use a different method name
            "collect_stats": self.resource_helper.collect_collectd_kpi(),
        })

        def packets_per_second(packet_delta):
            # convert a packet delta to a rate via the elapsed TSC ticks
            try:
                return int(
                    (packet_delta * self.tsc_hz) / (tsc - self.prev_tsc))
            except ZeroDivisionError:
                LOG.error("Error.... Divide by Zero")
                return 0

        result["curr_packets_in"] = packets_per_second(
            rx_total - self.prev_packets_in)
        result["curr_packets_fwd"] = packets_per_second(
            tx_total - self.prev_packets_sent)

        self.prev_packets_in = rx_total
        self.prev_packets_sent = tx_total
        self.prev_tsc = tsc

        LOG.debug("%s collect KPIs %s %s", self.APP_NAME,
                  datetime.datetime.now(), result)
        return result