def setup_test(self):
        super().setup_test()

        self.tpc_sweep_range = range(self.atten_min, self.pl10_atten)
        self.log.info(self.current_test_name)
        # Prepare one figure for the raw TX power curve and one for its
        # derivative; both share the same pathloss x-axis.
        for suffix, attr_name, y_label in (
            ('curve', 'tpc_plots_figure', 'Tx Power(dBm)'),
            ('curve_derivative', 'tpc_plots_derivative_figure',
             'Tx Power(dB)'),
        ):
            setattr(
                self, attr_name,
                wifi_utils.BokehFigure(title='{}_{}'.format(
                    self.current_test_name, suffix),
                                       x_label='Pathloss(dBm)',
                                       primary_y_label=y_label))
Пример #2
0
    def plot_rssi_vs_attenuation(self, postprocessed_results):
        """Function to plot RSSI vs attenuation sweeps

        Plots all monitored RSSI metrics against the total attenuation
        vector and saves the figure as HTML in the log path.

        Args:
            postprocessed_results: compiled arrays of RSSI data.
        """
        figure = wputils.BokehFigure(self.current_test_name,
                                     x_label='Attenuation (dB)',
                                     primary_y_label='RSSI (dBm)')
        # The four mean-based metrics share the same access pattern, so plot
        # them in one loop instead of four copy-pasted calls.
        for key, legend in (('signal_poll_rssi', 'Signal Poll RSSI'),
                            ('scan_rssi', 'Scan RSSI'),
                            ('chain_0_rssi', 'Chain 0 RSSI'),
                            ('chain_1_rssi', 'Chain 1 RSSI')):
            figure.add_line(postprocessed_results['total_attenuation'],
                            postprocessed_results[key]['mean'],
                            legend,
                            marker='circle')
        # Predicted RSSI is stored as a flat list, not a stats dict.
        figure.add_line(postprocessed_results['total_attenuation'],
                        postprocessed_results['predicted_rssi'],
                        'Predicted RSSI',
                        marker='circle')

        output_file_path = os.path.join(self.log_path,
                                        self.current_test_name + '.html')
        figure.generate_figure(output_file_path)
 def process_testclass_results(self):
     """Saves plot with all test results to enable comparison.

     Groups all test results by (channel, mode), overlays each group's
     throughput-vs-attenuation curves on a shared figure, and saves all
     figures into a single results.html in the log path.
     """
     # Plot and save all results
     plots = collections.OrderedDict()
     for result in self.testclass_results:
         # One figure per unique (channel, mode) combination.
         plot_id = (result['testcase_params']['channel'],
                    result['testcase_params']['mode'])
         if plot_id not in plots:
             plots[plot_id] = wputils.BokehFigure(
                 title='Channel {} {} ({})'.format(
                     result['testcase_params']['channel'],
                     result['testcase_params']['mode'],
                     result['testcase_params']['traffic_type']),
                 x_label='Attenuation (dB)',
                 primary_y_label='Throughput (Mbps)')
         plots[plot_id].add_line(result['total_attenuation'],
                                 result['throughput_receive'],
                                 result['test_name'],
                                 marker='circle')
     figure_list = []
     for plot_id, plot in plots.items():
         plot.generate_figure()
         figure_list.append(plot)
     # Bundle every figure into one HTML file for side-by-side review.
     output_file_path = os.path.join(self.log_path, 'results.html')
     wputils.BokehFigure.save_figures(figure_list, output_file_path)
    def plot_iperf_result(self,
                          testcase_params,
                          result,
                          figure=None,
                          output_file_path=None):
        """Plots iperf throughput and RSSI over time during a roaming test.

        Args:
            testcase_params: dict containing all test params
            result: dict containing test results
            figure: optional bokeh figure object to add current plot to
            output_file_path: optional path to output file
        """
        if not figure:
            figure = wputils.BokehFigure(title=self.current_test_name,
                                         x_label='Time (s)',
                                         primary_y_label='Throughput (Mbps)',
                                         secondary_y_label='RSSI (dBm)')
        # Throughput samples arrive every IPERF_INTERVAL seconds, so the time
        # axis is reconstructed from the sample index.
        sample_count = len(result['throughput'])
        time_stamps = [
            sample * IPERF_INTERVAL for sample in range(sample_count)
        ]
        figure.add_line(time_stamps, result['throughput'], 'Throughput',
                        width=1)
        rssi_result = result['rssi_result']
        # RSSI is plotted against its own recorded time stamps on the
        # secondary axis.
        figure.add_line(rssi_result['time_stamp'],
                        rssi_result['signal_poll_rssi']['data'],
                        'RSSI',
                        y_axis='secondary')

        figure.generate_figure(output_file_path)
    def plot_ping_result(self,
                         testcase_params,
                         result,
                         figure=None,
                         output_file_path=None):
        """Plots ping RTTs together with RSSI over time during a roaming test.

        Args:
            testcase_params: dict containing all test params
            result: dict containing test results
            figure: optional bokeh figure object to add current plot to
            output_file_path: optional path to output file
        """
        if not figure:
            figure = wputils.BokehFigure(title=self.current_test_name,
                                         x_label='Time (ms)',
                                         primary_y_label='RTT (ms)',
                                         secondary_y_label='RSSI (dBm)')
        ping_result = result['ping_result']
        rssi_result = result['rssi_result']
        # RTT goes on the primary axis; RSSI on the secondary axis.
        figure.add_line(x_data=ping_result['time_stamp'],
                        y_data=ping_result['rtt'],
                        legend='Ping RTT',
                        width=1)
        figure.add_line(x_data=rssi_result['time_stamp'],
                        y_data=rssi_result['signal_poll_rssi']['data'],
                        legend='RSSI',
                        y_axis='secondary')
        figure.generate_figure(output_file_path)
Пример #6
0
    def process_testclass_results(self):
        """Saves all test results to enable comparison.

        Compiles per-channel RSSI summaries over all tested positions,
        publishes per-channel average RSSI metrics, and saves one RSSI vs.
        position plot per channel into results.html.
        """
        testclass_data = collections.OrderedDict()
        for test_result in self.testclass_results:
            current_params = test_result['testcase_params']

            channel = current_params['channel']
            channel_data = testclass_data.setdefault(
                channel,
                collections.OrderedDict(orientation=[],
                                        rssi=collections.OrderedDict(
                                            signal_poll_rssi=[],
                                            chain_0_rssi=[],
                                            chain_1_rssi=[])))

            channel_data['orientation'].append(current_params['orientation'])
            # Each test contributes the first entry of its per-test mean
            # array for every monitored RSSI metric.
            channel_data['rssi']['signal_poll_rssi'].append(
                test_result['postprocessed_results']['signal_poll_rssi']
                ['mean'][0])
            channel_data['rssi']['chain_0_rssi'].append(
                test_result['postprocessed_results']['chain_0_rssi']['mean']
                [0])
            channel_data['rssi']['chain_1_rssi'].append(
                test_result['postprocessed_results']['chain_1_rssi']['mean']
                [0])

        # Publish test class metrics
        for channel, channel_data in testclass_data.items():
            for rssi_metric, rssi_metric_value in channel_data['rssi'].items():
                metric_name = 'ota_summary_ch{}.avg_{}'.format(
                    channel, rssi_metric)
                metric_value = numpy.mean(rssi_metric_value)
                self.testclass_metric_logger.add_metric(
                    metric_name, metric_value)

        # Plot test class results
        chamber_mode = self.testclass_results[0]['testcase_params'][
            'chamber_mode']
        if chamber_mode == 'StirrersOn':
            # Continuous stirrer runs have no meaningful position axis.
            return
        if chamber_mode == 'orientation':
            x_label = 'Angle (deg)'
        else:
            # 'stepped stirrers' and any unrecognized chamber mode fall back
            # to a position index (previously an unknown mode crashed with an
            # unbound x_label).
            x_label = 'Position Index'
        plots = []
        for channel, channel_data in testclass_data.items():
            current_plot = wputils.BokehFigure(
                title='Channel {} - Rssi vs. Position'.format(channel),
                x_label=x_label,
                primary_y_label='RSSI (dBm)',
            )
            for rssi_metric, rssi_metric_value in channel_data['rssi'].items():
                current_plot.add_line(channel_data['orientation'],
                                      rssi_metric_value, rssi_metric)
            current_plot.generate_figure()
            plots.append(current_plot)
        current_context = context.get_current_context().get_full_output_path()
        plot_file_path = os.path.join(current_context, 'results.html')
        wputils.BokehFigure.save_figures(plots, plot_file_path)
Пример #7
0
    def post_process_results(self, test_result):
        """Extracts results and saves plots and JSON formatted results.

        Args:
            test_result: dict containing attenuation, iPerfResult object and
            other meta data
        Returns:
            test_result_dict: dict containing post-processed results including
            avg throughput, other metrics, and other meta data
        """
        # Save output as text file
        test_name = self.current_test_name
        results_file_path = os.path.join(self.log_path,
                                         '{}.txt'.format(test_name))
        test_result_dict = {}
        test_result_dict['ap_settings'] = test_result['ap_settings'].copy()
        test_result_dict['attenuation'] = test_result['attenuation']
        test_result_dict['rssi'] = test_result['rssi_result'][
            'signal_poll_rssi']['mean']
        test_result_dict['llstats'] = (
            'TX MCS = {0} ({1:.1f}%). '
            'RX MCS = {2} ({3:.1f}%)'.format(
                test_result['llstats']['summary']['common_tx_mcs'],
                test_result['llstats']['summary']['common_tx_mcs_freq'] * 100,
                test_result['llstats']['summary']['common_rx_mcs'],
                test_result['llstats']['summary']['common_rx_mcs_freq'] * 100))
        if test_result['iperf_result'].instantaneous_rates:
            # Drop the configured warm-up interval and the final (partial)
            # sample; the 8 * 1.024**2 factor converts the reported rate to
            # Mbps — presumably from MB/s, TODO confirm against iPerfResult.
            instantaneous_rates_Mbps = [
                rate * 8 * (1.024**2)
                for rate in test_result['iperf_result'].instantaneous_rates[
                    self.testclass_params['iperf_ignored_interval']:-1]
            ]
            tput_standard_deviation = test_result[
                'iperf_result'].get_std_deviation(
                    self.testclass_params['iperf_ignored_interval']) * 8
        else:
            # Use an empty list so the aggregation and plotting below degrade
            # gracefully; the previous float('nan') placeholder crashed in
            # min() and len() when iperf produced no samples.
            instantaneous_rates_Mbps = []
            tput_standard_deviation = float('nan')
        test_result_dict['iperf_results'] = {
            'instantaneous_rates':
            instantaneous_rates_Mbps,
            'avg_throughput':
            (numpy.mean(instantaneous_rates_Mbps)
             if instantaneous_rates_Mbps else float('nan')),
            'std_deviation':
            tput_standard_deviation,
            'min_throughput':
            min(instantaneous_rates_Mbps, default=float('nan'))
        }
        with open(results_file_path, 'w') as results_file:
            json.dump(test_result_dict, results_file)
        # Plot and save
        figure = wputils.BokehFigure(test_name,
                                     x_label='Time (s)',
                                     primary_y_label='Throughput (Mbps)')
        time_data = list(range(0, len(instantaneous_rates_Mbps)))
        figure.add_line(time_data,
                        instantaneous_rates_Mbps,
                        legend=self.current_test_name,
                        marker='circle')
        output_file_path = os.path.join(self.log_path,
                                        '{}.html'.format(test_name))
        figure.generate_figure(output_file_path)
        return test_result_dict
Пример #8
0
    def plot_rssi_vs_time(self, rssi_result, postprocessed_results,
                          center_curves):
        """Function to plot RSSI vs time.

        Args:
            rssi_result: dict containing raw RSSI data
            postprocessed_results: compiled arrays of RSSI data
            center_curves: boolean indicating whether to shift curves to align
            them with predicted RSSIs
        """
        # Label fix: the previous expression (center_curves * 'Centered' +
        # 'RSSI (dBm)') omitted the separating space.
        figure = wputils.BokehFigure(
            self.current_test_name,
            x_label='Time (s)',
            primary_y_label='Centered RSSI (dBm)'
            if center_curves else 'RSSI (dBm)',
        )

        # yapf: disable
        rssi_time_series = collections.OrderedDict(
            [('signal_poll_rssi', []),
             ('signal_poll_avg_rssi', []),
             ('scan_rssi', []),
             ('chain_0_rssi', []),
             ('chain_1_rssi', []),
             ('predicted_rssi', [])])
        # yapf: enable
        for key, val in rssi_time_series.items():
            if 'predicted_rssi' in key:
                # Repeat each predicted sample once per polled RSSI sample so
                # both series share the same time base.
                rssi_time_series[key] = [
                    x for x in postprocessed_results[key] for copies in range(
                        len(rssi_result['rssi_result'][0]['signal_poll_rssi']
                            ['data']))
                ]
            elif 'rssi' in key:
                if center_curves:
                    # Shift each curve by its mean error vs. predicted RSSI,
                    # ignoring NaN error samples.
                    filtered_error = [
                        x for x in postprocessed_results[key]['error']
                        if not math.isnan(x)
                    ]
                    if filtered_error:
                        avg_shift = statistics.mean(filtered_error)
                    else:
                        avg_shift = 0
                    rssi_time_series[key] = [
                        x - avg_shift
                        for x in postprocessed_results[key]['data']
                    ]
                else:
                    rssi_time_series[key] = postprocessed_results[key]['data']
            time_vec = [
                self.testclass_params['polling_frequency'] * x
                for x in range(len(rssi_time_series[key]))
            ]
            if len(rssi_time_series[key]) > 0:
                figure.add_line(time_vec, rssi_time_series[key], key)

        output_file_path = os.path.join(self.log_path,
                                        self.current_test_name + '.html')
        figure.generate_figure(output_file_path)
    def process_testclass_results(self):
        """Saves all test results to enable comparison.

        Compiles range vs. position data per channel, plots range over
        position/angle, publishes per-channel average range metrics, and
        saves a JSON summary of the compiled data.
        """
        WifiPingTest.process_testclass_results(self)

        range_vs_angle = collections.OrderedDict()
        for test in self.testclass_results:
            curr_params = test['testcase_params']
            curr_config = curr_params['channel']
            if curr_config in range_vs_angle:
                if curr_params['position'] not in range_vs_angle[curr_config][
                        'position']:
                    range_vs_angle[curr_config]['position'].append(
                        curr_params['position'])
                    range_vs_angle[curr_config]['range'].append(test['range'])
                    range_vs_angle[curr_config]['llstats_at_range'].append(
                        test['llstats_at_range'])
                else:
                    # Retests overwrite the most recent entry — assumes
                    # repeats of a position arrive consecutively; TODO confirm
                    # against the test scheduler.
                    range_vs_angle[curr_config]['range'][-1] = test['range']
                    range_vs_angle[curr_config]['llstats_at_range'][-1] = test[
                        'llstats_at_range']
            else:
                range_vs_angle[curr_config] = {
                    'position': [curr_params['position']],
                    'range': [test['range']],
                    'llstats_at_range': [test['llstats_at_range']]
                }
        chamber_mode = self.testclass_results[0]['testcase_params'][
            'chamber_mode']
        if chamber_mode == 'orientation':
            x_label = 'Angle (deg)'
        else:
            # 'stepped stirrers' and any unrecognized chamber mode fall back
            # to a position index (previously x_label could be unbound here).
            x_label = 'Position Index'
        figure = wputils.BokehFigure(
            title='Range vs. Position',
            x_label=x_label,
            primary_y_label='Range (dB)',
        )
        for channel, channel_data in range_vs_angle.items():
            figure.add_line(x_data=channel_data['position'],
                            y_data=channel_data['range'],
                            hover_text=channel_data['llstats_at_range'],
                            legend='Channel {}'.format(channel))
            average_range = sum(channel_data['range']) / len(
                channel_data['range'])
            self.log.info('Average range for Channel {} is: {}dB'.format(
                channel, average_range))
            metric_name = 'ota_summary_ch{}.avg_range'.format(channel)
            self.testclass_metric_logger.add_metric(metric_name, average_range)
        current_context = context.get_current_context().get_full_output_path()
        plot_file_path = os.path.join(current_context, 'results.html')
        figure.generate_figure(plot_file_path)

        # Save results
        results_file_path = os.path.join(current_context,
                                         'testclass_summary.json')
        with open(results_file_path, 'w') as results_file:
            json.dump(range_vs_angle, results_file, indent=4)
    def process_consistency_results(self, testcase_params, results_dict):
        """Function to process roaming consistency results.

        Compiles the roam events recorded in consistency tests, plots the
        per-attenuation results for easy visualization, and saves the
        compiled statistics to a JSON file.

        Args:
            testcase_params: dict containing all test results and meta data
            results_dict: dict containing consistency test results
        """
        # make figure placeholder and get relevant functions
        # NOTE(review): assumes the test name contains 'ping' or 'iperf';
        # any other name would leave these helpers unbound — confirm naming
        # convention with the test generator.
        if 'ping' in self.current_test_name:
            detect_gaps = self.detect_ping_gaps
            plot_result = self.plot_ping_result
            primary_y_axis = 'RTT (ms)'
        elif 'iperf' in self.current_test_name:
            detect_gaps = self.detect_iperf_gaps
            plot_result = self.plot_iperf_result
            primary_y_axis = 'Throughput (Mbps)'
        # loop over results
        roam_stats = collections.OrderedDict()
        current_context = context.get_current_context().get_full_output_path()
        for secondary_atten, results_list in results_dict.items():
            figure = wputils.BokehFigure(title=self.current_test_name,
                                         x_label='Time (ms)',
                                         primary_y_label=primary_y_axis,
                                         secondary_y_label='RSSI (dBm)')
            roam_stats[secondary_atten] = collections.OrderedDict()
            for result in results_list:
                self.detect_roam_events(result)
                # Accumulate roam transition counts across repeated runs at
                # this attenuation.
                for roam_transition, count in result['roam_counts'].items():
                    roam_stats[secondary_atten][
                        roam_transition] = roam_stats[secondary_atten].get(
                            roam_transition, 0) + count
                detect_gaps(result)
                plot_result(testcase_params, result, figure=figure)
            # save plot
            plot_file_name = (self.current_test_name + '_' +
                              str(secondary_atten) + '.html')

            plot_file_path = os.path.join(current_context, plot_file_name)
            figure.save_figure(plot_file_path)
        results_dict['roam_stats'] = roam_stats

        results_file_path = os.path.join(current_context,
                                         self.current_test_name + '.json')
        with open(results_file_path, 'w') as results_file:
            # Dump the full compiled results; previously this serialized only
            # the last `result` left over from the inner loop above.
            json.dump(wputils.serialize_dict(results_dict),
                      results_file,
                      indent=4)
Пример #11
0
    def plot_rssi_distribution(self, postprocessed_results):
        """Function to plot RSSI distributions.

        Computes the empirical PDF and CDF of each monitored RSSI metric and
        plots PDFs on the primary axis and CDFs on the secondary axis.

        Args:
            postprocessed_results: compiled arrays of RSSI data
        """
        monitored_rssis = ['signal_poll_rssi', 'chain_0_rssi', 'chain_1_rssi']

        rssi_dist = collections.OrderedDict()
        for rssi_key in monitored_rssis:
            rssi_data = postprocessed_results[rssi_key]
            # Counter gives a single O(n) pass over the samples instead of
            # calling list.count() once per unique value.
            value_counts = collections.Counter(rssi_data['data'])
            unique_rssi = sorted(value_counts)
            total_count = sum(value_counts.values())
            rssi_pdf = [
                value_counts[value] / total_count for value in unique_rssi
            ]
            rssi_cdf = []
            cum_prob = 0
            for prob in rssi_pdf:
                cum_prob += prob
                rssi_cdf.append(cum_prob)
            rssi_dist[rssi_key] = collections.OrderedDict(
                rssi_values=unique_rssi, rssi_pdf=rssi_pdf, rssi_cdf=rssi_cdf)

        figure = wputils.BokehFigure(self.current_test_name,
                                     x_label='RSSI (dBm)',
                                     primary_y_label='p(RSSI = x)',
                                     secondary_y_label='p(RSSI <= x)')
        for rssi_key, rssi_data in rssi_dist.items():
            figure.add_line(x_data=rssi_data['rssi_values'],
                            y_data=rssi_data['rssi_pdf'],
                            legend='{} PDF'.format(rssi_key),
                            y_axis='default')
            figure.add_line(x_data=rssi_data['rssi_values'],
                            y_data=rssi_data['rssi_cdf'],
                            legend='{} CDF'.format(rssi_key),
                            y_axis='secondary')
        output_file_path = os.path.join(self.log_path,
                                        self.current_test_name + '_dist.html')
        figure.generate_figure(output_file_path)
    def setup_test(self):
        """Per-test setup.

        Backs up the SAR power table on the device, loads it into a
        dataframe, enables location services, fixes the attenuation level,
        and prepares the result figure.
        """
        #self.dut.droid.bluetoothFactoryReset()
        #bt_utils.enable_bluetooth(self.dut.droid, self.bt_device.ed)

        # To prevent default file from being overwritten
        self.dut.adb.shell('cp {} {}'.format(self.power_file_paths[0],
                                             self.power_file_paths[1]))

        # Work off the backup copy so the device default stays intact.
        self.sar_file_path = self.power_file_paths[1]
        self.sar_file_name = os.path.basename(self.power_file_paths[1])
        self.bt_sar_df = self.read_sar_table(self.dut)

        # Location services are needed on both sides of the link.
        utils.set_location_service(self.bt_device, True)
        utils.set_location_service(self.dut, True)

        self.attenuator.set_atten(FIXED_ATTENUATION)
        self.log.info('Attenuation set to {} dB'.format(FIXED_ATTENUATION))

        # BokehFigure object
        self.plot = wifi_utils.BokehFigure(title='{}'.format(
            self.current_test_name),
                                           x_label='Scenarios',
                                           primary_y_label='TX power(dBm)')
    def process_testclass_results(self):
        """Saves plot with all test results to enable comparison.

        Aggregates per-test RvR curves by test id (channel, mode, traffic
        type, traffic direction), publishes metrics averaged over
        orientations, and overlays average/median throughput on each plot.
        """
        # Plot individual test id results raw data and compile metrics
        plots = collections.OrderedDict()
        compiled_data = collections.OrderedDict()
        for result in self.testclass_results:
            test_id = tuple(
                self.extract_test_id(
                    result['testcase_params'],
                    ['channel', 'mode', 'traffic_type', 'traffic_direction'
                     ]).items())
            if test_id not in plots:
                # Initialize test id data when not present (one literal
                # replaces the previous redundant two-step metrics init).
                compiled_data[test_id] = {
                    'throughput': [],
                    'metrics': {key: []
                                for key in result['metrics']}
                }
                plots[test_id] = wputils.BokehFigure(
                    title='Channel {} {} ({} {})'.format(
                        result['testcase_params']['channel'],
                        result['testcase_params']['mode'],
                        result['testcase_params']['traffic_type'],
                        result['testcase_params']['traffic_direction']),
                    x_label='Attenuation (dB)',
                    primary_y_label='Throughput (Mbps)')
            # Compile test id data and metrics
            compiled_data[test_id]['throughput'].append(
                result['throughput_receive'])
            compiled_data[test_id]['total_attenuation'] = result[
                'total_attenuation']
            for metric_key, metric_value in result['metrics'].items():
                compiled_data[test_id]['metrics'][metric_key].append(
                    metric_value)
            # Add test id to plots
            plots[test_id].add_line(result['total_attenuation'],
                                    result['throughput_receive'],
                                    result['test_name'],
                                    width=1,
                                    style='dashed',
                                    marker='circle')

        # Compute average RvRs and compound metrics over orientations
        for test_id, test_data in compiled_data.items():
            test_id_dict = dict(test_id)
            metric_tag = '{}_{}_ch{}_{}'.format(
                test_id_dict['traffic_type'],
                test_id_dict['traffic_direction'], test_id_dict['channel'],
                test_id_dict['mode'])
            # Fraction of orientations that hit the high-throughput target
            # (high_tput_range == -1 marks a miss).
            high_tput_hit_freq = numpy.mean(
                numpy.not_equal(test_data['metrics']['high_tput_range'], -1))
            self.testclass_metric_logger.add_metric(
                '{}.high_tput_hit_freq'.format(metric_tag), high_tput_hit_freq)
            for metric_key, metric_value in test_data['metrics'].items():
                metric_key = "{}.avg_{}".format(metric_tag, metric_key)
                metric_value = numpy.mean(metric_value)
                self.testclass_metric_logger.add_metric(
                    metric_key, metric_value)
            test_data['avg_rvr'] = numpy.mean(test_data['throughput'], 0)
            test_data['median_rvr'] = numpy.median(test_data['throughput'], 0)
            plots[test_id].add_line(test_data['total_attenuation'],
                                    test_data['avg_rvr'],
                                    legend='Average Throughput',
                                    marker='circle')
            plots[test_id].add_line(test_data['total_attenuation'],
                                    test_data['median_rvr'],
                                    legend='Median Throughput',
                                    marker='square')

        figure_list = []
        for plot in plots.values():
            plot.generate_figure()
            figure_list.append(plot)
        output_file_path = os.path.join(self.log_path, 'results.html')
        wputils.BokehFigure.save_figures(figure_list, output_file_path)
    def process_test_results(self, rvr_result):
        """Saves plots and JSON formatted results.

        Args:
            rvr_result: dict containing attenuation, throughput and other meta
            data
        """
        # Save output as text file
        test_name = self.current_test_name
        results_file_path = os.path.join(self.log_path,
                                         '{}.json'.format(test_name))
        with open(results_file_path, 'w') as results_file:
            json.dump(rvr_result, results_file, indent=4)
        # Plot and save
        figure = wputils.BokehFigure(title=test_name,
                                     x_label='Attenuation (dB)',
                                     primary_y_label='Throughput (Mbps)')
        try:
            golden_path = next(file_name
                               for file_name in self.golden_files_list
                               if test_name in file_name)
            with open(golden_path, 'r') as golden_file:
                golden_results = json.load(golden_file)
            golden_attenuation = [
                att + golden_results['fixed_attenuation']
                for att in golden_results['attenuation']
            ]
            throughput_limits = self.compute_throughput_limits(rvr_result)
            shaded_region = {
                'x_vector': throughput_limits['attenuation'],
                'lower_limit': throughput_limits['lower_limit'],
                'upper_limit': throughput_limits['upper_limit']
            }
            figure.add_line(golden_attenuation,
                            golden_results['throughput_receive'],
                            'Golden Results',
                            color='green',
                            marker='circle',
                            shaded_region=shaded_region)
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit); a missing or unreadable golden
            # file is expected and simply skips the golden overlay.
            self.log.warning('Golden file not found')

        # Generate graph annotations
        hover_text = [
            'TX MCS = {0} ({1:.1f}%). RX MCS = {2} ({3:.1f}%)'.format(
                curr_llstats['summary']['common_tx_mcs'],
                curr_llstats['summary']['common_tx_mcs_freq'] * 100,
                curr_llstats['summary']['common_rx_mcs'],
                curr_llstats['summary']['common_rx_mcs_freq'] * 100)
            for curr_llstats in rvr_result['llstats']
        ]
        figure.add_line(rvr_result['total_attenuation'],
                        rvr_result['throughput_receive'],
                        'Test Results',
                        hover_text=hover_text,
                        color='red',
                        marker='circle')

        output_file_path = os.path.join(self.log_path,
                                        '{}.html'.format(test_name))
        figure.generate_figure(output_file_path)

        # Set test metrics
        rvr_result['metrics'] = {}
        rvr_result['metrics']['peak_tput'] = max(
            rvr_result['throughput_receive'])
        if self.publish_testcase_metrics:
            self.testcase_metric_logger.add_metric(
                'peak_tput', rvr_result['metrics']['peak_tput'])

        # high_tput_range: last attenuation before throughput permanently
        # drops below the high target; -1 if it never reached the target.
        tput_below_limit = [
            tput < self.testclass_params['tput_metric_targets'][
                rvr_result['testcase_params']['mode']]['high']
            for tput in rvr_result['throughput_receive']
        ]
        rvr_result['metrics']['high_tput_range'] = -1
        for idx in range(len(tput_below_limit)):
            if all(tput_below_limit[idx:]):
                if idx == 0:
                    #Throughput was never above limit
                    rvr_result['metrics']['high_tput_range'] = -1
                else:
                    rvr_result['metrics']['high_tput_range'] = rvr_result[
                        'total_attenuation'][max(idx, 1) - 1]
                break
        if self.publish_testcase_metrics:
            self.testcase_metric_logger.add_metric(
                'high_tput_range', rvr_result['metrics']['high_tput_range'])

        # low_tput_range: same search against the low target; the for/else
        # assigns -1 only when throughput never permanently drops below it.
        tput_below_limit = [
            tput < self.testclass_params['tput_metric_targets'][
                rvr_result['testcase_params']['mode']]['low']
            for tput in rvr_result['throughput_receive']
        ]
        for idx in range(len(tput_below_limit)):
            if all(tput_below_limit[idx:]):
                rvr_result['metrics']['low_tput_range'] = rvr_result[
                    'total_attenuation'][max(idx, 1) - 1]
                break
        else:
            rvr_result['metrics']['low_tput_range'] = -1
        if self.publish_testcase_metrics:
            self.testcase_metric_logger.add_metric(
                'low_tput_range', rvr_result['metrics']['low_tput_range'])
    def process_testclass_results(self):
        """Saves and plots test results from all executed test cases.

        Results are grouped into one figure per (channel, mode, rate) test
        id, with one line per (chain_mask, num_streams) configuration. Each
        figure plots measured sensitivity against DUT orientation; an
        average-sensitivity metric is also published per line, and all
        figures are bundled into a single results.html.
        """
        testclass_results_dict = collections.OrderedDict()
        id_fields = ['channel', 'mode', 'rate']
        plots = []
        for result in self.testclass_results:
            test_id = self.extract_test_id(result['testcase_params'],
                                           id_fields)
            # Tuple of items gives a hashable, order-stable grouping key.
            test_id = tuple(test_id.items())
            chain_mask = result['testcase_params']['chain_mask']
            num_streams = result['testcase_params']['num_streams']
            line_id = (chain_mask, num_streams)
            if test_id not in testclass_results_dict:
                testclass_results_dict[test_id] = collections.OrderedDict()
            if line_id not in testclass_results_dict[test_id]:
                testclass_results_dict[test_id][line_id] = {
                    'orientation': [],
                    'sensitivity': []
                }
            orientation = result['testcase_params']['orientation']
            # Only trust the sensitivity figure when the test reached at
            # least 95% of peak throughput; otherwise mark the point invalid.
            if result['peak_throughput_pct'] >= 95:
                sensitivity = result['sensitivity']
            else:
                sensitivity = float('nan')
            if orientation not in testclass_results_dict[test_id][line_id][
                    'orientation']:
                testclass_results_dict[test_id][line_id]['orientation'].append(
                    orientation)
                testclass_results_dict[test_id][line_id]['sensitivity'].append(
                    sensitivity)
            else:
                # Repeated orientation: overwrite the most recent point.
                # NOTE(review): assumes a retest follows its original
                # immediately, so [-1] is the matching entry — confirm.
                testclass_results_dict[test_id][line_id]['sensitivity'][
                    -1] = sensitivity

        # The output directory is loop-invariant; computing it before the
        # loop also keeps it defined for the summary file below even when
        # there are no results to iterate over.
        current_context = (
            context.get_current_context().get_full_output_path())
        for test_id, test_data in testclass_results_dict.items():
            test_id_dict = dict(test_id)
            if 'legacy' in test_id_dict['mode']:
                test_id_str = 'Channel {} - {} {}Mbps'.format(
                    test_id_dict['channel'], test_id_dict['mode'],
                    test_id_dict['rate'])
            else:
                test_id_str = 'Channel {} - {} MCS{}'.format(
                    test_id_dict['channel'], test_id_dict['mode'],
                    test_id_dict['rate'])
            curr_plot = wputils.BokehFigure(
                title=str(test_id_str),
                x_label='Orientation (deg)',
                primary_y_label='Sensitivity (dBm)')
            for line_id, line_results in test_data.items():
                curr_plot.add_line(line_results['orientation'],
                                   line_results['sensitivity'],
                                   legend='Nss{} - Chain Mask {}'.format(
                                       line_id[1], line_id[0]),
                                   marker='circle')
                if 'legacy' in test_id_dict['mode']:
                    metric_tag = 'ota_summary_ch{}_{}_{}_ch{}'.format(
                        test_id_dict['channel'], test_id_dict['mode'],
                        test_id_dict['rate'], line_id[0])
                else:
                    metric_tag = 'ota_summary_ch{}_{}_mcs{}_nss{}_ch{}'.format(
                        test_id_dict['channel'], test_id_dict['mode'],
                        test_id_dict['rate'], line_id[1], line_id[0])

                # nanmean ignores orientations invalidated above.
                metric_name = metric_tag + '.avg_sensitivity'
                metric_value = numpy.nanmean(line_results['sensitivity'])
                self.testclass_metric_logger.add_metric(
                    metric_name, metric_value)
                self.log.info(("Average Sensitivity for {}: {:.1f}").format(
                    metric_tag, metric_value))
            output_file_path = os.path.join(current_context,
                                            str(test_id_str) + '.html')
            curr_plot.generate_figure(output_file_path)
            plots.append(curr_plot)
        # Bundle all per-test figures into one summary page.
        output_file_path = os.path.join(current_context, 'results.html')
        wputils.BokehFigure.save_figures(plots, output_file_path)
# --- Example #16 ---
    def process_testclass_results(self):
        """Saves all test results to enable comparison.

        Aggregates throughput vs. chamber position per channel and test
        configuration, publishes average/minimum throughput metrics, and
        plots one rate-vs-position figure per channel into results.html.
        """
        testclass_data = collections.OrderedDict()
        for test in self.testclass_results:
            current_params = test['testcase_params']
            channel_data = testclass_data.setdefault(current_params['channel'],
                                                     collections.OrderedDict())
            test_id = tuple(
                self.extract_test_id(current_params, [
                    'mode', 'traffic_type', 'traffic_direction', 'signal_level'
                ]).items())
            test_data = channel_data.setdefault(
                test_id, collections.OrderedDict(position=[], throughput=[]))
            # Average the instantaneous rates after the ignored warm-up
            # interval. The 8 * 1.024**2 factor presumably converts the
            # iperf rate units to Mbps — confirm against iperf_result.
            current_throughput = (numpy.mean(
                test['iperf_result'].instantaneous_rates[
                    self.testclass_params['iperf_ignored_interval']:-1])
                                  ) * 8 * (1.024**2)
            test_data['position'].append(current_params['position'])
            test_data['throughput'].append(current_throughput)

        chamber_mode = self.testclass_results[0]['testcase_params'][
            'chamber_mode']
        if chamber_mode == 'orientation':
            x_label = 'Angle (deg)'
        else:
            # Default covers 'stepped stirrers' and any other chamber mode;
            # previously x_label was left undefined for unknown modes,
            # crashing with NameError when building the plots below.
            x_label = 'Position Index'

        # Publish test class metrics
        for channel, channel_data in testclass_data.items():
            for test_id, test_data in channel_data.items():
                test_id_dict = dict(test_id)
                metric_tag = 'ota_summary_{}_{}_{}_ch{}_{}'.format(
                    test_id_dict['signal_level'], test_id_dict['traffic_type'],
                    test_id_dict['traffic_direction'], channel,
                    test_id_dict['mode'])
                metric_name = metric_tag + '.avg_throughput'
                metric_value = numpy.mean(test_data['throughput'])
                self.testclass_metric_logger.add_metric(
                    metric_name, metric_value)
                metric_name = metric_tag + '.min_throughput'
                metric_value = min(test_data['throughput'])
                self.testclass_metric_logger.add_metric(
                    metric_name, metric_value)

        # Plot test class results
        plots = []
        for channel, channel_data in testclass_data.items():
            current_plot = wputils.BokehFigure(
                title='Channel {} - Rate vs. Position'.format(channel),
                x_label=x_label,
                primary_y_label='Rate (Mbps)',
            )
            for test_id, test_data in channel_data.items():
                test_id_dict = dict(test_id)
                legend = '{}, {} {}, {} RSSI'.format(
                    test_id_dict['mode'], test_id_dict['traffic_type'],
                    test_id_dict['traffic_direction'],
                    test_id_dict['signal_level'])
                current_plot.add_line(test_data['position'],
                                      test_data['throughput'], legend)
            current_plot.generate_figure()
            plots.append(current_plot)
        current_context = context.get_current_context().get_full_output_path()
        plot_file_path = os.path.join(current_context, 'results.html')
        wputils.BokehFigure.save_figures(plots, plot_file_path)
    def process_ping_results(self, testcase_params, ping_range_result):
        """Saves and plots ping results.

        Computes the attenuation at which the link goes out of range,
        annotates the result dict with range/throughput/MCS summaries,
        saves it to JSON, and (for non-range tests) plots RTT traces.

        Args:
            testcase_params: dict of test parameters, including the swept
                attenuation values under 'atten_range'.
            ping_range_result: dict containing ping results and metadata
        """
        # Range is the last attenuation index before packet loss stays
        # above the configured threshold for the rest of the sweep.
        loss_per_atten = [
            entry['packet_loss_percentage']
            for entry in ping_range_result['ping_results']
        ]
        loss_threshold = self.testclass_params['range_ping_loss_threshold']
        range_index = -1
        for start in range(len(loss_per_atten)):
            if all(loss > loss_threshold
                   for loss in loss_per_atten[start:]):
                range_index = max(start, 1) - 1
                break
        ping_range_result['atten_at_range'] = testcase_params['atten_range'][
            range_index]
        ping_range_result['peak_throughput_pct'] = 100 - min(loss_per_atten)
        ping_range_result['range'] = (ping_range_result['atten_at_range'] +
                                      ping_range_result['fixed_attenuation'])
        # Summarize the dominant TX/RX MCS (and its frequency) at range.
        llstats_summary = ping_range_result['llstats'][range_index]['summary']
        ping_range_result['llstats_at_range'] = (
            'TX MCS = {0} ({1:.1f}%). '
            'RX MCS = {2} ({3:.1f}%)'.format(
                llstats_summary['common_tx_mcs'],
                llstats_summary['common_tx_mcs_freq'] * 100,
                llstats_summary['common_rx_mcs'],
                llstats_summary['common_rx_mcs_freq'] * 100))

        # Save results
        results_file_path = os.path.join(
            self.log_path, '{}.json'.format(self.current_test_name))
        with open(results_file_path, 'w') as results_file:
            json.dump(ping_range_result, results_file, indent=4)

        # RTT plots are only generated for non-range tests.
        if 'range' in self.current_test_name:
            return
        figure = wputils.BokehFigure(
            self.current_test_name,
            x_label='Timestamp (s)',
            primary_y_label='Round Trip Time (ms)')
        for result_index, ping_result in enumerate(
                ping_range_result['ping_results']):
            if len(ping_result['rtt']) > 1:
                start_time = ping_result['time_stamp'][0]
                relative_times = [
                    stamp - start_time for stamp in ping_result['time_stamp']
                ]
                figure.add_line(
                    relative_times, ping_result['rtt'], 'RTT @ {}dB'.format(
                        ping_range_result['attenuation'][result_index]))

        output_file_path = os.path.join(
            self.log_path, '{}.html'.format(self.current_test_name))
        figure.generate_figure(output_file_path)