def pass_fail_roaming_consistency(self, results_dict):
        """Function to evaluate roaming consistency results.

        The function looks for the roams recorded in multiple runs of the same
        attenuation waveform and checks that the DUT reliably roams to the
        same network

        Args:
            results_dict: dict containing consistency test results
        """
        test_fail = False
        for secondary_atten, roam_stats in results_dict['roam_stats'].items():
            total_roams = sum(roam_stats.values())
            common_roam = max(roam_stats, key=roam_stats.get)
            common_roam_frequency = roam_stats[common_roam] / total_roams
            self.log.info(
                '{}dB secondary atten. Most common roam: {}. Frequency: {}'.
                format(secondary_atten, common_roam, common_roam_frequency))
            if common_roam_frequency < self.testclass_params[
                    'consistency_threshold']:
                test_fail = True
                self.log.info('Unstable Roams at {}dB secondary att'.format(
                    secondary_atten))
        self.testcase_metric_logger.add_metric('common_roam_frequency',
                                               common_roam_frequency)
        if test_fail:
            asserts.fail('Inconsistent roaming detected.')
        else:
            asserts.explicit_pass('Consistent roaming at all levels.')
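
A minimal sketch of the results_dict shape this check expects; the attenuation levels, AP keys, and counts below are illustrative assumptions, not values from the source.

results_dict = {
    'roam_stats': {
        # secondary attenuation (dB) -> {roam target: count}
        10: {'AP1_5GHz': 9, 'AP2_2GHz': 1},
        20: {'AP1_5GHz': 4, 'AP2_2GHz': 6},
    }
}
for atten, roam_stats in results_dict['roam_stats'].items():
    total_roams = sum(roam_stats.values())
    common_roam = max(roam_stats, key=roam_stats.get)
    # with a consistency_threshold of 0.8, the 20 dB case (0.6) would fail
    print(atten, common_roam, roam_stats[common_roam] / total_roams)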
Example #2
    def process_results(self, sar_df, type='EDR'):
        """Determines the test results of the sweep.

         Parses the processed table with computed BT TX power values
         to return pass or fail.

        Args:
             sar_df: processed BT SAR table
        """
        if self.sar_version_2:
            breach_error_result = (
                sar_df['expected_tx_power'] + self.sar_margin[type] >
                sar_df['measured_tx_power']).all()
            if not breach_error_result:
                asserts.fail('Measured TX power exceeds expected')

        else:
            # checks for errors at particular points in the sweep
            # a Series comparison yields a Series, so reduce it with .any()
            # before using it as a boolean
            max_error_result = (abs(sar_df['delta']) >
                                self.max_error_threshold[type]).any()
            if max_error_result:
                asserts.fail('Maximum Error Threshold Exceeded')

            # checks for error accumulation across the sweep
            if sar_df['delta'].sum() > self.agg_error_threshold[type]:
                asserts.fail(
                    'Aggregate Error Threshold Exceeded. Error: {} Threshold: {}'
                    .format(sar_df['delta'].sum(),
                            self.agg_error_threshold[type]))

        self.sar_test_result.metric_value = 1
        asserts.explicit_pass('Measured and Expected Power Values in line')
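
A minimal sketch of the SAR v2 breach check above, assuming a 2 dB margin; the column names match the snippet, the values are made up.

import pandas as pd

sar_df = pd.DataFrame({
    'expected_tx_power': [10.0, 12.0, 9.5],
    'measured_tx_power': [9.8, 11.5, 9.9],
})
sar_margin = 2.0
# True only if every scenario stays under expected power plus margin
breach_ok = (sar_df['expected_tx_power'] + sar_margin >
             sar_df['measured_tx_power']).all()
print(breach_ok)  # True for these illustrative values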
 def test_iperf_single_ndp_aware_only_oob(self):
     """Measure throughput using iperf on a single NDP, with Aware enabled and
 no infrastructure connection. Use out-of-band discovery."""
     results = {}
     self.run_iperf_single_ndp_aware_only(use_ib=False, results=results)
     asserts.explicit_pass("test_iperf_single_ndp_aware_only_oob passes",
                           extras=results)
Example #4
    def pass_fail_check_rssi_stability(self, postprocessed_results):
        """Check the test result and decide if it passed or failed.

        Checks the RSSI test result and fails the test if the standard
        deviation of signal_poll_rssi is beyond the threshold defined in the
        config file.

        Args:
            postprocessed_results: compiled arrays of RSSI measurements
        """
        test_failed = any(
            stdev > self.test_params["stdev_tolerance"]
            for stdev in postprocessed_results["signal_poll_rssi"]["stdev"])
        signal_stdevs = [
            round(x, 2)
            for x in postprocessed_results["signal_poll_rssi"]["stdev"]
        ]
        chain_0_stdevs = [
            round(x, 2)
            for x in postprocessed_results["chain_0_rssi"]["stdev"]
        ]
        chain_1_stdevs = [
            round(x, 2)
            for x in postprocessed_results["chain_1_rssi"]["stdev"]
        ]
        test_message = (
            "RSSI stability {0}. Standard deviation was {1} dB "
            "(limit {2}), per chain standard deviation [{3}, {4}] dB".format(
                "failed" if test_failed else "passed", signal_stdevs,
                self.test_params["stdev_tolerance"], chain_0_stdevs,
                chain_1_stdevs))
        if test_failed:
            asserts.fail(test_message)
        asserts.explicit_pass(test_message)
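
A rough sketch of how the stdev inputs could be assembled from raw RSSI samples using the standard library; the dict layout mirrors the keys used above, and the sample values are made up.

import statistics

raw_samples = [-62.1, -61.8, -62.5, -61.9]  # illustrative signal_poll_rssi readings
postprocessed_results = {
    'signal_poll_rssi': {'stdev': [statistics.stdev(raw_samples)]},
    'chain_0_rssi': {'stdev': [0.21]},
    'chain_1_rssi': {'stdev': [0.35]},
}
# with stdev_tolerance = 1 dB, all of these would pass the any() check
print(postprocessed_results['signal_poll_rssi']['stdev'])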
Example #5
    def measure_and_process_result(self):
        """Measure the current drawn by the device for the period of
        self.duration, at the frequency of self.hz.

        If self.threshold exists, also verify that the average current of the
        measurement is below the acceptable threshold.
        """
        tag = self.current_test_name
        result = self.mon.measure_power(self.hz,
                                        self.duration,
                                        tag=tag,
                                        offset=self.offset)
        asserts.assert_true(result,
                            "Got empty measurement data set in %s." % tag)
        self.log.info(repr(result))
        data_path = os.path.join(self.mon_data_path, "%s.txt" % tag)
        monsoon.MonsoonData.save_to_text_file([result], data_path)
        actual_current = result.average_current
        actual_current_str = "%.2fmA" % actual_current
        result_extra = {"Average Current": actual_current_str}
        if self.threshold:
            model = utils.trim_model_name(self.dut.model)
            asserts.assert_true(tag in self.threshold[model],
                                "Acceptance threshold for %s is missing" % tag,
                                extras=result_extra)
            acceptable_threshold = self.threshold[model][tag]
            asserts.assert_true(
                actual_current < acceptable_threshold,
                ("Measured average current for %s - %s - is "
                 "higher than acceptable threshold %.2f.") %
                (tag, actual_current_str, acceptable_threshold),
                extras=result_extra)
        asserts.explicit_pass("Measurement finished for %s." % tag,
                              extras=result_extra)
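
The nested lookups above imply a threshold config shaped roughly like the following; the model and test names here are hypothetical placeholders.

threshold = {
    'model_x': {  # key as returned by utils.trim_model_name (assumed)
        'test_wifi_idle_current': 3.5,  # acceptable average current in mA
    },
}
print(threshold['model_x']['test_wifi_idle_current'])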
    def test_rtt_non_80211mc_supporting_ap_faked_as_supporting(self):
        """Scan for APs which do not support IEEE 802.11mc, maliciously modify the
        Responder config to indicate support and pass-through to service. Verify
        that get an error result.
        """
        dut = self.android_devices[0]
        non_rtt_aps = rutils.select_best_scan_results(
            rutils.scan_with_rtt_support_constraint(dut, False),
            select_count=1)
        dut.log.debug("Visible non-IEEE 802.11mc APs=%s", non_rtt_aps)
        asserts.assert_true(len(non_rtt_aps) > 0, "Need at least one AP!")
        non_rtt_aps = non_rtt_aps[0:1]  # pick first
        non_rtt_aps[0][rconsts.SCAN_RESULT_KEY_RTT_RESPONDER] = True  # falsify
        dut.log.debug("Visible non-IEEE 802.11mc APs=%s", non_rtt_aps)
        events = rutils.run_ranging(dut, non_rtt_aps, self.NUM_ITER,
                                    self.TIME_BETWEEN_ITERATIONS)
        stats = rutils.analyze_results(events, self.rtt_reference_distance_mm,
                                       self.rtt_reference_distance_margin_mm,
                                       self.rtt_min_expected_rssi_dbm,
                                       self.lci_reference, self.lcr_reference)
        dut.log.debug("Stats=%s", stats)

        for bssid, stat in stats.items():
            asserts.assert_true(stat['num_no_results'] == 0,
                                "Missing (timed-out) results",
                                extras=stats)
            asserts.assert_true(
                stat['num_failures'] == self.NUM_ITER,
                "Failures expected for falsified responder config",
                extras=stats)
        asserts.explicit_pass("RTT test done", extras=stats)
    def test_rtt_non_80211mc_supporting_aps_wo_privilege(self):
        """Scan for APs and perform RTT on non-IEEE 802.11mc supporting APs with the
        device not having privilege access (expect failures).
        """
        dut = self.android_devices[0]
        rutils.config_privilege_override(dut, True)
        non_rtt_aps = rutils.select_best_scan_results(
            rutils.scan_with_rtt_support_constraint(dut, False),
            select_count=1)
        dut.log.debug("Visible non-IEEE 802.11mc APs=%s", non_rtt_aps)
        asserts.assert_true(len(non_rtt_aps) > 0, "Need at least one AP!")
        events = rutils.run_ranging(dut, non_rtt_aps, self.NUM_ITER,
                                    self.TIME_BETWEEN_ITERATIONS)
        stats = rutils.analyze_results(events, self.rtt_reference_distance_mm,
                                       self.rtt_reference_distance_margin_mm,
                                       self.rtt_min_expected_rssi_dbm,
                                       self.lci_reference, self.lcr_reference)
        dut.log.debug("Stats=%s", stats)

        for bssid, stat in stats.items():
            asserts.assert_true(stat['num_no_results'] == 0,
                                "Missing (timed-out) results",
                                extras=stats)
            asserts.assert_true(
                stat['num_failures'] == self.NUM_ITER,
                "All one-sided RTT requests must fail when executed without privilege",
                extras=stats)
            for code in stat['status_codes']:
                asserts.assert_true(
                    code == rconsts.
                    EVENT_CB_RANGING_STATUS_RESPONDER_DOES_NOT_SUPPORT_IEEE80211MC,
                    "Expected non-support error code",
                    extras=stats)
        asserts.explicit_pass("RTT test done", extras=stats)
Example #8
    def pass_fail_check(self, average_current=None):
        """Check the test result and decide if it passed or failed.

        The threshold is provided in the config file. In this class, result is
        current in mA.
        """

        if not self.threshold or self.test_name not in self.threshold:
            self.log.error("No threshold is provided for the test '{}' in "
                           "the configuration file.".format(self.test_name))
            return

        current_threshold = self.threshold[self.test_name]
        if average_current:
            asserts.assert_true(
                abs(average_current - current_threshold) / current_threshold <
                self.pass_fail_tolerance,
                'Measured average current in [{}]: {:.2f}mA, which is '
                'out of the acceptable range {:.2f}±{:.2f}mA'.format(
                    self.test_name, average_current, current_threshold,
                    self.pass_fail_tolerance * current_threshold))
            asserts.explicit_pass(
                'Measurement finished for [{}]: {:.2f}mA, which is '
                'within the acceptable range {:.2f}±{:.2f}mA'.format(
                    self.test_name, average_current, current_threshold,
                    self.pass_fail_tolerance * current_threshold))
        else:
            asserts.fail(
                'Measurement did not complete; no average current reported.')
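
A worked instance of the relative-tolerance check above, with assumed numbers: a 100 mA threshold and 5% tolerance accept any average strictly between 95 and 105 mA.

average_current = 103.0     # measured average, mA (assumed)
current_threshold = 100.0   # from the config file (assumed)
pass_fail_tolerance = 0.05  # 5% relative tolerance
# |103 - 100| / 100 = 0.03 < 0.05, so this measurement would pass
assert abs(average_current - current_threshold) / current_threshold < \
    pass_fail_tolerance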
    def pass_fail_check_ping_range(self, result):
        """Check the test result and decide if it passed or failed.

        Checks whether the attenuation at which ping packet losses begin to
        exceed the threshold matches the range derived from golden
        rate-vs-range result files. The test fails if the ping range is
        range_gap_threshold worse than the RvR range.

        Args:
            result: dict containing ping results and meta data
        """
        # Get target range
        #rvr_range = self.get_range_from_rvr()
        # Set Blackbox metric
        if self.publish_testcase_metrics:
            self.testcase_metric_logger.add_metric('ping_range',
                                                   result['range'])
        # Evaluate test pass/fail
        test_message = ('Attenuation at range is {}dB. '
                        'LLStats at Range: {}'.format(
                            result['range'], result['llstats_at_range']))
        if result['peak_throughput_pct'] < 95:
            asserts.fail("(RESULT NOT RELIABLE) {}".format(test_message))
        else:
            asserts.explicit_pass(test_message)
    def test_rtt_supporting_ap_only(self):
        """Scan for APs and perform RTT only to those which support 802.11mc.

    Stress test: repeat ranging to the same AP. Verify rate of success and
    stability of results.
    """
        dut = self.android_devices[0]
        rtt_supporting_aps = rutils.scan_with_rtt_support_constraint(dut,
                                                                     True,
                                                                     repeat=10)
        dut.log.debug("RTT Supporting APs=%s", rtt_supporting_aps)

        num_iter = self.stress_test_min_iteration_count

        max_peers = dut.droid.wifiRttMaxPeersInRequest()
        asserts.assert_true(
            len(rtt_supporting_aps) > 0,
            "Need at least one AP which supports 802.11mc!")
        if len(rtt_supporting_aps) > max_peers:
            rtt_supporting_aps = rtt_supporting_aps[0:max_peers]

        events = rutils.run_ranging(dut, rtt_supporting_aps, num_iter, 0,
                                    self.stress_test_target_run_time_sec)
        stats = rutils.analyze_results(events,
                                       self.rtt_reference_distance_mm,
                                       self.rtt_reference_distance_margin_mm,
                                       self.rtt_min_expected_rssi_dbm,
                                       self.lci_reference,
                                       self.lcr_reference,
                                       summary_only=True)
        dut.log.debug("Stats=%s", stats)

        for bssid, stat in stats.items():
            asserts.assert_true(stat['num_no_results'] == 0,
                                "Missing (timed-out) results",
                                extras=stats)
            asserts.assert_false(stat['any_lci_mismatch'],
                                 "LCI mismatch",
                                 extras=stats)
            asserts.assert_false(stat['any_lcr_mismatch'],
                                 "LCR mismatch",
                                 extras=stats)
            asserts.assert_equal(stat['num_invalid_rssi'],
                                 0,
                                 "Invalid RSSI",
                                 extras=stats)
            asserts.assert_true(
                stat['num_failures'] <=
                self.rtt_max_failure_rate_two_sided_rtt_percentage *
                stat['num_results'] / 100,
                "Failure rate is too high",
                extras=stats)
            asserts.assert_true(
                stat['num_range_out_of_margin'] <=
                self.rtt_max_margin_exceeded_rate_two_sided_rtt_percentage *
                stat['num_success_results'] / 100,
                "Results exceeding error margin rate is too high",
                extras=stats)
        asserts.explicit_pass("RTT test done", extras=stats)
Example #11
def validate_setup_ap_and_associate(*args, **kwargs):
    """Validates if setup_ap_and_associate was a success or not

       Args: Args match setup_ap_and_associate
    """
    asserts.assert_true(setup_ap_and_associate(*args, **kwargs),
                        'Failed to associate.')
    asserts.explicit_pass('Successfully associated.')
 def test_iperf_max_ndi_aware_only_passphrases(self):
     """Test throughput for multiple NDIs configured with different passphrases.
 """
     results = {}
     self.run_iperf_max_ndi_aware_only([self.PASSPHRASE, self.PASSPHRASE2],
                                       results=results)
     asserts.explicit_pass(
         "test_iperf_max_ndi_aware_only_passphrases passes", extras=results)
 def test_iperf_max_ndp_aware_only_oob(self):
     """Measure throughput using iperf on all possible concurrent NDPs, with
 Aware enabled and no infrastructure connection. Use out-of-band discovery.
 """
     results = {}
     self.run_iperf_max_ndp_aware_only(results=results)
     asserts.explicit_pass("test_iperf_max_ndp_aware_only_oob passes",
                           extras=results)
    def test_rtt_mixed_80211mc_supporting_aps_wo_privilege(self):
        """Scan for APs and perform RTT on one supporting and one non-supporting
    IEEE 802.11mc APs with the device not having privilege access (expect
    failures)."""
        dut = self.android_devices[0]
        rutils.config_privilege_override(dut, True)
        rtt_aps = rutils.scan_with_rtt_support_constraint(dut, True)
        non_rtt_aps = rutils.scan_with_rtt_support_constraint(dut, False)
        mix_list = [rtt_aps[0], non_rtt_aps[0]]
        dut.log.debug("Visible non-IEEE 802.11mc APs=%s", mix_list)
        events = rutils.run_ranging(dut, mix_list, self.NUM_ITER,
                                    self.TIME_BETWEEN_ITERATIONS)
        stats = rutils.analyze_results(events, self.rtt_reference_distance_mm,
                                       self.rtt_reference_distance_margin_mm,
                                       self.rtt_min_expected_rssi_dbm,
                                       self.lci_reference, self.lcr_reference)
        dut.log.debug("Stats=%s", stats)

        for bssid, stat in stats.items():
            asserts.assert_true(stat['num_no_results'] == 0,
                                "Missing (timed-out) results",
                                extras=stats)
            if bssid == rtt_aps[0][wutils.WifiEnums.BSSID_KEY]:
                asserts.assert_false(stat['any_lci_mismatch'],
                                     "LCI mismatch",
                                     extras=stats)
                asserts.assert_false(stat['any_lcr_mismatch'],
                                     "LCR mismatch",
                                     extras=stats)
                asserts.assert_equal(stat['num_invalid_rssi'],
                                     0,
                                     "Invalid RSSI",
                                     extras=stats)
                asserts.assert_true(
                    stat['num_failures'] <=
                    self.rtt_max_failure_rate_two_sided_rtt_percentage *
                    stat['num_results'] / 100,
                    "Failure rate is too high",
                    extras=stats)
                asserts.assert_true(
                    stat['num_range_out_of_margin'] <=
                    self.rtt_max_margin_exceeded_rate_two_sided_rtt_percentage
                    * stat['num_success_results'] / 100,
                    "Results exceeding error margin rate is too high",
                    extras=stats)
            else:
                asserts.assert_true(
                    stat['num_failures'] == self.NUM_ITER,
                    "All one-sided RTT requests must fail when executed without privilege",
                    extras=stats)
                for code in stat['status_codes']:
                    asserts.assert_true(
                        code == rconsts.
                        EVENT_CB_RANGING_STATUS_RESPONDER_DOES_NOT_SUPPORT_IEEE80211MC,
                        "Expected non-support error code",
                        extras=stats)
        asserts.explicit_pass("RTT test done", extras=stats)
Example #15
    def run_test_rtt_80211mc_supporting_aps(self,
                                            dut,
                                            accuracy_evaluation=False):
        """Scan for APs and perform RTT only to those which support 802.11mc
        Args:
            dut: test device
            accuracy_evaluation: False - only evaluate success rate.
                                 True - evaluate both success rate and accuracy
                                 default is False.
        """
        rtt_supporting_aps = rutils.select_best_scan_results(
            rutils.scan_with_rtt_support_constraint(dut, True, repeat=10),
            select_count=2)
        dut.log.debug("RTT Supporting APs=%s", rtt_supporting_aps)
        events = rutils.run_ranging(dut, rtt_supporting_aps, self.NUM_ITER,
                                    self.TIME_BETWEEN_ITERATIONS)
        stats = rutils.analyze_results(events, self.rtt_reference_distance_mm,
                                       self.rtt_reference_distance_margin_mm,
                                       self.rtt_min_expected_rssi_dbm,
                                       self.lci_reference, self.lcr_reference)
        dut.log.debug("Stats=%s", stats)

        for bssid, stat in stats.items():
            asserts.assert_true(stat['num_no_results'] == 0,
                                "Missing (timed-out) results",
                                extras=stats)
            asserts.assert_false(stat['any_lci_mismatch'],
                                 "LCI mismatch",
                                 extras=stats)
            asserts.assert_false(stat['any_lcr_mismatch'],
                                 "LCR mismatch",
                                 extras=stats)
            asserts.assert_false(stat['invalid_num_attempted'],
                                 "Invalid (0) number of attempts",
                                 extras=stats)
            asserts.assert_false(stat['invalid_num_successful'],
                                 "Invalid (0) number of successes",
                                 extras=stats)
            asserts.assert_equal(stat['num_invalid_rssi'],
                                 0,
                                 "Invalid RSSI",
                                 extras=stats)
            asserts.assert_true(
                stat['num_failures'] <=
                self.rtt_max_failure_rate_two_sided_rtt_percentage *
                stat['num_results'] / 100,
                "Failure rate is too high",
                extras=stats)
            if accuracy_evaluation:
                asserts.assert_true(
                    stat['num_range_out_of_margin'] <=
                    self.rtt_max_margin_exceeded_rate_two_sided_rtt_percentage
                    * stat['num_success_results'] / 100,
                    "Results exceeding error margin rate is too high",
                    extras=stats)
        asserts.explicit_pass("RTT test done", extras=stats)
Example #16
 def test_oob_ndp_setup_latency_default_dws(self):
     """Measure the NDP setup latency with the default DW configuration. The
  NDP is set up with OOB (out-of-band) configuration."""
     results = {}
     self.run_ndp_oob_latency(results=results,
                              dw_24ghz=aconsts.POWER_DW_24_INTERACTIVE,
                              dw_5ghz=aconsts.POWER_DW_5_INTERACTIVE,
                              num_iterations=100)
     asserts.explicit_pass("test_ndp_setup_latency_default_dws finished",
                           extras=results)
Example #17
 def test_message_latency_default_dws(self):
     """Measure the send message latency with the default DW configuration. Test
 performed on non-queued message transmission - i.e. waiting for confirmation
 of reception (ACK) before sending the next message."""
     results = {}
     self.run_message_latency(results=results,
                              dw_24ghz=aconsts.POWER_DW_24_INTERACTIVE,
                              dw_5ghz=aconsts.POWER_DW_5_INTERACTIVE,
                              num_iterations=100)
     asserts.explicit_pass("test_message_latency_default_dws finished",
                           extras=results)
    def run_test_rtt_non_80211mc_supporting_aps(self,
                                                dut,
                                                accuracy_evaluation=False):
        """Scan for APs and perform RTT on non-IEEE 802.11mc supporting APs
        Args:
            dut: test device
            accuracy_evaluation: False - only evaluate success rate.
                                 True - evaluate both success rate and accuracy
                                 default is False.
        """
        asserts.skip_if(
            not dut.rtt_capabilities[rconsts.CAP_RTT_ONE_SIDED_SUPPORTED],
            "Device does not support one-sided RTT")

        non_rtt_aps = rutils.select_best_scan_results(
            rutils.scan_with_rtt_support_constraint(dut, False),
            select_count=1)
        dut.log.debug("Visible non-IEEE 802.11mc APs=%s", non_rtt_aps)
        asserts.assert_true(len(non_rtt_aps) > 0, "Need at least one AP!")
        events = rutils.run_ranging(dut, non_rtt_aps, self.NUM_ITER,
                                    self.TIME_BETWEEN_ITERATIONS)
        stats = rutils.analyze_results(events, self.rtt_reference_distance_mm,
                                       self.rtt_reference_distance_margin_mm,
                                       self.rtt_min_expected_rssi_dbm, [], [])
        dut.log.debug("Stats=%s", stats)

        for bssid, stat in stats.items():
            asserts.assert_true(stat['num_no_results'] == 0,
                                "Missing (timed-out) results",
                                extras=stats)
            asserts.assert_false(stat['any_lci_mismatch'],
                                 "LCI mismatch",
                                 extras=stats)
            asserts.assert_false(stat['any_lcr_mismatch'],
                                 "LCR mismatch",
                                 extras=stats)
            asserts.assert_equal(stat['num_invalid_rssi'],
                                 0,
                                 "Invalid RSSI",
                                 extras=stats)
            asserts.assert_true(
                stat['num_failures'] <=
                self.rtt_max_failure_rate_one_sided_rtt_percentage *
                stat['num_results'] / 100,
                "Failure rate is too high",
                extras=stats)
            if accuracy_evaluation:
                asserts.assert_true(
                    stat['num_range_out_of_margin'] <=
                    self.rtt_max_margin_exceeded_rate_one_sided_rtt_percentage
                    * stat['num_success_results'] / 100,
                    "Results exceeding error margin rate is too high",
                    extras=stats)
        asserts.explicit_pass("RTT test done", extras=stats)
Example #19
 def test_discovery_latency_default_dws(self):
     """Measure the service discovery latency with the default DW configuration.
 """
     results = {}
     self.run_discovery_latency(results=results,
                                do_unsolicited_passive=True,
                                dw_24ghz=aconsts.POWER_DW_24_INTERACTIVE,
                                dw_5ghz=aconsts.POWER_DW_5_INTERACTIVE,
                                num_iterations=100)
      asserts.explicit_pass(
          "test_discovery_latency_default_dws finished",
          extras=results)
Example #20
 def test_oob_ndp_setup_latency_non_interactive_dws(self):
     """Measure the NDP setup latency with the DW configuration for
  non-interactive mode. The NDP is set up with OOB (out-of-band)
  configuration."""
     results = {}
     self.run_ndp_oob_latency(results=results,
                              dw_24ghz=aconsts.DW_24_NON_INTERACTIVE,
                              dw_5ghz=aconsts.DW_5_NON_INTERACTIVE,
                              num_iterations=100)
      asserts.explicit_pass(
          "test_oob_ndp_setup_latency_non_interactive_dws finished",
          extras=results)
Example #21
 def test_discovery_latency_non_interactive_dws(self):
     """Measure the service discovery latency with the DW configuration for non
 -interactive mode (lower power)."""
     results = {}
     self.run_discovery_latency(results=results,
                                do_unsolicited_passive=True,
                                dw_24ghz=aconsts.DW_24_NON_INTERACTIVE,
                                dw_5ghz=aconsts.DW_5_NON_INTERACTIVE,
                                num_iterations=100)
     asserts.explicit_pass(
         "test_discovery_latency_non_interactive_dws finished",
         extras=results)
    def run_config_stress_test(self, settings):
        """Runs test based on config parameters.

        Args:
            settings: test configuration settings, see
                test_soft_ap_stress_from_config for details
        """
        client = settings['client']
        test_type = settings['test_type']
        if test_type not in TEST_TYPES:
            raise ValueError('Unrecognized test type %s' % test_type)
        reconnect_loops = settings['reconnect_loops']
        self.log.info('Running test type %s in loop %s times' %
                      (test_type, reconnect_loops))

        self.start_soft_ap(settings)

        passed_count = 0
        for run in range(reconnect_loops):
            try:
                # Associate with SoftAp
                self.log.info('Starting SoftApTest run %s' % str(run + 1))
                self.associate_with_soft_ap(client.w_device, settings)

                if test_type != TEST_TYPE_ASSOCIATE_ONLY:
                    # Verify client and SoftAP can ping
                    dut_ap_interface = self.get_dut_interface_by_role(
                        INTERFACE_ROLE_AP)
                    client_ipv4 = self.wait_for_ipv4_address(
                        client.w_device, ANDROID_DEFAULT_WLAN_PORT)
                    self.verify_ping(client.w_device, dut_ap_interface.ipv4)
                    self.verify_ping(self.dut, client_ipv4)

                    if test_type != TEST_TYPE_ASSOCIATE_AND_PING:
                        # Run traffic between client and SoftAp
                        self.run_iperf_traffic(client.ip_client,
                                               dut_ap_interface.ipv4)
                # Disconnect
                self.disconnect_from_soft_ap(client.w_device)

            except signals.TestFailure as err:
                self.log.error('SoftApTest run %s failed. Err: %s' %
                               (str(run + 1), err.details))
            else:
                self.log.info('SoftApTest run %s successful.' %
                              str(run + 1))
                passed_count += 1

        if passed_count < reconnect_loops:
            asserts.fail('SoftAp reconnect test passed on %s/%s runs.' %
                         (passed_count, reconnect_loops))

        asserts.explicit_pass('SoftAp reconnect test passed on %s/%s runs.' %
                              (passed_count, reconnect_loops))
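
A hypothetical settings dict for run_config_stress_test, inferred from the key accesses above; the constant value and numbers are assumptions.

TEST_TYPE_ASSOCIATE_ONLY = 'associate_only'  # assumed value of the module constant

settings = {
    'client': None,  # placeholder: an object exposing .w_device and .ip_client
    'test_type': TEST_TYPE_ASSOCIATE_ONLY,  # must be a member of TEST_TYPES
    'reconnect_loops': 10,
    # start_soft_ap/associate_with_soft_ap read additional keys from settings
}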
    def pass_fail_check(self, result):
        """Checks sensitivity results and decides on pass/fail.

        Args:
            result: dict containing attenuation, throughput and other meta
                data
        """
        result_string = ('Throughput = {}%, Sensitivity = {}.'.format(
            result['peak_throughput_pct'], result['sensitivity']))
        if result['peak_throughput_pct'] < 95:
            asserts.fail('Result unreliable. {}'.format(result_string))
        else:
            asserts.explicit_pass('Test Passed. {}'.format(result_string))
Example #24
 def test_discovery_latency_all_dws(self):
     """Measure the service discovery latency with all DW combinations (low
 iteration count)"""
     results = {}
     for dw24 in range(1, 6):  # permitted values: 1-5
         for dw5 in range(0, 6):  # permitted values: 0, 1-5
             self.run_discovery_latency(results=results,
                                        do_unsolicited_passive=True,
                                        dw_24ghz=dw24,
                                        dw_5ghz=dw5,
                                        num_iterations=10)
     asserts.explicit_pass("test_discovery_latency_all_dws finished",
                           extras=results)
    def eap_negative_connect_logic(self, config, ad):
        """Tries to connect to an enterprise network with invalid credentials
        and expect a failure.

        Args:
            config: A dict representing an invalid EAP credential.
            ad: Android device object to run the connection attempt on.
        """
        with asserts.assert_raises(signals.TestFailure, extras=config):
            verdict = wutils.eap_connect(config, ad)
        asserts.explicit_pass("Connection failed as expected.")
Example #26
 def _safe_wrap_test_case(self, *args, **kwargs):
     test_id = "%s:%s:%s" % (self.__class__.__name__, self.test_name,
                             self.log_begin_time.replace(' ', '-'))
     self.test_id = test_id
     self.result_detail = ""
     self.testsignal_details = ""
     self.testsignal_extras = {}
     tries = int(self.user_params.get("telephony_auto_rerun", 1))
     for ad in self.android_devices:
         ad.log_path = self.log_path
     for i in range(tries + 1):
         result = True
         if i > 0:
             log_string = "[Test Case] RERUN %s" % self.test_name
             self.log.info(log_string)
             self._teardown_test(self.test_name)
             self._setup_test(self.test_name)
          try:
              # `fn` is the wrapped test method, captured by the enclosing
              # decorator that produces this wrapper
              result = fn(self, *args, **kwargs)
         except signals.TestFailure as e:
             self.testsignal_details = e.details
             self.testsignal_extras = e.extras
             result = False
         except signals.TestSignal:
             raise
         except Exception as e:
             self.log.exception(e)
             asserts.fail(self.result_detail)
         if result is False:
             if i < tries:
                 continue
         else:
             break
     if self.user_params.get("check_crash", True):
         new_crash = ad.check_crash_report(self.test_name,
                                           self.begin_time, True)
         if new_crash:
             msg = "Find new crash reports %s" % new_crash
             ad.log.error(msg)
             self.result_detail = "%s %s %s" % (self.result_detail,
                                                ad.serial, msg)
             result = False
     if result is not False:
         asserts.explicit_pass(self.result_detail)
     else:
         if self.result_detail:
             asserts.fail(self.result_detail)
         else:
             asserts.fail(self.testsignal_details, self.testsignal_extras)
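
The body above references fn, which only makes sense if this function is the inner wrapper produced by a decorator; a rough sketch of that elided context, as an assumption rather than the confirmed original:

import functools

def safe_wrap_test_case(fn):  # hypothetical decorator name
    @functools.wraps(fn)
    def _safe_wrap_test_case(self, *args, **kwargs):
        ...  # rerun/crash-check body as above, with fn now in scope
    return _safe_wrap_test_case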
    def test_rtt_without_responder_aware(self):
        """Try to perform RTT operation when there is no peer Aware session (on the
        Responder). Should FAIL."""
        init_dut = self.android_devices[0]
        resp_dut = self.android_devices[1]

        # Enable a Responder and start a Publisher
        resp_id = resp_dut.droid.wifiAwareAttach(True)
        autils.wait_for_event(resp_dut, aconsts.EVENT_CB_ON_ATTACHED)
        resp_ident_event = autils.wait_for_event(
            resp_dut, aconsts.EVENT_CB_ON_IDENTITY_CHANGED)
        resp_mac = resp_ident_event['data']['mac']

        resp_config = autils.add_ranging_to_pub(autils.create_discovery_config(
            self.SERVICE_NAME, aconsts.PUBLISH_TYPE_UNSOLICITED),
                                                enable_ranging=True)
        resp_dut.droid.wifiAwarePublish(resp_id, resp_config)
        autils.wait_for_event(resp_dut, aconsts.SESSION_CB_ON_PUBLISH_STARTED)

        # Disable Responder
        resp_dut.droid.wifiAwareDestroy(resp_id)

        # Enable the Initiator
        init_id = init_dut.droid.wifiAwareAttach()
        autils.wait_for_event(init_dut, aconsts.EVENT_CB_ON_ATTACHED)

        # Initiate an RTT to Responder (no Aware started on Initiator!)
        results = []
        num_no_responses = 0
        num_successes = 0
        for i in range(self.NUM_ITER):
            result = self.run_rtt_discovery(init_dut, resp_mac=resp_mac)
            self.log.debug("result: %s", result)
            results.append(result)
            if result is None:
                num_no_responses += 1
            elif (result[rconsts.EVENT_CB_RANGING_KEY_STATUS] ==
                  rconsts.EVENT_CB_RANGING_STATUS_SUCCESS):
                num_successes += 1

        asserts.assert_equal(num_no_responses,
                             0,
                             "No RTT response?",
                             extras={"data": results})
        asserts.assert_equal(num_successes,
                             0,
                             "Aware RTT w/o Aware should FAIL!",
                             extras={"data": results})
        asserts.explicit_pass("RTT Aware test done", extras={"data": results})
Example #28
 def test_synchronization_default_dws(self):
     """Measure the device synchronization for default dws. Loop over values
 from 0 to 4 seconds."""
     results = {}
     for startup_offset in range(5):
         self.run_synchronization_latency(
             results=results,
             do_unsolicited_passive=True,
             dw_24ghz=aconsts.POWER_DW_24_INTERACTIVE,
             dw_5ghz=aconsts.POWER_DW_5_INTERACTIVE,
             num_iterations=10,
             startup_offset=startup_offset,
             timeout_period=20)
     asserts.explicit_pass("test_synchronization_default_dws finished",
                           extras=results)
 def parallel_tests(self, change_env_func, setup_func=None):
     if setup_func and not setup_func():
         self.log.error("Test setup %s failed", setup_func.__name__)
         return False
     self.result_info = collections.defaultdict(int)
     self.finishing_time = time.time() + self.max_run_time
      results = run_multithread_func(self.log,
                                     [(self.call_test, []),
                                      (self.message_test, []),
                                      (self.data_test, []),
                                      (self.crash_check_test, []),
                                      (change_env_func, [])])
      result_message = "%s" % dict(self.result_info)
      self.log.info(result_message)
      if all(results):
          asserts.explicit_pass(result_message)
      else:
          asserts.fail(result_message)
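
For readers unfamiliar with the ACTS helper, a rough standard-library equivalent of what run_multithread_func appears to do: run each (func, args) pair in its own thread and collect the return values in order. This sketches the semantics only; it is not the actual helper.

import concurrent.futures

def run_multithread(funcs_and_args):
    with concurrent.futures.ThreadPoolExecutor() as pool:
        futures = [pool.submit(f, *args) for f, args in funcs_and_args]
        return [f.result() for f in futures]

print(run_multithread([(sum, [[1, 2]]), (len, ['abc'])]))  # [3, 3]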
 def logic(self, setting, arg, special_arg=None):
     asserts.assert_true(
         setting in itrs,
         ("%s is not in acceptable settings range %s") % (setting,
                                                          itrs))
     asserts.assert_true(arg == static_arg,
                         "Expected %s, got %s" % (static_arg, arg))
     asserts.assert_true(arg == static_arg, "Expected %s, got %s" %
                         (static_kwarg, special_arg))
     if setting == "pass":
         asserts.explicit_pass(
             MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA)
     elif setting == "fail":
         asserts.fail(MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA)
     elif setting == "skip":
         asserts.skip(MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA)