Example #1
    def test_local_hotspot_process(self):
        """Test for basic local hotspot process flow.

    Steps:
      1. Ap sets up a local hotspot and retrieves the credentials.
      2. Station connects to the hotspot.

    Verifies:
      Station can connect to the local hotspot created by ap.
    """
        wifi_on_before = self.ap.android.wifiIsEnabled()
        start_localhotspot_callback = self.ap.android.startLocalHotspot()
        start_result = start_localhotspot_callback.waitAndGet('onStarted', 30)
        local_hotspot_info = start_result.data
        self.ap.log.info('Local hotspot started')
        network_found = self.station.android.wifiScanAndFindSsid(
            local_hotspot_info[SSID], SCAN_TIMEOUT)
        asserts.assert_true(network_found,
                            'Network is not found within 30 seconds')
        self.station.android.wifiConnectByUpdate(
            local_hotspot_info[SSID], local_hotspot_info['Password'])
        self.station.log.info('Connected to the network %s.' %
                              local_hotspot_info[SSID])
        self.ap.android.stopLocalHotspot()
        time.sleep(RESTORE_TIME)
        wifi_on_after = self.ap.android.wifiIsEnabled()
        asserts.assert_equal(wifi_on_before, wifi_on_after)
        self.ap.log.info('Wifi state restored')
Example #2
def WriteCharacteristic(client, server):
  """Logic for BLE characteristic write.

  Args:
    client: AndroidDevice. The device that behaves as GATT client.
    server: AndroidDevice. The device that behaves as GATT server.

  Steps:
    1. Client writes a characteristic to server & gets true.
    2. Server calls sendResponse & client gets onCharacteristicWrite.

  Verifies:
    Client gets corresponding callback.
  """
  write_operation_result = client.android.bleWriteOperation(
      TEST_BLE_SERVICE_UUID, TEST_WRITE_UUID, WRITE_DATA)
  asserts.assert_true(write_operation_result,
                      'BLE write operation failed to start')
  server_write_operation_result = server.server_callback.waitAndGet(
      'onCharacteristicWriteRequest', 30)
  asserts.assert_equal(server_write_operation_result.data['Data'], WRITE_DATA)
  client.client_callback.waitAndGet('onCharacteristicWrite', 30)
  client.log.info('Write operation finished')
  write_operation_result = client.android.bleWriteOperation(
      TEST_BLE_SERVICE_UUID, TEST_SECOND_WRITE_UUID, SECOND_WRITE_DATA)
  asserts.assert_true(write_operation_result,
                      'BLE write operation failed to start')
  server_write_operation_result = server.server_callback.waitAndGet(
      'onCharacteristicWriteRequest', 30)
  asserts.assert_equal(server_write_operation_result.data['Data'],
                       SECOND_WRITE_DATA)
  client.client_callback.waitAndGet('onCharacteristicWrite', 30)
  client.log.info('Second write operation finished')
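The two write round trips above differ only in the characteristic UUID and the payload, so they can share a helper. A hedged refactoring sketch, using only the calls and constants already shown in this example:

def _write_and_verify(client, server, characteristic_uuid, data):
  """Runs one write round trip and checks both sides saw the same payload."""
  asserts.assert_true(
      client.android.bleWriteOperation(TEST_BLE_SERVICE_UUID,
                                       characteristic_uuid, data),
      'BLE write operation failed to start')
  # The server must receive a write request carrying exactly the written data.
  request = server.server_callback.waitAndGet('onCharacteristicWriteRequest',
                                              30)
  asserts.assert_equal(request.data['Data'], data)
  # The client side is done once its onCharacteristicWrite callback fires.
  client.client_callback.waitAndGet('onCharacteristicWrite', 30)

WriteCharacteristic would then reduce to _write_and_verify(client, server, TEST_WRITE_UUID, WRITE_DATA) followed by _write_and_verify(client, server, TEST_SECOND_WRITE_UUID, SECOND_WRITE_DATA).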
Example #3
    def test_wifi_direct_legacy_connection(self):
        """Test for basic Wifi Direct process flow.

    Steps:
      1. Group owner sets up a wifi p2p group.
      2. Station connects to the group as a regular hotspot.

    Verifies:
      Station can connect to the wifi p2p group created by group owner.
    """
        callback = self.group_owner.android.wifiP2pSetChannel(CHANNEL, CHANNEL)
        assert_utils.AssertAsyncSuccess(callback)
        self.group_owner.log.info('Wifi direct channel set.')
        callback = self.group_owner.android.wifiP2pStartGroup()
        group_info = assert_utils.AssertAsyncSuccess(callback)
        self.group_owner.log.info(
            'Wifi direct group started as a temporary group.')
        network_found = self.station.android.wifiScanAndFindSsid(
            group_info.data[SSID], SCAN_TIMEOUT)
        asserts.assert_true(network_found,
                            'Network is not found within 30 seconds')
        asserts.assert_equal(network_found['frequency'], FREQUENCY)
        self.station.log.info('Network is found, connecting...')
        connect_result = self.station.android.wifiConnectByUpdate(
            group_info.data[SSID], group_info.data['Password'])
        asserts.assert_true(connect_result, 'Failed to connect to the network')
        self.station.log.info('Connected to the network')
        callback = self.group_owner.android.wifiP2pRemoveGroup()
        assert_utils.AssertAsyncSuccess(callback)
        self.group_owner.log.info('Wifi direct group removed')
Example #4
 def test_hello_world(self):
     asserts.assert_equal(self.user_params["icecream"], 42)
     asserts.assert_equal(self.user_params["extra_param"], "haha")
     self.log.info(
         "This is a bare minimal test to make sure the basic MOBLY"
         "test flow works.")
     asserts.explicit_pass("Hello World")
Example #5
 def test_3110_verify_usb_state_after_close(self):
     """Verify that close does not change the usb port power mode."""
     current_state = self.device.usb_hub.get_device_power()
     self.device.close()
     asserts.assert_equal(
         self.device.usb_hub.get_device_power(), current_state,
         "Device USB hub port should be left in the same power state after the device is "
         "closed")
Example #6
 def test_hello(self):
     expected = "hello!"
     status, payload = self.dut.rpcs().EchoService.Echo(msg=expected)
     asserts.assert_true(status.ok(), "Status is %s" % status)
     asserts.assert_equal(
         payload.msg,
         expected,
         'Returned payload is "%s" expected "%s"' % (payload.msg, expected),
     )
Example #7
 def test_hello_world(self):
   asserts.assert_equal(self.user_params['icecream'], 42)
   asserts.assert_equal(self.user_params['extra_param'], 'haha')
   logging.info('This is a bare minimal test to make sure the basic MOBLY'
          ' test flow works.')
   asserts.explicit_pass(
     'Hello World',
     # Use a unicode string here to make sure the full log pipeline
     # works with unicode.
     extras=u'\u2022')
Example #8
 def test_3301_shell_ssh_capability_with_return_code(self):
     """Test case for shell_ssh capability to verify command completes successfully."""
     try:
         response, code = self.device.shell_capability.shell(
             self.test_config["shell_cmd"], include_return_code=True)
         asserts.assert_equal(code, SUCCESS_RETURN_CODE)
     except Exception:
         traceback_message = traceback.format_exc()
         asserts.fail(
             "Error happened during call to shell_ssh capability shell method "
             + traceback_message)
Example #9
    def test_device_power_on_off(self):
        """Verifies on and off works."""
        original_mode = self.device.device_power.port_mode

        try:
            self.device.device_power.off()
            asserts.assert_equal(self.device.device_power.port_mode, "off",
                                 f'{self.device.name} port {self.device.device_power.port_number} should have been set to off')
            self.device.device_power.on()
            asserts.assert_true(self.device.device_power.port_mode in ["on", "charge", "sync"],
                                f'{self.device.name} port {self.device.device_power.port_number} should have been set to on')
        finally:
            if original_mode == "off":
                self.logger.info("restoring device power back to its original mode of off")
                self.device.device_power.off()
Example #10
 def test_5805_power_off_and_on(self):
     """Verify that setting the power mode with power_on and Power_off works as expected."""
     self.device.switch_power.power_off(1)
     time.sleep(5)
     mode = self.device.switch_power.get_mode(1)
     asserts.assert_equal(
         mode, "off",
         "{} expected usb_hub mode to be 'off' found: {}".format(
             self.device.name, mode))
     self.device.switch_power.power_on(1)
     time.sleep(5)
     mode = self.device.switch_power.get_mode(1)
     asserts.assert_equal(
         mode, "sync",
         "{} expected usb_hub mode to be 'sync' found: {}".format(
             self.device.name, mode))
Example #11
 def test_5801_set_and_get_mode(self):
     """Verify that setting and getting a single port works as expected"""
     self.device.switch_power.set_mode("off", 1)
     time.sleep(5)
     mode = self.device.switch_power.get_mode(1)
     asserts.assert_equal(
         mode, "off",
         "{} expected usb_hub mode to be 'off' found: {}".format(
             self.device.name, mode))
     self.device.switch_power.set_mode("sync", 1)
     time.sleep(5)
     mode = self.device.switch_power.get_mode(1)
     asserts.assert_equal(
         mode, "sync",
         "{} expected usb_hub mode to be 'sync' found: {}".format(
             self.device.name, mode))
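Both switch_power examples above wait a fixed time.sleep(5) before re-reading the mode. A hedged alternative sketch that polls instead, assuming only the get_mode call used above and the standard time module:

def _wait_for_mode(switch_power, port, want, timeout_s=10.0, poll_s=0.5):
    """Polls switch_power.get_mode(port) until it returns `want` or the timeout expires."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if switch_power.get_mode(port) == want:
            return True
        time.sleep(poll_s)
    return False

The assertion then becomes asserts.assert_true(_wait_for_mode(self.device.switch_power, 1, "off"), ...), which returns as soon as the hub reports the new mode instead of always waiting the full five seconds.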
Example #12
    def test_metadata_pts_test_id_and_description(self):
        @metadata(pts_test_id="A/B/C", pts_test_name="Hello world")
        def simple_pass_test(arg):
            pass

        try:
            simple_pass_test(1)
        except signals.TestPass as e:
            asserts.assert_true("pts_test_id" in e.extras,
                                msg=("pts_test_id not in extra: %s" %
                                     str(e.extras)))
            asserts.assert_equal(e.extras["pts_test_id"], "A/B/C")
            asserts.assert_true("pts_test_name" in e.extras,
                                msg=("pts_test_name not in extra: %s" %
                                     str(e.extras)))
            asserts.assert_equal(e.extras["pts_test_name"], "Hello world")
        else:
            asserts.fail("Must throw an exception using @metadata decorator")
Example #13
    def test_bluetooth_process(self):
        """Test for basic bluetooth rfcomm process flow.

    Steps:
      1. Receiver becomes discoverable.
      2. Initiator discovers receiver via bluetooth.
      3. Initiator connects to receiver via rfcomm profile.
      4. Initiator sends a message to receiver and receiver receives the exact
      message.

    Verifies:
      Receiver receives the correct message.
    """
        # Name value for RfComm connection
        rfcomm_name = utils.rand_ascii_str(8)
        self.receiver.connection_callback = (
            self.receiver.android.btRfcommStartServer(rfcomm_name,
                                                      RFCOMM_UUID))
        self.receiver.log.info('Start Rfcomm server with name: %s uuid: %s' %
                               (rfcomm_name, RFCOMM_UUID))
        target_name = self.receiver.android.btGetName()
        self.receiver.log.info('Become discoverable with name "%s" for %ds.',
                               target_name, DISCOVERABLE_TIME)
        self.receiver.android.btBecomeDiscoverable(DISCOVERABLE_TIME)
        self.initiator.log.info('Looking for Bluetooth devices.')
        discovered_device = DiscoverBluetoothDeviceByName(
            self.initiator, target_name)
        self.initiator.log.info('Target device is found. Device: %s' %
                                discovered_device)
        remote_address = discovered_device['Address']
        self.initiator.android.btRfcommConnect(remote_address, RFCOMM_UUID)
        self.receiver.connection_callback.waitAndGet('onAccepted', 30)
        # self.initiator.connection_callback.waitAndGet('onConnected', 30)
        self.initiator.log.info('Connection established')
        # Random data to be sent through bluetooth rfcomm.
        data = utils.rand_ascii_str(8)
        self.receiver.read_callback = self.receiver.android.btRfcommRead()
        self.initiator.android.btRfcommWrite(data)
        read_result = self.receiver.read_callback.waitAndGet(
            'onDataAvailable', 30)
        asserts.assert_equal(read_result.data['Data'], data)
        self.receiver.log.info('Received correct message from the other side')
        self.initiator.android.btRfcommDisconnect()
        self.receiver.android.btRfcommStopServer()
Example #14
    async def fill_user_label_list(self, dev_ctrl, target_node_id):
        logging.info("Step 9: Fill UserLabel clusters on each endpoint")
        user_labels = await dev_ctrl.ReadAttribute(target_node_id,
                                                   [Clusters.UserLabel])

        # Build 4 sets of maximized labels
        random_label = self.random_string(16)
        random_value = self.random_string(16)
        labels = [
            Clusters.UserLabel.Structs.LabelStruct(label=random_label,
                                                   value=random_value)
            for _ in range(4)
        ]

        for endpoint_id in user_labels:
            clusters = user_labels[endpoint_id]
            for cluster in clusters:
                if cluster == Clusters.UserLabel:
                    logging.info(
                        "Step 9a: Filling UserLabel cluster on endpoint %d" %
                        endpoint_id)
                    statuses = await dev_ctrl.WriteAttribute(
                        target_node_id,
                        [(endpoint_id,
                          Clusters.UserLabel.Attributes.LabelList(labels))])
                    asserts.assert_equal(statuses[0].Status,
                                         StatusEnum.Success,
                                         "Label write must succeed")

                    logging.info(
                        "Step 9b: Validate UserLabel cluster contents after write on endpoint %d"
                        % endpoint_id)
                    read_back_labels = await self.read_single_attribute(
                        dev_ctrl,
                        node_id=target_node_id,
                        endpoint=endpoint_id,
                        attribute=Clusters.UserLabel.Attributes.LabelList)
                    print(read_back_labels)

                    asserts.assert_equal(
                        read_back_labels, labels,
                        "LabelList attribute must match what was written")
Example #15
    def test_3113_verify_set_all_port_mode(self):
        """Verifies set_all_ports_mode works."""
        original_mode = self.device.switch_power.get_all_ports_mode()
        try:
            expectant_value = ['on', 'on', 'on', 'on', 'on', 'on', 'on', 'on']
            self.device.switch_power.set_all_ports_mode("on")
            asserts.assert_equal(
                self.device.switch_power.get_all_ports_mode(), expectant_value,
                '{} expected to set port values to {} but got {}'.format(
                    self.device.name, expectant_value,
                    self.device.switch_power.get_all_ports_mode()))

        finally:
            self.logger.info("set the powerswitch power back to the "
                             "original mode: {}".format(original_mode))
            for indx, mode in enumerate(original_mode):
                if "on" in mode:
                    self.device.switch_power.power_on(port=indx)
                else:
                    self.device.switch_power.power_off(port=indx)
Example #16
    def test_comm_power_on_and_off(self):
        """Verifies comm_power methods on and off work."""
        self.test_port = self._get_port()
        original_mode = self._get_port_state()

        try:
            self.device.comm_power.off()
            asserts.assert_equal(self._get_port_state(), "off",
                                 f'{self.device.name} port {self.test_port} should have been '
                                 'set to off')
            self.device.comm_power.on()
            asserts.assert_equal(self._get_port_state(), "on",
                                 f'{self.device.name} port {self.test_port} should have been '
                                 'set to on')
        finally:
            if self._get_port_state() != original_mode:
                if original_mode == "off":
                    self.logger.info('Restoring device communication power back to its '
                                     f'original mode of {original_mode}.')
                else:
                    self.device.comm_power.on()
Example #17
def CancelOpen(client, server):
  """Logic for BLE client cancel open and reconnect.

  Args:
    client: AndroidDevice. The device that behaves as GATT client.
    server: AndroidDevice. The device that behaves as GATT server.

  Steps:
    1. Server starts and service added properly.
    2. Client starts to connect to server via Gatt, but the connection has not
    been established.
    3. Client calls disconnect to cancel the connection.
    4. Client connects to server, connection completes with GATT_SUCCESS within
    some TIMEOUT, onConnectionStateChange/STATE_CONNECTED called EXACTLY once.

  Verifies:
    Client gets corresponding callback.
  """
  server.server_callback = server.android.bleStartServer([SERVICE])
  start_server_result = server.server_callback.waitAndGet('onServiceAdded', 30)
  asserts.assert_equal(start_server_result.data[STATUS], GATT_SUCCESS)
  uuids = [
      characteristic[UUID]
      for characteristic in start_server_result.data['Service'][
          'Characteristics']
  ]
  for uuid in [
      characteristic[UUID] for characteristic in SERVICE['Characteristics']
  ]:
    asserts.assert_true(uuid in uuids, 'Failed to find uuid %s.' % uuid)
  server.log.info('BLE server started')
  client.client_callback = client.android.bleConnectGatt(
      client.connect_to_address)
  time.sleep(CANCEL_CONNECTION_WAIT_TIME)
  start_client_results = client.client_callback.getAll(
      'onConnectionStateChange')
  if not start_client_results:
    client.android.bleDisconnect()
    client.log.info('BLE client cancel open')
    time.sleep(CANCEL_CONNECTION_WAIT_TIME)
    client.client_callback = client.android.bleConnectGatt(
        client.connect_to_address)
    time.sleep(CONNECTION_TIMEOUT)
    start_client_results = client.client_callback.getAll(
        'onConnectionStateChange')
    asserts.assert_equal(len(start_client_results), 1)
    for start_client_result in start_client_results:
      asserts.assert_equal(start_client_result.data[STATUS], GATT_SUCCESS)
      asserts.assert_equal(start_client_result.data[STATE], 'STATE_CONNECTED')
      client.log.info('BLE client connected')
Example #18
    def test_3112_verify_power_on_power_off(self):
        """Verifies power_on and power_off works."""
        original_mode = self.device.switch_power.get_mode(port=1)
        try:
            self.device.switch_power.power_off(port=1)
            asserts.assert_equal(
                self.device.switch_power.get_mode(port=1), "off",
                '{} port {} should have been set to {}'.format(
                    self.device.name, '1', 'off'))
            self.device.switch_power.power_on(port=1)
            asserts.assert_equal(
                self.device.switch_power.get_mode(port=1), "on",
                '{} port {} should have been set to {}'.format(
                    self.device.name, '1', 'on'))

        finally:
            self.logger.info("set the unifi_switch power back to the "
                             "original mode: {}".format(original_mode))
            if original_mode == "on":
                self.device.switch_power.power_on(port=1)
            else:
                self.device.switch_power.power_off(port=1)
Example #19
def expect_equal(first, second, msg=None, extras=None):
    """Expects the equality of objects, otherwise fail the test.

    If the expectation is not met, the test is marked as fail after its
    execution finishes.

    Error message is "first != second" by default. Additional explanation can
    be supplied in the message.

    Args:
        first: The first object to compare.
        second: The second object to compare.
        msg: A string that adds additional info about the failure.
        extras: An optional field for extra information to be included in test
            result.
    """
    try:
        asserts.assert_equal(first, second, msg, extras)
    except signals.TestSignal as e:
        logging.exception('Expected %s equals to %s, but they are not.', first,
                          second)
        recorder.add_error(e)
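Because the failure is recorded instead of raised to the caller, several expect_equal checks can run in a single test body. A hedged usage sketch (self.device.switch_power and its ports are placeholders, not part of this module):

    def test_all_ports_report_on(self):
        # Each check is recorded independently; a mismatch on port 0 does not
        # stop port 1 from being verified in the same run.
        expect_equal(self.device.switch_power.get_mode(port=0), "on",
                     msg="port 0 should be on")
        expect_equal(self.device.switch_power.get_mode(port=1), "on",
                     msg="port 1 should be on")

By contrast, asserts.assert_equal would abort the test at the first mismatch.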
Example #20
    def test_3114_verify_set_mode(self):
        """Verifies set_mode works."""
        original_mode = self.device.switch_power.get_mode(port=0)
        try:
            self.device.switch_power.set_mode(mode="off", port=0)
            asserts.assert_equal(
                self.device.switch_power.get_mode(port=0), "off",
                '{} port {} should have been set to {}'.format(
                    self.device.name, '0', 'off'))
            self.device.switch_power.set_mode(mode="on", port=0)
            asserts.assert_equal(
                self.device.switch_power.get_mode(port=0), "on",
                '{} port {} should have been set to {}'.format(
                    self.device.name, '0', 'on'))

        finally:
            self.logger.info("set the powerswitch power back to the "
                             "original mode: {}".format(original_mode))
            if original_mode == "on":
                self.device.switch_power.power_on(port=0)
            else:
                self.device.switch_power.power_off(port=0)
Example #21
def DiscoverServices(client):
  """Logic for BLE services discovery.

  Args:
    client: AndroidDevice. The device that behaves as GATT client.

  Steps:
    1. Client successfully completes service discovery & gets
    onServicesDiscovered callback within some TIMEOUT, onServicesDiscovered/
    GATT_SUCCESS called EXACTLY once.
    2. Client discovers the readable and writable characteristics.

  Verifies:
    Client gets corresponding callback.
  """
  client.android.bleDiscoverServices()
  time.sleep(CONNECTION_TIMEOUT)
  discover_services_results = client.client_callback.getAll(
      'onServiceDiscovered')
  asserts.assert_equal(len(discover_services_results), 1)
  service_discovered = False
  asserts.assert_equal(discover_services_results[0].data[STATUS],
                       GATT_SUCCESS)
  for service in discover_services_results[0].data['Services']:
    if service['UUID'] == TEST_BLE_SERVICE_UUID:
      service_discovered = True
      uuids = [
          characteristic[UUID]
          for characteristic in service['Characteristics']
      ]
      for uuid in [
          characteristic[UUID]
          for characteristic in SERVICE['Characteristics']
      ]:
        asserts.assert_true(uuid in uuids, 'Failed to find uuid %s.' % uuid)
  asserts.assert_true(service_discovered,
                      'Failed to discover the custom service')
  client.log.info('BLE discover services finished')
Example #22
    def test_metadata_test_with_exception_stacktrace(self):
        @metadata(pts_test_id="A/B/C", pts_test_name="Hello world")
        def simple_fail_test(failure_argument):
            raise ValueError(failure_argument)

        try:
            simple_fail_test("BEEFBEEF")
        except signals.TestError as e:
            asserts.assert_true("pts_test_id" in e.extras,
                                msg=("pts_test_id not in extra: %s" %
                                     str(e.extras)))
            asserts.assert_equal(e.extras["pts_test_id"], "A/B/C")
            asserts.assert_true("pts_test_name" in e.extras,
                                msg=("pts_test_name not in extra: %s" %
                                     str(e.extras)))
            asserts.assert_equal(e.extras["pts_test_name"], "Hello world")
            trace_str = traceback.format_exc()
            asserts.assert_true(
                "raise ValueError(failure_argument)" in trace_str,
                msg="Failed test method not in error stack trace: %s" %
                trace_str)
        else:
            asserts.fail("Must throw an exception using @metadata decorator")
Example #23
def Disconnect(client, server):
  """Logic for stopping BLE client and server.

  Args:
    client: AndroidDevice. The device that behaves as GATT client.
    server: AndroidDevice. The device that behaves as GATT server.

  Steps:
    1. Client calls disconnect, gets a callback with STATE_DISCONNECTED
    and GATT_SUCCESS.
    2. Server closes.

  Verifies:
    Client gets corresponding callback.
  """
  client.android.bleDisconnect()
  stop_client_result = client.client_callback.waitAndGet(
      'onConnectionStateChange', 30)
  asserts.assert_equal(stop_client_result.data[STATUS], GATT_SUCCESS)
  asserts.assert_equal(stop_client_result.data[STATE], 'STATE_DISCONNECTED')
  client.log.info('BLE client disconnected')
  server.android.bleStopServer()
  server.log.info('BLE server stopped')
Example #24
 async def test_failure_on_wrong_endpoint(self):
     dev_ctrl = self.default_controller
     result = await self.read_single_attribute(dev_ctrl, self.dut_node_id, 9999, Clusters.Basic.Attributes.ProductName)
     asserts.assert_true(isinstance(result, Clusters.Attribute.ValueDecodeFailure), "Should fail to read on endpoint 9999")
     asserts.assert_equal(result.Reason.status, Status.UnsupportedEndpoint, "Failure reason should be UnsupportedEndpoint")
Example #25
    async def test_names_as_expected(self):
        dev_ctrl = self.default_controller
        vendor_name = await self.read_single_attribute(dev_ctrl, self.dut_node_id, 0, Clusters.Basic.Attributes.VendorName)

        logging.info("Found VendorName: %s" % (vendor_name))
        asserts.assert_equal(vendor_name, "TEST_VENDOR", "VendorName must be TEST_VENDOR!")
Example #26
    async def test_TC_SC_3_6(self):
        dev_ctrl = self.default_controller

        # Get overrides for debugging the test
        num_fabrics_to_commission = self.user_params.get(
            "num_fabrics_to_commission", 5)
        num_controllers_per_fabric = self.user_params.get(
            "num_controllers_per_fabric", 3)
        # Immediate reporting
        min_report_interval_sec = self.user_params.get(
            "min_report_interval_sec", 0)
        # 10 minutes max reporting interval --> We don't care about keep-alives per-se and
        # want to avoid resubscriptions
        max_report_interval_sec = self.user_params.get(
            "max_report_interval_sec", 10 * 60)
        # Time to wait after changing NodeLabel for subscriptions to all hit. This is dependent
        # on MRP params of subscriber and on actual min_report_interval.
        # TODO: Determine the correct max value depending on target. Test plan doesn't say!
        timeout_delay_sec = self.user_params.get("timeout_delay_sec",
                                                 max_report_interval_sec * 2)

        BEFORE_LABEL = "Before Subscriptions"
        AFTER_LABEL = "After Subscriptions"

        # Generate list of all clients names
        all_names = []
        for fabric_idx in range(num_fabrics_to_commission):
            for controller_idx in range(num_controllers_per_fabric):
                all_names.append(
                    "RD%d%s" %
                    (fabric_idx + 1, chr(ord('A') + controller_idx)))
        logging.info("Client names that will be used: %s" % all_names)
        client_list = []

        logging.info(
            "Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3"
        )

        capability_minima = await self.read_single_attribute(
            dev_ctrl,
            node_id=self.dut_node_id,
            endpoint=0,
            attribute=Clusters.Basic.Attributes.CapabilityMinima)
        asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric,
                                     3)

        logging.info(
            "Pre-conditions: use existing fabric to configure new fabrics so that total is %d fabrics"
            % num_fabrics_to_commission)

        # Generate Node IDs for subsequent controllers, starting at 200 and continuing with 300, 400, ...
        node_ids = [
            200 + (i * 100) for i in range(num_controllers_per_fabric - 1)
        ]

        # Prepare clients for first fabric, that includes the default controller
        dev_ctrl.name = all_names.pop(0)
        client_list.append(dev_ctrl)

        if num_controllers_per_fabric > 1:
            new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
                fabricAdmin=dev_ctrl.fabricAdmin,
                adminDevCtrl=dev_ctrl,
                controllerNodeIds=node_ids,
                privilege=Clusters.AccessControl.Enums.Privilege.kAdminister,
                targetNodeId=self.dut_node_id)
            for controller in new_controllers:
                controller.name = all_names.pop(0)
            client_list.extend(new_controllers)

        # Prepare clients for subsequent fabrics
        for i in range(num_fabrics_to_commission - 1):
            admin_index = 2 + i
            logging.info("Commissioning fabric %d/%d" %
                         (admin_index, num_fabrics_to_commission))
            new_certificate_authority = self.certificate_authority_manager.NewCertificateAuthority(
            )
            new_fabric_admin = new_certificate_authority.NewFabricAdmin(
                vendorId=0xFFF1, fabricId=1)
            new_admin_ctrl = new_fabric_admin.NewController(
                nodeId=dev_ctrl.nodeId)
            new_admin_ctrl.name = all_names.pop(0)
            client_list.append(new_admin_ctrl)
            await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(
                commissionerDevCtrl=dev_ctrl,
                newFabricDevCtrl=new_admin_ctrl,
                existingNodeId=self.dut_node_id,
                newNodeId=self.dut_node_id)

            if num_controllers_per_fabric > 1:
                new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
                    fabricAdmin=new_fabric_admin,
                    adminDevCtrl=new_admin_ctrl,
                    controllerNodeIds=node_ids,
                    privilege=Clusters.AccessControl.Enums.Privilege.
                    kAdminister,
                    targetNodeId=self.dut_node_id)
                for controller in new_controllers:
                    controller.name = all_names.pop(0)

                client_list.extend(new_controllers)

        asserts.assert_equal(
            len(client_list),
            num_fabrics_to_commission * num_controllers_per_fabric,
            "Must have the right number of clients")

        # Before subscribing, set the NodeLabel to "Before Subscriptions"
        logging.info(
            "Pre-conditions: writing initial value of NodeLabel, so that we can control for change of attribute detection"
        )
        await client_list[0].WriteAttribute(
            self.dut_node_id,
            [(0, Clusters.Basic.Attributes.NodeLabel(value=BEFORE_LABEL))])

        # Subscribe with all clients to NodeLabel attribute
        sub_handlers = []
        resub_catchers = []
        output_queue = queue.Queue()

        logging.info(
            "Step 1 (first part): Establish subscription with all %d clients" %
            len(client_list))
        for sub_idx, client in enumerate(client_list):
            logging.info(
                "Establishing subscription %d/%d from controller node %s" %
                (sub_idx + 1, len(client_list), client.name))

            sub = await client.ReadAttribute(
                nodeid=self.dut_node_id,
                attributes=[(0, Clusters.Basic.Attributes.NodeLabel)],
                reportInterval=(min_report_interval_sec,
                                max_report_interval_sec),
                keepSubscriptions=False)
            self._subscriptions.append(sub)

            attribute_handler = AttributeChangeAccumulator(
                name=client.name,
                expected_attribute=Clusters.Basic.Attributes.NodeLabel,
                output=output_queue)
            sub.SetAttributeUpdateCallback(attribute_handler)
            sub_handlers.append(attribute_handler)

            # TODO: Replace resubscription catcher with API to disable re-subscription on failure
            resub_catcher = ResubscriptionCatcher(name=client.name)
            sub.SetResubscriptionAttemptedCallback(resub_catcher)
            resub_catchers.append(resub_catcher)

        asserts.assert_equal(len(self._subscriptions), len(client_list),
                             "Must have the right number of subscriptions")

        # Trigger a change on NodeLabel
        logging.info(
            "Step 1 (second part): Change attribute with one client, await all attributes changed within time"
        )
        await asyncio.sleep(1)
        await client_list[0].WriteAttribute(
            self.dut_node_id,
            [(0, Clusters.Basic.Attributes.NodeLabel(value=AFTER_LABEL))])

        all_changes = {client.name: False for client in client_list}

        # Await a stabilization delay in increments to let the event loops run
        start_time = time.time()
        elapsed = 0
        time_remaining = timeout_delay_sec

        while time_remaining > 0:
            try:
                item = output_queue.get(block=True, timeout=time_remaining)
                client_name, endpoint, attribute, value = item['name'], item[
                    'endpoint'], item['attribute'], item['value']

                # Record arrival of an expected subscription change when seen
                if endpoint == 0 and attribute == Clusters.Basic.Attributes.NodeLabel and value == AFTER_LABEL:
                    if not all_changes[client_name]:
                        logging.info(
                            "Got expected attribute change for client %s" %
                            client_name)
                        all_changes[client_name] = True

                # We are done waiting when we have accumulated all results
                if all(all_changes.values()):
                    logging.info("All clients have reported, done waiting.")
                    break
            except queue.Empty:
                # No error, we update timeouts and keep going
                pass

            elapsed = time.time() - start_time
            time_remaining = timeout_delay_sec - elapsed

        logging.info("Validation of results")
        failed = False

        for catcher in resub_catchers:
            if catcher.caught_resubscription:
                logging.error("Client %s saw a resubscription" % catcher.name)
                failed = True
            else:
                logging.info(
                    "Client %s correctly did not see a resubscription" %
                    catcher.name)

        all_reports_gotten = all(all_changes.values())
        if not all_reports_gotten:
            logging.error(
                "Missing reports from the following clients: %s" % ", ".join([
                    name
                    for name, value in all_changes.items() if value is False
                ]))
            failed = True
        else:
            logging.info(
                "Got successful reports from all clients, meaning all concurrent CASE sessions worked"
            )

        # Determine final result
        if failed:
            asserts.fail("Failed test !")
Example #27
    async def test_TC_RR_1_1(self):
        dev_ctrl = self.default_controller

        # Debug/test arguments

        # Get overrides for debugging the test
        num_fabrics_to_commission = self.user_params.get(
            "num_fabrics_to_commission", 5)
        num_controllers_per_fabric = self.user_params.get(
            "num_controllers_per_fabric", 3)
        # Immediate reporting
        min_report_interval_sec = self.user_params.get(
            "min_report_interval_sec", 0)
        # 10 minutes max reporting interval --> We don't care about keep-alives per-se and
        # want to avoid resubscriptions
        max_report_interval_sec = self.user_params.get(
            "max_report_interval_sec", 10 * 60)
        # Time to wait after changing NodeLabel for subscriptions to all hit. This is dependent
        # on MRP params of subscriber and on actual min_report_interval.
        # TODO: Determine the correct max value depending on target. Test plan doesn't say!
        timeout_delay_sec = self.user_params.get("timeout_delay_sec",
                                                 max_report_interval_sec * 2)
        # Whether to skip filling the UserLabel clusters
        skip_user_label_cluster_steps = self.user_params.get(
            "skip_user_label_cluster_steps", False)

        BEFORE_LABEL = "Before Subscriptions 12345678912"
        AFTER_LABEL = "After Subscriptions 123456789123"

        # Pre-conditions

        # Make sure all certificates are installed with maximal size
        dev_ctrl.fabricAdmin.certificateAuthority.maximizeCertChains = True

        # TODO: Do from PICS list. The reflection approach here is what a real client would do,
        #       and it respects what the test says: "TH writes 4 entries per endpoint where LabelList is supported"
        logging.info(
            "Pre-condition: determine whether any endpoints have UserLabel cluster (ULABEL.S.A0000(LabelList))"
        )
        endpoints_with_user_label_list = await dev_ctrl.ReadAttribute(
            self.dut_node_id, [Clusters.UserLabel.Attributes.LabelList])
        has_user_labels = len(endpoints_with_user_label_list) > 0
        if has_user_labels:
            logging.info(
                "--> User label cluster present on endpoints %s" % ", ".join([
                    "%d" % ep for ep in endpoints_with_user_label_list.keys()
                ]))
        else:
            logging.info("--> User label cluster not present on any endpoitns")

        # Generate list of all clients names
        all_names = []
        for fabric_idx in range(num_fabrics_to_commission):
            for controller_idx in range(num_controllers_per_fabric):
                all_names.append(
                    "RD%d%s" %
                    (fabric_idx + 1, chr(ord('A') + controller_idx)))
        logging.info(f"Client names that will be used: {all_names}")
        client_list = []

        # TODO: Shall we also verify SupportedFabrics attribute, and the CapabilityMinima attribute?
        logging.info(
            "Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3"
        )

        capability_minima = await self.read_single_attribute(
            dev_ctrl,
            node_id=self.dut_node_id,
            endpoint=0,
            attribute=Clusters.Basic.Attributes.CapabilityMinima)
        asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric,
                                     3)

        # Step 1: Commission 5 fabrics with maximized NOC chains
        logging.info(
            f"Step 1: use existing fabric to configure new fabrics so that total is {num_fabrics_to_commission} fabrics"
        )

        # Generate Node IDs for subsequent controllers, starting at 200 and continuing with 300, 400, ...
        node_ids = [
            200 + (i * 100) for i in range(num_controllers_per_fabric - 1)
        ]

        # Prepare clients for first fabric, that includes the default controller
        dev_ctrl.name = all_names.pop(0)
        client_list.append(dev_ctrl)

        if num_controllers_per_fabric > 1:
            new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
                fabricAdmin=dev_ctrl.fabricAdmin,
                adminDevCtrl=dev_ctrl,
                controllerNodeIds=node_ids,
                privilege=Clusters.AccessControl.Enums.Privilege.kAdminister,
                targetNodeId=self.dut_node_id,
                catTags=[0x0001_0001])
            for controller in new_controllers:
                controller.name = all_names.pop(0)
            client_list.extend(new_controllers)

        # Prepare clients for subsequent fabrics
        for i in range(num_fabrics_to_commission - 1):
            admin_index = 2 + i
            logging.info("Commissioning fabric %d/%d" %
                         (admin_index, num_fabrics_to_commission))
            new_certificate_authority = self.certificate_authority_manager.NewCertificateAuthority(
            )
            new_fabric_admin = new_certificate_authority.NewFabricAdmin(
                vendorId=0xFFF1, fabricId=admin_index)

            new_admin_ctrl = new_fabric_admin.NewController(
                nodeId=dev_ctrl.nodeId, catTags=[0x0001_0001])
            new_admin_ctrl.name = all_names.pop(0)
            client_list.append(new_admin_ctrl)
            await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(
                commissionerDevCtrl=dev_ctrl,
                newFabricDevCtrl=new_admin_ctrl,
                existingNodeId=self.dut_node_id,
                newNodeId=self.dut_node_id)

            if num_controllers_per_fabric > 1:
                new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
                    fabricAdmin=new_fabric_admin,
                    adminDevCtrl=new_admin_ctrl,
                    controllerNodeIds=node_ids,
                    privilege=Clusters.AccessControl.Enums.Privilege.
                    kAdminister,
                    targetNodeId=self.dut_node_id,
                    catTags=[0x0001_0001])
                for controller in new_controllers:
                    controller.name = all_names.pop(0)

                client_list.extend(new_controllers)

        asserts.assert_equal(
            len(client_list),
            num_fabrics_to_commission * num_controllers_per_fabric,
            "Must have the right number of clients")

        client_by_name = {client.name: client for client in client_list}

        # Step 2: Set the Label field for each fabric and BasicInformation.NodeLabel to 32 characters
        logging.info(
            "Step 2: Setting the Label field for each fabric and BasicInformation.NodeLabel to 32 characters"
        )

        for idx in range(num_fabrics_to_commission):
            fabric_number = idx + 1
            # Client is client A for each fabric to set the Label field
            client_name = "RD%dA" % fabric_number
            client = client_by_name[client_name]

            # Send the UpdateLabel command
            label = ("%d" % fabric_number) * 32
            logging.info(
                "Step 2a: Setting fabric label on fabric %d to '%s' using client %s"
                % (fabric_number, label, client_name))
            await client.SendCommand(
                self.dut_node_id, 0,
                Clusters.OperationalCredentials.Commands.UpdateFabricLabel(
                    label))

            # Read back
            fabric_metadata = await self.read_single_attribute(
                client,
                node_id=self.dut_node_id,
                endpoint=0,
                attribute=Clusters.OperationalCredentials.Attributes.Fabrics)
            print(fabric_metadata)
            asserts.assert_equal(
                fabric_metadata[0].label, label,
                "Fabrics[x].label must match what was written")

        # Before subscribing, set the NodeLabel to "Before Subscriptions"
        logging.info(
            f"Step 2b: Set BasicInformation.NodeLabel to {BEFORE_LABEL}")
        await client_list[0].WriteAttribute(
            self.dut_node_id,
            [(0, Clusters.Basic.Attributes.NodeLabel(value=BEFORE_LABEL))])

        node_label = await self.read_single_attribute(
            client,
            node_id=self.dut_node_id,
            endpoint=0,
            attribute=Clusters.Basic.Attributes.NodeLabel)
        asserts.assert_equal(node_label, BEFORE_LABEL,
                             "NodeLabel must match what was written")

        # Step 3: Add 3 Access Control entries on DUT with a list of 4 Subjects and 3 Targets with the following parameters (...)
        logging.info("Step 3: Fill ACL table so that all minimas are reached")

        for idx in range(num_fabrics_to_commission):
            fabric_number = idx + 1
            # Client is client A for each fabric
            client_name = "RD%dA" % fabric_number
            client = client_by_name[client_name]

            acl = self.build_acl(fabric_number, client_by_name,
                                 num_controllers_per_fabric)

            logging.info(
                f"Step 3a: Writing ACL entry for fabric {fabric_number}")
            await client.WriteAttribute(
                self.dut_node_id,
                [(0, Clusters.AccessControl.Attributes.Acl(acl))])

            logging.info(
                f"Step 3b: Validating ACL entry for fabric {fabric_number}")
            acl_readback = await self.read_single_attribute(
                client,
                node_id=self.dut_node_id,
                endpoint=0,
                attribute=Clusters.AccessControl.Attributes.Acl)
            fabric_index = 9999
            for entry in acl_readback:
                asserts.assert_equal(
                    entry.fabricIndex, fabric_number,
                    "Fabric Index of response entries must match")
                fabric_index = entry.fabricIndex

            for entry in acl:
                # Fix up the original ACL list items (which all had fabricIndex of 0 on write, since it
                # is ignored) so that they match the incoming fabric index. This allows comparing the
                # structs by equality.
                entry.fabricIndex = fabric_index
            asserts.assert_equal(acl_readback, acl,
                                 "ACL must match what was written")

        # Step 4 and 5 (the operations cannot be separated): establish all CASE sessions and subscriptions

        # Subscribe with all clients to NodeLabel attribute and 2 more paths
        sub_handlers = []
        resub_catchers = []
        output_queue = queue.Queue()
        subscription_contents = [
            (0, Clusters.Basic.Attributes.NodeLabel),  # Single attribute
            (0, Clusters.OperationalCredentials
             ),  # Wildcard all of opcreds attributes on EP0
            Clusters.Descriptor  # All descriptors on all endpoints
        ]

        logging.info(
            "Step 4 and 5 (first part): Establish subscription with all %d clients"
            % len(client_list))
        for sub_idx, client in enumerate(client_list):
            logging.info(
                "Establishing subscription %d/%d from controller node %s" %
                (sub_idx + 1, len(client_list), client.name))

            sub = await client.ReadAttribute(
                nodeid=self.dut_node_id,
                attributes=subscription_contents,
                reportInterval=(min_report_interval_sec,
                                max_report_interval_sec),
                keepSubscriptions=False)
            self._subscriptions.append(sub)

            attribute_handler = AttributeChangeAccumulator(
                name=client.name,
                expected_attribute=Clusters.Basic.Attributes.NodeLabel,
                output=output_queue)
            sub.SetAttributeUpdateCallback(attribute_handler)
            sub_handlers.append(attribute_handler)

            # TODO: Replace resubscription catcher with API to disable re-subscription on failure
            resub_catcher = ResubscriptionCatcher(name=client.name)
            sub.SetResubscriptionAttemptedCallback(resub_catcher)
            resub_catchers.append(resub_catcher)

        asserts.assert_equal(len(self._subscriptions), len(client_list),
                             "Must have the right number of subscriptions")

        # Step 6: Read 9 paths and validate success
        logging.info(
            "Step 6: Read 9 paths (first 9 attributes of Basic Information cluster) and validate success"
        )

        large_read_contents = [
            Clusters.Basic.Attributes.DataModelRevision,
            Clusters.Basic.Attributes.VendorName,
            Clusters.Basic.Attributes.VendorID,
            Clusters.Basic.Attributes.ProductName,
            Clusters.Basic.Attributes.ProductID,
            Clusters.Basic.Attributes.NodeLabel,
            Clusters.Basic.Attributes.Location,
            Clusters.Basic.Attributes.HardwareVersion,
            Clusters.Basic.Attributes.HardwareVersionString,
        ]
        large_read_paths = [(0, attrib) for attrib in large_read_contents]
        basic_info = await dev_ctrl.ReadAttribute(self.dut_node_id,
                                                  large_read_paths)

        # Make sure everything came back from the read that we expected
        asserts.assert_true(0 in basic_info.keys(),
                            "Must have read endpoint 0 data")
        asserts.assert_true(Clusters.Basic in basic_info[0].keys(),
                            "Must have read Basic Information cluster data")
        for attribute in large_read_contents:
            asserts.assert_true(
                attribute in basic_info[0][Clusters.Basic],
                "Must have read back attribute %s" % (attribute.__name__))

        # Step 7: Trigger a change on NodeLabel
        logging.info(
            "Step 7: Change attribute with one client, await all attributes changed successfully without loss of subscriptions"
        )
        await asyncio.sleep(1)
        await client_list[0].WriteAttribute(
            self.dut_node_id,
            [(0, Clusters.Basic.Attributes.NodeLabel(value=AFTER_LABEL))])

        all_changes = {client.name: False for client in client_list}

        # Await a stabilization delay in increments to let the event loops run
        start_time = time.time()
        elapsed = 0
        time_remaining = timeout_delay_sec

        while time_remaining > 0:
            try:
                item = output_queue.get(block=True, timeout=time_remaining)
                client_name, endpoint, attribute, value = item['name'], item[
                    'endpoint'], item['attribute'], item['value']

                # Record arrival of an expected subscription change when seen
                if endpoint == 0 and attribute == Clusters.Basic.Attributes.NodeLabel and value == AFTER_LABEL:
                    if not all_changes[client_name]:
                        logging.info(
                            "Got expected attribute change for client %s" %
                            client_name)
                        all_changes[client_name] = True

                # We are done waiting when we have accumulated all results
                if all(all_changes.values()):
                    logging.info("All clients have reported, done waiting.")
                    break
            except queue.Empty:
                # No error, we update timeouts and keep going
                pass

            elapsed = time.time() - start_time
            time_remaining = timeout_delay_sec - elapsed

        logging.info("Step 7: Validation of results")
        sub_test_failed = False

        for catcher in resub_catchers:
            if catcher.caught_resubscription:
                logging.error("Client %s saw a resubscription" % catcher.name)
                sub_test_failed = True
            else:
                logging.info(
                    "Client %s correctly did not see a resubscription" %
                    catcher.name)

        all_reports_gotten = all(all_changes.values())
        if not all_reports_gotten:
            logging.error(
                "Missing reports from the following clients: %s" % ", ".join([
                    name
                    for name, value in all_changes.items() if value is False
                ]))
            sub_test_failed = True
        else:
            logging.info(
                "Got successful reports from all clients, meaning all concurrent CASE sessions worked"
            )

        # Determine result of Step 7
        if sub_test_failed:
            asserts.fail("Failed step 7 !")

        # Step 8: Validate sessions have not changed by doing a read on NodeLabel from all clients
        logging.info("Step 8: Read back NodeLabel directly from all clients")
        for sub_idx, client in enumerate(client_list):
            logging.info("Reading NodeLabel (%d/%d) from controller node %s" %
                         (sub_idx + 1, len(client_list), client.name))

            label_readback = await self.read_single_attribute(
                client,
                node_id=self.dut_node_id,
                endpoint=0,
                attribute=Clusters.Basic.Attributes.NodeLabel)
            asserts.assert_equal(label_readback, AFTER_LABEL)

            # TODO: Compare before/after session IDs. Requires more native changes, and the
        #       subscription method above is actually good enough, we think.

        # Step 9: Fill user label list
        if has_user_labels and not skip_user_label_cluster_steps:
            await self.fill_user_label_list(dev_ctrl, self.dut_node_id)
        else:
            logging.info(
                "Step 9: Skipped due to no UserLabel cluster instances")
Example #28
def ReadCharacteristic(client):
  """Logic for BLE characteristic read.

  Args:
    client: AndroidDevice. The device that behaves as GATT client.

  Steps:
    1. Client reads a characteristic from server & gets true.
    2. Server calls sendResponse & client gets onCharacteristicRead.

  Verifies:
    Client gets corresponding callback.
  """
  read_operation_result = client.android.bleReadOperation(
      TEST_BLE_SERVICE_UUID, TEST_READ_UUID)
  asserts.assert_true(read_operation_result,
                      'BLE read operation failed to start')
  read_operation_result = client.client_callback.waitAndGet(
      'onCharacteristicRead', 30)
  asserts.assert_equal(read_operation_result.data[STATUS], GATT_SUCCESS)
  asserts.assert_equal(read_operation_result.data['Data'], READ_DATA)
  client.log.info('Read operation finished')
  read_operation_result = client.android.bleReadOperation(
      TEST_BLE_SERVICE_UUID, TEST_SECOND_READ_UUID)
  asserts.assert_true(read_operation_result,
                      'BLE read operation failed to start')
  read_operation_result = client.client_callback.waitAndGet(
      'onCharacteristicRead', 30)
  asserts.assert_equal(read_operation_result.data[STATUS], GATT_SUCCESS)
  asserts.assert_equal(read_operation_result.data['Data'], SECOND_READ_DATA)
  client.log.info('Second read operation finished')
  read_operation_result = client.android.bleReadOperation(
      TEST_BLE_SERVICE_UUID, TEST_THIRD_READ_UUID)
  asserts.assert_true(read_operation_result,
                      'BLE read operation failed to start')
  read_operation_result = client.client_callback.waitAndGet(
      'onCharacteristicRead', 30)
  asserts.assert_equal(read_operation_result.data[STATUS], GATT_SUCCESS)
  asserts.assert_equal(read_operation_result.data['Data'], THIRD_READ_DATA)
  client.log.info('Third read operation finished')
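The three read round trips above follow an identical pattern, so they lend themselves to a data-driven loop. A hedged sketch using only the constants and calls already shown:

def _read_and_verify_all(client):
  """Reads each characteristic in turn and checks the returned payload."""
  expected = [
      (TEST_READ_UUID, READ_DATA),
      (TEST_SECOND_READ_UUID, SECOND_READ_DATA),
      (TEST_THIRD_READ_UUID, THIRD_READ_DATA),
  ]
  for characteristic_uuid, want in expected:
    asserts.assert_true(
        client.android.bleReadOperation(TEST_BLE_SERVICE_UUID,
                                        characteristic_uuid),
        'BLE read operation failed to start')
    result = client.client_callback.waitAndGet('onCharacteristicRead', 30)
    asserts.assert_equal(result.data[STATUS], GATT_SUCCESS)
    asserts.assert_equal(result.data['Data'], want)
    client.log.info('Read of %s finished', characteristic_uuid)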
Example #29
 def test_func(self):
     asserts.assert_equal(1, 2, extras=MOCK_EXTRA)
Example #30
 def test_func(self):
     asserts.assert_equal(
         1, 2, msg=MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA)