Example #1
    async def test_TC_SC_3_6(self):
        dev_ctrl = self.default_controller

        # Get overrides for debugging the test
        num_fabrics_to_commission = self.user_params.get(
            "num_fabrics_to_commission", 5)
        num_controllers_per_fabric = self.user_params.get(
            "num_controllers_per_fabric", 3)
        # Immediate reporting
        min_report_interval_sec = self.user_params.get(
            "min_report_interval_sec", 0)
        # 10 minutes max reporting interval --> We don't care about keep-alives per se and
        # want to avoid resubscriptions
        max_report_interval_sec = self.user_params.get(
            "max_report_interval_sec", 10 * 60)
        # Time to wait after changing NodeLabel for all subscriptions to report. This is dependent
        # on the MRP params of the subscriber and on the actual min_report_interval.
        # TODO: Determine the correct max value depending on target. Test plan doesn't say!
        timeout_delay_sec = self.user_params.get("timeout_delay_sec",
                                                 max_report_interval_sec * 2)

        BEFORE_LABEL = "Before Subscriptions"
        AFTER_LABEL = "After Subscriptions"

        # Generate list of all client names
        all_names = []
        for fabric_idx in range(num_fabrics_to_commission):
            for controller_idx in range(num_controllers_per_fabric):
                all_names.append(
                    "RD%d%s" %
                    (fabric_idx + 1, chr(ord('A') + controller_idx)))
        logging.info("Client names that will be used: %s" % all_names)
        client_list = []

        logging.info(
            "Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3"
        )

        capability_minima = await self.read_single_attribute(
            dev_ctrl,
            node_id=self.dut_node_id,
            endpoint=0,
            attribute=Clusters.Basic.Attributes.CapabilityMinima)
        asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric,
                                     3)

        logging.info(
            "Pre-conditions: use existing fabric to configure new fabrics so that total is %d fabrics"
            % num_fabrics_to_commission)

        # Generate Node IDs for the subsequent controllers, starting at 200 and stepping by 100 (200, 300, ...)
        node_ids = [
            200 + (i * 100) for i in range(num_controllers_per_fabric - 1)
        ]

        # Prepare clients for the first fabric, which includes the default controller
        dev_ctrl.name = all_names.pop(0)
        client_list.append(dev_ctrl)

        if num_controllers_per_fabric > 1:
            new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
                fabricAdmin=dev_ctrl.fabricAdmin,
                adminDevCtrl=dev_ctrl,
                controllerNodeIds=node_ids,
                privilege=Clusters.AccessControl.Enums.Privilege.kAdminister,
                targetNodeId=self.dut_node_id)
            for controller in new_controllers:
                controller.name = all_names.pop(0)
            client_list.extend(new_controllers)

        # Prepare clients for subsequent fabrics
        for i in range(num_fabrics_to_commission - 1):
            admin_index = 2 + i
            logging.info("Commissioning fabric %d/%d" %
                         (admin_index, num_fabrics_to_commission))
            new_certificate_authority = self.certificate_authority_manager.NewCertificateAuthority()
            new_fabric_admin = new_certificate_authority.NewFabricAdmin(
                vendorId=0xFFF1, fabricId=1)
            new_admin_ctrl = new_fabric_admin.NewController(
                nodeId=dev_ctrl.nodeId)
            new_admin_ctrl.name = all_names.pop(0)
            client_list.append(new_admin_ctrl)
            await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(
                commissionerDevCtrl=dev_ctrl,
                newFabricDevCtrl=new_admin_ctrl,
                existingNodeId=self.dut_node_id,
                newNodeId=self.dut_node_id)

            if num_controllers_per_fabric > 1:
                new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
                    fabricAdmin=new_fabric_admin,
                    adminDevCtrl=new_admin_ctrl,
                    controllerNodeIds=node_ids,
                    privilege=Clusters.AccessControl.Enums.Privilege.kAdminister,
                    targetNodeId=self.dut_node_id)
                for controller in new_controllers:
                    controller.name = all_names.pop(0)

                client_list.extend(new_controllers)

        asserts.assert_equal(
            len(client_list),
            num_fabrics_to_commission * num_controllers_per_fabric,
            "Must have the right number of clients")

        # Before subscribing, set the NodeLabel to "Before Subscriptions"
        logging.info(
            "Pre-conditions: writing initial value of NodeLabel, so that we can control for change of attribute detection"
        )
        await client_list[0].WriteAttribute(
            self.dut_node_id,
            [(0, Clusters.Basic.Attributes.NodeLabel(value=BEFORE_LABEL))])

        # Subscribe with all clients to NodeLabel attribute
        sub_handlers = []
        resub_catchers = []
        output_queue = queue.Queue()

        logging.info(
            "Step 1 (first part): Establish subscription with all %d clients" %
            len(client_list))
        for sub_idx, client in enumerate(client_list):
            logging.info(
                "Establishing subscription %d/%d from controller node %s" %
                (sub_idx + 1, len(client_list), client.name))

            sub = await client.ReadAttribute(
                nodeid=self.dut_node_id,
                attributes=[(0, Clusters.Basic.Attributes.NodeLabel)],
                reportInterval=(min_report_interval_sec,
                                max_report_interval_sec),
                keepSubscriptions=False)
            self._subscriptions.append(sub)

            attribute_handler = AttributeChangeAccumulator(
                name=client.name,
                expected_attribute=Clusters.Basic.Attributes.NodeLabel,
                output=output_queue)
            sub.SetAttributeUpdateCallback(attribute_handler)
            sub_handlers.append(attribute_handler)

            # TODO: Replace resubscription catcher with API to disable re-subscription on failure
            resub_catcher = ResubscriptionCatcher(name=client.name)
            sub.SetResubscriptionAttemptedCallback(resub_catcher)
            resub_catchers.append(resub_catcher)

        asserts.assert_equal(len(self._subscriptions), len(client_list),
                             "Must have the right number of subscriptions")

        # Trigger a change on NodeLabel
        logging.info(
            "Step 1 (second part): Change attribute with one client, await all attributes changed within time"
        )
        await asyncio.sleep(1)
        await client_list[0].WriteAttribute(
            self.dut_node_id,
            [(0, Clusters.Basic.Attributes.NodeLabel(value=AFTER_LABEL))])

        all_changes = {client.name: False for client in client_list}

        # Await a stabilization delay in increments to let the event loops run
        start_time = time.time()
        elapsed = 0
        time_remaining = timeout_delay_sec

        while time_remaining > 0:
            try:
                item = output_queue.get(block=True, timeout=time_remaining)
                client_name = item['name']
                endpoint = item['endpoint']
                attribute = item['attribute']
                value = item['value']

                # Record arrival of an expected subscription change when seen
                if endpoint == 0 and attribute == Clusters.Basic.Attributes.NodeLabel and value == AFTER_LABEL:
                    if not all_changes[client_name]:
                        logging.info(
                            "Got expected attribute change for client %s" %
                            client_name)
                        all_changes[client_name] = True

                # We are done waiting when we have accumulated all results
                if all(all_changes.values()):
                    logging.info("All clients have reported, done waiting.")
                    break
            except queue.Empty:
                # Not an error; update the remaining time and keep going
                pass

            elapsed = time.time() - start_time
            time_remaining = timeout_delay_sec - elapsed

        logging.info("Validation of results")
        failed = False

        for catcher in resub_catchers:
            if catcher.caught_resubscription:
                logging.error("Client %s saw a resubscription" % catcher.name)
                failed = True
            else:
                logging.info(
                    "Client %s correctly did not see a resubscription" %
                    catcher.name)

        all_reports_gotten = all(all_changes.values())
        if not all_reports_gotten:
            logging.error(
                "Missing reports from the following clients: %s" % ", ".join([
                    name
                    for name, value in all_changes.items() if value is False
                ]))
            failed = True
        else:
            logging.info(
                "Got successful reports from all clients, meaning all concurrent CASE sessions worked"
            )

        # Determine final result
        if failed:
            asserts.fail("Failed test!")
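The helper classes AttributeChangeAccumulator and ResubscriptionCatcher are used above but not shown. A minimal sketch of what they could look like, assuming the Matter Python controller's callback shapes (a typed attribute path exposing AttributeType and Path.EndpointId, and a transaction exposing GetAttribute); these signatures are assumptions, not the test file's actual source:

import queue

class AttributeChangeAccumulator:
    # Collects matching attribute reports into a thread-safe queue so the
    # test's main loop can wait on them with a timeout.
    def __init__(self, name, expected_attribute, output: queue.Queue):
        self._name = name
        self._expected_attribute = expected_attribute
        self._output = output

    def __call__(self, path, transaction):
        # Assumed SDK callback shape: `path.AttributeType` is the attribute
        # class and `transaction.GetAttribute(path)` yields the new value.
        if path.AttributeType == self._expected_attribute:
            self._output.put({
                'name': self._name,
                'endpoint': path.Path.EndpointId,
                'attribute': path.AttributeType,
                'value': transaction.GetAttribute(path)
            })

class ResubscriptionCatcher:
    # Records any resubscription attempt; the test treats a resubscription
    # as a dropped CASE session and therefore a failure.
    def __init__(self, name):
        self.name = name
        self.caught_resubscription = False

    def __call__(self, transaction, terminationError, nextResubscribeIntervalMsec):
        self.caught_resubscription = True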
Example #2
    def test_assert_greater_equal_fail_with_msg_and_extras(self):
        with self.assertRaises(signals.TestFailure) as cm:
            asserts.assert_greater_equal(1, 2, msg='Message', extras='Extras')
        self.assertEqual(cm.exception.details,
                         '1 not greater than or equal to 2 Message')
        self.assertEqual(cm.exception.extras, 'Extras')
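Example #2 pins down the failure-message format: the comparison detail comes first and the optional user message is appended after a space, with extras carried separately on the exception. Conceptually the wrapper behaves like this simplified stand-in (mobly's real implementation delegates to unittest's assertion machinery):

from mobly import signals

def assert_greater_equal(first, second, msg=None, extras=None):
    # Simplified stand-in, not mobly's source: build the unittest-style
    # detail string, append the user message, raise TestFailure with extras.
    if not first >= second:
        details = '%s not greater than or equal to %s' % (first, second)
        if msg:
            details = '%s %s' % (details, msg)
        raise signals.TestFailure(details, extras=extras)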
Example #3
    def test_assert_greater_equal_pass(self):
        asserts.assert_greater_equal(2, 1.0)
        asserts.assert_greater_equal(1, 1)
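Example #3 shows that the check is a plain Python >= comparison: mixed int/float operands and the equality boundary both pass. By the same semantics, NaN operands always fail, since every ordered comparison against NaN is false; a small illustration (not part of the original suite):

from mobly import asserts, signals

try:
    asserts.assert_greater_equal(float('nan'), 0)  # NaN >= 0 is false
except signals.TestFailure:
    print('NaN is not >= 0, so the assertion fails as expected')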
Example #4
    def test_assert_greater_equal_fail(self):
        with self.assertRaises(signals.TestFailure) as cm:
            asserts.assert_greater_equal(1, 2)
        self.assertEqual(cm.exception.details,
                         '1 not greater than or equal to 2')
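Examples #2 through #4 are methods of a plain unittest.TestCase. A minimal self-contained harness for running one of them, with the imports the snippets omit (the class name AssertsTest is illustrative):

import unittest

from mobly import asserts, signals

class AssertsTest(unittest.TestCase):
    def test_assert_greater_equal_fail(self):
        with self.assertRaises(signals.TestFailure) as cm:
            asserts.assert_greater_equal(1, 2)
        self.assertEqual(cm.exception.details,
                         '1 not greater than or equal to 2')

if __name__ == '__main__':
    unittest.main()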
Example #5
    async def test_TC_RR_1_1(self):
        dev_ctrl = self.default_controller

        # Debug/test arguments

        # Get overrides for debugging the test
        num_fabrics_to_commission = self.user_params.get(
            "num_fabrics_to_commission", 5)
        num_controllers_per_fabric = self.user_params.get(
            "num_controllers_per_fabric", 3)
        # Immediate reporting
        min_report_interval_sec = self.user_params.get(
            "min_report_interval_sec", 0)
        # 10 minutes max reporting interval --> We don't care about keep-alives per se and
        # want to avoid resubscriptions
        max_report_interval_sec = self.user_params.get(
            "max_report_interval_sec", 10 * 60)
        # Time to wait after changing NodeLabel for all subscriptions to report. This is dependent
        # on the MRP params of the subscriber and on the actual min_report_interval.
        # TODO: Determine the correct max value depending on target. Test plan doesn't say!
        timeout_delay_sec = self.user_params.get("timeout_delay_sec",
                                                 max_report_interval_sec * 2)
        # Whether to skip filling the UserLabel clusters
        skip_user_label_cluster_steps = self.user_params.get(
            "skip_user_label_cluster_steps", False)

        BEFORE_LABEL = "Before Subscriptions 12345678912"
        AFTER_LABEL = "After Subscriptions 123456789123"

        # Pre-conditions

        # Make sure all certificates are installed with maximal size
        dev_ctrl.fabricAdmin.certificateAuthority.maximizeCertChains = True

        # TODO: Do this from the PICS list. The reflection approach here matches what a real client would do,
        #       and it respects what the test says: "TH writes 4 entries per endpoint where LabelList is supported"
        logging.info(
            "Pre-condition: determine whether any endpoints have UserLabel cluster (ULABEL.S.A0000(LabelList))"
        )
        endpoints_with_user_label_list = await dev_ctrl.ReadAttribute(
            self.dut_node_id, [Clusters.UserLabel.Attributes.LabelList])
        has_user_labels = len(endpoints_with_user_label_list) > 0
        if has_user_labels:
            logging.info(
                "--> User label cluster present on endpoints %s" % ", ".join([
                    "%d" % ep for ep in endpoints_with_user_label_list.keys()
                ]))
        else:
            logging.info("--> User label cluster not present on any endpoitns")

        # Generate list of all client names
        all_names = []
        for fabric_idx in range(num_fabrics_to_commission):
            for controller_idx in range(num_controllers_per_fabric):
                all_names.append(
                    "RD%d%s" %
                    (fabric_idx + 1, chr(ord('A') + controller_idx)))
        logging.info(f"Client names that will be used: {all_names}")
        client_list = []

        # TODO: Shall we also verify SupportedFabrics attribute, and the CapabilityMinima attribute?
        logging.info(
            "Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3"
        )

        capability_minima = await self.read_single_attribute(
            dev_ctrl,
            node_id=self.dut_node_id,
            endpoint=0,
            attribute=Clusters.Basic.Attributes.CapabilityMinima)
        asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric,
                                     3)

        # Step 1: Commission 5 fabrics with maximized NOC chains
        logging.info(
            f"Step 1: use existing fabric to configure new fabrics so that total is {num_fabrics_to_commission} fabrics"
        )

        # Generate Node IDs for the subsequent controllers, starting at 200 and stepping by 100 (200, 300, ...)
        node_ids = [
            200 + (i * 100) for i in range(num_controllers_per_fabric - 1)
        ]

        # Prepare clients for the first fabric, which includes the default controller
        dev_ctrl.name = all_names.pop(0)
        client_list.append(dev_ctrl)

        if num_controllers_per_fabric > 1:
            new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
                fabricAdmin=dev_ctrl.fabricAdmin,
                adminDevCtrl=dev_ctrl,
                controllerNodeIds=node_ids,
                privilege=Clusters.AccessControl.Enums.Privilege.kAdminister,
                targetNodeId=self.dut_node_id,
                catTags=[0x0001_0001])
            for controller in new_controllers:
                controller.name = all_names.pop(0)
            client_list.extend(new_controllers)

        # Prepare clients for subsequent fabrics
        for i in range(num_fabrics_to_commission - 1):
            admin_index = 2 + i
            logging.info("Commissioning fabric %d/%d" %
                         (admin_index, num_fabrics_to_commission))
            new_certificate_authority = self.certificate_authority_manager.NewCertificateAuthority()
            new_fabric_admin = new_certificate_authority.NewFabricAdmin(
                vendorId=0xFFF1, fabricId=admin_index)

            new_admin_ctrl = new_fabric_admin.NewController(
                nodeId=dev_ctrl.nodeId, catTags=[0x0001_0001])
            new_admin_ctrl.name = all_names.pop(0)
            client_list.append(new_admin_ctrl)
            await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(
                commissionerDevCtrl=dev_ctrl,
                newFabricDevCtrl=new_admin_ctrl,
                existingNodeId=self.dut_node_id,
                newNodeId=self.dut_node_id)

            if num_controllers_per_fabric > 1:
                new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
                    fabricAdmin=new_fabric_admin,
                    adminDevCtrl=new_admin_ctrl,
                    controllerNodeIds=node_ids,
                    privilege=Clusters.AccessControl.Enums.Privilege.kAdminister,
                    targetNodeId=self.dut_node_id,
                    catTags=[0x0001_0001])
                for controller in new_controllers:
                    controller.name = all_names.pop(0)

                client_list.extend(new_controllers)

        asserts.assert_equal(
            len(client_list),
            num_fabrics_to_commission * num_controllers_per_fabric,
            "Must have the right number of clients")

        client_by_name = {client.name: client for client in client_list}

        # Step 2: Set the Label field for each fabric and BasicInformation.NodeLabel to 32 characters
        logging.info(
            "Step 2: Setting the Label field for each fabric and BasicInformation.NodeLabel to 32 characters"
        )

        for idx in range(num_fabrics_to_commission):
            fabric_number = idx + 1
            # Client is client A for each fabric to set the Label field
            client_name = "RD%dA" % fabric_number
            client = client_by_name[client_name]

            # Send the UpdateLabel command
            label = ("%d" % fabric_number) * 32
            logging.info(
                "Step 2a: Setting fabric label on fabric %d to '%s' using client %s"
                % (fabric_number, label, client_name))
            await client.SendCommand(
                self.dut_node_id, 0,
                Clusters.OperationalCredentials.Commands.UpdateFabricLabel(
                    label))

            # Read back
            fabric_metadata = await self.read_single_attribute(
                client,
                node_id=self.dut_node_id,
                endpoint=0,
                attribute=Clusters.OperationalCredentials.Attributes.Fabrics)
            logging.info("Fabric metadata: %s" % fabric_metadata)
            asserts.assert_equal(
                fabric_metadata[0].label, label,
                "Fabrics[x].label must match what was written")

        # Before subscribing, set the NodeLabel to "Before Subscriptions"
        logging.info(
            f"Step 2b: Set BasicInformation.NodeLabel to {BEFORE_LABEL}")
        await client_list[0].WriteAttribute(
            self.dut_node_id,
            [(0, Clusters.Basic.Attributes.NodeLabel(value=BEFORE_LABEL))])

        node_label = await self.read_single_attribute(
            client,
            node_id=self.dut_node_id,
            endpoint=0,
            attribute=Clusters.Basic.Attributes.NodeLabel)
        asserts.assert_equal(node_label, BEFORE_LABEL,
                             "NodeLabel must match what was written")

        # Step 3: Add 3 Access Control entries on DUT with a list of 4 Subjects and 3 Targets with the following parameters (...)
        logging.info("Step 3: Fill ACL table so that all minimas are reached")

        for idx in range(num_fabrics_to_commission):
            fabric_number = idx + 1
            # Client is client A for each fabric
            client_name = "RD%dA" % fabric_number
            client = client_by_name[client_name]

            acl = self.build_acl(fabric_number, client_by_name,
                                 num_controllers_per_fabric)

            logging.info(
                f"Step 3a: Writing ACL entry for fabric {fabric_number}")
            await client.WriteAttribute(
                self.dut_node_id,
                [(0, Clusters.AccessControl.Attributes.Acl(acl))])

            logging.info(
                f"Step 3b: Validating ACL entry for fabric {fabric_number}")
            acl_readback = await self.read_single_attribute(
                client,
                node_id=self.dut_node_id,
                endpoint=0,
                attribute=Clusters.AccessControl.Attributes.Acl)
            fabric_index = 9999  # Sentinel; overwritten with the actual fabric index from the readback below
            for entry in acl_readback:
                asserts.assert_equal(
                    entry.fabricIndex, fabric_number,
                    "Fabric Index of response entries must match")
                fabric_index = entry.fabricIndex

            for entry in acl:
                # Fix up the original ACL list items (which all had fabricIndex 0 on write, since it is ignored)
                # so that they match the incoming fabric index. This allows checking the structs by equality.
                entry.fabricIndex = fabric_index
            asserts.assert_equal(acl_readback, acl,
                                 "ACL must match what was written")

        # Step 4 and 5 (the operations cannot be separated): establish all CASE sessions and subscriptions

        # Subscribe with all clients to NodeLabel attribute and 2 more paths
        sub_handlers = []
        resub_catchers = []
        output_queue = queue.Queue()
        subscription_contents = [
            (0, Clusters.Basic.Attributes.NodeLabel),  # Single attribute
            (0, Clusters.OperationalCredentials),  # Wildcard all of opcreds attributes on EP0
            Clusters.Descriptor  # All descriptors on all endpoints
        ]

        logging.info(
            "Step 4 and 5 (first part): Establish subscription with all %d clients"
            % len(client_list))
        for sub_idx, client in enumerate(client_list):
            logging.info(
                "Establishing subscription %d/%d from controller node %s" %
                (sub_idx + 1, len(client_list), client.name))

            sub = await client.ReadAttribute(
                nodeid=self.dut_node_id,
                attributes=subscription_contents,
                reportInterval=(min_report_interval_sec,
                                max_report_interval_sec),
                keepSubscriptions=False)
            self._subscriptions.append(sub)

            attribute_handler = AttributeChangeAccumulator(
                name=client.name,
                expected_attribute=Clusters.Basic.Attributes.NodeLabel,
                output=output_queue)
            sub.SetAttributeUpdateCallback(attribute_handler)
            sub_handlers.append(attribute_handler)

            # TODO: Replace resubscription catcher with API to disable re-subscription on failure
            resub_catcher = ResubscriptionCatcher(name=client.name)
            sub.SetResubscriptionAttemptedCallback(resub_catcher)
            resub_catchers.append(resub_catcher)

        asserts.assert_equal(len(self._subscriptions), len(client_list),
                             "Must have the right number of subscriptions")

        # Step 6: Read 9 paths and validate success
        logging.info(
            "Step 6: Read 9 paths (first 9 attributes of Basic Information cluster) and validate success"
        )

        large_read_contents = [
            Clusters.Basic.Attributes.DataModelRevision,
            Clusters.Basic.Attributes.VendorName,
            Clusters.Basic.Attributes.VendorID,
            Clusters.Basic.Attributes.ProductName,
            Clusters.Basic.Attributes.ProductID,
            Clusters.Basic.Attributes.NodeLabel,
            Clusters.Basic.Attributes.Location,
            Clusters.Basic.Attributes.HardwareVersion,
            Clusters.Basic.Attributes.HardwareVersionString,
        ]
        large_read_paths = [(0, attrib) for attrib in large_read_contents]
        basic_info = await dev_ctrl.ReadAttribute(self.dut_node_id,
                                                  large_read_paths)

        # Make sure everything came back from the read that we expected
        asserts.assert_true(0 in basic_info.keys(),
                            "Must have read endpoint 0 data")
        asserts.assert_true(Clusters.Basic in basic_info[0].keys(),
                            "Must have read Basic Information cluster data")
        for attribute in large_read_contents:
            asserts.assert_true(
                attribute in basic_info[0][Clusters.Basic],
                "Must have read back attribute %s" % (attribute.__name__))

        # Step 7: Trigger a change on NodeLabel
        logging.info(
            "Step 7: Change attribute with one client, await all attributes changed successfully without loss of subscriptions"
        )
        await asyncio.sleep(1)
        await client_list[0].WriteAttribute(
            self.dut_node_id,
            [(0, Clusters.Basic.Attributes.NodeLabel(value=AFTER_LABEL))])

        all_changes = {client.name: False for client in client_list}

        # Await a stabilization delay in increments to let the event loops run
        start_time = time.time()
        elapsed = 0
        time_remaining = timeout_delay_sec

        while time_remaining > 0:
            try:
                item = output_queue.get(block=True, timeout=time_remaining)
                client_name = item['name']
                endpoint = item['endpoint']
                attribute = item['attribute']
                value = item['value']

                # Record arrival of an expected subscription change when seen
                if endpoint == 0 and attribute == Clusters.Basic.Attributes.NodeLabel and value == AFTER_LABEL:
                    if not all_changes[client_name]:
                        logging.info(
                            "Got expected attribute change for client %s" %
                            client_name)
                        all_changes[client_name] = True

                # We are done waiting when we have accumulated all results
                if all(all_changes.values()):
                    logging.info("All clients have reported, done waiting.")
                    break
            except queue.Empty:
                # Not an error; update the remaining time and keep going
                pass

            elapsed = time.time() - start_time
            time_remaining = timeout_delay_sec - elapsed

        logging.info("Step 7: Validation of results")
        sub_test_failed = False

        for catcher in resub_catchers:
            if catcher.caught_resubscription:
                logging.error("Client %s saw a resubscription" % catcher.name)
                sub_test_failed = True
            else:
                logging.info(
                    "Client %s correctly did not see a resubscription" %
                    catcher.name)

        all_reports_gotten = all(all_changes.values())
        if not all_reports_gotten:
            logging.error(
                "Missing reports from the following clients: %s" % ", ".join([
                    name
                    for name, value in all_changes.items() if value is False
                ]))
            sub_test_failed = True
        else:
            logging.info(
                "Got successful reports from all clients, meaning all concurrent CASE sessions worked"
            )

        # Determine result of Step 7
        if sub_test_failed:
            asserts.fail("Failed step 7 !")

        # Step 8: Validate sessions have not changed by doing a read on NodeLabel from all clients
        logging.info("Step 8: Read back NodeLabel directly from all clients")
        for sub_idx, client in enumerate(client_list):
            logging.info("Reading NodeLabel (%d/%d) from controller node %s" %
                         (sub_idx + 1, len(client_list), client.name))

            label_readback = await self.read_single_attribute(
                client,
                node_id=self.dut_node_id,
                endpoint=0,
                attribute=Clusters.Basic.Attributes.NodeLabel)
            asserts.assert_equal(label_readback, AFTER_LABEL)

            # TODO: Compare before/after session IDs. Requires more native changes, and the
            #       subscription method above is good enough, we think.

        # Step 9: Fill user label list
        if has_user_labels and not skip_user_label_cluster_steps:
            await self.fill_user_label_list(dev_ctrl, self.dut_node_id)
        else:
            logging.info(
                "Step 9: Skipped (UserLabel cluster not present, or steps explicitly disabled)")