    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.rdomanager_oscplugin.baremetal()

        auth_token = self.app.client_manager.auth_ref.auth_token

        node_uuids = []

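        # Introspection requires nodes to be in the "manageable" provision
        # state, so move every node that is currently "available" first.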
        print("Setting available nodes to manageable...")
        self.log.debug("Moving available nodes to manageable state.")
        available_nodes = [node for node in client.node.list()
                           if node.provision_state == "available"]
        for uuid in utils.set_nodes_state(client, available_nodes, 'manage',
                                          'manageable'):
            self.log.debug("Node {0} has been set to manageable.".format(uuid))

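        # Kick off introspection on each registered node, recording the UUIDs
        # so the results can be collected once discovery finishes.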
        for node in client.node.list():

            node_uuids.append(node.uuid)

            print("Starting introspection of node: {0}".format(node.uuid))
            discoverd_client.introspect(
                node.uuid,
                base_url=parsed_args.discoverd_url,
                auth_token=auth_token)

            # NOTE(dtantsur): PXE firmware on virtual machines misbehaves when
            # a lot of nodes start DHCPing simultaneously: it ignores NACK from
            # DHCP server, tries to get the same address, then times out. Work
            # around it by using sleep, anyway introspection takes much longer.
            time.sleep(5)

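        # Block until discoverd reports a result (success or error) for every
        # node that was introspected above.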
        print("Waiting for discovery to finish...")
        for uuid, status in utils.wait_for_node_discovery(
                discoverd_client, auth_token, parsed_args.discoverd_url,
                node_uuids):
            if status['error'] is None:
                print("Discovery for UUID {0} finished successfully."
                      .format(uuid))
            else:
                print("Discovery for UUID {0} finished with error: {1}"
                      .format(uuid, status['error']))

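        # Return the introspected nodes to the "available" provision state.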
        print("Setting manageable nodes to available...")

        self.log.debug("Moving manageable nodes to available state.")
        manageable_nodes = [node for node in client.node.list()
                            if node.provision_state == "manageable"]
        for uuid in utils.set_nodes_state(
                client, manageable_nodes, 'provide', 'available',
                skipped_states=("available", "active")):
            print("Node {0} has been set to available.".format(uuid))

        print("Discovery completed.")
    def test_set_nodes_state(self, wait_for_state_mock):

        wait_for_state_mock.return_value = True
        bm_client = mock.Mock()

        # One node already deployed, one in the manageable state after
        # introspection.
        nodes = [
            mock.Mock(uuid="ABCDEFGH", provision_state="active"),
            mock.Mock(uuid="IJKLMNOP", provision_state="manageable")
        ]

        skipped_states = ('active', 'available')
        uuids = list(utils.set_nodes_state(bm_client, nodes, 'provide',
                                           'available', skipped_states))

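        # Only the "manageable" node should have been transitioned; the
        # "active" node falls in skipped_states and must be left untouched.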
        bm_client.node.set_provision_state.assert_has_calls([
            mock.call('IJKLMNOP', 'provide'),
        ])

        self.assertEqual(uuids, ['IJKLMNOP', ])