Example #1
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)

        if parsed_args.json or parsed_args.file_in.name.endswith('.json'):
            nodes_config = json.load(parsed_args.file_in)
        elif parsed_args.csv or parsed_args.file_in.name.endswith('.csv'):
            nodes_config = _csv_to_nodes_dict(parsed_args.file_in)
        elif parsed_args.file_in.name.endswith('.yaml'):
            nodes_config = yaml.safe_load(parsed_args.file_in)
        else:
            raise exceptions.InvalidConfiguration(
                _("Invalid file extension for %s, must be json, yaml or csv") %
                parsed_args.file_in.name)

        if 'nodes' in nodes_config:
            nodes_config = nodes_config['nodes']

        client = self.app.client_manager.baremetal
        if parsed_args.initial_state == "enroll":
            api_version = client.http_client.os_ironic_api_version
            if [int(part) for part in api_version.split('.')] < [1, 11]:
                raise exceptions.InvalidConfiguration(
                    _("OS_BAREMETAL_API_VERSION must be >=1.11 for use of "
                      "'enroll' provision state; currently %s") % api_version)

        for node in nodes_config:
            caps = utils.capabilities_to_dict(node.get('capabilities', {}))
            caps.setdefault('boot_option', parsed_args.instance_boot_option)
            node['capabilities'] = utils.dict_to_capabilities(caps)

        new_nodes = nodes.register_all_nodes(
            parsed_args.service_host,
            nodes_config,
            client=client,
            keystone_client=self.app.client_manager.identity,
            glance_client=self.app.client_manager.image,
            kernel_name=(parsed_args.deploy_kernel
                         if not parsed_args.no_deploy_image else None),
            ramdisk_name=(parsed_args.deploy_ramdisk
                          if not parsed_args.no_deploy_image else None))

        if parsed_args.initial_state == "available":
            manageable_node_uuids = list(
                utils.set_nodes_state(
                    client,
                    new_nodes,
                    "manage",
                    "manageable",
                    skipped_states={'manageable', 'available'}))
            manageable_nodes = [
                n for n in new_nodes if n.uuid in manageable_node_uuids
            ]
            list(
                utils.set_nodes_state(client,
                                      manageable_nodes,
                                      "provide",
                                      "available",
                                      skipped_states={'available'}))
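Example #1 accepts a nodes definition file in JSON, YAML or CSV and unwraps an optional top-level "nodes" key before registering the entries. A minimal, purely illustrative input that this parsing logic would accept could be generated as below; real entries also need driver and power-management details that are outside the scope of the snippet above.

    # Hypothetical illustration only; field names other than "nodes" and
    # "capabilities" are not shown in the example above.
    import json

    nodes_config = {
        "nodes": [
            # capabilities are given as a "key:value" string, which
            # utils.capabilities_to_dict() is expected to parse
            {"capabilities": "profile:compute"},
        ]
    }

    with open("nodes.json", "w") as file_in:
        json.dump(nodes_config, file_in)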
Example #2
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.baremetal
        inspector_client = self.app.client_manager.baremetal_introspection

        node_uuids = []

        print("Setting nodes for introspection to manageable...")
        self.log.debug("Moving available/enroll nodes to manageable state.")
        available_nodes = utils.nodes_in_states(client,
                                                ("available", "enroll"))
        for uuid in utils.set_nodes_state(client, available_nodes, 'manage',
                                          'manageable'):
            self.log.debug("Node {0} has been set to manageable.".format(uuid))

        manageable_nodes = utils.nodes_in_states(client, ("manageable",))
        for node in manageable_nodes:
            node_uuids.append(node.uuid)

            print("Starting introspection of node: {0}".format(node.uuid))
            inspector_client.introspect(node.uuid)

            # NOTE(dtantsur): PXE firmware on virtual machines misbehaves when
            # a lot of nodes start DHCPing simultaneously: it ignores NACK from
            # DHCP server, tries to get the same address, then times out. Work
            # around it by using sleep, anyway introspection takes much longer.
            time.sleep(5)

        print("Waiting for introspection to finish...")
        errors = []
        successful_node_uuids = set()
        for uuid, status in utils.wait_for_node_introspection(
                inspector_client, node_uuids):
            if status['error'] is None:
                print("Introspection for UUID {0} finished successfully."
                      .format(uuid))
                successful_node_uuids.add(uuid)
            else:
                print("Introspection for UUID {0} finished with error: {1}"
                      .format(uuid, status['error']))
                errors.append("%s: %s" % (uuid, status['error']))

        print("Setting manageable nodes to available...")

        self.log.debug("Moving manageable nodes to available state.")
        successful_nodes = [n for n in manageable_nodes
                            if n.uuid in successful_node_uuids]
        for uuid in utils.set_nodes_state(
                client, successful_nodes, 'provide',
                'available', skipped_states=("available", "active")):
            print("Node {0} has been set to available.".format(uuid))

        if errors:
            raise exceptions.IntrospectionError(
                "Introspection completed with errors:\n%s" % '\n'.join(errors))
        else:
            print("Introspection completed.")
Example #3
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.baremetal
        inspector_client = self.app.client_manager.baremetal_introspection

        node_uuids = []

        print("Setting nodes for introspection to manageable...")
        self.log.debug("Moving available/enroll nodes to manageable state.")
        available_nodes = utils.nodes_in_states(client,
                                                ("available", "enroll"))
        for uuid in utils.set_nodes_state(client, available_nodes, 'manage',
                                          'manageable'):
            self.log.debug("Node {0} has been set to manageable.".format(uuid))

        manageable_nodes = utils.nodes_in_states(client, ("manageable", ))
        for node in manageable_nodes:
            node_uuids.append(node.uuid)

            print("Starting introspection of node: {0}".format(node.uuid))
            inspector_client.introspect(node.uuid)

        print("Waiting for introspection to finish...")
        errors = []
        successful_node_uuids = set()
        results = inspector_client.wait_for_finish(node_uuids)
        for uuid, status in results.items():
            if status['error'] is None:
                print(
                    "Introspection for UUID {0} finished successfully.".format(
                        uuid))
                successful_node_uuids.add(uuid)
            else:
                print("Introspection for UUID {0} finished with error: {1}".
                      format(uuid, status['error']))
                errors.append("%s: %s" % (uuid, status['error']))

        print("Setting manageable nodes to available...")

        self.log.debug("Moving manageable nodes to available state.")
        successful_nodes = [
            n for n in manageable_nodes if n.uuid in successful_node_uuids
        ]
        for uuid in utils.set_nodes_state(client,
                                          successful_nodes,
                                          'provide',
                                          'available',
                                          skipped_states=("available",
                                                          "active")):
            print("Node {0} has been set to available.".format(uuid))

        if errors:
            raise exceptions.IntrospectionError(
                "Introspection completed with errors:\n%s" % '\n'.join(errors))
        else:
            print("Introspection completed.")
Example #4
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)

        if parsed_args.json or parsed_args.file_in.name.endswith('.json'):
            nodes_config = json.load(parsed_args.file_in)
        elif parsed_args.csv or parsed_args.file_in.name.endswith('.csv'):
            nodes_config = _csv_to_nodes_dict(parsed_args.file_in)
        elif parsed_args.file_in.name.endswith('.yaml'):
            nodes_config = yaml.safe_load(parsed_args.file_in)
        else:
            raise exceptions.InvalidConfiguration(
                _("Invalid file extension for %s, must be json, yaml or csv") %
                parsed_args.file_in.name)

        if 'nodes' in nodes_config:
            nodes_config = nodes_config['nodes']

        client = self.app.client_manager.baremetal
        if parsed_args.initial_state == "enroll":
            api_version = client.http_client.os_ironic_api_version
            if [int(part) for part in api_version.split('.')] < [1, 11]:
                raise exceptions.InvalidConfiguration(
                    _("OS_BAREMETAL_API_VERSION must be >=1.11 for use of "
                      "'enroll' provision state; currently %s") % api_version)

        for node in nodes_config:
            caps = utils.capabilities_to_dict(node.get('capabilities', {}))
            caps.setdefault('boot_option', parsed_args.instance_boot_option)
            node['capabilities'] = utils.dict_to_capabilities(caps)

        new_nodes = nodes.register_all_nodes(
            parsed_args.service_host,
            nodes_config,
            client=client,
            keystone_client=self.app.client_manager.identity,
            glance_client=self.app.client_manager.image,
            kernel_name=(parsed_args.deploy_kernel if not
                         parsed_args.no_deploy_image else None),
            ramdisk_name=(parsed_args.deploy_ramdisk if not
                          parsed_args.no_deploy_image else None))

        if parsed_args.initial_state == "available":
            manageable_node_uuids = list(utils.set_nodes_state(
                client, new_nodes, "manage", "manageable",
                skipped_states={'manageable', 'available'}
            ))
            manageable_nodes = [
                n for n in new_nodes if n.uuid in manageable_node_uuids
            ]
            list(utils.set_nodes_state(
                client, manageable_nodes, "provide", "available",
                skipped_states={'available'}
            ))
Example #5
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.baremetal
        inspector_client = self.app.client_manager.baremetal_introspection

        node_uuids = []

        print("Setting nodes for introspection to manageable...")
        self.log.debug("Moving available/enroll nodes to manageable state.")
        available_nodes = utils.nodes_in_states(client,
                                                ("available", "enroll"))
        for uuid in utils.set_nodes_state(client, available_nodes, 'manage',
                                          'manageable'):
            self.log.debug("Node {0} has been set to manageable.".format(uuid))

        manageable_nodes = utils.nodes_in_states(client, ("manageable",))
        for node in manageable_nodes:
            node_uuids.append(node.uuid)

            print("Starting introspection of node: {0}".format(node.uuid))
            inspector_client.introspect(node.uuid)

        print("Waiting for introspection to finish...")
        errors = []
        successful_node_uuids = set()
        results = inspector_client.wait_for_finish(node_uuids)
        for uuid, status in results.items():
            if status['error'] is None:
                print("Introspection for UUID {0} finished successfully."
                      .format(uuid))
                successful_node_uuids.add(uuid)
            else:
                print("Introspection for UUID {0} finished with error: {1}"
                      .format(uuid, status['error']))
                errors.append("%s: %s" % (uuid, status['error']))

        print("Setting manageable nodes to available...")

        self.log.debug("Moving manageable nodes to available state.")
        successful_nodes = [n for n in manageable_nodes
                            if n.uuid in successful_node_uuids]
        for uuid in utils.set_nodes_state(
                client, successful_nodes, 'provide',
                'available', skipped_states=("available", "active")):
            print("Node {0} has been set to available.".format(uuid))

        if errors:
            raise exceptions.IntrospectionError(
                "Introspection completed with errors:\n%s" % '\n'.join(errors))
        else:
            print("Introspection completed.")
Example #6
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        self.log.warning('This command is deprecated. Please use "openstack '
                         'overcloud node introspect" to introspect manageable '
                         'nodes instead.\n')

        queue_name = str(uuid.uuid4())
        clients = self.app.client_manager
        client = self.app.client_manager.baremetal

        # TODO(d0ugal): We don't yet have a workflow to move from available
        # or enroll to manageable. Once we do, this should be switched over.
        print("Setting nodes for introspection to manageable...")
        self.log.debug("Moving available/enroll nodes to manageable state.")
        available_nodes = utils.nodes_in_states(client, ("available",
                                                         "enroll"))
        for node_uuid in utils.set_nodes_state(client, available_nodes,
                                               'manage', 'manageable'):
            self.log.debug(
                "Node {0} has been set to manageable.".format(node_uuid))

        print("Starting introspection of manageable nodes")
        baremetal.introspect_manageable_nodes(clients, queue_name=queue_name)

        print("Setting manageable nodes to available...")
        self.log.debug("Moving manageable nodes to available state.")

        baremetal.provide_manageable_nodes(clients, queue_name=queue_name)
Example #7
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        self.log.warning('This command is deprecated. Please use "openstack '
                         'overcloud node introspect" to introspect manageable '
                         'nodes instead.\n')

        queue_name = str(uuid.uuid4())
        clients = self.app.client_manager
        client = self.app.client_manager.baremetal

        # TODO(d0ugal): We don't yet have a workflow to move from available
        # or enroll to manageable. Once we do, this should be switched over.
        print("Setting nodes for introspection to manageable...")
        self.log.debug("Moving available/enroll nodes to manageable state.")
        available_nodes = utils.nodes_in_states(client,
                                                ("available", "enroll"))
        for node_uuid in utils.set_nodes_state(client, available_nodes,
                                               'manage', 'manageable'):
            self.log.debug(
                "Node {0} has been set to manageable.".format(node_uuid))

        print("Starting introspection of manageable nodes")
        baremetal.introspect_manageable_nodes(clients, queue_name=queue_name)

        print("Setting manageable nodes to available...")
        self.log.debug("Moving manageable nodes to available state.")

        baremetal.provide_manageable_nodes(clients, queue_name=queue_name)
Example #8
    def test_set_nodes_state(self, wait_for_state_mock):

        wait_for_state_mock.return_value = True
        bm_client = mock.Mock()

        # One node already deployed, one in the manageable state after
        # introspection.
        nodes = [
            mock.Mock(uuid="ABCDEFGH", provision_state="active"),
            mock.Mock(uuid="IJKLMNOP", provision_state="manageable"),
        ]

        skipped_states = ("active", "available")
        uuids = list(utils.set_nodes_state(bm_client, nodes, "provide", "available", skipped_states))

        bm_client.node.set_provision_state.assert_has_calls([mock.call("IJKLMNOP", "provide")])

        self.assertEqual(uuids, ["IJKLMNOP"])
Example #9
    def test_set_nodes_state(self, wait_for_state_mock):

        wait_for_state_mock.return_value = True
        bm_client = mock.Mock()

        # One node already deployed, one in the manageable state after
        # introspection.
        nodes = [
            mock.Mock(uuid="ABCDEFGH", provision_state="active"),
            mock.Mock(uuid="IJKLMNOP", provision_state="manageable")
        ]

        skipped_states = ('active', 'available')
        uuids = list(utils.set_nodes_state(bm_client, nodes, 'provide',
                                           'available', skipped_states))

        bm_client.node.set_provision_state.assert_has_calls([
            mock.call('IJKLMNOP', 'provide'),
        ])

        self.assertEqual(uuids, ['IJKLMNOP', ])
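Taken together, the tests in Examples #8 and #9 pin down the contract of utils.set_nodes_state: nodes whose provision_state is already in skipped_states are left untouched, every other node gets node.set_provision_state(<uuid>, <transition>) followed by a wait for the target state, and the UUIDs of the transitioned nodes are yielded back. A sketch consistent with those tests (the name of the wait helper patched as wait_for_state_mock is an assumption) could be:

    def set_nodes_state(bm_client, nodes, transition, target_state,
                        skipped_states=()):
        """Apply a provision-state transition to nodes, yielding their UUIDs."""
        for node in nodes:
            if node.provision_state in skipped_states:
                # Nodes already in a skipped state (e.g. the "active" node
                # in the tests above) are left alone.
                continue
            bm_client.node.set_provision_state(node.uuid, transition)
            # wait_for_provision_state stands in for the helper mocked out
            # as wait_for_state_mock in the tests; the real name may differ.
            if wait_for_provision_state(bm_client, node.uuid, target_state):
                yield node.uuid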
Example #10
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.tripleoclient.baremetal()

        auth_token = self.app.client_manager.auth_ref.auth_token
        node_uuids = []

        print("Setting available nodes to manageable...")
        self.log.debug("Moving available nodes to manageable state.")
        available_nodes = [node for node in client.node.list()
                           if node.provision_state == "available"]
        for uuid in utils.set_nodes_state(client, available_nodes, 'manage',
                                          'manageable'):
            self.log.debug("Node {0} has been set to manageable.".format(uuid))

        for node in client.node.list():
            if node.provision_state != "manageable":
                continue

            node_uuids.append(node.uuid)

            print("Starting introspection of node: {0}".format(node.uuid))
            inspector_client.introspect(
                node.uuid,
                base_url=parsed_args.inspector_url,
                auth_token=auth_token)

            # NOTE(dtantsur): PXE firmware on virtual machines misbehaves when
            # a lot of nodes start DHCPing simultaneously: it ignores NACK from
            # DHCP server, tries to get the same address, then times out. Work
            # around it by using sleep, anyway introspection takes much longer.
            time.sleep(5)

        print("Waiting for introspection to finish...")
        has_errors = False
        for uuid, status in utils.wait_for_node_introspection(
                inspector_client, auth_token, parsed_args.inspector_url,
                node_uuids):
            if status['error'] is None:
                print("Introspection for UUID {0} finished successfully."
                      .format(uuid))
            else:
                print("Introspection for UUID {0} finished with error: {1}"
                      .format(uuid, status['error']))
                has_errors = True

        clients = self.app.client_manager
        baremetal_client = clients.tripleoclient.baremetal()
        print("Setting manageable nodes to available...")

        self.log.debug("Moving manageable nodes to available state.")
        available_nodes = [node for node in client.node.list()
                           if node.provision_state == "manageable"]
        for uuid in utils.set_nodes_state(
                baremetal_client, available_nodes, 'provide',
                'available', skipped_states=("available", "active")):
            print("Node {0} has been set to available.".format(uuid))

        if has_errors:
            print("Introspection completed with errors.")
        else:
            print("Introspection completed.")
Example #11
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.baremetal
        inspector_client = self.app.client_manager.baremetal_introspection

        node_uuids = []

        print("Setting nodes for introspection to manageable...")
        self.log.debug("Moving available/enroll nodes to manageable state.")
        available_nodes = utils.nodes_in_states(client,
                                                ("available", "enroll"))
        for uuid in utils.set_nodes_state(client, available_nodes, 'manage',
                                          'manageable'):
            self.log.debug("Node {0} has been set to manageable.".format(uuid))

        manageable_nodes = utils.nodes_in_states(client, ("manageable", ))
        for node in manageable_nodes:
            node_uuids.append(node.uuid)

            print("Starting introspection of node: {0}".format(node.uuid))
            inspector_client.introspect(node.uuid)

            # NOTE(dtantsur): PXE firmware on virtual machines misbehaves when
            # a lot of nodes start DHCPing simultaneously: it ignores NACK from
            # DHCP server, tries to get the same address, then times out. Work
            # around it by using sleep, anyway introspection takes much longer.
            time.sleep(5)

        print("Waiting for introspection to finish...")
        errors = []
        successful_node_uuids = set()
        for uuid, status in utils.wait_for_node_introspection(
                inspector_client, node_uuids):
            if status['error'] is None:
                print(
                    "Introspection for UUID {0} finished successfully.".format(
                        uuid))
                successful_node_uuids.add(uuid)
            else:
                print("Introspection for UUID {0} finished with error: {1}".
                      format(uuid, status['error']))
                errors.append("%s: %s" % (uuid, status['error']))

        print("Setting manageable nodes to available...")

        self.log.debug("Moving manageable nodes to available state.")
        successful_nodes = [
            n for n in manageable_nodes if n.uuid in successful_node_uuids
        ]
        for uuid in utils.set_nodes_state(client,
                                          successful_nodes,
                                          'provide',
                                          'available',
                                          skipped_states=("available",
                                                          "active")):
            print("Node {0} has been set to available.".format(uuid))

        if errors:
            raise exceptions.IntrospectionError(
                "Introspection completed with errors:\n%s" % '\n'.join(errors))
        else:
            print("Introspection completed.")
Example #12
    def take_action(self, parsed_args):

        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.tripleoclient.baremetal

        auth_token = self.app.client_manager.auth_ref.auth_token
        node_uuids = []

        print("Setting available nodes to manageable...")
        self.log.debug("Moving available nodes to manageable state.")
        available_nodes = [
            node
            for node in client.node.list(maintenance=False, associated=False)
            if node.provision_state == "available"
        ]
        for uuid in utils.set_nodes_state(client, available_nodes, 'manage',
                                          'manageable'):
            self.log.debug("Node {0} has been set to manageable.".format(uuid))

        for node in client.node.list(maintenance=False, associated=False):
            if node.provision_state != "manageable":
                continue

            node_uuids.append(node.uuid)

            print("Starting introspection of node: {0}".format(node.uuid))
            inspector_client.introspect(node.uuid,
                                        base_url=parsed_args.inspector_url,
                                        auth_token=auth_token)

            # NOTE(dtantsur): PXE firmware on virtual machines misbehaves when
            # a lot of nodes start DHCPing simultaneously: it ignores NACK from
            # DHCP server, tries to get the same address, then times out. Work
            # around it by using sleep, anyway introspection takes much longer.
            time.sleep(5)

        print("Waiting for introspection to finish...")
        has_errors = False
        for uuid, status in utils.wait_for_node_introspection(
                inspector_client, auth_token, parsed_args.inspector_url,
                node_uuids):
            if status['error'] is None:
                print(
                    "Introspection for UUID {0} finished successfully.".format(
                        uuid))
            else:
                print("Introspection for UUID {0} finished with error: {1}".
                      format(uuid, status['error']))
                has_errors = True

        print("Setting manageable nodes to available...")

        self.log.debug("Moving manageable nodes to available state.")
        available_nodes = [
            node
            for node in client.node.list(maintenance=False, associated=False)
            if node.provision_state == "manageable"
        ]
        for uuid in utils.set_nodes_state(client,
                                          available_nodes,
                                          'provide',
                                          'available',
                                          skipped_states=("available",
                                                          "active")):
            print("Node {0} has been set to available.".format(uuid))

        if has_errors:
            print("Introspection completed with errors.")
        else:
            print("Introspection completed.")