Example #1
    def update_nodes(cls, instance, nodes_ids):
        """Update Cluster nodes by specified node IDs.
        Nodes with specified IDs will replace existing ones in Cluster

        :param instance: Cluster instance
        :param nodes_ids: list of nodes ids
        :returns: None
        """

        # TODO(NAME): separate nodes
        # for deletion and addition by set().
        new_nodes = []
        if nodes_ids:
            new_nodes = db().query(models.Node).filter(
                models.Node.id.in_(nodes_ids))

        nodes_to_remove = [n for n in instance.nodes if n not in new_nodes]
        nodes_to_add = [n for n in new_nodes if n not in instance.nodes]

        for node in nodes_to_add:
            if not node.online:
                raise errors.NodeOffline(u"Cannot add offline node "
                                         u"'{0}' to environment".format(
                                             node.id))

        map(instance.nodes.remove, nodes_to_remove)
        map(instance.nodes.append, nodes_to_add)

        net_manager = cls.get_network_manager(instance)
        map(net_manager.clear_assigned_networks, nodes_to_remove)
        map(net_manager.clear_bond_configuration, nodes_to_remove)
        cls.replace_provisioning_info_on_nodes(instance, [], nodes_to_remove)
        cls.replace_deployment_info_on_nodes(instance, [], nodes_to_remove)
        map(net_manager.assign_networks_by_default, nodes_to_add)
        db().flush()
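
A minimal caller sketch, assuming the classmethod above lives on nailgun's `Cluster` object and that the `db()` session factory works as in the examples; the `get_by_uid` lookup, cluster id, and node ids are illustrative assumptions:

    # Hypothetical usage: replace the cluster's node set in one call.
    from nailgun.objects import Cluster

    cluster = Cluster.get_by_uid(1)           # assumed lookup helper
    Cluster.update_nodes(cluster, [3, 5, 8])  # nodes 3, 5, 8 become the set
    db().commit()  # update_nodes() only flushes; the caller owns the commit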
Example #2
    def update_nodes(cls, instance, nodes_ids):
        """Update Cluster nodes by specified node IDs

        Nodes with specified IDs will replace existing ones in Cluster

        :param instance: Cluster instance
        :param nodes_ids: list of nodes ids
        :returns: None
        """

        # TODO(NAME): separate nodes
        # for deletion and addition by set().
        new_nodes = []
        if nodes_ids:
            new_nodes = db().query(models.Node).filter(
                models.Node.id.in_(nodes_ids)
            )

        nodes_to_remove = [n for n in instance.nodes
                           if n not in new_nodes]
        nodes_to_add = [n for n in new_nodes
                        if n not in instance.nodes]

        for node in nodes_to_add:
            if not node.online:
                raise errors.NodeOffline(
                    u"Cannot add offline node "
                    u"'{0}' to environment".format(node.id)
                )

        # we should reset the hostname to its default value to guarantee
        # hostname uniqueness for nodes outside clusters
        from nailgun.objects import Node
        for node in nodes_to_remove:
            node.hostname = Node.default_slave_name(node)

        map(instance.nodes.remove, nodes_to_remove)
        map(instance.nodes.append, nodes_to_add)

        net_manager = cls.get_network_manager(instance)
        map(
            net_manager.clear_assigned_networks,
            nodes_to_remove
        )
        map(
            net_manager.clear_bond_configuration,
            nodes_to_remove
        )
        cls.replace_provisioning_info_on_nodes(instance, [], nodes_to_remove)
        cls.replace_deployment_info_on_nodes(instance, [], nodes_to_remove)
        from nailgun.objects import NodeCollection
        NodeCollection.reset_network_template(nodes_to_remove)

        map(
            net_manager.assign_networks_by_default,
            nodes_to_add
        )
        cls.update_nodes_network_template(instance, nodes_to_add)
        db().flush()
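
Both examples above call `map()` purely for its side effects, which is only eager under Python 2; under Python 3 the unconsumed map object would silently do nothing. A Python 3-safe rewrite of the membership step, keeping the same names:

    # Explicit loops run eagerly on both Python 2 and Python 3.
    for node in nodes_to_remove:
        instance.nodes.remove(node)
    for node in nodes_to_add:
        instance.nodes.append(node)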
Example #3
    def raise_if_node_offline(cls, nodes):
        offline_nodes = filter(lambda n: n.offline, nodes)

        if offline_nodes:
            node_names = ','.join(map(lambda n: n.full_name, offline_nodes))
            raise errors.NodeOffline(
                u'Nodes "%s" are offline.'
                ' Remove them from environment and try again.' % node_names)
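
This example also relies on Python 2, where `filter()` returns a list. Under Python 3 it returns a lazy iterator that is always truthy, so the guard would fire even with no offline nodes. A sketch that behaves identically on both, with the original names:

    def raise_if_node_offline(cls, nodes):
        # A list comprehension materializes the result, so the truthiness
        # check and the join below see the same concrete list.
        offline_nodes = [n for n in nodes if n.offline]

        if offline_nodes:
            node_names = ','.join(n.full_name for n in offline_nodes)
            raise errors.NodeOffline(
                u'Nodes "%s" are offline.'
                ' Remove them from environment and try again.' % node_names)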
Example #4
    def _check_nodes_are_online(cls, task):
        offline_nodes = db().query(Node).\
            filter(Node.cluster == task.cluster).\
            filter_by(online=False).\
            filter_by(pending_deletion=False).\
            filter(not_(Node.status.in_(['ready'])))

        if offline_nodes.count():
            node_names = ','.join(map(lambda n: n.full_name, offline_nodes))
            raise errors.NodeOffline(u'Nodes "{0}" are offline.'
                                     ' Remove them from environment '
                                     'and try again.'.format(node_names))
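
Note that `Query.count()` issues its own `SELECT COUNT(*)`, and building the joined names then re-runs the full query. A sketch of the final lines that fetches once:

    # .all() runs the query a single time and returns a plain list.
    offline_nodes = offline_nodes.all()
    if offline_nodes:
        node_names = ','.join(n.full_name for n in offline_nodes)
        raise errors.NodeOffline(u'Nodes "{0}" are offline.'
                                 ' Remove them from environment '
                                 'and try again.'.format(node_names))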
Example #5
    def _check_nodes_are_online(cls, task):
        offline_nodes = db().query(Node).\
            filter(Node.cluster == task.cluster).\
            filter_by(online=False).\
            filter_by(pending_deletion=False)

        offline_nodes_not_ready = [n for n in offline_nodes
                                   if n.status != consts.NODE_STATUSES.ready]
        nodes_to_deploy = TaskHelper.nodes_to_deploy(task.cluster)
        offline_nodes_to_redeploy = [
            n for n in offline_nodes
            if n.status == consts.NODE_STATUSES.ready and n in nodes_to_deploy]

        if offline_nodes_not_ready or offline_nodes_to_redeploy:
            node_names = ','.join(
                map(lambda n: n.full_name,
                    offline_nodes_not_ready + offline_nodes_to_redeploy))
            raise errors.NodeOffline(
                u'Nodes "{0}" are offline.'
                ' Remove them from environment '
                'and try again.'.format(node_names))
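
Here `offline_nodes` is a SQLAlchemy `Query` consumed by two separate comprehensions, and iterating a `Query` re-executes the SQL each time. Appending `.all()` materializes it once:

    offline_nodes = db().query(Node).\
        filter(Node.cluster == task.cluster).\
        filter_by(online=False).\
        filter_by(pending_deletion=False).\
        all()  # a plain list; both comprehensions below reuse it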
Example #6
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # We need to assign admin IPs and only
        # after that prepare the syslog directories
        task.cluster.prepare_for_provisioning()

        for node in nodes:
            if USE_FAKE:
                continue

            if node.offline:
                raise errors.NodeOffline(
                    u'Node "%s" is offline.'
                    ' Remove it from environment and try again.' %
                    node.full_name)

            TaskHelper.prepare_syslog_dir(node)

            node.status = 'provisioning'
            db().commit()

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(task.cluster)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster
            }
        }

        return message
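
One subtlety above: the `or` fallback means a falsy (e.g. empty) `replaced_provisioning_info` also triggers fresh serialization. An explicit `None` check avoids that edge case (a sketch of the same assignment):

    # Only reserialize when no replacement info was set at all, so an
    # intentionally empty replacement dict is preserved.
    serialized_cluster = task.cluster.replaced_provisioning_info
    if serialized_cluster is None:
        serialized_cluster = provisioning_serializers.serialize(task.cluster)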
Example #7
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        # this variable is used to set 'auth_key' in cobbler ks_meta
        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        netmanager = NetworkManager()

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
        # TODO: For now we send nodes data to the orchestrator,
        # which is cobbler-oriented, but in the future we need
        # to use a more abstract data structure.
        nodes_data = []
        for node in nodes:
            if not node.online:
                if not USE_FAKE:
                    raise errors.NodeOffline(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id))
                else:
                    logger.warning(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id))

            cobbler_profile = cluster_attrs['cobbler']['profile']

            node_data = {
                'profile': cobbler_profile,
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': node.ip,
                'name': TaskHelper.make_slave_name(node.id, node.role),
                'hostname': node.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'ks_spaces': "\"%s\"" % json.dumps(
                        node.attributes.volumes).replace("\"", "\\\"")
                }
            }

            if node.status == "discover":
                logger.info("Node %s seems booted with bootstrap image",
                            node.id)
                node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
            else:
                # If it's not in discover, we expect it to be booted
                # in the target system.
                # TODO: Get rid of expectations!
                logger.info("Node %s seems booted with real system", node.id)
                node_data['power_pass'] = settings.PATH_TO_SSH_KEY

            # FIXME: move this code (updating) into receiver.provision_resp
            if not USE_FAKE:
                node.status = "provisioning"
                db().add(node)
                db().commit()

            # here we assign admin network IPs for the node:
            # one IP for every node interface
            netmanager.assign_admin_ips(node.id,
                                        len(node.meta.get('interfaces', [])))
            admin_net_id = netmanager.get_admin_network_id()
            admin_ips = set([
                i.ip_addr for i in db().query(IPAddr).filter_by(
                    node=node.id).filter_by(network=admin_net_id)
            ])
            for i in node.meta.get('interfaces', []):
                if 'interfaces' not in node_data:
                    node_data['interfaces'] = {}
                node_data['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                # interfaces_extra field in cobbler ks_meta
                # means some extra data for network interfaces
                # configuration. It is used by cobbler snippet.
                # For example, cobbler interface model does not
                # have 'peerdns' field, but we need this field
                # to be configured. So we use interfaces_extra
                # branch in order to set this unsupported field.
                if 'interfaces_extra' not in node_data:
                    node_data['interfaces_extra'] = {}
                node_data['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                # We want the node to be able to PXE boot via any of its
                # interfaces. That is why we add all discovered
                # interfaces into the cobbler system. But we want the
                # assigned fqdn to resolve to one IP address, because
                # we don't completely support multi-interface
                # configuration yet.
                if i['mac'] == node.mac:
                    node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
                    node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'

            nodes_data.append(node_data)
            if not USE_FAKE:
                TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': nodes_data
            }
        }
        return message
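
The dict returned by `message()` is what the task manager hands to the AMQP layer. A hypothetical dispatch, assuming nailgun's `rpc.cast` helper and a 'naily' consumer queue (neither is shown in the examples above):

    # Hypothetical: build the provisioning message and cast it to
    # the orchestrator queue.
    from nailgun import rpc

    message = ProvisionTask.message(task)
    rpc.cast('naily', message)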