Example #1
    def message(cls, task, nodes_to_provisioning):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(
                task.cluster, nodes_to_provisioning)

        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                continue

            admin_net_id = objects.Node.get_network_manager(
                node
            ).get_admin_network_group_id()

            TaskHelper.prepare_syslog_dir(node, admin_net_id)

        return make_astute_message(
            'provision',
            'provision_resp',
            {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster
            }
        )
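
make_astute_message itself does not appear in these snippets. Judging from the call above, this older three-argument variant takes the RPC method name, the response queue, and the args dict, with 'task_uuid' already supplied by the caller. A minimal sketch, assuming it does nothing more than wrap those into the plain dict that Examples #5-#7 build by hand:

    def make_astute_message(method, respond_to, args):
        # Hypothetical sketch: mirrors the literal message dicts
        # built in Examples #5-#7; the caller is responsible for
        # putting 'task_uuid' into args.
        return {
            'method': method,
            'respond_to': respond_to,
            'args': args,
        }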
Example #2
    def message(cls, task, nodes_to_provisioning):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        task = objects.Task.get_by_uid(
            task.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        objects.NodeCollection.lock_nodes(nodes_to_provisioning)
        serialized_cluster = provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)

        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                continue

            admin_net_id = objects.Node.get_network_manager(
                node
            ).get_admin_network_group_id(node.id)

            TaskHelper.prepare_syslog_dir(node, admin_net_id)

        rpc_message = make_astute_message(
            task,
            'provision',
            'provision_resp',
            {
                'provisioning_info': serialized_cluster
            }
        )
        db().commit()
        return rpc_message
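
Compared to Example #1, this variant re-reads the task with lock_for_update=True, locks the node rows before serializing, and commits only after the RPC message is built. lock_nodes is not shown here; a rough sketch of the SELECT ... FOR UPDATE pattern it implies, assuming a reasonably recent SQLAlchemy (the names are illustrative, not the actual Nailgun code):

    def lock_nodes_sketch(session, Node, node_ids):
        # Hypothetical: take row locks so that two tasks cannot
        # serialize and provision the same nodes concurrently. The
        # locks are held until the surrounding transaction ends,
        # which is why Example #2 finishes with db().commit().
        return (session.query(Node)
                .filter(Node.id.in_(node_ids))
                .order_by(Node.id)  # stable lock order avoids deadlocks
                .with_for_update()
                .all())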
Example #3
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # We need to assign admin IPs
        # and only after that prepare syslog
        # directories
        task.cluster.prepare_for_provisioning()

        for node in nodes:
            if USE_FAKE:
                continue

            if node.offline:
                raise errors.NodeOffline(
                    u'Node "%s" is offline.'
                    ' Remove it from environment and try again.' %
                    node.full_name)

            TaskHelper.prepare_syslog_dir(node)

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(task.cluster)

        message = {
            "method": "provision",
            "respond_to": "provision_resp",
            "args": {"task_uuid": task.uuid, "provisioning_info": serialized_cluster},
        }

        return message
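
prepare_for_provisioning must run before the loop because, as the comment above notes, the syslog directory layout depends on the node's admin-network address. The method body is not shown; a sketch of what it plausibly does, based on the explicit assign_admin_ips calls in Examples #10 and #11 (the function name and signature below are assumptions):

    def prepare_for_provisioning_sketch(cluster, netmanager):
        # Hypothetical: assign one admin-network IP per discovered
        # interface for every node, as Examples #10/#11 do inline.
        for node in cluster.nodes:
            netmanager.assign_admin_ips(
                node.id, len(node.meta.get('interfaces', [])))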
Example #4
    def message(cls, task, nodes_to_provisioning):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(
                task.cluster, nodes_to_provisioning)

        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                continue

            TaskHelper.prepare_syslog_dir(node)

        return make_astute_message('provision', 'provision_resp', {
            'task_uuid': task.uuid,
            'provisioning_info': serialized_cluster
        })
Example #5
    def message(cls, task, nodes_to_provisioning):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(
                task.cluster, nodes_to_provisioning)

        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                continue

            TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster}}

        return message
Example #6
    def message(cls, task, nodes_to_provisioning):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        TaskHelper.raise_if_node_offline(nodes_to_provisioning)
        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(
                task.cluster, nodes_to_provisioning)

        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                continue

            TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster}}

        return message
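
Example #6 replaces the inline offline check of Examples #3 and #7 with a single TaskHelper.raise_if_node_offline call. Its body is not shown; a sketch consistent with the inline version, extended to report all offline nodes at once, would be:

    @classmethod
    def raise_if_node_offline(cls, nodes):
        # Hypothetical sketch mirroring the per-node check in
        # Examples #3 and #7.
        offline = [n.full_name for n in nodes if n.offline]
        if offline:
            raise errors.NodeOffline(
                u'Nodes "%s" are offline.'
                ' Remove them from environment and try again.' %
                ', '.join(offline))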
Example #7
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # We need to assign admin IPs
        # and only after that prepare syslog
        # directories
        task.cluster.prepare_for_provisioning()

        for node in nodes:
            if USE_FAKE:
                continue

            if node.offline:
                raise errors.NodeOffline(
                    u'Node "%s" is offline.'
                    ' Remove it from environment and try again.' %
                    node.full_name)

            TaskHelper.prepare_syslog_dir(node)

            node.status = 'provisioning'
            db().commit()

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(task.cluster)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster
            }
        }

        return message
Example #8
    def message(cls, task, nodes_to_provisioning):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        task = objects.Task.get_by_uid(task.id,
                                       fail_if_not_found=True,
                                       lock_for_update=True)
        objects.NodeCollection.lock_nodes(nodes_to_provisioning)
        serialized_cluster = provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)

        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                continue

            admin_net_id = objects.Node.get_network_manager(
                node).get_admin_network_group_id(node.id)

            TaskHelper.prepare_syslog_dir(node, admin_net_id)

        rpc_message = make_astute_message(
            task, cls._get_provision_method(task.cluster), 'provision_resp',
            {'provisioning_info': serialized_cluster})
        db().commit()
        return rpc_message
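
Unlike Example #2, the RPC method name is no longer the literal 'provision': cls._get_provision_method(task.cluster) chooses it per cluster, presumably dispatching on how the cluster is configured (for instance, classic cobbler provisioning versus image-based provisioning). The helper is not included here; a heavily hedged sketch, with the flag name invented purely for illustration:

    @classmethod
    def _get_provision_method(cls, cluster):
        # Hypothetical: 'use_image_provisioning' is an assumed
        # flag, not a real Nailgun attribute.
        if getattr(cluster, 'use_image_provisioning', False):
            return 'image_provision'
        return 'provision'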
Example #9
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # We need to assign admin IPs
        # and only after that prepare syslog
        # directories
        task.cluster.prepare_for_provisioning()

        for node in nodes:
            if USE_FAKE:
                continue

            if node.offline:
                raise errors.NodeOffline(
                    u'Node "%s" is offline.'
                    ' Remove it from environment and try again.' %
                    node.full_name)

            TaskHelper.prepare_syslog_dir(node)

            node.status = 'provisioning'
            db().commit()

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(task.cluster)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster}}

        return message
Example #10
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        # this variable is used to set 'auth_key' in cobbler ks_meta
        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        netmanager = NetworkManager()

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
        # TODO: For now we send node data to the orchestrator
        # in a cobbler-oriented format. In the future we
        # need to use a more abstract data structure.
        nodes_data = []
        for node in nodes:
            if not node.online:
                if not USE_FAKE:
                    raise Exception(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
                else:
                    logger.warning(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )

            node_data = {
                'profile': settings.COBBLER_PROFILE,
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': node.ip,
                'name': TaskHelper.make_slave_name(node.id, node.role),
                'hostname': node.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'ks_spaces': "\"%s\"" % json.dumps(
                        node.attributes.volumes).replace("\"", "\\\"")
                }
            }

            if node.status == "discover":
                logger.info(
                    "Node %s seems booted with bootstrap image",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
            else:
                # If it's not in discover, we expect it to be booted
                #   into the target system.
                # TODO: Get rid of expectations!
                logger.info(
                    "Node %s seems booted with real system",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_SSH_KEY

            # FIXME: move this code (updating) into receiver.provision_resp
            if not USE_FAKE:
                node.status = "provisioning"
                orm().add(node)
                orm().commit()

            # Here we assign admin network IPs for the node:
            # one IP for every node interface
            netmanager.assign_admin_ips(
                node.id,
                len(node.meta.get('interfaces', []))
            )
            admin_net_id = netmanager.get_admin_network_id()
            admin_ips = set([i.ip_addr for i in orm().query(IPAddr).
                            filter_by(node=node.id).
                            filter_by(network=admin_net_id)])
            for i in node.meta.get('interfaces', []):
                if 'interfaces' not in node_data:
                    node_data['interfaces'] = {}
                node_data['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                # The interfaces_extra field in cobbler ks_meta
                # carries extra data for network interface
                # configuration; it is used by a cobbler snippet.
                # For example, the cobbler interface model does not
                # have a 'peerdns' field, but we need this field
                # to be configured, so we use the interfaces_extra
                # branch to set this unsupported field.
                if 'interfaces_extra' not in node_data:
                    node_data['interfaces_extra'] = {}
                node_data['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                # We want the node to be able to PXE boot via any of
                # its interfaces. That is why we add all discovered
                # interfaces into the cobbler system. But we want the
                # assigned fqdn to be resolved into one IP address,
                # because we don't completely support multi-interface
                # configuration yet.
                if i['mac'] == node.mac:
                    node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
                    node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'

            nodes_data.append(node_data)
            if not USE_FAKE:
                TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': nodes_data
            }
        }
        return message
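
The densest line in the ks_meta block is ks_spaces: the node's volume layout is JSON-encoded, every double quote is backslash-escaped, and the result is wrapped in literal quotes so that it survives being embedded in a quoted cobbler ks_meta string. A small self-contained illustration of the escaping:

    import json

    volumes = [{'size': 10240}]
    ks_spaces = "\"%s\"" % json.dumps(volumes).replace("\"", "\\\"")
    # print(ks_spaces) -> "[{\"size\": 10240}]"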
Example #11
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        task_uuid = task.uuid
        cluster_id = task.cluster.id
        cluster_attrs = task.cluster.attributes.merged_attrs_values()

        netmanager = NetworkManager()
        nodes = orm().query(Node).filter_by(
            cluster_id=task.cluster.id,
            pending_deletion=False).order_by(Node.id)

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        nodes_to_provision = []

        # FIXME: why can't we use the needs_reprovision and
        # pending_addition attributes of the node to construct a
        # valid list of nodes which need to be provisioned, instead
        # of using this ugly loop?
        for node in nodes:
            if not node.online:
                if not USE_FAKE:
                    raise Exception(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
                else:
                    logger.warning(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
            if node.status in ('discover', 'provisioning') or \
                    (node.status == 'error' and
                     node.error_type == 'provision'):
                nodes_to_provision.append(node)

        # TODO: For now we send node data to the orchestrator
        # in a cobbler-oriented format. In the future we
        # need to use a more abstract data structure.
        nodes_data = []
        for node in nodes_to_provision:
            node_data = {
                'profile': settings.COBBLER_PROFILE,
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': node.ip,
                'name': TaskHelper.make_slave_name(node.id, node.role),
                'hostname': node.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'ks_spaces': "\"%s\"" % json.dumps(
                        node.attributes.volumes).replace("\"", "\\\"")
                }
            }

            if node.status == "discover":
                logger.info(
                    "Node %s seems booted with bootstrap image",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
            else:
                # If it's not in discover, we expect it to be booted
                #   into the target system.
                # TODO: Get rid of expectations!
                logger.info(
                    "Node %s seems booted with real system",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_SSH_KEY

            # FIXME: move this code (updating) into receiver.provision_resp
            if not USE_FAKE:
                node.status = "provisioning"
                orm().add(node)
                orm().commit()

            # Here we assign admin network IPs for the node:
            # one IP for every node interface
            netmanager.assign_admin_ips(
                node.id,
                len(node.meta.get('interfaces', []))
            )
            admin_net_id = netmanager.get_admin_network_id()
            admin_ips = set([i.ip_addr for i in orm().query(IPAddr).
                            filter_by(node=node.id).
                            filter_by(network=admin_net_id)])
            for i in node.meta.get('interfaces', []):
                if 'interfaces' not in node_data:
                    node_data['interfaces'] = {}
                node_data['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                # The interfaces_extra field in cobbler ks_meta
                # carries extra data for network interface
                # configuration; it is used by a cobbler snippet.
                # For example, the cobbler interface model does not
                # have a 'peerdns' field, but we need this field
                # to be configured, so we use the interfaces_extra
                # branch to set this unsupported field.
                if 'interfaces_extra' not in node_data:
                    node_data['interfaces_extra'] = {}
                node_data['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                # We want the node to be able to PXE boot via any of
                # its interfaces. That is why we add all discovered
                # interfaces into the cobbler system. But we want the
                # assigned fqdn to be resolved into one IP address,
                # because we don't completely support multi-interface
                # configuration yet.
                if i['mac'] == node.mac:
                    node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
                    node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'

            nodes_data.append(node_data)
            if not USE_FAKE:
                TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': nodes_data
            }
        }
        return message
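
The FIXME above asks why the candidate list is built with a Python loop instead of a query. Leaving aside the offline check (which inspects every cluster node), the selection rule is explicit in the loop and could be expressed directly as a filter, roughly like this sketch assuming SQLAlchemy and the column names used above:

    from sqlalchemy import and_, or_

    nodes_to_provision = orm().query(Node).filter(
        Node.cluster_id == cluster_id,
        Node.pending_deletion == False,  # noqa: E712 (SQL comparison)
        or_(
            Node.status.in_(('discover', 'provisioning')),
            and_(Node.status == 'error',
                 Node.error_type == 'provision'),
        )
    ).order_by(Node.id).all()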