Example #1
    def test_deploy_grow_controllers(self):
        cluster = self.create_env(
            nodes_kwargs=[
                {"roles": ["controller"]},
                {"roles": ["controller"], "pending_addition": True},
                {"roles": ["controller"], "pending_addition": True},
            ]
        )

        # We have to build 2 new controllers
        n_nodes = TaskHelper.nodes_to_provision(cluster)
        self.assertEqual(len(n_nodes), 2)

        # All controllers must re-deploy (run puppet)
        r_nodes = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(len(r_nodes), 3)

        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.name, "deploy")

        self.env.wait_ready(supertask)
        self.assertEqual(supertask.status, "ready")

        controllers = self.filter_by_role(cluster.nodes, "controller")
        self.assertEqual(len(controllers), 3)
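
The helper these examples exercise, TaskHelper.nodes_to_provision, is never shown in the snippets themselves. Judging from the test above (only the two pending controllers are counted, while nodes_to_deploy also includes the existing one), a minimal sketch of what it plausibly does is given below; the filter on pending_addition/needs_reprovision is an assumption, not the verbatim fuel-web source.

    # Hedged sketch: select the cluster nodes that still need an OS
    # installed, i.e. freshly added nodes and nodes flagged for
    # re-provisioning. The needs_reprovision attribute is an assumption.
    @classmethod
    def nodes_to_provision(cls, cluster):
        return sorted(
            [n for n in cluster.nodes
             if n.pending_addition or n.needs_reprovision],
            key=lambda n: n.id)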
Example #2
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # We need to assign admin ips
        # and only after that prepare syslog
        # directories
        task.cluster.prepare_for_provisioning()

        for node in nodes:
            if USE_FAKE:
                continue

            if node.offline:
                raise errors.NodeOffline(
                    u'Node "%s" is offline.'
                    ' Remove it from environment and try again.' %
                    node.full_name)

            TaskHelper.prepare_syslog_dir(node)

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(task.cluster)

        message = {
            "method": "provision",
            "respond_to": "provision_resp",
            "args": {"task_uuid": task.uuid, "provisioning_info": serialized_cluster},
        }

        return message
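
The dict returned by message() is only the payload; dispatch happens elsewhere. A hedged sketch of the calling side, assuming an rpc.cast(queue, message) helper and the 'naily' queue name used by fuel-web's task manager (both assumptions, not shown in this snippet):

    # Hedged sketch: build the AMQP payload for the task and hand it
    # to the orchestrator queue. The queue name and the rpc.cast
    # signature are assumptions modeled on fuel-web, not this example.
    message = ProvisionTask.message(task)
    rpc.cast('naily', message)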
Example #3
    def prepare_for_provisioning(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        netmanager = NetworkManager()
        for node in TaskHelper.nodes_to_provision(self):
            netmanager.assign_admin_ips(
                node.id, len(node.meta.get('interfaces', [])))
Example #4
    def prepare_for_provisioning(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        netmanager = NetworkManager()
        nodes = TaskHelper.nodes_to_provision(self)
        TaskHelper.update_slave_nodes_fqdn(nodes)
        for node in nodes:
            netmanager.assign_admin_ips(node.id, len(node.meta.get("interfaces", [])))
Example #5
    def serialize_nodes(cls, cluster):
        """Serialize nodes."""
        nodes_to_provision = TaskHelper.nodes_to_provision(cluster)
        cluster_attrs = cluster.attributes.merged_attrs_values()

        serialized_nodes = []
        for node in nodes_to_provision:
            serialized_node = cls.serialize_node(cluster_attrs, node)
            serialized_nodes.append(serialized_node)

        return serialized_nodes
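
serialize_nodes presumably feeds a cluster-level serialize() that wraps the per-node payloads together with the cobbler engine credentials, mirroring the 'args' structure the message() examples in this listing build. A sketch under that assumption (the method name serialize and its dict layout are inferred, not shown here):

    # Hedged sketch of the enclosing serializer: pair the per-node
    # payloads with the cobbler engine settings referenced in the
    # later examples. The layout is an assumption based on those.
    @classmethod
    def serialize(cls, cluster):
        return {
            'engine': {
                'url': settings.COBBLER_URL,
                'username': settings.COBBLER_USER,
                'password': settings.COBBLER_PASSWORD,
            },
            'nodes': cls.serialize_nodes(cluster),
        }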
Example #6
    def test_deploy_grow_controllers(self):
        cluster = self.create_env(
            nodes_kwargs=[
                {'roles': ['controller']},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True}])

        # We have to build 2 new controllers
        n_nodes = TaskHelper.nodes_to_provision(cluster)
        self.assertEqual(len(n_nodes), 2)

        # All controllers must re-deploy
        r_nodes = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(len(r_nodes), 3)

        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.name, consts.TASK_NAMES.deploy)
        self.assertNotEqual(supertask.status, consts.TASK_STATUSES.error)

        controllers = self.filter_by_role(cluster.nodes, 'controller')
        self.assertEqual(len(controllers), 3)
Example #7
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # We need to assign admin ips
        # and only after that prepare syslog
        # directories
        task.cluster.prepare_for_provisioning()

        for node in nodes:
            if USE_FAKE:
                continue

            if node.offline:
                raise errors.NodeOffline(
                    u'Node "%s" is offline.'
                    ' Remove it from environment and try again.' %
                    node.full_name)

            TaskHelper.prepare_syslog_dir(node)

            node.status = 'provisioning'
            db().commit()

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(task.cluster)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster
            }
        }

        return message
Example #8
    def test_deploy_grow_controllers(self):
        cluster = self.create_env(
            nodes_kwargs=[
                {'roles': ['controller']},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True}])

        # We have to build 2 new controllers
        n_nodes = TaskHelper.nodes_to_provision(cluster)
        self.assertEqual(len(n_nodes), 2)

        # All controllers must re-deploy (run puppet)
        r_nodes = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(len(r_nodes), 3)

        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.name, 'deploy')

        self.env.wait_ready(supertask)
        self.assertEqual(supertask.status, 'ready')

        controllers = self.filter_by_role(cluster.nodes, 'controller')
        self.assertEqual(len(controllers), 3)
Example #9
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # We need to assign admin ips
        # and only after that prepare syslog
        # directories
        task.cluster.prepare_for_provisioning()

        for node in nodes:
            if USE_FAKE:
                continue

            if node.offline:
                raise errors.NodeOffline(
                    u'Node "%s" is offline.'
                    ' Remove it from environment and try again.' %
                    node.full_name)

            TaskHelper.prepare_syslog_dir(node)

            node.status = 'provisioning'
            db().commit()

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(task.cluster)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster
            }
        }

        return message
Example #10
    def get_default_nodes(self, cluster):
        return TaskHelper.nodes_to_provision(cluster)
Example #11
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        # this variable is used to set 'auth_key' in cobbler ks_meta
        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        netmanager = NetworkManager()

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
        # TODO: For now we send nodes data to the orchestrator,
        # which is cobbler oriented. In the future we need to
        # use a more abstract data structure.
        nodes_data = []
        for node in nodes:
            if not node.online:
                if not USE_FAKE:
                    raise Exception(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
                else:
                    logger.warning(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )

            node_data = {
                'profile': settings.COBBLER_PROFILE,
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': node.ip,
                'name': TaskHelper.make_slave_name(node.id, node.role),
                'hostname': node.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'ks_spaces': "\"%s\"" % json.dumps(
                        node.attributes.volumes).replace("\"", "\\\"")
                }
            }

            if node.status == "discover":
                logger.info(
                    "Node %s seems booted with bootstrap image",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
            else:
                # If it's not in discover, we expect it to be booted
                # into the target system.
                # TODO: Get rid of expectations!
                logger.info(
                    "Node %s seems booted with real system",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_SSH_KEY

            # FIXME: move this code (updating) into receiver.provision_resp
            if not USE_FAKE:
                node.status = "provisioning"
                orm().add(node)
                orm().commit()

            # here we assign admin network IPs for node
            # one IP for every node interface
            netmanager.assign_admin_ips(
                node.id,
                len(node.meta.get('interfaces', []))
            )
            admin_net_id = netmanager.get_admin_network_id()
            admin_ips = set([i.ip_addr for i in orm().query(IPAddr).
                            filter_by(node=node.id).
                            filter_by(network=admin_net_id)])
            for i in node.meta.get('interfaces', []):
                if 'interfaces' not in node_data:
                    node_data['interfaces'] = {}
                node_data['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                # interfaces_extra field in cobbler ks_meta
                # means some extra data for network interfaces
                # configuration. It is used by cobbler snippet.
                # For example, cobbler interface model does not
                # have 'peerdns' field, but we need this field
                # to be configured. So we use interfaces_extra
                # branch in order to set this unsupported field.
                if 'interfaces_extra' not in node_data:
                    node_data['interfaces_extra'] = {}
                node_data['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                # We want the node to be able to PXE boot via any of
                # its interfaces, so we add all discovered interfaces
                # into the cobbler system. But we want the assigned
                # fqdn to resolve to one IP address, because we don't
                # completely support multi-interface configuration yet.
                if i['mac'] == node.mac:
                    node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
                    node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'

            nodes_data.append(node_data)
            if not USE_FAKE:
                TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': nodes_data
            }
        }
        return message
Example #12
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        # this variable is used to set 'auth_key' in cobbler ks_meta
        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        netmanager = NetworkManager()

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
        # TODO: For now we send nodes data to the orchestrator,
        # which is cobbler oriented. In the future we need to
        # use a more abstract data structure.
        nodes_data = []
        for node in nodes:
            if not node.online:
                if not USE_FAKE:
                    raise errors.NodeOffline(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id))
                else:
                    logger.warning(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id))

            cobbler_profile = cluster_attrs['cobbler']['profile']

            node_data = {
                'profile': cobbler_profile,
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': node.ip,
                'name': TaskHelper.make_slave_name(node.id, node.role),
                'hostname': node.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'ks_spaces': "\"%s\"" % json.dumps(
                        node.attributes.volumes).replace("\"", "\\\"")
                }
            }

            if node.status == "discover":
                logger.info("Node %s seems booted with bootstrap image",
                            node.id)
                node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
            else:
                # If it's not in discover, we expect it to be booted
                # into the target system.
                # TODO: Get rid of expectations!
                logger.info("Node %s seems booted with real system", node.id)
                node_data['power_pass'] = settings.PATH_TO_SSH_KEY

            # FIXME: move this code (updating) into receiver.provision_resp
            if not USE_FAKE:
                node.status = "provisioning"
                db().add(node)
                db().commit()

            # here we assign admin network IPs for node
            # one IP for every node interface
            netmanager.assign_admin_ips(node.id,
                                        len(node.meta.get('interfaces', [])))
            admin_net_id = netmanager.get_admin_network_id()
            admin_ips = set([
                i.ip_addr for i in db().query(IPAddr).filter_by(
                    node=node.id).filter_by(network=admin_net_id)
            ])
            for i in node.meta.get('interfaces', []):
                if 'interfaces' not in node_data:
                    node_data['interfaces'] = {}
                node_data['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                # interfaces_extra field in cobbler ks_meta
                # means some extra data for network interfaces
                # configuration. It is used by cobbler snippet.
                # For example, cobbler interface model does not
                # have 'peerdns' field, but we need this field
                # to be configured. So we use interfaces_extra
                # branch in order to set this unsupported field.
                if 'interfaces_extra' not in node_data:
                    node_data['interfaces_extra'] = {}
                node_data['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                # We want the node to be able to PXE boot via any of
                # its interfaces, so we add all discovered interfaces
                # into the cobbler system. But we want the assigned
                # fqdn to resolve to one IP address, because we don't
                # completely support multi-interface configuration yet.
                if i['mac'] == node.mac:
                    node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
                    node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'

            nodes_data.append(node_data)
            if not USE_FAKE:
                TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': nodes_data
            }
        }
        return message