Example #1
    def test_recalculates_vg_sizes_when_disks_volumes_size_update(self):
        node_db = self.create_node()
        disks = self.get(node_db.id)

        vgs_before_update = filter(
            lambda volume: volume.get('type') == 'vg',
            VolumeManagerExtension.get_node_volumes(node_db))

        new_volume_size = 4200
        updated_disks_count = 0
        for disk in disks:
            if disk['size'] > 0:
                for volume in disk['volumes']:
                    volume['size'] = new_volume_size
                updated_disks_count += 1

        self.put(node_db.id, disks)

        vgs_after_update = filter(
            lambda volume: volume.get('type') == 'vg',
            VolumeManagerExtension.get_node_volumes(node_db))

        for vg_before, vg_after in zip(vgs_before_update, vgs_after_update):
            size_volumes_before = sum([
                volume.get('size', 0) for volume in vg_before['volumes']])
            size_volumes_after = sum([
                volume.get('size', 0) for volume in vg_after['volumes']])

            self.assertNotEqual(size_volumes_before, size_volumes_after)

            volume_group_size = new_volume_size * updated_disks_count
            self.assertEqual(size_volumes_after, volume_group_size)
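The assertions above rest on simple arithmetic: every volume on every non-empty disk is resized to new_volume_size, so a volume group that draws one volume from each updated disk must total new_volume_size * updated_disks_count. A minimal illustration with hypothetical figures (not taken from the test fixture):

    # Hypothetical figures for the invariant checked above: two updated
    # disks, each contributing one 4200-unit volume to the same VG.
    new_volume_size = 4200
    updated_disks_count = 2
    volume_group_size = new_volume_size * updated_disks_count
    assert volume_group_size == 8400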
Example #2
    def test_recalculates_vg_sizes_when_disks_volumes_size_update(self):
        node_db = self.create_node()
        disks = self.get(node_db.id)

        vgs_before_update = filter(
            lambda volume: volume.get('type') == 'vg',
            VolumeManagerExtension.get_node_volumes(node_db))

        new_volume_size = 4200
        updated_disks_count = 0
        for disk in disks:
            if disk['size'] > 0:
                for volume in disk['volumes']:
                    volume['size'] = new_volume_size
                updated_disks_count += 1

        self.put(node_db.id, disks)

        vgs_after_update = filter(
            lambda volume: volume.get('type') == 'vg',
            VolumeManagerExtension.get_node_volumes(node_db))

        for vg_before, vg_after in zip(vgs_before_update, vgs_after_update):
            size_volumes_before = sum(
                [volume.get('size', 0) for volume in vg_before['volumes']])
            size_volumes_after = sum(
                [volume.get('size', 0) for volume in vg_after['volumes']])

            self.assertNotEqual(size_volumes_before, size_volumes_after)

            volume_group_size = new_volume_size * updated_disks_count
            self.assertEqual(size_volumes_after, volume_group_size)
Example #3
 def test_multirole_controller_cinder_ceph(self):
     node = self.create_node('controller', 'cinder', 'ceph-osd')
     self.should_contain_os_with_minimal_size(node.volume_manager)
     self.should_allocates_same_size(
         node.volume_manager.volumes, ['image', 'cinder', 'ceph'])
     self.logical_volume_sizes_should_equal_all_phisical_volumes(
         VolumeManagerExtension.get_node_volumes(node))
     self.check_disk_size_equal_sum_of_all_volumes(
         VolumeManagerExtension.get_node_volumes(node))
Example #4
 def test_allocates_all_free_space_for_vm_for_compute_role(self):
     node = self.create_node('compute')
     self.should_contain_os_with_minimal_size(node.volume_manager)
     self.all_free_space_except_os_for_volume(
         node.volume_manager.volumes, 'vm')
     self.logical_volume_sizes_should_equal_all_phisical_volumes(
         VolumeManagerExtension.get_node_volumes(node))
     self.check_disk_size_equal_sum_of_all_volumes(
         VolumeManagerExtension.get_node_volumes(node))
Example #5
 def test_multirole_controller_cinder_ceph(self):
     node = self.create_node('controller', 'cinder', 'ceph-osd')
     self.should_contain_os_with_minimal_size(VolumeManager(node))
     self.should_allocates_same_size(
         VolumeManager(node).volumes, ['image', 'cinder', 'ceph'])
     self.logical_volume_sizes_should_equal_all_phisical_volumes(
         VolumeManagerExtension.get_node_volumes(node))
     self.check_disk_size_equal_sum_of_all_volumes(
         VolumeManagerExtension.get_node_volumes(node))
Example #6
 def test_allocates_all_free_space_for_vm_for_compute_role(self):
     node = self.create_node('compute')
     self.should_contain_os_with_minimal_size(VolumeManager(node))
     self.all_free_space_except_os_for_volume(
         VolumeManager(node).volumes, 'vm')
     self.logical_volume_sizes_should_equal_all_phisical_volumes(
         VolumeManagerExtension.get_node_volumes(node))
     self.check_disk_size_equal_sum_of_all_volumes(
         VolumeManagerExtension.get_node_volumes(node))
Example #7
    def test_allocates_all_free_space_for_os_for_controller_role(self):
        node = self.create_node('controller')
        disks = only_disks(node.volume_manager.volumes)
        disks_size_sum = sum([disk['size'] for disk in disks])
        os_sum_size = self.os_size(disks)
        glance_sum_size = self.glance_size(disks)
        reserved_size = self.reserved_size(disks)

        self.assertEqual(disks_size_sum - reserved_size,
                         os_sum_size + glance_sum_size)
        self.logical_volume_sizes_should_equal_all_phisical_volumes(
            VolumeManagerExtension.get_volumes(node))
        self.check_disk_size_equal_sum_of_all_volumes(
            VolumeManagerExtension.get_volumes(node))
Example #8
 def test_allocates_all_free_space_for_vm_for_cinder_role(self):
     node = self.create_node('cinder')
     self.should_contain_os_with_minimal_size(node.volume_manager)
     self.all_free_space_except_os_for_volume(node.volume_manager.volumes,
                                              'cinder')
     self.check_disk_size_equal_sum_of_all_volumes(
         VolumeManagerExtension.get_node_volumes(node))
Example #9
 def test_allocates_all_free_space_for_vm_for_cinder_role(self):
     node = self.create_node('cinder')
     self.should_contain_os_with_minimal_size(VolumeManager(node))
     self.all_free_space_except_os_for_volume(
         VolumeManager(node).volumes, 'cinder')
     self.check_disk_size_equal_sum_of_all_volumes(
         VolumeManagerExtension.get_node_volumes(node))
Example #10
 def test_allocates_space_single_disk_for_ceph_for_ceph_role(self):
     node = self.create_node('ceph-osd')
     self.update_node_with_single_disk(node, 30000)
     self.should_contain_os_with_minimal_size(VolumeManager(node))
     self.all_free_space_except_os_for_volume(
         VolumeManager(node).volumes, 'ceph')
     self.check_disk_size_equal_sum_of_all_volumes(
         VolumeManagerExtension.get_node_volumes(node))
Example #11
File: node.py  Project: prmtl/fuel-web
    def update_volumes(cls, instance):
        """Update volumes for Node instance.
        Adds pending "disks" changes for Cluster which Node belongs to

        :param instance: Node instance
        :returns: None
        """
        attrs = instance.attributes
        if not attrs:
            attrs = cls.create_attributes(instance)

        try:
            # TODO(eli): update volumes method should be moved
            # into an extension
            # Should be done as a part of blueprint:
            # https://blueprints.launchpad.net/fuel/+spec
            #                                 /volume-manager-refactoring
            from nailgun.extensions.volume_manager.extension \
                import VolumeManagerExtension
            VolumeManagerExtension.set_volumes(
                instance,
                instance.volume_manager.gen_volumes_info())
        except Exception as exc:
            msg = (
                u"Failed to generate volumes "
                u"info for node '{0}': '{1}'"
            ).format(
                instance.name or instance.mac or instance.id,
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            Notification.create({
                "topic": "error",
                "message": msg,
                "node_id": instance.id
            })

        if instance.cluster_id:
            Cluster.add_pending_changes(
                instance.cluster,
                "disks",
                node_id=instance.id
            )

        db().add(attrs)
        db().flush()
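Because update_volumes is a classmethod taking only the Node model instance, callers do not interact with the extension directly; attribute creation, volume generation, error notification, and pending-change bookkeeping all happen inside. A minimal usage sketch, assuming a node already persisted in the database (the lookup below is illustrative, not from the source):

    # Regenerate volume info for one node and record a pending "disks"
    # change on its cluster. Illustrative lookup; a real caller would
    # already hold a Node instance.
    from nailgun import objects
    from nailgun.db import db
    from nailgun.db.sqlalchemy.models import Node

    node = db().query(Node).first()
    objects.Node.update_volumes(node)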
Example #12
 def test_allocates_space_single_disk_for_ceph_for_ceph_role(self):
     node = self.create_node('ceph-osd')
     self.update_node_with_single_disk(node, 30000)
     self.should_contain_os_with_minimal_size(node.volume_manager)
     self.all_free_space_except_os_for_volume(
         node.volume_manager.volumes, 'ceph')
     self.check_disk_size_equal_sum_of_all_volumes(
         VolumeManagerExtension.get_node_volumes(node))
Example #13
 def setUp(self):
     super(TestVolumeManagerHelpers, self).setUp()
     self.env.create(
         nodes_kwargs=[
             {'roles': ['controller']},
         ]
     )
     self.node = self.env.nodes[0]
     self.volumes = VolumeManagerExtension.get_node_volumes(self.node)
Example #14
 def setUp(self):
     super(TestVolumeManagerHelpers, self).setUp()
     self.env.create(nodes_kwargs=[
         {
             'roles': ['controller']
         },
     ])
     self.node = self.env.nodes[0]
     self.volumes = VolumeManagerExtension.get_volumes(self.node)
Example #15
 def get_image_cache_max_size(self, node):
     images_ceph = (node.cluster.attributes['editable']['storage']
                    ['images_ceph']['value'])
     if images_ceph:
         image_cache_max_size = '0'
     else:
         from nailgun.extensions.volume_manager.extension \
             import VolumeManagerExtension
         image_cache_max_size = volume_manager.calc_glance_cache_size(
             VolumeManagerExtension.get_volumes(node))
     return {'glance': {'image_cache_max_size': image_cache_max_size}}
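The helper returns a nested dict ready to be merged into deployment attributes: with Ceph-backed Glance images the local image cache is disabled outright, otherwise its size is derived from the node's volume layout. Hypothetical return values, for illustration only (the non-zero size is made up; real values come from calc_glance_cache_size):

    # Illustrative shapes of the returned mapping:
    ceph_backed = {'glance': {'image_cache_max_size': '0'}}
    local_cache = {'glance': {'image_cache_max_size': '10737418240'}}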
Example #16
File: node.py  Project: nebril/fuel-web
    def update_volumes(cls, instance):
        """Update volumes for Node instance.
        Adds pending "disks" changes for Cluster which Node belongs to

        :param instance: Node instance
        :returns: None
        """
        attrs = instance.attributes
        if not attrs:
            attrs = cls.create_attributes(instance)

        try:
            # TODO(eli): update volumes method should be moved
            # into an extension
            # Should be done as a part of blueprint:
            # https://blueprints.launchpad.net/fuel/+spec
            #                                 /volume-manager-refactoring
            from nailgun.extensions.volume_manager.extension \
                import VolumeManagerExtension
            VolumeManagerExtension.set_volumes(
                instance, instance.volume_manager.gen_volumes_info())
        except Exception as exc:
            msg = (u"Failed to generate volumes "
                   u"info for node '{0}': '{1}'").format(
                       instance.name or instance.mac or instance.id,
                       str(exc) or "see logs for details")
            logger.warning(traceback.format_exc())
            Notification.create({
                "topic": "error",
                "message": msg,
                "node_id": instance.id
            })

        if instance.cluster_id:
            Cluster.add_pending_changes(instance.cluster,
                                        "disks",
                                        node_id=instance.id)

        db().add(attrs)
        db().flush()
Example #17
    def test_node_serialization(self):
        for node in self.serialized_cluster['nodes']:
            node_db = self.db.query(Node).filter_by(
                hostname=node['name']).first()
            # Get interface (in our case we created only one for each node)
            intr_db = node_db.nic_interfaces[0]
            intr_name = intr_db.name
            intr_mac = intr_db.mac
            kernal_params = self.attributes.get('kernel_params', {}) \
                .get('kernel', {}).get('value')
            self.assertEqual(
                node['ks_meta']['cloud_init_templates'], {
                    'boothook': 'boothook_fuel_6.1_centos.jinja2',
                    'cloud_config': 'cloud_config_fuel_6.1_centos.jinja2',
                    'meta_data': 'meta_data_fuel_6.1_centos.jinja2'
                })
            self.assertEqual(node['uid'], node_db.uid)
            self.assertEqual(node['power_address'], node_db.ip)
            self.assertEqual(node['name'], "node-{0}".format(node_db.id))
            self.assertEqual(node['hostname'],
                             objects.Node.get_node_fqdn(node_db))
            self.assertEqual(node['power_pass'],
                             settings.PATH_TO_BOOTSTRAP_SSH_KEY)

            self.assertDictEqual(
                node['kernel_options'], {
                    'netcfg/choose_interface':
                    objects.Node.get_admin_physical_iface(node_db).mac,
                    'udevrules':
                    '{0}_{1}'.format(intr_mac, intr_name)
                })

            self.assertDictEqual(
                node['ks_meta']['pm_data'], {
                    'ks_spaces':
                    VolumeManagerExtension.get_node_volumes(node_db),
                    'kernel_params': kernal_params
                })
            # Check node interfaces section
            self.assertEqual(node['interfaces'][intr_name]['mac_address'],
                             intr_mac)
            self.assertEqual(node['interfaces'][intr_name]['static'], '0')
            self.assertEqual(node['interfaces'][intr_name]['dns_name'],
                             objects.Node.get_node_fqdn(node_db))
            # Check node interfaces extra section
            self.assertEqual(node['interfaces_extra'][intr_name], {
                'peerdns': 'no',
                'onboot': 'yes'
            })
            # check identity key for mcollective
            self.assertEqual(node['ks_meta']['mco_identity'], node_db.id)
Example #18
    def test_node_serialization(self):
        for node in self.serialized_cluster['nodes']:
            node_db = self.db.query(Node).filter_by(
                hostname=node['name']
            ).first()
            # Get interface (in our case we created only one for each node)
            intr_db = node_db.nic_interfaces[0]
            intr_name = intr_db.name
            intr_mac = intr_db.mac
            kernal_params = self.attributes.get('kernel_params', {}) \
                .get('kernel', {}).get('value')
            self.assertEqual(
                node['ks_meta']['cloud_init_templates'], {
                    'boothook': 'boothook_fuel_6.1_centos.jinja2',
                    'cloud_config': 'cloud_config_fuel_6.1_centos.jinja2',
                    'meta_data': 'meta_data_fuel_6.1_centos.jinja2'
                }
            )
            self.assertEqual(node['uid'], node_db.uid)
            self.assertEqual(node['power_address'], node_db.ip)
            self.assertEqual(node['name'], "node-{0}".format(node_db.id))
            self.assertEqual(node['hostname'],
                             objects.Node.get_node_fqdn(node_db))
            self.assertEqual(
                node['power_pass'], settings.PATH_TO_BOOTSTRAP_SSH_KEY)

            self.assertDictEqual(node['kernel_options'], {
                'netcfg/choose_interface':
                objects.Node.get_admin_physical_iface(node_db).mac,
                'udevrules': '{0}_{1}'.format(intr_mac, intr_name)
            })

            self.assertDictEqual(node['ks_meta']['pm_data'], {
                'ks_spaces': VolumeManagerExtension.get_node_volumes(node_db),
                'kernel_params': kernal_params
            })
            # Check node interfaces section
            self.assertEqual(
                node['interfaces'][intr_name]['mac_address'], intr_mac)
            self.assertEqual(
                node['interfaces'][intr_name]['static'], '0')
            self.assertEqual(
                node['interfaces'][intr_name]['dns_name'],
                objects.Node.get_node_fqdn(node_db))
            # Check node interfaces extra section
            self.assertEqual(node['interfaces_extra'][intr_name], {
                'peerdns': 'no',
                'onboot': 'yes'
            })
            # check identity key for mcollective
            self.assertEqual(node['ks_meta']['mco_identity'], node_db.id)
Example #19
    def test_update_ceph_partition(self):
        node = self.create_node(roles=["ceph-osd"])
        disks = self.get(node.id)

        new_volume_size = 4321
        for disk in disks:
            if disk["size"] > 0:
                for volume in disk["volumes"]:
                    volume["size"] = new_volume_size

        self.put(node.id, disks)
        partitions_after_update = filter(
            lambda volume: volume.get("type") == "partition",
            VolumeManagerExtension.get_node_volumes(node))

        for partition_after in partitions_after_update:
            self.assertEqual(partition_after["size"], new_volume_size)
Example #20
    def test_update_ceph_partition(self):
        node = self.create_node(roles=['ceph-osd'])
        disks = self.get(node.id)

        new_volume_size = 4321
        for disk in disks:
            if disk['size'] > 0:
                for volume in disk['volumes']:
                    volume['size'] = new_volume_size

        self.put(node.id, disks)
        partitions_after_update = filter(
            lambda volume: volume.get('type') == 'partition',
            VolumeManagerExtension.get_node_volumes(node))

        for partition_after in partitions_after_update:
            self.assertEqual(partition_after['size'], new_volume_size)
Example #21
    def set_storage_parameters(self, cluster, attrs):
        """Generate pg_num as the number of OSDs across the cluster
        multiplied by 100, divided by the Ceph replication factor, and
        rounded up to the nearest power of 2.
        """
        osd_num = 0
        nodes = db().query(Node).filter(Node.cluster == cluster).filter(
            sa.or_(Node.roles.any('ceph-osd'),
                   Node.pending_roles.any('ceph-osd'))).options(
                       joinedload('attributes'))

        for node in nodes:
            from nailgun.extensions.volume_manager.extension \
                import VolumeManagerExtension
            for disk in VolumeManagerExtension.get_volumes(node):
                for part in disk.get('volumes', []):
                    if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                        osd_num += 1
        if osd_num > 0:
            repl = int(attrs['storage']['osd_pool_size'])
            pg_num = 2**int(math.ceil(math.log(osd_num * 100.0 / repl, 2)))
        else:
            pg_num = 128
        attrs['storage']['pg_num'] = pg_num
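To make the rounding rule from the docstring concrete, the same computation can be reproduced standalone; calc_pg_num below is a hypothetical helper written for illustration, not part of the extension:

    import math

    def calc_pg_num(osd_num, repl, default=128):
        # Hypothetical helper mirroring the formula above: round
        # osd_num * 100 / repl up to the nearest power of two, falling
        # back to the default when the cluster has no OSDs.
        if osd_num <= 0:
            return default
        return 2 ** int(math.ceil(math.log(osd_num * 100.0 / repl, 2)))

    # 10 OSDs with replication factor 2: 10 * 100 / 2 = 500 -> 512
    assert calc_pg_num(10, 2) == 512
    # no OSDs at all: fall back to the default of 128
    assert calc_pg_num(0, 2) == 128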
Example #22
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a single node."""
        serialized_node = {
            'uid': node.uid,
            'power_address': node.ip,
            'name': objects.Node.make_slave_name(node),
            # right now it is duplicated to avoid possible issues
            'slave_name': objects.Node.make_slave_name(node),
            'hostname': node.fqdn,
            'power_pass': cls.get_ssh_key_path(node),

            'profile': cluster_attrs['cobbler']['profile'],
            'power_type': 'ssh',
            'power_user': '******',
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            # For provisioning phase
            'kernel_options': {
                'netcfg/choose_interface':
                objects.Node.get_admin_physical_iface(node).mac,
                'udevrules': cls.interfaces_mapping_for_udev(node)},
            'ks_meta': {
                'pm_data': {
                    'ks_spaces': VolumeManagerExtension.get_volumes(node),
                    'kernel_params': objects.Node.get_kernel_params(node)},
                'fuel_version': node.cluster.fuel_version,
                'puppet_auto_setup': 1,
                'puppet_master': settings.PUPPET_MASTER_HOST,
                'puppet_enable': 0,
                'mco_auto_setup': 1,
                'install_log_2_syslog': 1,
                'mco_pskey': settings.MCO_PSKEY,
                'mco_vhost': settings.MCO_VHOST,
                'mco_host': settings.MCO_HOST,
                'mco_user': settings.MCO_USER,
                'mco_password': settings.MCO_PASSWORD,
                'mco_connector': settings.MCO_CONNECTOR,
                'mco_enable': 1,
                'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                'authorized_keys':
                ["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
                'master_ip': settings.MASTER_IP,
                'timezone': settings.TIMEZONE,
            }}

        provision_data = cluster_attrs.get('provision')
        if provision_data:
            if provision_data['method'] == consts.PROVISION_METHODS.image:
                serialized_node['ks_meta']['image_data'] = \
                    provision_data['image_data']

        serialized_node['ks_meta']['repo_setup'] = cluster_attrs['repo_setup']

        vlan_splinters = cluster_attrs.get('vlan_splinters', {})
        if vlan_splinters.get('vswitch') == 'kernel_lt':
            serialized_node['ks_meta']['kernel_lt'] = 1

        mellanox_data = cluster_attrs.get('neutron_mellanox')
        if mellanox_data:
            serialized_node['ks_meta'].update({
                'mlnx_vf_num': mellanox_data['vf_num'],
                'mlnx_plugin_mode': mellanox_data['plugin'],
                'mlnx_iser_enabled': cluster_attrs['storage']['iser'],
            })
            # Add relevant kernel parameter when using Mellanox SR-IOV
            # and/or iSER (which works on top of a probed virtual function)
            # unless it was explicitly added by the user
            pm_data = serialized_node['ks_meta']['pm_data']
            if ((mellanox_data['plugin'] == 'ethernet' or
                    cluster_attrs['storage']['iser'] is True) and
                    'intel_iommu=' not in pm_data['kernel_params']):
                pm_data['kernel_params'] += ' intel_iommu=on'

        net_manager = objects.Cluster.get_network_manager(node.cluster)
        gw = net_manager.get_default_gateway(node.id)
        serialized_node['ks_meta'].update({'gw': gw})
        serialized_node['ks_meta'].update(
            {'admin_net': net_manager.get_admin_network_group(node.id).cidr}
        )

        serialized_node.update(cls.serialize_interfaces(node))

        return serialized_node
Example #23
 def is_checking_required(self):
     return VolumeManagerExtension._is_disk_checking_required(self.node)
Example #24
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a single node."""
        serialized_node = {
            'uid': node.uid,
            'power_address': node.ip,
            'name': objects.Node.make_slave_name(node),
            # right now it is duplicated to avoid possible issues
            'slave_name': objects.Node.make_slave_name(node),
            'hostname': node.fqdn,
            'power_pass': cls.get_ssh_key_path(node),
            'profile': cluster_attrs['cobbler']['profile'],
            'power_type': 'ssh',
            'power_user': '******',
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            # For provisioning phase
            'kernel_options': {
                'netcfg/choose_interface':
                objects.Node.get_admin_physical_iface(node).mac,
                'udevrules': cls.interfaces_mapping_for_udev(node)
            },
            'ks_meta': {
                'pm_data': {
                    'ks_spaces': VolumeManagerExtension.get_volumes(node),
                    'kernel_params': objects.Node.get_kernel_params(node)
                },
                'fuel_version': node.cluster.fuel_version,
                'puppet_auto_setup': 1,
                'puppet_master': settings.PUPPET_MASTER_HOST,
                'puppet_enable': 0,
                'mco_auto_setup': 1,
                'install_log_2_syslog': 1,
                'mco_pskey': settings.MCO_PSKEY,
                'mco_vhost': settings.MCO_VHOST,
                'mco_host': settings.MCO_HOST,
                'mco_user': settings.MCO_USER,
                'mco_password': settings.MCO_PASSWORD,
                'mco_connector': settings.MCO_CONNECTOR,
                'mco_enable': 1,
                'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                'authorized_keys':
                ["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
                'master_ip': settings.MASTER_IP,
                'timezone': settings.TIMEZONE,
            }
        }

        provision_data = cluster_attrs.get('provision')
        if provision_data:
            if provision_data['method'] == consts.PROVISION_METHODS.image:
                serialized_node['ks_meta']['image_data'] = \
                    provision_data['image_data']

        serialized_node['ks_meta']['repo_setup'] = cluster_attrs['repo_setup']

        vlan_splinters = cluster_attrs.get('vlan_splinters', {})
        if vlan_splinters.get('vswitch') == 'kernel_lt':
            serialized_node['ks_meta']['kernel_lt'] = 1

        mellanox_data = cluster_attrs.get('neutron_mellanox')
        if mellanox_data:
            serialized_node['ks_meta'].update({
                'mlnx_vf_num': mellanox_data['vf_num'],
                'mlnx_plugin_mode': mellanox_data['plugin'],
                'mlnx_iser_enabled': cluster_attrs['storage']['iser'],
            })
            # Add relevant kernel parameter when using Mellanox SR-IOV
            # and/or iSER (which works on top of a probed virtual function)
            # unless it was explicitly added by the user
            pm_data = serialized_node['ks_meta']['pm_data']
            if ((mellanox_data['plugin'] == 'ethernet'
                 or cluster_attrs['storage']['iser'] is True)
                    and 'intel_iommu=' not in pm_data['kernel_params']):
                pm_data['kernel_params'] += ' intel_iommu=on'

        net_manager = objects.Cluster.get_network_manager(node.cluster)
        gw = net_manager.get_default_gateway(node.id)
        serialized_node['ks_meta'].update({'gw': gw})
        serialized_node['ks_meta'].update(
            {'admin_net': net_manager.get_admin_network_group(node.id).cidr})

        serialized_node.update(cls.serialize_interfaces(node))

        return serialized_node