def test_node_extension_call_extension_from_node(self, get_m):
        """A node-level extension wins: only 'ex1' receives the call."""
        node = self.make_node(node_extensions=["ex1"], cluster_extensions=["ex2"])

        node_extension_call("method_call", node)

        first, second = get_m.return_value[0], get_m.return_value[1]
        self.assertEqual("ex1", first.name)
        self.assertEqual("ex2", second.name)

        # The node's own extension handled the call; the cluster-level
        # extension must not have been touched.
        first.method_call.assert_called_once_with(node)
        self.assertFalse(second.method_call.called)
# Beispiel #2
# 0
    def test_node_extension_call_default_extension_from_cluster(self, get_m):
        """With no node-level extensions, the cluster extension handles it."""
        node = self.make_node(node_extensions=[], cluster_extensions=['ex2'])

        node_extension_call('method_call', node)

        extensions = get_m.return_value
        ex1, ex2 = extensions[0], extensions[1]
        self.assertEqual('ex1', ex1.name)
        self.assertEqual('ex2', ex2.name)

        # Only the cluster-provided extension should have been dispatched to.
        self.assertFalse(ex1.method_call.called)
        ex2.method_call.assert_called_once_with(node)
# Beispiel #3
# 0
    def test_node_extension_call_default_extension_from_cluster(self, get_m):
        """When the node itself lists no extensions, the cluster's is used."""
        node = self.make_node(
            node_extensions=[],
            cluster_extensions=['ex2'])

        node_extension_call('method_call', node)
        ex1 = get_m.return_value[0]
        self.assertEqual('ex1', ex1.name)
        ex2 = get_m.return_value[1]
        self.assertEqual('ex2', ex2.name)

        # 'ex1' is available but not selected because the node declares no
        # extensions; the call falls through to the cluster-level 'ex2'.
        self.assertFalse(ex1.method_call.called)
        ex2.method_call.assert_called_once_with(node)
    def generate_node_volumes_data(self, node):
        """Serialize disk and volume-group information for one node.

        The resulting dict is part of the payload handed to Astute.
        """
        # The actual layout is produced by whichever extension implements
        # 'get_node_volumes' for this node.
        volumes = node_extension_call('get_node_volumes', node)
        return {'node_volumes': volumes}
# Beispiel #5
# 0
    def generate_node_volumes_data(self, node):
        """Serialize information about disks.

        This function returns information about disks and
        volume groups for each node in cluster.
        Will be passed to Astute.
        """
        # Volume layout comes from whichever extension implements
        # 'get_node_volumes' for this node.
        return {'node_volumes': node_extension_call('get_node_volumes', node)}
 def get_image_cache_max_size(self, node):
     """Return the glance 'image_cache_max_size' setting for *node*.

     When Ceph backs image storage the local cache is disabled ('0');
     otherwise the size is derived from the node's volume layout.
     """
     storage = node.cluster.attributes['editable']['storage']
     if storage['images_ceph']['value']:
         image_cache_max_size = '0'
     else:
         volumes = node_extension_call('get_node_volumes', node)
         image_cache_max_size = volume_manager.calc_glance_cache_size(volumes)
     return {'glance': {'image_cache_max_size': image_cache_max_size}}
 def get_image_cache_max_size(self, node):
     """Return the glance 'image_cache_max_size' setting for *node*.

     '0' disables the local image cache when Ceph backs image storage;
     otherwise the size is computed from the node's volume layout.
     """
     images_ceph = (node.cluster.attributes['editable']['storage']
                    ['images_ceph']['value'])
     if images_ceph:
         image_cache_max_size = '0'
     else:
         image_cache_max_size = volume_manager.calc_glance_cache_size(
             node_extension_call('get_node_volumes', node))
     return {'glance': {'image_cache_max_size': image_cache_max_size}}
    def set_storage_parameters(self, cluster, attrs):
        """Generate pg_num

        pg_num is generated as the number of OSDs across the cluster
        multiplied by 100, divided by Ceph replication factor, and
        rounded up to the nearest power of 2.
        """
        osd_num = 0
        # Count both assigned and pending 'ceph-osd' roles so pg_num is
        # sized for the cluster as it will be after deployment.
        nodes = db().query(Node).filter(
            Node.cluster == cluster
        ).filter(sa.or_(
            Node.roles.any('ceph-osd'),
            Node.pending_roles.any('ceph-osd')
        ))

        # Every non-empty 'ceph' partition on any disk counts as one OSD.
        for node in nodes:
            for disk in node_extension_call('get_node_volumes', node):
                for part in disk.get('volumes', []):
                    if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                        osd_num += 1

        storage_attrs = attrs['storage']

        # Per-pool PG sizing is delegated; which pools exist depends on
        # which Ceph-backed services (volumes/objects/ephemeral/images)
        # are enabled in the storage attributes.
        pg_counts = get_pool_pg_count(
            osd_num=osd_num,
            pool_sz=int(storage_attrs['osd_pool_size']),
            ceph_version='firefly',
            volumes_ceph=storage_attrs['volumes_ceph'],
            objects_ceph=storage_attrs['objects_ceph'],
            ephemeral_ceph=storage_attrs['ephemeral_ceph'],
            images_ceph=storage_attrs['images_ceph'],
            emulate_pre_7_0=False)

        # Log {pool_name: pg_count} mapping
        pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
        logger.debug("Ceph: PG values {%s}", pg_str)

        storage_attrs['pg_num'] = pg_counts['default_pg_num']
        storage_attrs['per_pool_pg_nums'] = pg_counts
    def set_storage_parameters(self, cluster, attrs):
        """Compute Ceph pg_num and store it in ``attrs['storage']``.

        pg_num is the number of OSDs across the cluster multiplied by
        100, divided by the Ceph replication factor, rounded up to the
        nearest power of 2 (128 when no OSD partitions are found).
        """
        # Both assigned and pending 'ceph-osd' roles are considered;
        # attributes are eager-loaded to avoid a lazy query per node.
        ceph_osd_filter = sa.or_(Node.roles.any('ceph-osd'),
                                 Node.pending_roles.any('ceph-osd'))
        nodes = db().query(Node).filter(
            Node.cluster == cluster).filter(ceph_osd_filter).options(
                joinedload('attributes'))

        # Each non-empty 'ceph' partition counts as one prospective OSD.
        osd_num = sum(
            1
            for node in nodes
            for disk in node_extension_call('get_node_volumes', node)
            for part in disk.get('volumes', [])
            if part.get('name') == 'ceph' and part.get('size', 0) > 0)

        if osd_num > 0:
            repl = int(attrs['storage']['osd_pool_size'])
            # Round osd_num * 100 / repl up to the next power of two.
            exponent = int(math.ceil(math.log(osd_num * 100.0 / repl, 2)))
            pg_num = 2 ** exponent
        else:
            pg_num = 128
        attrs['storage']['pg_num'] = pg_num
    def set_storage_parameters(self, cluster, attrs):
        """Generate pg_num

        pg_num is generated as the number of OSDs across the cluster
        multiplied by 100, divided by Ceph replication factor, and
        rounded up to the nearest power of 2.
        """
        osd_num = 0
        # Both assigned and pending 'ceph-osd' roles count, so pg_num is
        # sized for the post-deployment cluster; attributes are eager-loaded
        # to avoid a lazy query per node.
        nodes = db().query(Node).filter(Node.cluster == cluster).filter(
            sa.or_(Node.roles.any('ceph-osd'),
                   Node.pending_roles.any('ceph-osd'))).options(
                       joinedload('attributes'))

        # Every non-empty 'ceph' partition on any disk counts as one OSD.
        for node in nodes:
            for disk in node_extension_call('get_node_volumes', node):
                for part in disk.get('volumes', []):
                    if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                        osd_num += 1

        storage_attrs = attrs['storage']

        # Per-pool PG sizing is delegated; which pools exist depends on
        # which Ceph-backed services are enabled in the storage attributes.
        pg_counts = get_pool_pg_count(
            osd_num=osd_num,
            pool_sz=int(storage_attrs['osd_pool_size']),
            ceph_version='firefly',
            volumes_ceph=storage_attrs['volumes_ceph'],
            objects_ceph=storage_attrs['objects_ceph'],
            ephemeral_ceph=storage_attrs['ephemeral_ceph'],
            images_ceph=storage_attrs['images_ceph'],
            emulate_pre_7_0=False)

        # Log {pool_name: pg_count} mapping
        pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
        logger.debug("Ceph: PG values {%s}", pg_str)

        storage_attrs['pg_num'] = pg_counts['default_pg_num']
        storage_attrs['per_pool_pg_nums'] = pg_counts
    def set_storage_parameters(self, cluster, attrs):
        """Generate pg_num as the number of OSDs across the cluster
        multiplied by 100, divided by Ceph replication factor, and
        rounded up to the nearest power of 2.
        """
        osd_num = 0
        # Both assigned and pending 'ceph-osd' roles count, so pg_num is
        # sized for the post-deployment cluster; attributes are eager-loaded
        # to avoid a lazy query per node.
        nodes = db().query(Node).filter(
            Node.cluster == cluster
        ).filter(sa.or_(
            Node.roles.any('ceph-osd'),
            Node.pending_roles.any('ceph-osd')
        )).options(joinedload('attributes'))

        # A non-empty 'ceph' partition on any disk counts as one OSD.
        for node in nodes:
            for disk in node_extension_call('get_node_volumes', node):
                for part in disk.get('volumes', []):
                    if part.get('name') == 'ceph' and part.get('size', 0) > 0:
                        osd_num += 1
        if osd_num > 0:
            repl = int(attrs['storage']['osd_pool_size'])
            # Round osd_num * 100 / repl up to the next power of two.
            pg_num = 2 ** int(math.ceil(math.log(osd_num * 100.0 / repl, 2)))
        else:
            pg_num = 128
        attrs['storage']['pg_num'] = pg_num
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a single node for provisioning.

        Builds the Cobbler power/PXE parameters plus the 'ks_meta'
        section (disk partitioning via 'ks_spaces', puppet and
        mcollective settings) consumed by the provisioning backend.
        """
        serialized_node = {
            'uid': node.uid,
            'power_address': node.ip,
            'name': objects.Node.get_slave_name(node),
            # right now it duplicates to avoid possible issues
            'slave_name': objects.Node.get_slave_name(node),
            'hostname': objects.Node.get_node_fqdn(node),
            'power_pass': cls.get_ssh_key_path(node),
            'profile': cluster_attrs['cobbler']['profile'],
            'power_type': 'ssh',
            'power_user': '******',
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            # For provisioning phase
            'kernel_options': {
                'netcfg/choose_interface':
                objects.Node.get_admin_physical_iface(node).mac,
                'udevrules':
                cls.interfaces_mapping_for_udev(node)
            },
            'ks_meta': {
                'pm_data': {
                    # Partitioning scheme from the volumes extension.
                    'ks_spaces': node_extension_call('get_node_volumes', node),
                    'kernel_params': objects.Node.get_kernel_params(node)
                },
                'fuel_version':
                node.cluster.fuel_version,
                'puppet_auto_setup':
                1,
                'puppet_master':
                settings.PUPPET_MASTER_HOST,
                'puppet_enable':
                0,
                'mco_auto_setup':
                1,
                'install_log_2_syslog':
                1,
                'mco_pskey':
                settings.MCO_PSKEY,
                'mco_vhost':
                settings.MCO_VHOST,
                'mco_host':
                settings.MCO_HOST,
                'mco_user':
                settings.MCO_USER,
                'mco_password':
                settings.MCO_PASSWORD,
                'mco_connector':
                settings.MCO_CONNECTOR,
                'mco_enable':
                1,
                'auth_key':
                "\"%s\"" % cluster_attrs.get('auth_key', ''),
                'authorized_keys':
                ["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
                'master_ip':
                settings.MASTER_IP,
                'timezone':
                settings.TIMEZONE,
            }
        }

        # Image-based provisioning additionally needs the image metadata.
        provision_data = cluster_attrs.get('provision')
        if provision_data:
            if provision_data['method'] == consts.PROVISION_METHODS.image:
                serialized_node['ks_meta']['image_data'] = \
                    provision_data['image_data']

        serialized_node['ks_meta']['repo_setup'] = cluster_attrs['repo_setup']

        # NOTE(review): 'kernel_lt' appears to request a long-term-support
        # kernel for the kernel_lt vswitch — confirm against the installer.
        vlan_splinters = cluster_attrs.get('vlan_splinters', {})
        if vlan_splinters.get('vswitch') == 'kernel_lt':
            serialized_node['ks_meta']['kernel_lt'] = 1

        mellanox_data = cluster_attrs.get('neutron_mellanox')
        if mellanox_data:
            serialized_node['ks_meta'].update({
                'mlnx_vf_num':
                mellanox_data['vf_num'],
                'mlnx_plugin_mode':
                mellanox_data['plugin'],
                'mlnx_iser_enabled':
                cluster_attrs['storage']['iser'],
            })
            # Add relevant kernel parameter when using Mellanox SR-IOV
            # and/or iSER (which works on top of a probed virtual function)
            # unless it was explicitly added by the user
            pm_data = serialized_node['ks_meta']['pm_data']
            if ((mellanox_data['plugin'] == 'ethernet'
                 or cluster_attrs['storage']['iser'] is True)
                    and 'intel_iommu=' not in pm_data['kernel_params']):
                pm_data['kernel_params'] += ' intel_iommu=on'

        # Network settings: default gateway and the admin network CIDR.
        net_manager = objects.Cluster.get_network_manager(node.cluster)
        gw = net_manager.get_default_gateway(node.id)
        serialized_node['ks_meta'].update({'gw': gw})
        serialized_node['ks_meta'].update(
            {'admin_net': net_manager.get_admin_network_group(node.id).cidr})

        serialized_node.update(cls.serialize_interfaces(node))

        return serialized_node
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a single node for provisioning.

        Builds the Cobbler power/PXE parameters plus the 'ks_meta'
        section (disk partitioning via 'ks_spaces', puppet and
        mcollective settings) consumed by the provisioning backend.
        """
        serialized_node = {
            'uid': node.uid,
            'power_address': node.ip,
            'name': objects.Node.get_slave_name(node),
            # right now it duplicates to avoid possible issues
            'slave_name': objects.Node.get_slave_name(node),
            'hostname': objects.Node.get_node_fqdn(node),
            'power_pass': cls.get_ssh_key_path(node),

            'profile': cluster_attrs['cobbler']['profile'],
            'power_type': 'ssh',
            'power_user': '******',
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            # For provisioning phase
            'kernel_options': {
                'netcfg/choose_interface':
                objects.Node.get_admin_physical_iface(node).mac,
                'udevrules': cls.interfaces_mapping_for_udev(node)},
            'ks_meta': {
                'pm_data': {
                    # Partitioning scheme from the volumes extension.
                    'ks_spaces': node_extension_call('get_node_volumes', node),
                    'kernel_params': objects.Node.get_kernel_params(node)},
                'fuel_version': node.cluster.fuel_version,
                'cloud_init_templates':
                cls.serialize_cloud_init_templates(node.cluster.release),
                'puppet_auto_setup': 1,
                'puppet_master': settings.PUPPET_MASTER_HOST,
                'puppet_enable': 0,
                'mco_auto_setup': 1,
                'install_log_2_syslog': 1,
                'mco_pskey': settings.MCO_PSKEY,
                'mco_vhost': settings.MCO_VHOST,
                'mco_host': settings.MCO_HOST,
                'mco_user': settings.MCO_USER,
                'mco_password': settings.MCO_PASSWORD,
                'mco_connector': settings.MCO_CONNECTOR,
                'mco_enable': 1,
                'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                'authorized_keys':
                ["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
                'master_ip': settings.MASTER_IP,
                'timezone': settings.TIMEZONE,
            }}

        # Image-based provisioning additionally needs the image metadata.
        provision_data = cluster_attrs.get('provision')
        if provision_data:
            if provision_data['method'] == consts.PROVISION_METHODS.image:
                serialized_node['ks_meta']['image_data'] = \
                    provision_data['image_data']

        serialized_node['ks_meta']['repo_setup'] = cluster_attrs['repo_setup']

        # NOTE(review): 'kernel_lt' appears to request a long-term-support
        # kernel for the kernel_lt vswitch — confirm against the installer.
        vlan_splinters = cluster_attrs.get('vlan_splinters', {})
        if vlan_splinters.get('vswitch') == 'kernel_lt':
            serialized_node['ks_meta']['kernel_lt'] = 1

        mellanox_data = cluster_attrs.get('neutron_mellanox')
        if mellanox_data:
            serialized_node['ks_meta'].update({
                'mlnx_vf_num': mellanox_data['vf_num'],
                'mlnx_plugin_mode': mellanox_data['plugin'],
                'mlnx_iser_enabled': cluster_attrs['storage']['iser'],
            })
            # Add relevant kernel parameter when using Mellanox SR-IOV
            # and/or iSER (which works on top of a probed virtual function)
            # unless it was explicitly added by the user
            pm_data = serialized_node['ks_meta']['pm_data']
            # Fixed over-indented body (flake8 E117); the condition and the
            # appended parameter are unchanged.
            if ((mellanox_data['plugin'] == 'ethernet' or
                    cluster_attrs['storage']['iser'] is True) and
                    'intel_iommu=' not in pm_data['kernel_params']):
                pm_data['kernel_params'] += ' intel_iommu=on'

        # Network settings: default gateway and the admin network CIDR.
        net_manager = objects.Cluster.get_network_manager(node.cluster)
        gw = net_manager.get_default_gateway(node.id)
        serialized_node['ks_meta'].update({'gw': gw})
        serialized_node['ks_meta'].update(
            {'admin_net': net_manager.get_admin_network_group(node.id).cidr}
        )

        serialized_node.update(cls.serialize_interfaces(node))

        return serialized_node
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a single node for provisioning.

        Builds the Cobbler power/PXE parameters plus the 'ks_meta'
        section (disk partitioning via 'ks_spaces', puppet and
        mcollective settings) consumed by the provisioning backend.
        """
        serialized_node = {
            'uid': node.uid,
            'power_address': node.ip,
            'name': objects.Node.get_slave_name(node),
            # right now it duplicates to avoid possible issues
            'slave_name': objects.Node.get_slave_name(node),
            'hostname': objects.Node.get_node_fqdn(node),
            'power_pass': cls.get_ssh_key_path(node),
            'profile': cluster_attrs['cobbler']['profile'],
            'power_type': 'ssh',
            'power_user': '******',
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            # For provisioning phase
            'kernel_options': {
                'netcfg/choose_interface':
                objects.Node.get_admin_physical_iface(node).mac,
                'udevrules':
                cls.interfaces_mapping_for_udev(node)
            },
            'ks_meta': {
                'pm_data': {
                    # Partitioning scheme from the volumes extension.
                    'ks_spaces': node_extension_call('get_node_volumes', node),
                    'kernel_params': objects.Node.get_kernel_params(node)
                },
                'fuel_version':
                node.cluster.fuel_version,
                'cloud_init_templates':
                cls.serialize_cloud_init_templates(node.cluster.release),
                'puppet_auto_setup':
                1,
                'puppet_master':
                settings.PUPPET_MASTER_HOST,
                'puppet_enable':
                0,
                'mco_auto_setup':
                1,
                'install_log_2_syslog':
                1,
                'mco_pskey':
                settings.MCO_PSKEY,
                'mco_vhost':
                settings.MCO_VHOST,
                'mco_host':
                settings.MCO_HOST,
                'mco_user':
                settings.MCO_USER,
                'mco_password':
                settings.MCO_PASSWORD,
                'mco_connector':
                settings.MCO_CONNECTOR,
                'mco_enable':
                1,

                # NOTE(aroma): identity parameter usually is added/updated
                # by nailgun agent but due to particularities of its execution
                # flow such action may lead to deployment failures [1].
                # Hence we supply the information here so fuel-agent will
                # create mcollective config initially with the data present,
                # [1]: https://bugs.launchpad.net/fuel/+bug/1518306
                'mco_identity':
                node.id,
                'auth_key':
                "\"%s\"" % cluster_attrs.get('auth_key', ''),
                'authorized_keys':
                ["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
                'master_ip':
                settings.MASTER_IP,
                'timezone':
                settings.TIMEZONE,
            }
        }

        # Image-based provisioning additionally needs the image metadata.
        provision_data = cluster_attrs.get('provision')
        if provision_data:
            if provision_data['method'] == consts.PROVISION_METHODS.image:
                serialized_node['ks_meta']['image_data'] = \
                    provision_data['image_data']

        serialized_node['ks_meta']['repo_setup'] = cluster_attrs['repo_setup']

        # NOTE(review): 'kernel_lt' appears to request a long-term-support
        # kernel for the kernel_lt vswitch — confirm against the installer.
        vlan_splinters = cluster_attrs.get('vlan_splinters', {})
        if vlan_splinters.get('vswitch') == 'kernel_lt':
            serialized_node['ks_meta']['kernel_lt'] = 1

        # Mellanox handling is factored out in this version of the method.
        cls.inject_mellanox_settings_for_provisioning(cluster_attrs,
                                                      serialized_node)
        # Network settings: default gateway and the admin network CIDR.
        net_manager = objects.Cluster.get_network_manager(node.cluster)
        gw = net_manager.get_default_gateway(node.id)
        serialized_node['ks_meta'].update({'gw': gw})
        serialized_node['ks_meta'].update(
            {'admin_net': net_manager.get_admin_network_group(node.id).cidr})

        serialized_node.update(cls.serialize_interfaces(node))

        return serialized_node
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a single node for provisioning.

        Builds the Cobbler power/PXE parameters plus the 'ks_meta'
        section (disk partitioning via 'ks_spaces', puppet and
        mcollective settings) consumed by the provisioning backend.
        """
        serialized_node = {
            'uid': node.uid,
            'power_address': node.ip,
            'name': objects.Node.get_slave_name(node),
            # right now it duplicates to avoid possible issues
            'slave_name': objects.Node.get_slave_name(node),
            'hostname': objects.Node.get_node_fqdn(node),
            'power_pass': cls.get_ssh_key_path(node),

            'profile': cluster_attrs['cobbler']['profile'],
            'power_type': 'ssh',
            'power_user': '******',
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            # For provisioning phase
            'kernel_options': {
                'netcfg/choose_interface':
                objects.Node.get_admin_physical_iface(node).mac,
                'udevrules': cls.interfaces_mapping_for_udev(node)},
            'ks_meta': {
                'pm_data': {
                    # Partitioning scheme from the volumes extension.
                    'ks_spaces': node_extension_call('get_node_volumes', node),
                    'kernel_params': objects.Node.get_kernel_params(node)},
                'fuel_version': node.cluster.fuel_version,
                'cloud_init_templates':
                cls.serialize_cloud_init_templates(node.cluster.release),
                'puppet_auto_setup': 1,
                'puppet_master': settings.PUPPET_MASTER_HOST,
                'puppet_enable': 0,
                'mco_auto_setup': 1,
                'install_log_2_syslog': 1,
                'mco_pskey': settings.MCO_PSKEY,
                'mco_vhost': settings.MCO_VHOST,
                'mco_host': settings.MCO_HOST,
                'mco_user': settings.MCO_USER,
                'mco_password': settings.MCO_PASSWORD,
                'mco_connector': settings.MCO_CONNECTOR,
                'mco_enable': 1,

                # NOTE(aroma): identity parameter usually is added/updated
                # by nailgun agent but due to particularities of its execution
                # flow such action may lead to deployment failures [1].
                # Hence we supply the information here so fuel-agent will
                # create mcollective config initially with the data present,
                # [1]: https://bugs.launchpad.net/fuel/+bug/1518306
                'mco_identity': node.id,

                'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                'authorized_keys':
                ["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
                'master_ip': settings.MASTER_IP,
                'timezone': settings.TIMEZONE,
            }}

        # Image-based provisioning additionally needs the image metadata.
        provision_data = cluster_attrs.get('provision')
        if provision_data:
            if provision_data['method'] == consts.PROVISION_METHODS.image:
                serialized_node['ks_meta']['image_data'] = \
                    provision_data['image_data']

        serialized_node['ks_meta']['repo_setup'] = cluster_attrs['repo_setup']

        # NOTE(review): 'kernel_lt' appears to request a long-term-support
        # kernel for the kernel_lt vswitch — confirm against the installer.
        vlan_splinters = cluster_attrs.get('vlan_splinters', {})
        if vlan_splinters.get('vswitch') == 'kernel_lt':
            serialized_node['ks_meta']['kernel_lt'] = 1

        # Mellanox handling is factored out in this version of the method.
        cls.inject_mellanox_settings_for_provisioning(
            cluster_attrs, serialized_node)
        # Network settings: default gateway and the admin network CIDR.
        net_manager = objects.Cluster.get_network_manager(node.cluster)
        gw = net_manager.get_default_gateway(node.id)
        serialized_node['ks_meta'].update({'gw': gw})
        serialized_node['ks_meta'].update(
            {'admin_net': net_manager.get_admin_network_group(node.id).cidr}
        )

        serialized_node.update(cls.serialize_interfaces(node))

        return serialized_node
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a single node for provisioning.

        Builds the Cobbler power/PXE parameters plus the "ks_meta"
        section (disk partitioning via "ks_spaces", puppet and
        mcollective settings) consumed by the provisioning backend.
        """
        serialized_node = {
            "uid": node.uid,
            "power_address": node.ip,
            "name": objects.Node.get_slave_name(node),
            # right now it duplicates to avoid possible issues
            "slave_name": objects.Node.get_slave_name(node),
            "hostname": objects.Node.get_node_fqdn(node),
            "power_pass": cls.get_ssh_key_path(node),
            "profile": cluster_attrs["cobbler"]["profile"],
            "power_type": "ssh",
            "power_user": "******",
            "name_servers": '"%s"' % settings.DNS_SERVERS,
            "name_servers_search": '"%s"' % settings.DNS_SEARCH,
            "netboot_enabled": "1",
            # For provisioning phase
            "kernel_options": {
                "netcfg/choose_interface": objects.Node.get_admin_physical_iface(node).mac,
                "udevrules": cls.interfaces_mapping_for_udev(node),
            },
            "ks_meta": {
                "pm_data": {
                    # Partitioning scheme from the volumes extension.
                    "ks_spaces": node_extension_call("get_node_volumes", node),
                    "kernel_params": objects.Node.get_kernel_params(node),
                },
                "fuel_version": node.cluster.fuel_version,
                "cloud_init_templates": cls.serialize_cloud_init_templates(node.cluster.release),
                "puppet_auto_setup": 1,
                "puppet_master": settings.PUPPET_MASTER_HOST,
                "puppet_enable": 0,
                "mco_auto_setup": 1,
                "install_log_2_syslog": 1,
                "mco_pskey": settings.MCO_PSKEY,
                "mco_vhost": settings.MCO_VHOST,
                "mco_host": settings.MCO_HOST,
                "mco_user": settings.MCO_USER,
                "mco_password": settings.MCO_PASSWORD,
                "mco_connector": settings.MCO_CONNECTOR,
                "mco_enable": 1,
                "auth_key": '"%s"' % cluster_attrs.get("auth_key", ""),
                "authorized_keys": ['"%s"' % key for key in settings.AUTHORIZED_KEYS],
                "master_ip": settings.MASTER_IP,
                "timezone": settings.TIMEZONE,
            },
        }

        # Image-based provisioning additionally needs the image metadata.
        provision_data = cluster_attrs.get("provision")
        if provision_data:
            if provision_data["method"] == consts.PROVISION_METHODS.image:
                serialized_node["ks_meta"]["image_data"] = provision_data["image_data"]

        serialized_node["ks_meta"]["repo_setup"] = cluster_attrs["repo_setup"]

        # NOTE(review): 'kernel_lt' appears to request a long-term-support
        # kernel for the kernel_lt vswitch — confirm against the installer.
        vlan_splinters = cluster_attrs.get("vlan_splinters", {})
        if vlan_splinters.get("vswitch") == "kernel_lt":
            serialized_node["ks_meta"]["kernel_lt"] = 1

        mellanox_data = cluster_attrs.get("neutron_mellanox")
        if mellanox_data:
            serialized_node["ks_meta"].update(
                {
                    "mlnx_vf_num": mellanox_data["vf_num"],
                    "mlnx_plugin_mode": mellanox_data["plugin"],
                    "mlnx_iser_enabled": cluster_attrs["storage"]["iser"],
                }
            )
            # Add relevant kernel parameter when using Mellanox SR-IOV
            # and/or iSER (which works on top of a probed virtual function)
            # unless it was explicitly added by the user
            pm_data = serialized_node["ks_meta"]["pm_data"]
            if (
                mellanox_data["plugin"] == "ethernet" or cluster_attrs["storage"]["iser"] is True
            ) and "intel_iommu=" not in pm_data["kernel_params"]:
                pm_data["kernel_params"] += " intel_iommu=on"

        # Network settings: default gateway and the admin network CIDR.
        net_manager = objects.Cluster.get_network_manager(node.cluster)
        gw = net_manager.get_default_gateway(node.id)
        serialized_node["ks_meta"].update({"gw": gw})
        serialized_node["ks_meta"].update({"admin_net": net_manager.get_admin_network_group(node.id).cidr})

        serialized_node.update(cls.serialize_interfaces(node))

        return serialized_node