Code example #1
    def prepare_for_provisioning(cls, nodes):
        """Prepare environment for provisioning,
        update fqdns, assign admin IPs
        """
        cls.update_slave_nodes_fqdn(nodes)
        for node in nodes:
            NetworkManager.assign_admin_ips(node.id)
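For context, a minimal sketch of how this helper is typically driven (hypothetical call site; the cluster.py examples below inline the same sequence, and the helper class is assumed to be nailgun's TaskHelper):

    # Gather the nodes queued for provisioning, then let the helper
    # update their FQDNs and hand out admin-network IPs.
    nodes = TaskHelper.nodes_to_provision(cluster)
    TaskHelper.prepare_for_provisioning(nodes)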
Code example #2
File: helpers.py Project: stamak/fuel-web
    def prepare_for_provisioning(cls, nodes):
        """Prepare environment for provisioning,
        update fqdns, assign admin IPs
        """
        cls.update_slave_nodes_fqdn(nodes)
        for node in nodes:
            NetworkManager.assign_admin_ips(node.id)
Code example #3
File: helpers.py Project: e0ne/fuel-web
    def prepare_for_provisioning(cls, nodes):
        """Prepare environment for provisioning,
        update fqdns, assign admin ips
        """
        cls.update_slave_nodes_fqdn(nodes)
        for node in nodes:
            NetworkManager.assign_admin_ips(
                node.id, len(node.meta.get('interfaces', [])))
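Unlike examples #1 and #2, this fork also passes the interface count, letting NetworkManager hand out one admin IP per discovered NIC. A small sketch of that count, assuming node.meta carries the agent-reported hardware inventory:

    # Discovered NICs live under node.meta['interfaces']; default to an
    # empty list so nodes without inventory simply get no admin IPs here.
    iface_count = len(node.meta.get('interfaces', []))
    NetworkManager.assign_admin_ips(node.id, iface_count)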
Code example #4
File: models.py Project: mrasskazov/fuelweb
    def prepare_for_provisioning(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        netmanager = NetworkManager()
        for node in TaskHelper.nodes_to_provision(self):
            netmanager.assign_admin_ips(
                node.id, len(node.meta.get('interfaces', [])))
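This models.py variant imports NetworkManager and TaskHelper inside the method, a common way to break a circular import between the model layer and the network/task helpers. A generic illustration of the pattern (module and function names here are made up):

    # a.py -- importing b at module level would complete an a <-> b cycle,
    # so the import is deferred until the function actually runs.
    def use_b():
        from b import helper
        return helper()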
Code example #5
File: helpers.py Project: adanin/fuel-web
    def prepare_for_provisioning(cls, nodes):
        """Prepare environment for provisioning,
        update fqdns, assign admin ips
        """
        cls.update_slave_nodes_fqdn(nodes)
        for node in nodes:
            NetworkManager.assign_admin_ips(
                node.id, len(node.meta.get('interfaces', [])))
Code example #6
File: cluster.py Project: rsokolkov/fuel-web
    def prepare_for_provisioning(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        netmanager = NetworkManager()
        nodes = TaskHelper.nodes_to_provision(self)
        TaskHelper.update_slave_nodes_fqdn(nodes)
        for node in nodes:
            netmanager.assign_admin_ips(node.id, len(node.meta.get("interfaces", [])))
Code example #7
File: cluster.py Project: tleontovich/fuel-web
    def prepare_for_provisioning(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        netmanager = NetworkManager()
        nodes = TaskHelper.nodes_to_provision(self)
        TaskHelper.update_slave_nodes_fqdn(nodes)
        for node in nodes:
            netmanager.assign_admin_ips(node.id,
                                        len(node.meta.get('interfaces', [])))
Code example #8
File: helpers.py Project: pombredanne/fuel-web
    def prepare_for_deployment(cls, nodes):
        """Prepare environment for deployment,
        assign management, public, storage ips
        """
        cls.update_slave_nodes_fqdn(nodes)

        nodes_ids = [n.id for n in nodes]
        netmanager = NetworkManager()
        if nodes_ids:
            netmanager.assign_ips(nodes_ids, 'management')
            netmanager.assign_ips(nodes_ids, 'public')
            netmanager.assign_ips(nodes_ids, 'storage')

            for node in nodes:
                netmanager.assign_admin_ips(
                    node.id, len(node.meta.get('interfaces', [])))
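A hedged sketch of how the node list is usually assembled before this deployment helper runs; examples #9 and #10 below inline exactly this logic:

    # Nodes pending deployment plus nodes still being provisioned,
    # deduplicated and ordered by id so the IP assignment is deterministic.
    nodes = sorted(
        set(TaskHelper.nodes_to_deploy(cluster) +
            TaskHelper.nodes_in_provisioning(cluster)),
        key=lambda node: node.id)
    TaskHelper.prepare_for_deployment(nodes)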
Code example #9
File: cluster.py Project: rsokolkov/fuel-web
    def prepare_for_deployment(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        nodes = sorted(
            set(TaskHelper.nodes_to_deploy(self) + TaskHelper.nodes_in_provisioning(self)), key=lambda node: node.id
        )

        TaskHelper.update_slave_nodes_fqdn(nodes)

        nodes_ids = [n.id for n in nodes]
        netmanager = NetworkManager()
        if nodes_ids:
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

            for node in nodes:
                netmanager.assign_admin_ips(node.id, len(node.meta.get("interfaces", [])))
Code example #10
File: cluster.py Project: tleontovich/fuel-web
    def prepare_for_deployment(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        nodes = sorted(set(
            TaskHelper.nodes_to_deploy(self) +
            TaskHelper.nodes_in_provisioning(self)),
                       key=lambda node: node.id)

        TaskHelper.update_slave_nodes_fqdn(nodes)

        nodes_ids = [n.id for n in nodes]
        netmanager = NetworkManager()
        if nodes_ids:
            netmanager.assign_ips(nodes_ids, 'management')
            netmanager.assign_ips(nodes_ids, 'public')
            netmanager.assign_ips(nodes_ids, 'storage')

            for node in nodes:
                netmanager.assign_admin_ips(
                    node.id, len(node.meta.get('interfaces', [])))
Code example #11
File: task.py Project: damjanek/fuelweb
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        # this variable is used to set 'auth_key' in cobbler ks_meta
        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        netmanager = NetworkManager()

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
        # TODO: For now we send node data to the orchestrator,
        # which is cobbler-oriented. In the future we need to
        # use a more abstract data structure.
        nodes_data = []
        for node in nodes:
            if not node.online:
                if not USE_FAKE:
                    raise Exception(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
                else:
                    logger.warning(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )

            node_data = {
                'profile': settings.COBBLER_PROFILE,
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': node.ip,
                'name': TaskHelper.make_slave_name(node.id, node.role),
                'hostname': node.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'ks_spaces': "\"%s\"" % json.dumps(
                        node.attributes.volumes).replace("\"", "\\\"")
                }
            }

            if node.status == "discover":
                logger.info(
                    "Node %s seems booted with bootstrap image",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
            else:
                # If it's not in discover, we expect it to be booted
                #   into the target system.
                # TODO: Get rid of expectations!
                logger.info(
                    "Node %s seems booted with real system",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_SSH_KEY

            # FIXME: move this code (updating) into receiver.provision_resp
            if not USE_FAKE:
                node.status = "provisioning"
                orm().add(node)
                orm().commit()

            # here we assign admin network IPs for the node:
            # one IP for every node interface
            netmanager.assign_admin_ips(
                node.id,
                len(node.meta.get('interfaces', []))
            )
            admin_net_id = netmanager.get_admin_network_id()
            admin_ips = set([i.ip_addr for i in orm().query(IPAddr).
                            filter_by(node=node.id).
                            filter_by(network=admin_net_id)])
            for i in node.meta.get('interfaces', []):
                if 'interfaces' not in node_data:
                    node_data['interfaces'] = {}
                node_data['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                # The interfaces_extra field in cobbler ks_meta
                # carries extra data for network interface
                # configuration and is consumed by a cobbler snippet.
                # For example, the cobbler interface model has no
                # 'peerdns' field, but we need that field to be
                # configured, so we set such unsupported fields via
                # the interfaces_extra branch.
                if 'interfaces_extra' not in node_data:
                    node_data['interfaces_extra'] = {}
                node_data['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                # We want the node to be able to PXE boot via any of
                # its interfaces, so we add all discovered interfaces
                # into the cobbler system. But we want the assigned
                # fqdn to resolve to a single IP address, because we
                # don't completely support multi-interface
                # configuration yet.
                if i['mac'] == node.mac:
                    node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
                    node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'

            nodes_data.append(node_data)
            if not USE_FAKE:
                TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': nodes_data
            }
        }
        return message
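One subtle spot in the message above is ks_spaces: the volume layout is JSON-encoded and then quote-escaped so it survives being embedded in cobbler's ks_meta string. A standalone illustration of the same escaping (the volume data here is invented):

    import json

    volumes = [{'id': 'os', 'size': 10240}]  # invented volume layout
    ks_spaces = "\"%s\"" % json.dumps(volumes).replace("\"", "\\\"")
    # ks_spaces is now the JSON with every inner quote backslash-escaped,
    # wrapped in a pair of literal double quotes.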
Code example #12
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        task_uuid = task.uuid
        cluster_id = task.cluster.id
        cluster_attrs = task.cluster.attributes.merged_attrs_values()

        netmanager = NetworkManager()
        nodes = orm().query(Node).filter_by(
            cluster_id=task.cluster.id,
            pending_deletion=False).order_by(Node.id)

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        nodes_to_provision = []

        # FIXME: why can't we use the needs_reprovision and
        # pending_addition attributes of the node to construct a valid
        # list of nodes to provision, instead of this ugly loop?
        for node in nodes:
            if not node.online:
                if not USE_FAKE:
                    raise Exception(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
                else:
                    logger.warning(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
            if node.status in ('discover', 'provisioning') or \
                    (node.status == 'error' and
                     node.error_type == 'provision'):
                nodes_to_provision.append(node)

        # TODO: For now we send node data to the orchestrator,
        # which is cobbler-oriented. In the future we need to
        # use a more abstract data structure.
        nodes_data = []
        for node in nodes_to_provision:
            node_data = {
                'profile': settings.COBBLER_PROFILE,
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': node.ip,
                'name': TaskHelper.make_slave_name(node.id, node.role),
                'hostname': node.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'ks_spaces': "\"%s\"" % json.dumps(
                        node.attributes.volumes).replace("\"", "\\\"")
                }
            }

            if node.status == "discover":
                logger.info(
                    "Node %s seems booted with bootstrap image",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
            else:
                # If it's not in discover, we expect it to be booted
                #   into the target system.
                # TODO: Get rid of expectations!
                logger.info(
                    "Node %s seems booted with real system",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_SSH_KEY

            # FIXME: move this code (updating) into receiver.provision_resp
            if not USE_FAKE:
                node.status = "provisioning"
                orm().add(node)
                orm().commit()

            # here we assign admin network IPs for the node:
            # one IP for every node interface
            netmanager.assign_admin_ips(
                node.id,
                len(node.meta.get('interfaces', []))
            )
            admin_net_id = netmanager.get_admin_network_id()
            admin_ips = set([i.ip_addr for i in orm().query(IPAddr).
                            filter_by(node=node.id).
                            filter_by(network=admin_net_id)])
            for i in node.meta.get('interfaces', []):
                if 'interfaces' not in node_data:
                    node_data['interfaces'] = {}
                node_data['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                # The interfaces_extra field in cobbler ks_meta
                # carries extra data for network interface
                # configuration and is consumed by a cobbler snippet.
                # For example, the cobbler interface model has no
                # 'peerdns' field, but we need that field to be
                # configured, so we set such unsupported fields via
                # the interfaces_extra branch.
                if 'interfaces_extra' not in node_data:
                    node_data['interfaces_extra'] = {}
                node_data['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                # We want the node to be able to PXE boot via any of
                # its interfaces, so we add all discovered interfaces
                # into the cobbler system. But we want the assigned
                # fqdn to resolve to a single IP address, because we
                # don't completely support multi-interface
                # configuration yet.
                if i['mac'] == node.mac:
                    node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
                    node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'

            nodes_data.append(node_data)
            if not USE_FAKE:
                TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': nodes_data
            }
        }
        return message
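The selection loop above boils down to a single predicate; factored out it might look like this (the helper name is hypothetical):

    def needs_provisioning(node):
        # Nodes still in discovery or mid-provisioning, plus nodes that
        # errored during a previous provisioning attempt.
        return (node.status in ('discover', 'provisioning')
                or (node.status == 'error'
                    and node.error_type == 'provision'))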
Code example #13
    def test_nova_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(nodes_kwargs=[{
            'roles': ['controller'],
            'pending_addition': True
        }, {
            'roles': ['controller'],
            'pending_addition': True
        }, {
            'roles': ['controller', 'cinder'],
            'pending_addition': True
        }, {
            'roles': ['compute', 'cinder'],
            'pending_addition': True
        }, {
            'roles': ['compute'],
            'pending_addition': True
        }, {
            'roles': ['cinder'],
            'pending_addition': True
        }])

        cluster_db = self.env.clusters[0]

        common_attrs = {
            'deployment_mode': 'ha_compact',
            'management_vip': '192.168.0.1',
            'public_vip': '172.16.0.2',
            'fixed_network_range': '10.0.0.0/16',
            'management_network_range': '192.168.0.0/24',
            'floating_network_range': ['172.16.0.128-172.16.0.254'],
            'storage_network_range': '192.168.1.0/24',
            'mp': [{
                'weight': '1',
                'point': '1'
            }, {
                'weight': '2',
                'point': '2'
            }],
            'novanetwork_parameters': {
                'network_manager': 'FlatDHCPManager',
                'network_size': 65536
            },
            'dns_nameservers': ["8.8.4.4", "8.8.8.8"],
            'management_interface': 'eth0.101',
            'fixed_interface': 'eth0.103',
            'admin_interface': 'eth1',
            'storage_interface': 'eth0.102',
            'public_interface': 'eth0',
            'floating_interface': 'eth0',
            'master_ip': '127.0.0.1',
            'use_cinder': True,
            'deployment_id': cluster_db.id,
            'openstack_version_prev': None,
            'openstack_version': cluster_db.release.version,
            'fuel_version': cluster_db.fuel_version
        }
        common_attrs.update(
            objects.Release.get_orchestrator_data_dict(cluster_db.release))

        cluster_attrs = objects.Attributes.merged_attrs_values(
            cluster_db.attributes)
        common_attrs.update(cluster_attrs)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        admin_ips = [
            '10.20.0.134/24', '10.20.0.133/24', '10.20.0.132/24',
            '10.20.0.131/24', '10.20.0.130/24', '10.20.0.129/24'
        ]
        for node in nodes_db:
            node_id = node.id
            admin_ip = admin_ips.pop()
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]['internal'] = '192.168.0.%d' % (i + 2)
                assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
                assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 1)
                assigned_ips[node_id]['admin'] = admin_ip

                nodes_list.append({
                    'role': role,
                    'internal_address': assigned_ips[node_id]['internal'],
                    'public_address': assigned_ips[node_id]['public'],
                    'storage_address': assigned_ips[node_id]['storage'],
                    'internal_netmask': '255.255.255.0',
                    'public_netmask': '255.255.255.0',
                    'storage_netmask': '255.255.255.0',
                    'uid': str(node_id),
                    'swift_zone': str(node_id),
                    'name': 'node-%d' % node_id,
                    'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)
                })
            i += 1

        controller_nodes = filter(lambda node: node['role'] == 'controller',
                                  deepcopy(nodes_list))

        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

        common_attrs['last_controller'] = controller_nodes[-1]['name']
        common_attrs['storage']['pg_num'] = 128

        common_attrs['test_vm_image'] = {
            'container_format': 'bare',
            'public': 'true',
            'disk_format': 'qcow2',
            'img_name': 'TestVM',
            'img_path': '/opt/vm/cirros-x86_64-disk.img',
            'os_name': 'cirros',
            'min_ram': 64,
            'glance_properties': (
                """--property murano_image_info="""
                """'{"title": "Murano Demo", "type": "cirros.demo"}'"""),
        }

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 600, 500],
            'cinder': 700,
            'compute': 700
        }

        critical_mapping = {
            'primary-controller': True,
            'controller': False,
            'cinder': False,
            'compute': False
        }

        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                is_critical = critical_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),
                    'status': node.status,
                    'role': role,
                    'online': node.online,
                    'fail_if_error': is_critical,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,
                    'network_data': {
                        'eth0': {
                            'interface': 'eth0',
                            'ipaddr': ['%s/24' % ips['public']],
                            'gateway': '172.16.0.1'
                        },
                        'eth0.101': {
                            'interface': 'eth0.101',
                            'ipaddr': ['%s/24' % ips['internal']]
                        },
                        'eth0.102': {
                            'interface': 'eth0.102',
                            'ipaddr': ['%s/24' % ips['storage']]
                        },
                        'eth0.103': {
                            'interface': 'eth0.103',
                            'ipaddr': 'none'
                        },
                        'lo': {
                            'interface': 'lo',
                            'ipaddr': ['127.0.0.1/8']
                        },
                        'eth1': {
                            'interface': 'eth1',
                            'ipaddr': [ips['admin']]
                        }
                    }
                }

                individual_atts.update(common_attrs)
                individual_atts['glance']['image_cache_max_size'] = str(
                    manager.calc_glance_cache_size(node.attributes.volumes))
                deployment_info.append(deepcopy(individual_atts))

        controller_nodes = filter(lambda node: node['role'] == 'controller',
                                  deployment_info)
        controller_nodes[0]['role'] = 'primary-controller'
        controller_nodes[0]['fail_if_error'] = True

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [
            x.uuid for x in supertask.subtasks if x.name == 'deployment'
        ][0]

        deployment_msg = {
            'api_version': '1',
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

        deployment_msg['args']['task_uuid'] = deploy_task_uuid
        deployment_msg['args']['deployment_info'] = deployment_info

        provision_nodes = []
        admin_net = self.env.network_manager.get_admin_network_group()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            udev_interfaces_mapping = ','.join(
                ['{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
            eth1_mac = [i.mac for i in n.interfaces if i.name == 'eth1'][0]

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'kernel_options': {
                    'netcfg/choose_interface': eth1_mac,
                    'udevrules': udev_interfaces_mapping
                },
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': objects.Node.make_slave_name(n),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'fuel_version': cluster_db.fuel_version,
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'pm_data': {
                        'ks_spaces': n.attributes.volumes,
                        'kernel_params': objects.Node.get_kernel_params(n),
                    },
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'mlnx_vf_num': "16",
                    'mlnx_plugin_mode': "disabled",
                    'mlnx_iser_enabled': False,
                }
            }
            orchestrator_data = objects.Release.get_orchestrator_data_dict(
                cluster_db.release)
            if orchestrator_data:
                pnd['ks_meta']['repo_metadata'] = \
                    orchestrator_data['repo_metadata']

            vlan_splinters = cluster_attrs.get('vlan_splinters', None)
            if vlan_splinters == 'kernel_lt':
                pnd['ks_meta']['kernel_lt'] = 1

            NetworkManager.assign_admin_ips(n.id, 1)

            admin_ip = self.env.network_manager.get_admin_ip_for_node(n)

            for i in n.interfaces:
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i.name] = {
                    'mac_address': i.mac,
                    'static': '0',
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i.name] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i.mac == n.mac:
                    pnd['interfaces'][i.name]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i.name]['onboot'] = 'yes'
                    pnd['interfaces'][i.name]['ip_address'] = admin_ip
                    pnd['interfaces'][i.name]['netmask'] = str(
                        netaddr.IPNetwork(admin_net.cidr).netmask)

            provision_nodes.append(pnd)

        provision_task_uuid = filter(lambda t: t.name == 'provision',
                                     supertask.subtasks)[0].uuid

        provision_msg = {
            'api_version': '1',
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'provisioning_info': {
                    'engine': {
                        'url': settings.COBBLER_URL,
                        'username': settings.COBBLER_USER,
                        'password': settings.COBBLER_PASSWORD,
                        'master_ip': settings.MASTER_IP
                    },
                    'nodes': provision_nodes
                }
            }
        }

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
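The closing assertions lean on mock's call_args; here is the same pattern in isolation, with a plain MagicMock standing in for rpc.cast (the queue name and messages are stand-ins):

    from unittest import mock

    rpc_cast = mock.MagicMock()
    provision_msg = {'method': 'provision'}   # stand-in messages
    deployment_msg = {'method': 'deploy'}
    rpc_cast('naily', [provision_msg, deployment_msg])

    args, kwargs = rpc_cast.call_args
    assert len(args) == 2                # (queue name, list of messages)
    assert args[1][0] == provision_msg   # provision is cast first
    assert args[1][1] == deployment_msg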
Code example #14
    def test_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(
            cluster_kwargs={
                'mode': 'ha_compact'
            },
            nodes_kwargs=[
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller', 'cinder'], 'pending_addition': True},
                {'roles': ['compute', 'cinder'], 'pending_addition': True},
                {'roles': ['compute'], 'pending_addition': True},
                {'roles': ['cinder'], 'pending_addition': True}])

        cluster_db = self.env.clusters[0]

        common_attrs = {
            'deployment_mode': 'ha_compact',
            'mountpoints': '1 1\\n2 2\\n',

            'management_vip': '192.168.0.2',
            'public_vip': '172.16.1.2',

            'fixed_network_range': '10.0.0.0/24',
            'management_network_range': '192.168.0.0/24',
            'floating_network_range': ['172.16.0.2-172.16.0.254'],
            'storage_network_range': '192.168.1.0/24',

            'mp': [{'weight': '1', 'point': '1'},
                   {'weight': '2', 'point': '2'}],
            'novanetwork_parameters': {
                'network_manager': 'FlatDHCPManager',
                'network_size': 256
            },

            'management_interface': 'eth0.101',
            'fixed_interface': 'eth0.103',
            'admin_interface': 'eth0',
            'storage_interface': 'eth0.102',
            'public_interface': 'eth0.100',
            'floating_interface': 'eth0.100',

            'master_ip': '127.0.0.1',
            'use_cinder': True,
            'deployment_id': cluster_db.id
        }

        cluster_attrs = cluster_db.attributes.merged_attrs_values()
        common_attrs.update(cluster_attrs)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        for node in nodes_db:
            node_id = node.id
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]['internal'] = '192.168.0.%d' % (i + 3)
                assigned_ips[node_id]['public'] = '172.16.1.%d' % (i + 3)
                assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 2)

                nodes_list.append({
                    'role': role,

                    'internal_address': assigned_ips[node_id]['internal'],
                    'public_address': assigned_ips[node_id]['public'],
                    'storage_address': assigned_ips[node_id]['storage'],

                    'internal_netmask': '255.255.255.0',
                    'public_netmask': '255.255.255.0',
                    'storage_netmask': '255.255.255.0',

                    'uid': str(node_id),
                    'swift_zone': str(node_id),

                    'name': 'node-%d' % node_id,
                    'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)})
            i += 1

        common_attrs['controller_nodes'] = filter(
            lambda node: node['role'] == 'controller',
            deepcopy(nodes_list))

        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

        common_attrs['last_controller'] = common_attrs[
            'controller_nodes'][-1]['name']

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 500, 400],
            'cinder': 700,
            'compute': 700
        }

        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),
                    'status': node.status,
                    'role': role,
                    'online': node.online,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,

                    'network_data': {
                        'eth0.100': {
                            'interface': 'eth0.100',
                            'ipaddr': ['%s/24' % ips['public']],
                            'gateway': '172.16.1.1',
                            '_name': 'public'},
                        'eth0.101': {
                            'interface': 'eth0.101',
                            'ipaddr': ['%s/24' % ips['internal']],
                            '_name': 'management'},
                        'eth0.102': {
                            'interface': 'eth0.102',
                            'ipaddr': ['%s/24' % ips['storage']],
                            '_name': 'storage'},
                        'eth0.103': {
                            'interface': 'eth0.103',
                            'ipaddr': 'none',
                            '_name': 'fixed'},
                        'lo': {
                            'interface': 'lo',
                            'ipaddr': ['127.0.0.1/8']},
                        'eth1': {
                            'interface': 'eth1',
                            'ipaddr': 'none'},
                        'eth0': {
                            'interface': 'eth0',
                            'ipaddr': 'dhcp',
                            '_name': 'admin'}}}

                individual_atts.update(common_attrs)
                deployment_info.append(individual_atts)

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [x.uuid for x in supertask.subtasks
                            if x.name == 'deployment'][0]

        deployment_msg = {'method': 'deploy',
                          'respond_to': 'deploy_resp',
                          'args': {}}

        deployment_msg['args']['task_uuid'] = deploy_task_uuid
        deployment_msg['args']['deployment_info'] = deployment_info

        provision_nodes = []
        admin_net_id = self.env.network_manager.get_admin_network_id()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': TaskHelper.make_slave_name(n.id),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'ks_spaces': "\"%s\"" % json.dumps(
                        n.attributes.volumes).replace("\"", "\\\""),
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                }
            }

            netmanager = NetworkManager()
            netmanager.assign_admin_ips(
                n.id,
                len(n.meta.get('interfaces', []))
            )

            admin_ips = set([i.ip_addr for i in self.db.query(IPAddr).
                            filter_by(node=n.id).
                            filter_by(network=admin_net_id)])

            for i in n.meta.get('interfaces', []):
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i['mac'] == n.mac:
                    pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'

            provision_nodes.append(pnd)

        provision_task_uuid = filter(
            lambda t: t.name == 'provision',
            supertask.subtasks)[0].uuid

        provision_msg = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'provisioning_info': {
                    'engine': {
                        'url': settings.COBBLER_URL,
                        'username': settings.COBBLER_USER,
                        'password': settings.COBBLER_PASSWORD},
                    'nodes': provision_nodes}}}

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(args[1]), 2)
        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
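Note that admin_ips above is a set, so pop() returns elements in no guaranteed order; the test only requires that each interface receives some admin IP from the pool. A minimal illustration:

    admin_ips = {'10.20.0.3', '10.20.0.4'}   # invented pool
    first = admin_ips.pop()    # arbitrary element; sets are unordered
    second = admin_ips.pop()
    assert {first, second} == {'10.20.0.3', '10.20.0.4'}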
Code example #15
    def test_nova_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(
            nodes_kwargs=[
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller', 'cinder'], 'pending_addition': True},
                {'roles': ['compute', 'cinder'], 'pending_addition': True},
                {'roles': ['compute'], 'pending_addition': True},
                {'roles': ['cinder'], 'pending_addition': True}
            ]
        )

        cluster_db = self.env.clusters[0]

        common_attrs = {
            'deployment_mode': 'ha_compact',

            'management_vip': '192.168.0.1',
            'public_vip': '172.16.0.2',

            'fixed_network_range': '10.0.0.0/16',
            'management_network_range': '192.168.0.0/24',
            'floating_network_range': ['172.16.0.128-172.16.0.254'],
            'storage_network_range': '192.168.1.0/24',

            'mp': [{'weight': '1', 'point': '1'},
                   {'weight': '2', 'point': '2'}],
            'novanetwork_parameters': {
                'network_manager': 'FlatDHCPManager',
                'network_size': 256
            },
            'dns_nameservers': [
                "8.8.4.4",
                "8.8.8.8"
            ],

            'management_interface': 'eth0.101',
            'fixed_interface': 'eth0.103',
            'admin_interface': 'eth1',
            'storage_interface': 'eth0.102',
            'public_interface': 'eth0',
            'floating_interface': 'eth0',

            'master_ip': '127.0.0.1',
            'use_cinder': True,
            'deployment_id': cluster_db.id,
            'openstack_version': cluster_db.release.version,
            'fuel_version': cluster_db.fuel_version
        }
        common_attrs.update(
            objects.Release.get_orchestrator_data_dict(cluster_db.release)
        )

        cluster_attrs = objects.Attributes.merged_attrs_values(
            cluster_db.attributes
        )
        common_attrs.update(cluster_attrs)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        admin_ips = [
            '10.20.0.134/24',
            '10.20.0.133/24',
            '10.20.0.132/24',
            '10.20.0.131/24',
            '10.20.0.130/24',
            '10.20.0.129/24']
        for node in nodes_db:
            node_id = node.id
            admin_ip = admin_ips.pop()
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]['internal'] = '192.168.0.%d' % (i + 2)
                assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
                assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 1)
                assigned_ips[node_id]['admin'] = admin_ip

                nodes_list.append({
                    'role': role,

                    'internal_address': assigned_ips[node_id]['internal'],
                    'public_address': assigned_ips[node_id]['public'],
                    'storage_address': assigned_ips[node_id]['storage'],

                    'internal_netmask': '255.255.255.0',
                    'public_netmask': '255.255.255.0',
                    'storage_netmask': '255.255.255.0',

                    'uid': str(node_id),
                    'swift_zone': str(node_id),

                    'name': 'node-%d' % node_id,
                    'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)})
            i += 1

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            deepcopy(nodes_list))

        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

        common_attrs['last_controller'] = controller_nodes[-1]['name']
        common_attrs['storage']['pg_num'] = 128

        common_attrs['test_vm_image'] = {
            'container_format': 'bare',
            'public': 'true',
            'disk_format': 'qcow2',
            'img_name': 'TestVM',
            'img_path': '/opt/vm/cirros-x86_64-disk.img',
            'os_name': 'cirros',
            'min_ram': 64,
            'glance_properties': (
                """--property murano_image_info="""
                """'{"title": "Murano Demo", "type": "cirros.demo"}'"""
            ),
        }

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [700, 600, 500],
            'cinder': 800,
            'compute': 800
        }

        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),
                    'status': node.status,
                    'role': role,
                    'online': node.online,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,

                    'network_data': {
                        'eth0': {
                            'interface': 'eth0',
                            'ipaddr': ['%s/24' % ips['public']],
                            'gateway': '172.16.0.1'},
                        'eth0.101': {
                            'interface': 'eth0.101',
                            'ipaddr': ['%s/24' % ips['internal']]},
                        'eth0.102': {
                            'interface': 'eth0.102',
                            'ipaddr': ['%s/24' % ips['storage']]},
                        'eth0.103': {
                            'interface': 'eth0.103',
                            'ipaddr': 'none'},
                        'lo': {
                            'interface': 'lo',
                            'ipaddr': ['127.0.0.1/8']},
                        'eth1': {
                            'interface': 'eth1',
                            'ipaddr': [ips['admin']]}
                    }}

                individual_atts.update(common_attrs)
                individual_atts['glance']['image_cache_max_size'] = str(
                    manager.calc_glance_cache_size(node.attributes.volumes)
                )
                deployment_info.append(deepcopy(individual_atts))

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            deployment_info)
        controller_nodes[0]['role'] = 'primary-controller'

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [x.uuid for x in supertask.subtasks
                            if x.name == 'deployment'][0]

        deployment_msg = {
            'api_version': '1',
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

        deployment_msg['args']['task_uuid'] = deploy_task_uuid
        deployment_msg['args']['deployment_info'] = deployment_info

        provision_nodes = []
        admin_net = self.env.network_manager.get_admin_network_group()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            udev_interfaces_mapping = ','.join([
                '{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
            eth1_mac = [i.mac for i in n.interfaces if i.name == 'eth1'][0]

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'kernel_options': {
                    'netcfg/choose_interface': eth1_mac,
                    'udevrules': udev_interfaces_mapping},
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': objects.Node.make_slave_name(n),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'fuel_version': cluster_db.fuel_version,
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'pm_data': {
                        'ks_spaces': n.attributes.volumes,
                        'kernel_params': n.kernel_params,
                    },
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                }
            }
            orchestrator_data = objects.Release.get_orchestrator_data_dict(
                cluster_db.release)
            if orchestrator_data:
                pnd['ks_meta']['repo_metadata'] = \
                    orchestrator_data['repo_metadata']

            vlan_splinters = cluster_attrs.get('vlan_splinters', None)
            if vlan_splinters == 'kernel_lt':
                pnd['ks_meta']['kernel_lt'] = 1

            NetworkManager.assign_admin_ips(n.id, 1)

            admin_ip = self.env.network_manager.get_admin_ip_for_node(n)

            for i in n.interfaces:
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i.name] = {
                    'mac_address': i.mac,
                    'static': '0',
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i.name] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i.mac == n.mac:
                    pnd['interfaces'][i.name]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i.name]['onboot'] = 'yes'
                    pnd['interfaces'][i.name]['ip_address'] = admin_ip
                    pnd['interfaces'][i.name]['netmask'] = str(
                        netaddr.IPNetwork(admin_net.cidr).netmask)

            provision_nodes.append(pnd)

        provision_task_uuid = filter(
            lambda t: t.name == 'provision',
            supertask.subtasks)[0].uuid

        provision_msg = {
            'api_version': '1',
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'provisioning_info': {
                    'engine': {
                        'url': settings.COBBLER_URL,
                        'username': settings.COBBLER_USER,
                        'password': settings.COBBLER_PASSWORD,
                        'master_ip': settings.MASTER_IP},
                    'nodes': provision_nodes}}}

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
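The netmask lines above derive the dotted-quad mask from the admin network CIDR via netaddr; the same call checked in isolation (the CIDR value is illustrative):

    import netaddr

    netmask = str(netaddr.IPNetwork('10.20.0.0/24').netmask)
    assert netmask == '255.255.255.0'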
Code example #16
    def test_nova_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(cluster_kwargs={'mode': 'ha_compact'},
                        nodes_kwargs=[{
                            'roles': ['controller'],
                            'pending_addition': True
                        }, {
                            'roles': ['controller'],
                            'pending_addition': True
                        }, {
                            'roles': ['controller', 'cinder'],
                            'pending_addition': True
                        }, {
                            'roles': ['compute', 'cinder'],
                            'pending_addition': True
                        }, {
                            'roles': ['compute'],
                            'pending_addition': True
                        }, {
                            'roles': ['cinder'],
                            'pending_addition': True
                        }])

        cluster_db = self.env.clusters[0]

        common_attrs = {
            'deployment_mode': 'ha_compact',
            'management_vip': '192.168.0.2',
            'public_vip': '172.16.0.2',
            'fixed_network_range': '10.0.0.0/16',
            'management_network_range': '192.168.0.0/24',
            'floating_network_range': ['172.16.0.128-172.16.0.254'],
            'storage_network_range': '192.168.1.0/24',
            'mp': [{
                'weight': '1',
                'point': '1'
            }, {
                'weight': '2',
                'point': '2'
            }],
            'novanetwork_parameters': {
                'network_manager': 'FlatDHCPManager',
                'network_size': 256
            },
            'dns_nameservers': ["8.8.8.8", "8.8.4.4"],
            'management_interface': 'eth0.101',
            'fixed_interface': 'eth0.103',
            'admin_interface': 'eth1',
            'storage_interface': 'eth0.102',
            'public_interface': 'eth0',
            'floating_interface': 'eth0',
            'master_ip': '127.0.0.1',
            'use_cinder': True,
            'deployment_id': cluster_db.id
        }

        cluster_attrs = cluster_db.attributes.merged_attrs_values()
        common_attrs.update(cluster_attrs)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        admin_ips = [
            '10.20.0.139/24', '10.20.0.138/24', '10.20.0.135/24',
            '10.20.0.133/24', '10.20.0.131/24', '10.20.0.130/24'
        ]
        for node in nodes_db:
            node_id = node.id
            admin_ip = admin_ips.pop()
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]['internal'] = '192.168.0.%d' % (i + 3)
                assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
                assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 2)
                assigned_ips[node_id]['admin'] = admin_ip

                nodes_list.append({
                    'role': role,
                    'internal_address': assigned_ips[node_id]['internal'],
                    'public_address': assigned_ips[node_id]['public'],
                    'storage_address': assigned_ips[node_id]['storage'],
                    'internal_netmask': '255.255.255.0',
                    'public_netmask': '255.255.255.0',
                    'storage_netmask': '255.255.255.0',
                    'uid': str(node_id),
                    'swift_zone': str(node_id),
                    'name': 'node-%d' % node_id,
                    'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)
                })
            i += 1

        controller_nodes = filter(lambda node: node['role'] == 'controller',
                                  deepcopy(nodes_list))

        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

        common_attrs['last_controller'] = controller_nodes[-1]['name']

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 500, 400],
            'cinder': 700,
            'compute': 700
        }

        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),
                    'status': node.status,
                    'role': role,
                    'online': node.online,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,
                    'network_data': {
                        'eth0': {
                            'interface': 'eth0',
                            'ipaddr': ['%s/24' % ips['public']],
                            'gateway': '172.16.0.1'
                        },
                        'eth0.101': {
                            'interface': 'eth0.101',
                            'ipaddr': ['%s/24' % ips['internal']]
                        },
                        'eth0.102': {
                            'interface': 'eth0.102',
                            'ipaddr': ['%s/24' % ips['storage']]
                        },
                        'eth0.103': {
                            'interface': 'eth0.103',
                            'ipaddr': 'none'
                        },
                        'lo': {
                            'interface': 'lo',
                            'ipaddr': ['127.0.0.1/8']
                        },
                        'eth1': {
                            'interface': 'eth1',
                            'ipaddr': [ips['admin']]
                        }
                    }
                }

                individual_atts.update(common_attrs)
                individual_atts['glance']['image_cache_max_size'] = str(
                    manager.calc_glance_cache_size(node.attributes.volumes))
                deployment_info.append(deepcopy(individual_atts))

        controller_nodes = filter(lambda node: node['role'] == 'controller',
                                  deployment_info)
        controller_nodes[0]['role'] = 'primary-controller'

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [
            x.uuid for x in supertask.subtasks if x.name == 'deployment'
        ][0]
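        # launch_deployment() returns a supertask whose subtasks include a
        # 'provision' and a 'deployment' task; their uuids are echoed in the
        # expected messages below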

        deployment_msg = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

        deployment_msg['args']['task_uuid'] = deploy_task_uuid
        deployment_msg['args']['deployment_info'] = deployment_info

        provision_nodes = []
        admin_net = self.env.network_manager.get_admin_network_group()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            udev_interfaces_mapping = ','.join(
                ['{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'kernel_options': {
                    'netcfg/choose_interface': 'eth1',
                    'udevrules': udev_interfaces_mapping
                },
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': TaskHelper.make_slave_name(n.id),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'ks_spaces': n.attributes.volumes,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                }
            }

            NetworkManager.assign_admin_ips(n.id,
                                            len(n.meta.get('interfaces', [])))

            admin_ips = set([
                i.ip_addr for i in self.db.query(IPAddr).filter_by(
                    node=n.id).filter_by(network=admin_net.id)
            ])
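            # set.pop() returns an arbitrary member, so the interfaces receive
            # the node's admin IPs in no guaranteed order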

            for i in n.interfaces:
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i.name] = {
                    'mac_address': i.mac,
                    'static': '0',
                    'netmask': admin_net.netmask,
                    'ip_address': admin_ips.pop(),
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i.name] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i.mac == n.mac:
                    pnd['interfaces'][i.name]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i.name]['onboot'] = 'yes'

            provision_nodes.append(pnd)

        provision_task_uuid = filter(lambda t: t.name == 'provision',
                                     supertask.subtasks)[0].uuid

        provision_msg = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'provisioning_info': {
                    'engine': {
                        'url': settings.COBBLER_URL,
                        'username': settings.COBBLER_USER,
                        'password': settings.COBBLER_PASSWORD
                    },
                    'nodes': provision_nodes
                }
            }
        }

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEquals(len(args), 2)
        self.assertEquals(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
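Every example in this listing verifies the same contract against a mocked RPC
layer; a minimal sketch of that pattern, assuming the standard mock library is
used to patch rpc.cast (the patch placement and test name are illustrative,
not taken from the project):

    from mock import patch

    @patch('nailgun.task.manager.rpc.cast')
    def test_cast_shape(self, mocked_rpc):
        # Launching a deployment casts one (routing_key, messages) pair,
        # provision message first, deploy message second.
        self.env.launch_deployment()
        args, kwargs = mocked_rpc.call_args
        self.assertEquals(len(args), 2)      # (routing key, message list)
        self.assertEquals(len(args[1]), 2)   # [provision_msg, deployment_msg]
        self.assertEquals(args[1][0]['method'], 'provision')
        self.assertEquals(args[1][1]['method'], 'deploy')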
コード例 #17
    def test_neutron_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(
            cluster_kwargs={
                'mode': 'ha_compact',
                'net_provider': 'neutron',
                'net_segment_type': 'gre'
            },
            nodes_kwargs=[
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller', 'cinder'], 'pending_addition': True},
                {'roles': ['compute', 'cinder'], 'pending_addition': True},
                {'roles': ['compute'], 'pending_addition': True},
                {'roles': ['cinder'], 'pending_addition': True}
            ]
        )

        cluster_db = self.env.clusters[0]

        common_attrs = {
            'deployment_mode': 'ha_compact',
            'management_vip': '192.168.0.2',
            'public_vip': '172.16.0.2',
            'management_network_range': '192.168.0.0/24',
            'storage_network_range': '192.168.1.0/24',
            'mp': [{
                'weight': '1',
                'point': '1'
            }, {
                'weight': '2',
                'point': '2'
            }],
            'quantum': True,
            'quantum_settings': {},
            'master_ip': '127.0.0.1',
            'use_cinder': True,
            'deployment_id': cluster_db.id
        }

        cluster_attrs = cluster_db.attributes.merged_attrs_values()
        common_attrs.update(cluster_attrs)

        L2 = {
            "base_mac": "fa:16:3e:00:00:00",
            "segmentation_type": "gre",
            "phys_nets": {
                "physnet1": {
                    "bridge": "br-ex",
                    "vlan_range": None
                },
                "physnet2": {
                    "bridge": "br-prv",
                    "vlan_range": None
                }
            },
            "tunnel_id_ranges": "2:65535"
        }
        L3 = {"use_namespaces": True}
        predefined_networks = {
            "net04_ext": {
                'shared': False,
                'L2': {
                    'router_ext': True,
                    'network_type': 'flat',
                    'physnet': 'physnet1',
                    'segment_id': None
                },
                'L3': {
                    'subnet': u'172.16.0.0/24',
                    'enable_dhcp': False,
                    'nameservers': [],
                    'floating': '172.16.0.130:172.16.0.254',
                    'gateway': '172.16.0.1'
                },
                'tenant': 'admin'
            },
            "net04": {
                'shared': False,
                'L2': {
                    'router_ext': False,
                    'network_type': 'gre',
                    'physnet': 'physnet2',
                    'segment_id': None
                },
                'L3': {
                    'subnet': u'192.168.111.0/24',
                    'enable_dhcp': True,
                    'nameservers': ['8.8.4.4', '8.8.8.8'],
                    'floating': None,
                    'gateway': '192.168.111.1'
                },
                'tenant': 'admin'
            }
        }
        common_attrs['quantum_settings'].update(
            L2=L2, L3=L3, predefined_networks=predefined_networks)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        admin_ips = [
            '10.20.0.139/24', '10.20.0.138/24', '10.20.0.135/24',
            '10.20.0.133/24', '10.20.0.131/24', '10.20.0.130/24'
        ]
        for node in nodes_db:
            node_id = node.id
            admin_ip = admin_ips.pop()
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]['management'] = '192.168.0.%d' % (i + 3)
                assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
                assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 2)
                assigned_ips[node_id]['admin'] = admin_ip

                nodes_list.append({
                    'role': role,
                    'internal_address': assigned_ips[node_id]['management'],
                    'public_address': assigned_ips[node_id]['public'],
                    'storage_address': assigned_ips[node_id]['storage'],
                    'internal_netmask': '255.255.255.0',
                    'public_netmask': '255.255.255.0',
                    'storage_netmask': '255.255.255.0',
                    'uid': str(node_id),
                    'swift_zone': str(node_id),
                    'name': 'node-%d' % node_id,
                    'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)
                })
            i += 1

        controller_nodes = filter(lambda node: node['role'] == 'controller',
                                  deepcopy(nodes_list))

        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

        common_attrs['last_controller'] = controller_nodes[-1]['name']

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 500, 400],
            'cinder': 700,
            'compute': 700
        }
        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),
                    'status': node.status,
                    'role': role,
                    'online': node.online,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,
                    'network_scheme': {
                        "version":
                        "1.0",
                        "provider":
                        "ovs",
                        "interfaces": {
                            "eth0": {
                                "L2": {
                                    "vlan_splinters": "off"
                                },
                                "mtu": 1500
                            },
                            "eth1": {
                                "L2": {
                                    "vlan_splinters": "off"
                                },
                                "mtu": 1500
                            },
                            "eth2": {
                                "L2": {
                                    "vlan_splinters": "off"
                                },
                                "mtu": 1500
                            },
                        },
                        "endpoints": {
                            "br-mgmt": {
                                "IP": [ips['management'] + "/24"]
                            },
                            "br-ex": {
                                "IP": [ips['public'] + "/24"],
                                "gateway": "172.16.0.1"
                            },
                            "br-storage": {
                                "IP": [ips['storage'] + "/24"]
                            },
                            "eth1": {
                                "IP": [ips['admin']]
                            }
                        },
                        "roles": {
                            "management": "br-mgmt",
                            "mesh": "br-mgmt",
                            "ex": "br-ex",
                            "storage": "br-storage",
                            "fw-admin": "eth1"
                        },
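                        # The transformations below describe the expected OVS
                        # layout: one bridge per logical network plus br-eth0
                        # for the physical NIC, patched together; the tag
                        # pairs appear to be the 802.1q tags on each end of a
                        # patch (0 = untagged)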
                        "transformations": [{
                            "action": "add-br",
                            "name": "br-ex"
                        }, {
                            "action": "add-br",
                            "name": "br-mgmt"
                        }, {
                            "action": "add-br",
                            "name": "br-storage"
                        }, {
                            "action": "add-br",
                            "name": "br-prv"
                        }, {
                            "action": "add-br",
                            "name": u"br-eth0"
                        }, {
                            "action": "add-port",
                            "bridge": u"br-eth0",
                            "name": u"eth0"
                        }, {
                            "action":
                            "add-patch",
                            "bridges": [u"br-eth0", "br-storage"],
                            "tags": [101, 0]
                        }, {
                            "action": "add-patch",
                            "bridges": [u"br-eth0", "br-ex"],
                            "trunks": [0]
                        }, {
                            "action": "add-patch",
                            "bridges": [u"br-eth0", "br-mgmt"],
                            "tags": [100, 0]
                        }]
                    }
                }

                individual_atts.update(common_attrs)
                individual_atts['glance']['image_cache_max_size'] = str(
                    manager.calc_glance_cache_size(node.attributes.volumes))
                deployment_info.append(deepcopy(individual_atts))

        controller_nodes = filter(lambda node: node['role'] == 'controller',
                                  deployment_info)
        controller_nodes[0]['role'] = 'primary-controller'

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [
            x.uuid for x in supertask.subtasks if x.name == 'deployment'
        ][0]

        deployment_msg = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

        deployment_msg['args']['task_uuid'] = deploy_task_uuid
        deployment_msg['args']['deployment_info'] = deployment_info

        provision_nodes = []
        admin_net = self.env.network_manager.get_admin_network_group()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            udev_interfaces_mapping = ','.join(
                ['{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'kernel_options': {
                    'netcfg/choose_interface': 'eth1',
                    'udevrules': udev_interfaces_mapping
                },
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': TaskHelper.make_slave_name(n.id),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'ks_spaces': n.attributes.volumes,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                }
            }

            NetworkManager.assign_admin_ips(n.id,
                                            len(n.meta.get('interfaces', [])))

            admin_ips = set([
                i.ip_addr for i in self.db.query(IPAddr).filter_by(
                    node=n.id).filter_by(network=admin_net.id)
            ])

            for i in n.meta.get('interfaces', []):
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': admin_net.netmask,
                    'ip_address': admin_ips.pop(),
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i['mac'] == n.mac:
                    pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'

            provision_nodes.append(pnd)

        provision_task_uuid = filter(lambda t: t.name == 'provision',
                                     supertask.subtasks)[0].uuid

        provision_msg = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'provisioning_info': {
                    'engine': {
                        'url': settings.COBBLER_URL,
                        'username': settings.COBBLER_USER,
                        'password': settings.COBBLER_PASSWORD
                    },
                    'nodes': provision_nodes
                }
            }
        }

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEquals(len(args), 2)
        self.assertEquals(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
コード例 #18
    def test_nova_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(
            cluster_kwargs={"mode": "ha_compact"},
            nodes_kwargs=[
                {"roles": ["controller"], "pending_addition": True},
                {"roles": ["controller"], "pending_addition": True},
                {"roles": ["controller", "cinder"], "pending_addition": True},
                {"roles": ["compute", "cinder"], "pending_addition": True},
                {"roles": ["compute"], "pending_addition": True},
                {"roles": ["cinder"], "pending_addition": True},
            ],
        )

        cluster_db = self.env.clusters[0]

        common_attrs = {
            "deployment_mode": "ha_compact",
            "management_vip": "192.168.0.2",
            "public_vip": "172.16.0.2",
            "fixed_network_range": "10.0.0.0/16",
            "management_network_range": "192.168.0.0/24",
            "floating_network_range": ["172.16.0.128-172.16.0.254"],
            "storage_network_range": "192.168.1.0/24",
            "mp": [{"weight": "1", "point": "1"}, {"weight": "2", "point": "2"}],
            "novanetwork_parameters": {"network_manager": "FlatDHCPManager", "network_size": 256},
            "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
            "management_interface": "eth0.101",
            "fixed_interface": "eth0.103",
            "admin_interface": "eth1",
            "storage_interface": "eth0.102",
            "public_interface": "eth0",
            "floating_interface": "eth0",
            "master_ip": "127.0.0.1",
            "use_cinder": True,
            "deployment_id": cluster_db.id,
        }

        cluster_attrs = cluster_db.attributes.merged_attrs_values()
        common_attrs.update(cluster_attrs)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        admin_ips = [
            "10.20.0.139/24",
            "10.20.0.138/24",
            "10.20.0.135/24",
            "10.20.0.133/24",
            "10.20.0.131/24",
            "10.20.0.130/24",
        ]
        for node in nodes_db:
            node_id = node.id
            admin_ip = admin_ips.pop()
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]["internal"] = "192.168.0.%d" % (i + 3)
                assigned_ips[node_id]["public"] = "172.16.0.%d" % (i + 3)
                assigned_ips[node_id]["storage"] = "192.168.1.%d" % (i + 2)
                assigned_ips[node_id]["admin"] = admin_ip

                nodes_list.append(
                    {
                        "role": role,
                        "internal_address": assigned_ips[node_id]["internal"],
                        "public_address": assigned_ips[node_id]["public"],
                        "storage_address": assigned_ips[node_id]["storage"],
                        "internal_netmask": "255.255.255.0",
                        "public_netmask": "255.255.255.0",
                        "storage_netmask": "255.255.255.0",
                        "uid": str(node_id),
                        "swift_zone": str(node_id),
                        "name": "node-%d" % node_id,
                        "fqdn": "node-%d.%s" % (node_id, settings.DNS_DOMAIN),
                    }
                )
            i += 1

        controller_nodes = filter(lambda node: node["role"] == "controller", deepcopy(nodes_list))

        common_attrs["nodes"] = nodes_list
        common_attrs["nodes"][0]["role"] = "primary-controller"

        common_attrs["last_controller"] = controller_nodes[-1]["name"]

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {"controller": [600, 500, 400], "cinder": 700, "compute": 700}

        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    "uid": str(node.id),
                    "status": node.status,
                    "role": role,
                    "online": node.online,
                    "fqdn": "node-%d.%s" % (node.id, settings.DNS_DOMAIN),
                    "priority": priority,
                    "network_data": {
                        "eth0": {"interface": "eth0", "ipaddr": ["%s/24" % ips["public"]], "gateway": "172.16.0.1"},
                        "eth0.101": {"interface": "eth0.101", "ipaddr": ["%s/24" % ips["internal"]]},
                        "eth0.102": {"interface": "eth0.102", "ipaddr": ["%s/24" % ips["storage"]]},
                        "eth0.103": {"interface": "eth0.103", "ipaddr": "none"},
                        "lo": {"interface": "lo", "ipaddr": ["127.0.0.1/8"]},
                        "eth1": {"interface": "eth1", "ipaddr": [ips["admin"]]},
                    },
                }

                individual_atts.update(common_attrs)
                individual_atts["glance"]["image_cache_max_size"] = str(
                    manager.calc_glance_cache_size(node.attributes.volumes)
                )
                deployment_info.append(deepcopy(individual_atts))

        controller_nodes = filter(lambda node: node["role"] == "controller", deployment_info)
        controller_nodes[0]["role"] = "primary-controller"

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [x.uuid for x in supertask.subtasks if x.name == "deployment"][0]

        deployment_msg = {"method": "deploy", "respond_to": "deploy_resp", "args": {}}

        deployment_msg["args"]["task_uuid"] = deploy_task_uuid
        deployment_msg["args"]["deployment_info"] = deployment_info

        provision_nodes = []
        admin_net = self.env.network_manager.get_admin_network()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            udev_interfaces_mapping = ",".join(["{0}_{1}".format(i.mac, i.name) for i in n.interfaces])

            pnd = {
                "profile": cluster_attrs["cobbler"]["profile"],
                "power_type": "ssh",
                "power_user": "******",
                "kernel_options": {"netcfg/choose_interface": "eth1", "udevrules": udev_interfaces_mapping},
                "power_address": n.ip,
                "power_pass": settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                "name": TaskHelper.make_slave_name(n.id),
                "hostname": n.fqdn,
                "name_servers": '"%s"' % settings.DNS_SERVERS,
                "name_servers_search": '"%s"' % settings.DNS_SEARCH,
                "netboot_enabled": "1",
                "ks_meta": {
                    "puppet_auto_setup": 1,
                    "puppet_master": settings.PUPPET_MASTER_HOST,
                    "puppet_version": settings.PUPPET_VERSION,
                    "puppet_enable": 0,
                    "mco_auto_setup": 1,
                    "install_log_2_syslog": 1,
                    "mco_pskey": settings.MCO_PSKEY,
                    "mco_vhost": settings.MCO_VHOST,
                    "mco_host": settings.MCO_HOST,
                    "mco_user": settings.MCO_USER,
                    "mco_password": settings.MCO_PASSWORD,
                    "mco_connector": settings.MCO_CONNECTOR,
                    "mco_enable": 1,
                    "ks_spaces": n.attributes.volumes,
                    "auth_key": '"%s"' % cluster_attrs.get("auth_key", ""),
                },
            }

            NetworkManager.assign_admin_ips(n.id, len(n.meta.get("interfaces", [])))

            admin_ips = set(
                [i.ip_addr for i in self.db.query(IPAddr).filter_by(node=n.id).filter_by(network=admin_net.id)]
            )

            for i in n.meta.get("interfaces", []):
                if "interfaces" not in pnd:
                    pnd["interfaces"] = {}
                pnd["interfaces"][i["name"]] = {
                    "mac_address": i["mac"],
                    "static": "0",
                    "netmask": admin_net.network_group.netmask,
                    "ip_address": admin_ips.pop(),
                }
                if "interfaces_extra" not in pnd:
                    pnd["interfaces_extra"] = {}
                pnd["interfaces_extra"][i["name"]] = {"peerdns": "no", "onboot": "no"}

                if i["mac"] == n.mac:
                    pnd["interfaces"][i["name"]]["dns_name"] = n.fqdn
                    pnd["interfaces_extra"][i["name"]]["onboot"] = "yes"

            provision_nodes.append(pnd)

        provision_task_uuid = filter(lambda t: t.name == "provision", supertask.subtasks)[0].uuid

        provision_msg = {
            "method": "provision",
            "respond_to": "provision_resp",
            "args": {
                "task_uuid": provision_task_uuid,
                "provisioning_info": {
                    "engine": {
                        "url": settings.COBBLER_URL,
                        "username": settings.COBBLER_USER,
                        "password": settings.COBBLER_PASSWORD,
                    },
                    "nodes": provision_nodes,
                },
            },
        }

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEquals(len(args), 2)
        self.assertEquals(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
コード例 #19
    def test_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(
            cluster_kwargs={
                "mode": "ha",
                "type": "compute"
            },
            nodes_kwargs=[
                {"role": "controller", "pending_addition": True},
                {"role": "controller", "pending_addition": True},
                {"role": "controller", "pending_addition": True},
            ]
        )
        cluster_db = self.env.clusters[0]
        cluster_depl_mode = 'ha'

        # Set ip ranges for floating ips
        ranges = [['172.16.0.2', '172.16.0.4'],
                  ['172.16.0.3', '172.16.0.5'],
                  ['172.16.0.10', '172.16.0.12']]

        floating_network_group = self.db.query(NetworkGroup).filter(
            NetworkGroup.name == 'floating').filter(
                NetworkGroup.cluster_id == cluster_db.id).first()

        # Remove floating ip addr ranges
        self.db.query(IPAddrRange).filter(
            IPAddrRange.network_group_id == floating_network_group.id).delete()

        # Add new ranges
        for ip_range in ranges:
            new_ip_range = IPAddrRange(
                first=ip_range[0],
                last=ip_range[1],
                network_group_id=floating_network_group.id)

            self.db.add(new_ip_range)
        self.db.commit()
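        # These three ranges should come back verbatim as 'first-last'
        # strings in the serialized 'floating_network_range' checked below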

        # Update netmask for public network
        public_network_group = self.db.query(NetworkGroup).filter(
            NetworkGroup.name == 'public').filter(
                NetworkGroup.cluster_id == cluster_db.id).first()
        public_network_group.netmask = '255.255.255.128'
        self.db.commit()

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [x.uuid for x in supertask.subtasks
                            if x.name == 'deployment'][0]

        msg = {'method': 'deploy', 'respond_to': 'deploy_resp',
               'args': {}}
        self.db.add(cluster_db)
        cluster_attrs = cluster_db.attributes.merged_attrs_values()

        nets_db = self.db.query(Network).join(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster_db.id).all()

        for net in nets_db:
            if net.name != 'public':
                cluster_attrs[net.name + '_network_range'] = net.cidr

        cluster_attrs['floating_network_range'] = [
            '172.16.0.2-172.16.0.4',
            '172.16.0.3-172.16.0.5',
            '172.16.0.10-172.16.0.12'
        ]

        management_vip = self.env.network_manager.assign_vip(
            cluster_db.id,
            'management'
        )
        public_vip = self.env.network_manager.assign_vip(
            cluster_db.id,
            'public'
        )

        net_params = {}
        net_params['network_manager'] = "FlatDHCPManager"
        net_params['network_size'] = 256

        cluster_attrs['novanetwork_parameters'] = net_params

        cluster_attrs['management_vip'] = management_vip
        cluster_attrs['public_vip'] = public_vip
        cluster_attrs['master_ip'] = '127.0.0.1'
        cluster_attrs['deployment_mode'] = cluster_depl_mode
        cluster_attrs['deployment_id'] = cluster_db.id

        msg['args']['attributes'] = cluster_attrs
        msg['args']['task_uuid'] = deploy_task_uuid
        nodes = []
        provision_nodes = []

        admin_net_id = self.env.network_manager.get_admin_network_id()

        for n in sorted(self.env.nodes, key=lambda n: n.id):

            q = self.db.query(IPAddr).join(Network).\
                filter(IPAddr.node == n.id).filter(
                    not_(IPAddr.network == admin_net_id)
                )

            """
            Here we want to get node IP addresses which belong
            to storage and management networks respectively
            """
            node_ip_management, node_ip_storage = map(
                lambda x: q.filter_by(name=x).first().ip_addr
                + "/" + cluster_attrs[x + '_network_range'].split('/')[1],
                ('management', 'storage')
            )
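            # public addresses get a /25 prefix to match the 255.255.255.128
            # netmask set on the public network group above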
            node_ip_public = q.filter_by(name='public').first().ip_addr + '/25'

            nodes.append({'uid': n.id, 'status': n.status, 'ip': n.ip,
                          'error_type': n.error_type, 'mac': n.mac,
                          'role': n.role, 'id': n.id, 'fqdn':
                          '%s-%d.%s' % (n.role, n.id, settings.DNS_DOMAIN),
                          'progress': 0, 'meta': n.meta, 'online': True,
                          'network_data': [{'brd': '192.168.0.255',
                                            'ip': node_ip_management,
                                            'vlan': 101,
                                            'gateway': '192.168.0.1',
                                            'netmask': '255.255.255.0',
                                            'dev': 'eth0',
                                            'name': 'management'},
                                           {'brd': '172.16.1.255',
                                            'ip': node_ip_public,
                                            'vlan': 100,
                                            'gateway': '172.16.1.1',
                                            'netmask': '255.255.255.128',
                                            'dev': 'eth0',
                                            'name': u'public'},
                                           {'name': u'storage',
                                            'ip': node_ip_storage,
                                            'vlan': 102,
                                            'dev': 'eth0',
                                            'netmask': '255.255.255.0',
                                            'brd': '192.168.1.255',
                                            'gateway': u'192.168.1.1'},
                                           {'vlan': 100,
                                            'name': 'floating',
                                            'dev': 'eth0'},
                                           {'vlan': 103,
                                            'name': 'fixed',
                                            'dev': 'eth0'},
                                           {'name': u'admin',
                                            'dev': 'eth0'}]})

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': TaskHelper.make_slave_name(n.id, n.role),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'ks_spaces': "\"%s\"" % json.dumps(
                        n.attributes.volumes).replace("\"", "\\\""),
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                }
            }

            netmanager = NetworkManager()
            netmanager.assign_admin_ips(
                n.id,
                len(n.meta.get('interfaces', []))
            )

            admin_ips = set([i.ip_addr for i in self.db.query(IPAddr).
                            filter_by(node=n.id).
                            filter_by(network=admin_net_id)])

            for i in n.meta.get('interfaces', []):
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i['mac'] == n.mac:
                    pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'

            provision_nodes.append(pnd)

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            nodes)
        msg['args']['attributes']['controller_nodes'] = controller_nodes
        msg['args']['nodes'] = nodes

        provision_task_uuid = [x.uuid for x in supertask.subtasks
                               if x.name == 'provision'][0]
        provision_msg = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': provision_nodes,
            }
        }

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEquals(len(args), 2)
        self.assertEquals(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], msg)
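The expected 'floating_network_range' above is simply the inserted IPAddrRange
rows rendered as 'first-last' strings; a tiny sketch of that mapping (the
helper name is hypothetical, not part of nailgun):

    def format_ip_ranges(ranges):
        # Hypothetical helper: [['172.16.0.2', '172.16.0.4'], ...]
        # -> ['172.16.0.2-172.16.0.4', ...]
        return ['{0}-{1}'.format(first, last) for first, last in ranges]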
コード例 #20
    def test_neutron_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(
            cluster_kwargs={
                'mode': 'ha_compact',
                'net_provider': 'neutron',
                'net_segment_type': 'gre'
            },
            nodes_kwargs=[
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller', 'cinder'], 'pending_addition': True},
                {'roles': ['compute', 'cinder'], 'pending_addition': True},
                {'roles': ['compute'], 'pending_addition': True},
                {'roles': ['cinder'], 'pending_addition': True}
            ]
        )

        cluster_db = self.env.clusters[0]

        common_attrs = {
            'deployment_mode': 'ha_compact',

            'management_vip': '192.168.0.1',
            'public_vip': '172.16.0.2',

            'management_network_range': '192.168.0.0/24',
            'storage_network_range': '192.168.1.0/24',

            'mp': [{'weight': '1', 'point': '1'},
                   {'weight': '2', 'point': '2'}],

            'quantum': True,
            'quantum_settings': {},

            'master_ip': '127.0.0.1',
            'use_cinder': True,
            'deployment_id': cluster_db.id
        }

        cluster_attrs = cluster_db.attributes.merged_attrs_values()
        common_attrs.update(cluster_attrs)

        L2 = {
            "base_mac": "fa:16:3e:00:00:00",
            "segmentation_type": "gre",
            "phys_nets": {
                "physnet1": {
                    "bridge": "br-ex",
                    "vlan_range": None},
                "physnet2": {
                    "bridge": "br-prv",
                    "vlan_range": None}
            },
            "tunnel_id_ranges": "2:65535"
        }
        L3 = {
            "use_namespaces": True
        }
        predefined_networks = {
            "net04_ext": {
                'shared': False,
                'L2': {
                    'router_ext': True,
                    'network_type': 'flat',
                    'physnet': 'physnet1',
                    'segment_id': None},
                'L3': {
                    'subnet': u'172.16.0.0/24',
                    'enable_dhcp': False,
                    'nameservers': [],
                    'floating': '172.16.0.130:172.16.0.254',
                    'gateway': '172.16.0.1'},
                'tenant': 'admin'
            },
            "net04": {
                'shared': False,
                'L2': {
                    'router_ext': False,
                    'network_type': 'gre',
                    'physnet': 'physnet2',
                    'segment_id': None},
                'L3': {
                    'subnet': u'192.168.111.0/24',
                    'enable_dhcp': True,
                    'nameservers': [
                        '8.8.4.4',
                        '8.8.8.8'],
                    'floating': None,
                    'gateway': '192.168.111.1'},
                'tenant': 'admin'
            }
        }
        common_attrs['quantum_settings'].update(
            L2=L2,
            L3=L3,
            predefined_networks=predefined_networks)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        admin_ips = [
            '10.20.0.139/24',
            '10.20.0.138/24',
            '10.20.0.135/24',
            '10.20.0.133/24',
            '10.20.0.131/24',
            '10.20.0.130/24']
        for node in nodes_db:
            node_id = node.id
            admin_ip = admin_ips.pop()
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]['management'] = '192.168.0.%d' % (i + 2)
                assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
                assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 1)
                assigned_ips[node_id]['admin'] = admin_ip

                nodes_list.append({
                    'role': role,

                    'internal_address': assigned_ips[node_id]['management'],
                    'public_address': assigned_ips[node_id]['public'],
                    'storage_address': assigned_ips[node_id]['storage'],

                    'internal_netmask': '255.255.255.0',
                    'public_netmask': '255.255.255.0',
                    'storage_netmask': '255.255.255.0',

                    'uid': str(node_id),
                    'swift_zone': str(node_id),

                    'name': 'node-%d' % node_id,
                    'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)})
            i += 1

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            deepcopy(nodes_list))

        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

        common_attrs['last_controller'] = controller_nodes[-1]['name']

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 500, 400],
            'cinder': 700,
            'compute': 700
        }
        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),
                    'status': node.status,
                    'role': role,
                    'online': node.online,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,

                    'network_scheme': {
                        "version": "1.0",
                        "provider": "ovs",
                        "interfaces": {
                            "eth0": {
                                "L2": {"vlan_splinters": "off"},
                                "mtu": 1500
                            },
                            "eth1": {
                                "L2": {"vlan_splinters": "off"},
                                "mtu": 1500
                            },
                            "eth2": {
                                "L2": {"vlan_splinters": "off"},
                                "mtu": 1500
                            },
                        },
                        "endpoints": {
                            "br-mgmt": {"IP": [ips['management'] + "/24"]},
                            "br-ex": {
                                "IP": [ips['public'] + "/24"],
                                "gateway": "172.16.0.1"
                            },
                            "br-storage": {"IP": [ips['storage'] + "/24"]},
                            "eth1": {"IP": [ips['admin']]}
                        },
                        "roles": {
                            "management": "br-mgmt",
                            "mesh": "br-mgmt",
                            "ex": "br-ex",
                            "storage": "br-storage",
                            "fw-admin": "eth1"
                        },
                        "transformations": [
                            {
                                "action": "add-br",
                                "name": "br-ex"},
                            {
                                "action": "add-br",
                                "name": "br-mgmt"},
                            {
                                "action": "add-br",
                                "name": "br-storage"},
                            {
                                "action": "add-br",
                                "name": "br-prv"},
                            {
                                "action": "add-br",
                                "name": u"br-eth0"},
                            {
                                "action": "add-port",
                                "bridge": u"br-eth0",
                                "name": u"eth0"},
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-storage"],
                                "tags": [102, 0]},
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-ex"],
                                "trunks": [0]},
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-mgmt"],
                                "tags": [101, 0]}
                        ]
                    }
                }

                individual_atts.update(common_attrs)
                individual_atts['glance']['image_cache_max_size'] = str(
                    manager.calc_glance_cache_size(node.attributes.volumes)
                )
                deployment_info.append(deepcopy(individual_atts))

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            deployment_info)
        controller_nodes[0]['role'] = 'primary-controller'

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [x.uuid for x in supertask.subtasks
                            if x.name == 'deployment'][0]

        deployment_msg = {'method': 'deploy',
                          'respond_to': 'deploy_resp',
                          'args': {}}

        deployment_msg['args']['task_uuid'] = deploy_task_uuid
        deployment_msg['args']['deployment_info'] = deployment_info

        provision_nodes = []
        admin_net = self.env.network_manager.get_admin_network_group()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            udev_interfaces_mapping = ','.join([
                '{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
            eth1_mac = [i.mac for i in n.interfaces if i.name == 'eth1'][0]

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'kernel_options': {
                    'netcfg/choose_interface': eth1_mac,
                    'udevrules': udev_interfaces_mapping},
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': TaskHelper.make_slave_name(n.id),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'ks_spaces': n.attributes.volumes,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                }
            }

            NetworkManager.assign_admin_ips(
                n.id,
                len(n.meta.get('interfaces', []))
            )

            admin_ips = set([i.ip_addr
                             for i in self.db.query(IPAddr).
                             filter_by(node=n.id).
                             filter_by(network=admin_net.id)])

            for i in n.meta.get('interfaces', []):
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': admin_net.netmask,
                    'ip_address': admin_ips.pop(),
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i['mac'] == n.mac:
                    pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'

            provision_nodes.append(pnd)

        provision_task_uuid = filter(
            lambda t: t.name == 'provision',
            supertask.subtasks)[0].uuid

        provision_msg = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'provisioning_info': {
                    'engine': {
                        'url': settings.COBBLER_URL,
                        'username': settings.COBBLER_USER,
                        'password': settings.COBBLER_PASSWORD},
                    'nodes': provision_nodes}}}

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEquals(len(args), 2)
        self.assertEquals(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
コード例 #21
    def test_neutron_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(
            release_kwargs={
                'version': "2014.1.1-5.1"
            },
            cluster_kwargs={
                'net_provider': 'neutron',
                'net_segment_type': 'gre'
            },
            nodes_kwargs=[
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller', 'cinder'], 'pending_addition': True},
                {'roles': ['compute', 'cinder'], 'pending_addition': True},
                {'roles': ['compute'], 'pending_addition': True},
                {'roles': ['cinder'], 'pending_addition': True}
            ]
        )

        cluster_db = self.env.clusters[0]

        attrs = cluster_db.attributes.editable
        attrs['public_network_assignment']['assign_to_all_nodes']['value'] = \
            True
        attrs['provision']['method'] = consts.PROVISION_METHODS.image
        resp = self.app.patch(
            reverse(
                'ClusterAttributesHandler',
                kwargs={'cluster_id': cluster_db.id}),
            params=jsonutils.dumps({'editable': attrs}),
            headers=self.default_headers
        )
        self.assertEqual(200, resp.status_code)
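        # The PATCH above enables public network assignment for all nodes and
        # switches provisioning to the image-based method before the expected
        # attributes are built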

        common_attrs = {
            'deployment_mode': 'ha_compact',

            'management_vip': '192.168.0.1',
            'public_vip': '172.16.0.2',

            'management_network_range': '192.168.0.0/24',
            'storage_network_range': '192.168.1.0/24',

            'mp': [{'weight': '1', 'point': '1'},
                   {'weight': '2', 'point': '2'}],

            'quantum': True,
            'quantum_settings': {},

            'master_ip': '127.0.0.1',
            'use_cinder': True,
            'deployment_id': cluster_db.id,
            'openstack_version_prev': None,
            'openstack_version': cluster_db.release.version,
            'fuel_version': cluster_db.fuel_version
        }
        common_attrs.update(
            objects.Release.get_orchestrator_data_dict(cluster_db.release)
        )

        cluster_attrs = objects.Attributes.merged_attrs_values(
            cluster_db.attributes
        )
        common_attrs.update(cluster_attrs)

        L2 = {
            "base_mac": "fa:16:3e:00:00:00",
            "segmentation_type": "gre",
            "phys_nets": {},
            "tunnel_id_ranges": "2:65535"
        }
        L3 = {
            "use_namespaces": True
        }
        predefined_networks = {
            "net04_ext": {
                'shared': False,
                'L2': {
                    'router_ext': True,
                    'network_type': 'local',
                    'physnet': None,
                    'segment_id': None},
                'L3': {
                    'subnet': u'172.16.0.0/24',
                    'enable_dhcp': False,
                    'nameservers': [],
                    'floating': '172.16.0.130:172.16.0.254',
                    'gateway': '172.16.0.1'},
                'tenant': 'admin'
            },
            "net04": {
                'shared': False,
                'L2': {
                    'router_ext': False,
                    'network_type': 'gre',
                    'physnet': None,
                    'segment_id': None},
                'L3': {
                    'subnet': u'192.168.111.0/24',
                    'enable_dhcp': True,
                    'nameservers': [
                        '8.8.4.4',
                        '8.8.8.8'],
                    'floating': None,
                    'gateway': '192.168.111.1'},
                'tenant': 'admin'
            }
        }
        common_attrs['quantum_settings'].update(
            L2=L2,
            L3=L3,
            predefined_networks=predefined_networks)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        admin_ips = [
            '10.20.0.134/24',
            '10.20.0.133/24',
            '10.20.0.132/24',
            '10.20.0.131/24',
            '10.20.0.130/24',
            '10.20.0.129/24']
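        # pop() takes from the tail, so the lowest node id gets 10.20.0.129.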
        for node in nodes_db:
            node_id = node.id
            admin_ip = admin_ips.pop()
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]['management'] = '192.168.0.%d' % (i + 2)
                assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
                assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 1)
                assigned_ips[node_id]['admin'] = admin_ip

                nodes_list.append({
                    'role': role,

                    'internal_address': assigned_ips[node_id]['management'],
                    'public_address': assigned_ips[node_id]['public'],
                    'storage_address': assigned_ips[node_id]['storage'],

                    'internal_netmask': '255.255.255.0',
                    'public_netmask': '255.255.255.0',
                    'storage_netmask': '255.255.255.0',

                    'uid': str(node_id),
                    'swift_zone': str(node_id),

                    'name': 'node-%d' % node_id,
                    'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)})
            i += 1

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            deepcopy(nodes_list))

        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

        common_attrs['last_controller'] = controller_nodes[-1]['name']
        common_attrs['storage']['pg_num'] = 128

        common_attrs['test_vm_image'] = {
            'container_format': 'bare',
            'public': 'true',
            'disk_format': 'qcow2',
            'img_name': 'TestVM',
            'img_path': '/opt/vm/cirros-x86_64-disk.img',
            'os_name': 'cirros',
            'min_ram': 64,
            'glance_properties': (
                """--property murano_image_info="""
                """'{"title": "Murano Demo", "type": "cirros.demo"}'"""
            ),
        }

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 600, 500],
            'cinder': 700,
            'compute': 700
        }
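        # Controller priorities are popped from the tail: the first
        # controller serialized gets 500, the rest 600.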

        critical_mapping = {
            'primary-controller': True,
            'controller': False,
            'cinder': False,
            'compute': False
        }

        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                is_critical = critical_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),
                    'status': node.status,
                    'role': role,
                    'online': node.online,
                    'fail_if_error': is_critical,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,

                    'network_scheme': {
                        "version": "1.0",
                        "provider": "ovs",
                        "interfaces": {
                            "eth0": {
                                "L2": {"vlan_splinters": "off"},
                                "mtu": 1500
                            },
                            "eth1": {
                                "L2": {"vlan_splinters": "off"},
                                "mtu": 1500
                            },
                            "eth2": {
                                "L2": {"vlan_splinters": "off"},
                                "mtu": 1500
                            },
                        },
                        "endpoints": {
                            "br-mgmt": {"IP": [ips['management'] + "/24"]},
                            "br-ex": {
                                "IP": [ips['public'] + "/24"],
                                "gateway": "172.16.0.1"
                            },
                            "br-storage": {"IP": [ips['storage'] + "/24"]},
                            "br-fw-admin": {"IP": [ips['admin']]},
                        },
                        "roles": {
                            "management": "br-mgmt",
                            "mesh": "br-mgmt",
                            "ex": "br-ex",
                            "storage": "br-storage",
                            "fw-admin": "br-fw-admin",
                        },
                        "transformations": [
                            {
                                "action": "add-br",
                                "name": u"br-eth0"},
                            {
                                "action": "add-port",
                                "bridge": u"br-eth0",
                                "name": u"eth0"},
                            {
                                "action": "add-br",
                                "name": u"br-eth1"},
                            {
                                "action": "add-port",
                                "bridge": u"br-eth1",
                                "name": u"eth1"},
                            {
                                "action": "add-br",
                                "name": "br-mgmt"},
                            {
                                "action": "add-br",
                                "name": "br-storage"},
                            {
                                "action": "add-br",
                                "name": "br-fw-admin"},
                            {
                                "action": "add-br",
                                "name": "br-ex"},
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-storage"],
                                "tags": [102, 0],
                                "vlan_ids": [102, 0]},
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-mgmt"],
                                "tags": [101, 0],
                                "vlan_ids": [101, 0]},
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth1", "br-fw-admin"],
                                "trunks": [0]},
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-ex"],
                                "trunks": [0]},
                        ]
                    }
                }

                individual_atts.update(common_attrs)
                individual_atts['glance']['image_cache_max_size'] = str(
                    manager.calc_glance_cache_size(node.attributes.volumes)
                )
                deployment_info.append(deepcopy(individual_atts))

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            deployment_info)
        controller_nodes[0]['role'] = 'primary-controller'
        controller_nodes[0]['fail_if_error'] = True

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [x.uuid for x in supertask.subtasks
                            if x.name == 'deployment'][0]

        deployment_msg = {
            'api_version': '1',
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

        deployment_msg['args']['task_uuid'] = deploy_task_uuid
        deployment_msg['args']['deployment_info'] = deployment_info

        provision_nodes = []
        admin_net = self.env.network_manager.get_admin_network_group()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            udev_interfaces_mapping = ','.join([
                '{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
            eth1_mac = [i.mac for i in n.interfaces if i.name == 'eth1'][0]

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'kernel_options': {
                    'netcfg/choose_interface': eth1_mac,
                    'udevrules': udev_interfaces_mapping},
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': objects.Node.make_slave_name(n),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'fuel_version': cluster_db.fuel_version,
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'pm_data': {
                        'ks_spaces': n.attributes.volumes,
                        'kernel_params': objects.Node.get_kernel_params(n),
                    },
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'authorized_keys':
                    ["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
                    'timezone': settings.TIMEZONE,
                    'master_ip': settings.MASTER_IP,
                    'mlnx_vf_num': "16",
                    'mlnx_plugin_mode': "disabled",
                    'mlnx_iser_enabled': False,
                    'image_data': cluster_attrs['provision']['image_data']
                }
            }
            orchestrator_data = objects.Release.get_orchestrator_data_dict(
                cluster_db.release)
            if orchestrator_data:
                pnd['ks_meta']['repo_metadata'] = \
                    orchestrator_data['repo_metadata']

            vlan_splinters = cluster_attrs.get('vlan_splinters', None)
            if vlan_splinters == 'kernel_lt':
                pnd['ks_meta']['kernel_lt'] = 1

            NetworkManager.assign_admin_ips(n.id, 1)
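            # A single admin-network IP per node; read it back below for
            # the expected Cobbler interface data.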

            admin_ip = self.env.network_manager.get_admin_ip_for_node(n)

            for i in n.meta.get('interfaces', []):
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i['mac'] == n.mac:
                    pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'
                    pnd['interfaces'][i['name']]['ip_address'] = admin_ip
                    pnd['interfaces'][i['name']]['netmask'] = str(
                        netaddr.IPNetwork(admin_net.cidr).netmask)

            provision_nodes.append(pnd)

        provision_task_uuid = filter(
            lambda t: t.name == 'provision',
            supertask.subtasks)[0].uuid

        provision_msg = {
            'api_version': '1',
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'provisioning_info': {
                    'engine': {
                        'url': settings.COBBLER_URL,
                        'username': settings.COBBLER_USER,
                        'password': settings.COBBLER_PASSWORD,
                        'master_ip': settings.MASTER_IP,
                        'provision_method': consts.PROVISION_METHODS.image
                    },
                    'nodes': provision_nodes}}}

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
Code example #22
    def test_deploy_cast_with_right_args(self, mocked_rpc):
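        """Check that deploying an HA cluster with three controllers casts
        the expected 'provision' and 'deploy' messages in a single
        rpc.cast call.
        """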
        self.env.create(
            cluster_kwargs={
                "mode": "ha",
                "type": "compute"
            },
            nodes_kwargs=[
                {"role": "controller", "pending_addition": True},
                {"role": "controller", "pending_addition": True},
                {"role": "controller", "pending_addition": True},
            ]
        )
        cluster_db = self.env.clusters[0]
        cluster_depl_mode = 'ha'

        # Set ip ranges for floating ips
        ranges = [['240.0.0.2', '240.0.0.4'],
                  ['240.0.0.3', '240.0.0.5'],
                  ['240.0.0.10', '240.0.0.12']]

        floating_network_group = self.db.query(NetworkGroup).filter(
            NetworkGroup.name == 'floating').filter(
                NetworkGroup.cluster_id == cluster_db.id).first()

        # Remove floating ip addr ranges
        self.db.query(IPAddrRange).filter(
            IPAddrRange.network_group_id == floating_network_group.id).delete()

        # Add new ranges
        for ip_range in ranges:
            new_ip_range = IPAddrRange(
                first=ip_range[0],
                last=ip_range[1],
                network_group_id=floating_network_group.id)

            self.db.add(new_ip_range)
        self.db.commit()

        # Update netmask for public network
        public_network_group = self.db.query(NetworkGroup).filter(
            NetworkGroup.name == 'public').filter(
                NetworkGroup.cluster_id == cluster_db.id).first()
        public_network_group.netmask = '255.255.255.128'
        self.db.commit()

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [x.uuid for x in supertask.subtasks
                            if x.name == 'deployment'][0]

        msg = {'method': 'deploy', 'respond_to': 'deploy_resp',
               'args': {}}
        self.db.add(cluster_db)
        cluster_attrs = cluster_db.attributes.merged_attrs_values()

        nets_db = self.db.query(Network).join(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster_db.id).all()

        for net in nets_db:
            if net.name != 'public':
                cluster_attrs[net.name + '_network_range'] = net.cidr

        cluster_attrs['floating_network_range'] = [
            '240.0.0.10',
            '240.0.0.11',
            '240.0.0.12',

            '240.0.0.2',
            '240.0.0.3',
            '240.0.0.4',
            '240.0.0.5']
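        # The three overlapping ranges above expand to these addresses,
        # serialized in lexicographic (string) order.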

        management_vip = self.env.network_manager.assign_vip(
            cluster_db.id,
            'management'
        )
        public_vip = self.env.network_manager.assign_vip(
            cluster_db.id,
            'public'
        )

        cluster_attrs['management_vip'] = management_vip
        cluster_attrs['public_vip'] = public_vip
        cluster_attrs['deployment_mode'] = cluster_depl_mode
        cluster_attrs['deployment_id'] = cluster_db.id
        cluster_attrs['network_manager'] = "FlatDHCPManager"
        cluster_attrs['network_size'] = 256

        msg['args']['attributes'] = cluster_attrs
        msg['args']['task_uuid'] = deploy_task_uuid
        nodes = []
        provision_nodes = []

        admin_net_id = self.env.network_manager.get_admin_network_id()

        for n in sorted(self.env.nodes, key=lambda n: n.id):

            q = self.db.query(IPAddr).join(Network).\
                filter(IPAddr.node == n.id).filter(
                    not_(IPAddr.network == admin_net_id)
                )

            """
            Here we want to get node IP addresses which belong
            to storage and management networks respectively
            """
            node_ip_management, node_ip_storage = map(
                lambda x: q.filter_by(name=x).first().ip_addr
                + "/" + cluster_attrs[x + '_network_range'].split('/')[1],
                ('management', 'storage')
            )
            node_ip_public = q.filter_by(name='public').first().ip_addr + '/25'
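            # /25 matches the 255.255.255.128 public netmask set above.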

            nodes.append({'uid': n.id, 'status': n.status, 'ip': n.ip,
                          'error_type': n.error_type, 'mac': n.mac,
                          'role': n.role, 'id': n.id, 'fqdn':
                          '%s-%d.%s' % (n.role, n.id, settings.DNS_DOMAIN),
                          'progress': 0, 'meta': n.meta, 'online': True,
                          'network_data': [{'brd': '192.168.0.255',
                                            'ip': node_ip_management,
                                            'vlan': 103,
                                            'gateway': '192.168.0.1',
                                            'netmask': '255.255.255.0',
                                            'dev': 'eth0',
                                            'name': 'management'},
                                           {'brd': '240.0.1.255',
                                            'ip': node_ip_public,
                                            'vlan': 100,
                                            'gateway': '240.0.1.1',
                                            'netmask': '255.255.255.128',
                                            'dev': 'eth0',
                                            'name': u'public'},
                                           {'name': u'storage',
                                            'ip': node_ip_storage,
                                            'vlan': 102,
                                            'dev': 'eth0',
                                            'netmask': '255.255.255.0',
                                            'brd': '172.16.0.255',
                                            'gateway': u'172.16.0.1'},
                                           {'vlan': 100,
                                            'name': 'floating',
                                            'dev': 'eth0'},
                                           {'vlan': 101,
                                            'name': 'fixed',
                                            'dev': 'eth0'},
                                           {'name': u'admin',
                                            'dev': 'eth0'}]})

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': TaskHelper.make_slave_name(n.id, n.role),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'ks_spaces': "\"%s\"" % json.dumps(
                        n.attributes.volumes).replace("\"", "\\\""),
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                }
            }

            netmanager = NetworkManager()
            netmanager.assign_admin_ips(
                n.id,
                len(n.meta.get('interfaces', []))
            )
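            # One admin IP per interface was assigned above; collect them
            # for the expected Cobbler interface data.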

            admin_ips = set([i.ip_addr for i in self.db.query(IPAddr).
                            filter_by(node=n.id).
                            filter_by(network=admin_net_id)])

            for i in n.meta.get('interfaces', []):
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i['mac'] == n.mac:
                    pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'

            provision_nodes.append(pnd)

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            nodes)
        msg['args']['attributes']['controller_nodes'] = controller_nodes
        msg['args']['nodes'] = nodes

        provision_task_uuid = [x.uuid for x in supertask.subtasks
                               if x.name == 'provision'][0]
        provision_msg = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': provision_nodes,
            }
        }

        nailgun.task.manager.rpc.cast.assert_called_once_with(
            'naily', [provision_msg, msg])
Code example #23
    def test_neutron_deploy_cast_with_right_args(self, mocked_rpc):
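        """Earlier variant of the Neutron/GRE deployment test above:
        ks_meta carries no image_data and the provisioning engine block
        has no provision_method.
        """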
        self.env.create(release_kwargs={'version': "2014.1.1-5.1"},
                        cluster_kwargs={
                            'net_provider': 'neutron',
                            'net_segment_type': 'gre'
                        },
                        nodes_kwargs=[{
                            'roles': ['controller'],
                            'pending_addition': True
                        }, {
                            'roles': ['controller'],
                            'pending_addition': True
                        }, {
                            'roles': ['controller', 'cinder'],
                            'pending_addition': True
                        }, {
                            'roles': ['compute', 'cinder'],
                            'pending_addition': True
                        }, {
                            'roles': ['compute'],
                            'pending_addition': True
                        }, {
                            'roles': ['cinder'],
                            'pending_addition': True
                        }])

        cluster_db = self.env.clusters[0]

        attrs = cluster_db.attributes.editable
        attrs['public_network_assignment']['assign_to_all_nodes']['value'] = \
            True
        resp = self.app.patch(reverse('ClusterAttributesHandler',
                                      kwargs={'cluster_id': cluster_db.id}),
                              params=jsonutils.dumps({'editable': attrs}),
                              headers=self.default_headers)
        self.assertEqual(200, resp.status_code)

        common_attrs = {
            'deployment_mode': 'ha_compact',
            'management_vip': '192.168.0.1',
            'public_vip': '172.16.0.2',
            'management_network_range': '192.168.0.0/24',
            'storage_network_range': '192.168.1.0/24',
            'mp': [{
                'weight': '1',
                'point': '1'
            }, {
                'weight': '2',
                'point': '2'
            }],
            'quantum': True,
            'quantum_settings': {},
            'master_ip': '127.0.0.1',
            'use_cinder': True,
            'deployment_id': cluster_db.id,
            'openstack_version_prev': None,
            'openstack_version': cluster_db.release.version,
            'fuel_version': cluster_db.fuel_version
        }
        common_attrs.update(
            objects.Release.get_orchestrator_data_dict(cluster_db.release))

        cluster_attrs = objects.Attributes.merged_attrs_values(
            cluster_db.attributes)
        common_attrs.update(cluster_attrs)

        L2 = {
            "base_mac": "fa:16:3e:00:00:00",
            "segmentation_type": "gre",
            "phys_nets": {},
            "tunnel_id_ranges": "2:65535"
        }
        L3 = {"use_namespaces": True}
        predefined_networks = {
            "net04_ext": {
                'shared': False,
                'L2': {
                    'router_ext': True,
                    'network_type': 'local',
                    'physnet': None,
                    'segment_id': None
                },
                'L3': {
                    'subnet': u'172.16.0.0/24',
                    'enable_dhcp': False,
                    'nameservers': [],
                    'floating': '172.16.0.130:172.16.0.254',
                    'gateway': '172.16.0.1'
                },
                'tenant': 'admin'
            },
            "net04": {
                'shared': False,
                'L2': {
                    'router_ext': False,
                    'network_type': 'gre',
                    'physnet': None,
                    'segment_id': None
                },
                'L3': {
                    'subnet': u'192.168.111.0/24',
                    'enable_dhcp': True,
                    'nameservers': ['8.8.4.4', '8.8.8.8'],
                    'floating': None,
                    'gateway': '192.168.111.1'
                },
                'tenant': 'admin'
            }
        }
        common_attrs['quantum_settings'].update(
            L2=L2, L3=L3, predefined_networks=predefined_networks)

        # Common attrs calculation
        nodes_list = []
        nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
        assigned_ips = {}
        i = 0
        admin_ips = [
            '10.20.0.134/24', '10.20.0.133/24', '10.20.0.132/24',
            '10.20.0.131/24', '10.20.0.130/24', '10.20.0.129/24'
        ]
        for node in nodes_db:
            node_id = node.id
            admin_ip = admin_ips.pop()
            for role in sorted(node.roles + node.pending_roles):
                assigned_ips[node_id] = {}
                assigned_ips[node_id]['management'] = '192.168.0.%d' % (i + 2)
                assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
                assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 1)
                assigned_ips[node_id]['admin'] = admin_ip

                nodes_list.append({
                    'role': role,
                    'internal_address': assigned_ips[node_id]['management'],
                    'public_address': assigned_ips[node_id]['public'],
                    'storage_address': assigned_ips[node_id]['storage'],
                    'internal_netmask': '255.255.255.0',
                    'public_netmask': '255.255.255.0',
                    'storage_netmask': '255.255.255.0',
                    'uid': str(node_id),
                    'swift_zone': str(node_id),
                    'name': 'node-%d' % node_id,
                    'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)
                })
            i += 1

        controller_nodes = filter(lambda node: node['role'] == 'controller',
                                  deepcopy(nodes_list))

        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

        common_attrs['last_controller'] = controller_nodes[-1]['name']
        common_attrs['storage']['pg_num'] = 128

        common_attrs['test_vm_image'] = {
            'container_format': 'bare',
            'public': 'true',
            'disk_format': 'qcow2',
            'img_name': 'TestVM',
            'img_path': '/opt/vm/cirros-x86_64-disk.img',
            'os_name': 'cirros',
            'min_ram': 64,
            'glance_properties': (
                """--property murano_image_info="""
                """'{"title": "Murano Demo", "type": "cirros.demo"}'"""),
        }

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 600, 500],
            'cinder': 700,
            'compute': 700
        }

        critical_mapping = {
            'primary-controller': True,
            'controller': False,
            'cinder': False,
            'compute': False
        }

        deployment_info = []
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                is_critical = critical_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),
                    'status': node.status,
                    'role': role,
                    'online': node.online,
                    'fail_if_error': is_critical,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,
                    'network_scheme': {
                        "version":
                        "1.0",
                        "provider":
                        "ovs",
                        "interfaces": {
                            "eth0": {
                                "L2": {
                                    "vlan_splinters": "off"
                                },
                                "mtu": 1500
                            },
                            "eth1": {
                                "L2": {
                                    "vlan_splinters": "off"
                                },
                                "mtu": 1500
                            },
                            "eth2": {
                                "L2": {
                                    "vlan_splinters": "off"
                                },
                                "mtu": 1500
                            },
                        },
                        "endpoints": {
                            "br-mgmt": {
                                "IP": [ips['management'] + "/24"]
                            },
                            "br-ex": {
                                "IP": [ips['public'] + "/24"],
                                "gateway": "172.16.0.1"
                            },
                            "br-storage": {
                                "IP": [ips['storage'] + "/24"]
                            },
                            "br-fw-admin": {
                                "IP": [ips['admin']]
                            },
                        },
                        "roles": {
                            "management": "br-mgmt",
                            "mesh": "br-mgmt",
                            "ex": "br-ex",
                            "storage": "br-storage",
                            "fw-admin": "br-fw-admin",
                        },
                        "transformations": [
                            {
                                "action": "add-br",
                                "name": u"br-eth0"
                            },
                            {
                                "action": "add-port",
                                "bridge": u"br-eth0",
                                "name": u"eth0"
                            },
                            {
                                "action": "add-br",
                                "name": u"br-eth1"
                            },
                            {
                                "action": "add-port",
                                "bridge": u"br-eth1",
                                "name": u"eth1"
                            },
                            {
                                "action": "add-br",
                                "name": "br-mgmt"
                            },
                            {
                                "action": "add-br",
                                "name": "br-storage"
                            },
                            {
                                "action": "add-br",
                                "name": "br-fw-admin"
                            },
                            {
                                "action": "add-br",
                                "name": "br-ex"
                            },
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-storage"],
                                "tags": [102, 0]
                            },
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-mgmt"],
                                "tags": [101, 0]
                            },
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth1", "br-fw-admin"],
                                "trunks": [0]
                            },
                            {
                                "action": "add-patch",
                                "bridges": [u"br-eth0", "br-ex"],
                                "trunks": [0]
                            },
                        ]
                    }
                }

                individual_atts.update(common_attrs)
                individual_atts['glance']['image_cache_max_size'] = str(
                    manager.calc_glance_cache_size(node.attributes.volumes))
                deployment_info.append(deepcopy(individual_atts))

        controller_nodes = filter(lambda node: node['role'] == 'controller',
                                  deployment_info)
        controller_nodes[0]['role'] = 'primary-controller'
        controller_nodes[0]['fail_if_error'] = True

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [
            x.uuid for x in supertask.subtasks if x.name == 'deployment'
        ][0]

        deployment_msg = {
            'api_version': '1',
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

        deployment_msg['args']['task_uuid'] = deploy_task_uuid
        deployment_msg['args']['deployment_info'] = deployment_info

        provision_nodes = []
        admin_net = self.env.network_manager.get_admin_network_group()

        for n in sorted(self.env.nodes, key=lambda n: n.id):
            udev_interfaces_mapping = ','.join(
                ['{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
            eth1_mac = [i.mac for i in n.interfaces if i.name == 'eth1'][0]

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'kernel_options': {
                    'netcfg/choose_interface': eth1_mac,
                    'udevrules': udev_interfaces_mapping
                },
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': objects.Node.make_slave_name(n),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'fuel_version': cluster_db.fuel_version,
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'pm_data': {
                        'ks_spaces': n.attributes.volumes,
                        'kernel_params': objects.Node.get_kernel_params(n),
                    },
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'mlnx_vf_num': "16",
                    'mlnx_plugin_mode': "disabled",
                    'mlnx_iser_enabled': False,
                }
            }
            orchestrator_data = objects.Release.get_orchestrator_data_dict(
                cluster_db.release)
            if orchestrator_data:
                pnd['ks_meta']['repo_metadata'] = \
                    orchestrator_data['repo_metadata']

            vlan_splinters = cluster_attrs.get('vlan_splinters', None)
            if vlan_splinters == 'kernel_lt':
                pnd['ks_meta']['kernel_lt'] = 1

            NetworkManager.assign_admin_ips(n.id, 1)

            admin_ip = self.env.network_manager.get_admin_ip_for_node(n)

            for i in n.meta.get('interfaces', []):
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i['mac'] == n.mac:
                    pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'
                    pnd['interfaces'][i['name']]['ip_address'] = admin_ip
                    pnd['interfaces'][i['name']]['netmask'] = str(
                        netaddr.IPNetwork(admin_net.cidr).netmask)

            provision_nodes.append(pnd)

        provision_task_uuid = filter(lambda t: t.name == 'provision',
                                     supertask.subtasks)[0].uuid

        provision_msg = {
            'api_version': '1',
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'provisioning_info': {
                    'engine': {
                        'url': settings.COBBLER_URL,
                        'username': settings.COBBLER_USER,
                        'password': settings.COBBLER_PASSWORD,
                        'master_ip': settings.MASTER_IP
                    },
                    'nodes': provision_nodes
                }
            }
        }

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], deployment_msg)
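
All three tests receive a mocked_rpc argument and then inspect
nailgun.task.manager.rpc.cast.call_args, which implies the test class
patches Nailgun's RPC cast. A minimal sketch of that convention, assuming
mock.patch and hypothetical class and import names that are not shown in
the excerpts above:

from mock import patch

from nailgun.test.base import BaseIntegrationTest  # assumed import path


class TestDeploymentRPC(BaseIntegrationTest):  # hypothetical test class

    @patch('nailgun.task.manager.rpc.cast')
    def test_deploy_casts_rpc(self, mocked_rpc):
        self.env.launch_deployment()          # triggers rpc.cast('naily', [...])
        args, kwargs = mocked_rpc.call_args   # args of the most recent call
        self.assertEqual(args[0], 'naily')    # AMQP queue name (see example #22)
        provision_msg, deploy_msg = args[1]   # provision first, then deploy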