Example #1
0
    def get_default(self, node):
        """Return the default network assignment for ``node``.

        Dispatches to the Neutron-specific manager when the node's
        cluster uses the 'neutron' provider; otherwise (no cluster, or
        any other provider) falls back to the base NetworkManager.
        """
        is_neutron = bool(node.cluster) and \
            node.cluster.net_provider == 'neutron'
        manager_cls = NeutronManager if is_neutron else NetworkManager
        return manager_cls().get_default_networks_assignment(node)
Example #2
0
    def update(cls, cluster, network_configuration):
        """Apply a submitted network configuration to the database.

        Updates every non-admin network group listed under 'networks'
        (IP ranges, CIDR and other attributes), then copies any
        'neutron_parameters' onto the cluster's neutron config and
        commits.
        """
        from nailgun.network.neutron import NeutronManager
        network_manager = NeutronManager()
        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                # The admin network group is managed separately - skip it.
                if ng['id'] == network_manager.get_admin_network_group_id():
                    continue

                ng_db = db().query(NetworkGroup).get(ng['id'])

                for key, value in ng.iteritems():
                    if key == "ip_ranges":
                        cls._set_ip_ranges(ng['id'], value)
                    else:
                        # A changed CIDR presumably recomputes the group's
                        # IP ranges; the 'private' network is exempt.
                        if key == 'cidr' and \
                                not ng['name'] in ('private',):
                            network_manager.update_ranges_from_cidr(
                                ng_db, value)

                        # All other keys map directly onto the DB row.
                        setattr(ng_db, key, value)

                if ng['name'] != 'private':
                    network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')

        if 'neutron_parameters' in network_configuration:
            for key, value in network_configuration['neutron_parameters'] \
                    .items():
                setattr(cluster.neutron_config, key, value)
            db().add(cluster.neutron_config)
            db().commit()
Example #3
0
    def POST(self):
        """Create a new cluster from the validated request body.

        :returns: JSONized Cluster object.
        :http: * 201 (cluster successfully created)
               * 400 (invalid cluster data specified)
               * 409 (cluster with such parameters already exists)
        """
        # It's used for cluster creating only.
        data = self.checked_data()

        cluster = Cluster()
        cluster.release = db().query(Release).get(data["release"])
        # TODO(NAME): use fields
        for field in ("name", "mode", "net_provider", "net_segment_type"):
            if data.get(field):
                setattr(cluster, field, data.get(field))
        db().add(cluster)
        db().commit()
        attributes = Attributes(
            editable=cluster.release.attributes_metadata.get("editable"),
            generated=cluster.release.attributes_metadata.get("generated"),
            cluster=cluster)
        attributes.generate_fields()

        # Pick the network manager for the cluster's provider. Default to
        # NetworkManager (as the other handlers in this module do) so that
        # an unexpected provider value cannot leave `netmanager` unbound
        # and raise NameError below.
        if cluster.net_provider == 'neutron':
            netmanager = NeutronManager()
        else:
            netmanager = NetworkManager()

        try:
            netmanager.create_network_groups(cluster.id)
            if cluster.net_provider == 'neutron':
                netmanager.create_neutron_config(cluster)

            cluster.add_pending_changes("attributes")
            cluster.add_pending_changes("networks")

            if 'nodes' in data and data['nodes']:
                nodes = db().query(Node).filter(Node.id.in_(
                    data['nodes'])).all()
                map(cluster.nodes.append, nodes)
                db().commit()
                for node in nodes:
                    netmanager.allow_network_assignment_to_all_interfaces(node)
                    netmanager.assign_networks_by_default(node)

            raise web.webapi.created(
                json.dumps(ClusterHandler.render(cluster), indent=4))
        except (errors.OutOfVLANs, errors.OutOfIPs, errors.NoSuitableCIDR,
                errors.InvalidNetworkPool) as e:
            # Cluster was created in this request, so there is no need to
            # use ClusterDeletionManager: all relations will be cascade
            # deleted automatically.
            # TODO(NAME): investigate transactions
            db().delete(cluster)

            raise web.badrequest(e.message)
Example #4
0
    def verify_data_correctness(cls, node):
        """Validate a node's submitted interface data against the DB.

        Checks that the node exists, that the number of interfaces
        matches the DB, and that every assigned network belongs to the
        node's cluster with each cluster network assigned exactly once
        (the admin network is exempt from this bookkeeping).

        :raises: errors.InvalidData on any mismatch.
        """
        db_node = db().query(Node).filter_by(id=node['id']).first()
        if not db_node:
            raise errors.InvalidData("There is no node with ID '%d' in DB" %
                                     node['id'],
                                     log_message=True)

        interfaces = node['interfaces']
        db_interfaces = db_node.interfaces
        if len(interfaces) != len(db_interfaces):
            raise errors.InvalidData(
                "Node '%d' has different amount of interfaces" % node['id'],
                log_message=True)

        # FIXIT: we should use not all networks but appropriate for this
        # node only.
        db_network_groups = db().query(NetworkGroup).filter_by(
            cluster_id=db_node.cluster_id).all()
        if not db_network_groups:
            raise errors.InvalidData("There are no networks related to"
                                     " node '%d' in DB" % node['id'],
                                     log_message=True)
        # Set of cluster network ids not yet seen in the payload.
        unassigned_ng_ids = set(ng.id for ng in db_network_groups)

        if db_node.cluster and db_node.cluster.net_provider == 'neutron':
            net_manager = NeutronManager()
        else:
            net_manager = NetworkManager()
        admin_ng_id = net_manager.get_admin_network_group_id()

        for iface in interfaces:
            matching = [i for i in db_interfaces if i.id == iface['id']]
            if not matching:
                raise errors.InvalidData("There is no interface with ID '%d'"
                                         " for node '%d' in DB" %
                                         (iface['id'], node['id']),
                                         log_message=True)
            db_iface = matching[0]

            for net in iface['assigned_networks']:
                net_id = net['id']
                if net_id == admin_ng_id:
                    # The admin network may appear anywhere - ignore it.
                    continue
                if net_id not in unassigned_ng_ids:
                    raise errors.InvalidData(
                        "Node '%d' shouldn't be connected to"
                        " network with ID '%d'" % (node['id'], net['id']),
                        log_message=True)
                unassigned_ng_ids.remove(net_id)

        # Check if there are unassigned networks for this node.
        if unassigned_ng_ids:
            raise errors.InvalidData(
                "Too few networks to assign to node '%d'" % node['id'],
                log_message=True)
Example #5
0
    def network_cluster_attrs(cls, cluster):
        """Cluster attributes for serialization.

        Always flags quantum and attaches the neutron settings; for
        multinode clusters additionally exposes the first controller's
        management IP (without the prefix length) as 'management_vip'.
        """
        attrs = {
            'quantum': True,
            'quantum_settings': cls.neutron_attrs(cluster)
        }

        if cluster.mode == 'multinode':
            nm = NeutronManager()
            # Only the first controller node contributes the VIP.
            controller = next(
                (n for n in cluster.nodes
                 if cls._node_has_role_by_name(n, 'controller')),
                None)
            if controller is not None:
                mgmt_cidr = nm.get_node_network_by_netname(
                    controller.id, 'management')['ip']
                attrs['management_vip'] = mgmt_cidr.split('/')[0]

        return attrs
Example #6
0
    def serialize_for_cluster(cls, cluster):
        """Serialize the cluster's network configuration.

        Returns a dict with the provider settings, all serialized
        network groups (including the admin group, which is not part of
        ``cluster.network_groups``), and the neutron parameters.
        """
        net_manager = NeutronManager()

        serialized_groups = [cls.serialize_network_group(ng)
                             for ng in cluster.network_groups]
        # The admin network group is fetched separately because it is
        # not listed under cluster.network_groups.
        serialized_groups.append(
            cls.serialize_network_group(net_manager.get_admin_network_group()))

        neutron_config = cluster.neutron_config
        return {
            'net_provider': cluster.net_provider,
            'net_l23_provider': cluster.net_l23_provider,
            'net_segment_type': cluster.net_segment_type,
            'networks': serialized_groups,
            'neutron_parameters': {
                'predefined_networks': neutron_config.predefined_networks,
                'L2': neutron_config.L2,
                'segmentation_type': neutron_config.segmentation_type
            }
        }
Example #7
0
    def PUT(self):
        """Bulk-update a collection of nodes.

        :returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(self.validator.validate_collection_update)

        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            is_agent = nd.pop("is_agent") if "is_agent" in nd else False
            node = None
            # Agents identify nodes by MAC; API clients use the node id.
            if "mac" in nd:
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])
            if is_agent:
                # Agent heartbeat: refresh timestamp and flip the node back
                # online if it was marked offline.
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()
            old_cluster_id = node.cluster_id

            # Choosing network manager
            if nd.get('cluster_id') is not None:
                cluster = db().query(Cluster).get(nd['cluster_id'])
            else:
                cluster = node.cluster

            if cluster and cluster.net_provider == "nova_network":
                network_manager = NetworkManager()
            elif cluster and cluster.net_provider == "neutron":
                network_manager = NeutronManager()
            # essential rollback - we can't avoid it now
            elif not cluster:
                network_manager = NetworkManager()
            else:
                # Defensive default: an unrecognized net_provider must not
                # leave network_manager unbound (it is used further down).
                network_manager = NetworkManager()
            # /Choosing network manager

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                # Removing a node from its cluster also drops its roles
                # and pending changes.
                if nd["cluster_id"] is None and node.cluster:
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                node.cluster_id = nd["cluster_id"]

            # Volumes must be regenerated when roles or cluster change.
            regenerate_volumes = any(
                ('roles' in nd
                 and set(nd['roles']) != set(node.roles), 'pending_roles' in nd
                 and set(nd['pending_roles']) != set(node.pending_roles),
                 node.cluster_id != old_cluster_id))

            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status == "provisioning":
                    # We don't update provisioning back to discover
                    logger.debug("Node is already provisioning - "
                                 "status not updated by agent")
                    continue
                if key == "meta":
                    node.update_meta(value)
                else:
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                variants = ("disks" in node.meta
                            and len(node.meta["disks"]) != len(
                                filter(lambda d: d["type"] == "disk",
                                       node.attributes.volumes)),
                            regenerate_volumes)
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes("disks",
                                                             node_id=node.id)
                    except Exception as exc:
                        # `data` is the whole request list; the per-node
                        # dict is `nd` (was `data.get(...)`, which would
                        # raise AttributeError on a list here).
                        msg = ("Failed to generate volumes "
                               "info for node '{0}': '{1}'").format(
                                   node.name or nd.get("mac")
                                   or nd.get("id"),
                                   str(exc) or "see logs for details")
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()
            if is_agent:
                # Update node's NICs.
                network_manager.update_interfaces_info(node)

            nodes_updated.append(node)
            db().commit()
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                # The node moved between clusters: drop the old network
                # assignments and, if it joined a cluster, assign defaults.
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if nd['cluster_id']:
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)

        # we need eagerload everything that is used in render
        nodes = db().query(Node).options(
            joinedload('cluster'),
            joinedload('interfaces'),
            joinedload('interfaces.assigned_networks')).\
            filter(Node.id.in_([n.id for n in nodes_updated])).all()
        return self.render(nodes)