Example #1
    def PUT(self, cluster_id):
        cluster = self.get_object_or_404(Cluster, cluster_id)
        data = self.checked_data()
        network_manager = NetworkManager()

        for key, value in data.iteritems():
            if key == "nodes":
                # TODO: separate nodes for deletion and addition using set().
                new_nodes = db().query(Node).filter(Node.id.in_(value))
                nodes_to_remove = [
                    n for n in cluster.nodes if n not in new_nodes
                ]
                nodes_to_add = [n for n in new_nodes if n not in cluster.nodes]
                for node in nodes_to_add:
                    if not node.online:
                        raise web.badrequest(
                            "Can not add offline node to cluster")
                map(cluster.nodes.remove, nodes_to_remove)
                map(cluster.nodes.append, nodes_to_add)
                for node in nodes_to_remove:
                    network_manager.clear_assigned_networks(node.id)
                    network_manager.clear_all_allowed_networks(node.id)
                for node in nodes_to_add:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node.id)
                    network_manager.assign_networks_to_main_interface(node.id)
            else:
                setattr(cluster, key, value)
        db().commit()
        return self.render(cluster)
Example #2
    def PUT(self, cluster_id):
        cluster = self.get_object_or_404(Cluster, cluster_id)
        data = self.checked_data()
        network_manager = NetworkManager()

        for key, value in data.iteritems():
            if key == "nodes":
                # TODO: separate nodes for deletion and addition using set().
                new_nodes = db().query(Node).filter(
                    Node.id.in_(value)
                )
                nodes_to_remove = [n for n in cluster.nodes
                                   if n not in new_nodes]
                nodes_to_add = [n for n in new_nodes
                                if n not in cluster.nodes]
                for node in nodes_to_add:
                    if not node.online:
                        raise web.badrequest(
                            "Can not add offline node to cluster")
                map(cluster.nodes.remove, nodes_to_remove)
                map(cluster.nodes.append, nodes_to_add)
                for node in nodes_to_remove:
                    network_manager.clear_assigned_networks(node.id)
                    network_manager.clear_all_allowed_networks(node.id)
                for node in nodes_to_add:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node.id
                    )
                    network_manager.assign_networks_to_main_interface(node.id)
            else:
                setattr(cluster, key, value)
        db().commit()
        return self.render(cluster)
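
Examples #1 and #2 are the same cluster handler: the submitted "nodes" list replaces the cluster's membership wholesale, offline nodes are rejected with a 400, removed nodes get their network assignments cleared, and added nodes get networks attached to their main interface. A minimal client-side sketch of exercising such a handler is shown below; the "/api/clusters/<id>" URL layout and the lack of authentication are assumptions for illustration, not taken from the examples themselves.

    # Minimal sketch of a client call against a cluster PUT handler like the
    # ones above. The URL layout and header choice are assumptions.
    import json
    import requests

    def update_cluster_nodes(base_url, cluster_id, node_ids):
        """Replace the cluster's node list with the given node ids."""
        resp = requests.put(
            "{0}/api/clusters/{1}".format(base_url, cluster_id),
            data=json.dumps({"nodes": node_ids}),
            headers={"Content-Type": "application/json"},
        )
        # A 400 response is expected e.g. when one of the nodes is offline.
        resp.raise_for_status()
        return resp.json()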
Example #3
    def PUT(self, node_id):
        """:returns: JSONized Node object.
        :http: * 200 (OK)
               * 400 (invalid node data specified)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(Node, node_id)
        if not node.attributes:
            node.attributes = NodeAttributes(node_id=node.id)

        data = self.checked_data(self.validator.validate_update)

        network_manager = NetworkManager()

        old_cluster_id = node.cluster_id

        if data.get("pending_roles") == [] and node.cluster:
            node.cluster.clear_pending_changes(node_id=node.id)

        if "cluster_id" in data:
            if data["cluster_id"] is None and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)
                node.roles = node.pending_roles = []
            node.cluster_id = data["cluster_id"]
            if node.cluster_id != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if node.cluster_id:
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)

        regenerate_volumes = any((
            'roles' in data and set(data['roles']) != set(node.roles),
            'pending_roles' in data and
            set(data['pending_roles']) != set(node.pending_roles),
            node.cluster_id != old_cluster_id,
        ))

        for key, value in data.iteritems():
            # updating "id" explicitly is not allowed, and "cluster_id"
            # was already handled above, before the other fields
            if key in ("id", "cluster_id"):
                continue
            setattr(node, key, value)

        if node.status not in ('provisioning',
                               'deploying') and regenerate_volumes:
            try:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
            except Exception as exc:
                msg = (u"Failed to generate volumes "
                       "info for node '{0}': '{1}'").format(
                           node.name or data.get("mac") or data.get("id"),
                           str(exc) or "see logs for details")
                logger.warning(traceback.format_exc())
                notifier.notify("error", msg, node_id=node.id)
        db().commit()
        return self.render(node)
Example #4
    def PUT(self, node_id):
        """:returns: JSONized Node object.
        :http: * 200 (OK)
               * 400 (invalid node data specified)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(Node, node_id)
        if not node.attributes:
            node.attributes = NodeAttributes(node_id=node.id)

        data = self.checked_data(self.validator.validate_update)

        network_manager = NetworkManager()

        old_cluster_id = node.cluster_id

        if data.get("pending_roles") == [] and node.cluster:
            node.cluster.clear_pending_changes(node_id=node.id)

        if "cluster_id" in data:
            if data["cluster_id"] is None and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)
                node.roles = node.pending_roles = []
            node.cluster_id = data["cluster_id"]
            if node.cluster_id != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if node.cluster_id:
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(node)

        regenerate_volumes = any(
            (
                "roles" in data and set(data["roles"]) != set(node.roles),
                "pending_roles" in data and set(data["pending_roles"]) != set(node.pending_roles),
                node.cluster_id != old_cluster_id,
            )
        )

        for key, value in data.iteritems():
            # updating "id" explicitly is not allowed, and "cluster_id"
            # was already handled above, before the other fields
            if key in ("id", "cluster_id"):
                continue
            setattr(node, key, value)

        if node.status not in ("provisioning", "deploying") and regenerate_volumes:
            try:
                node.attributes.volumes = node.volume_manager.gen_volumes_info()
            except Exception as exc:
                msg = (u"Failed to generate volumes " "info for node '{0}': '{1}'").format(
                    node.name or data.get("mac") or data.get("id"), str(exc) or "see logs for details"
                )
                logger.warning(traceback.format_exc())
                notifier.notify("error", msg, node_id=node.id)
        db().commit()
        return self.render(node)
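
In Examples #3 and #4 the regenerate_volumes flag fires whenever the submitted roles, pending roles, or cluster assignment differ from the node's current state. The same predicate can be pulled out as a pure helper so it is easy to unit-test in isolation; the helper name and the stand-in node object below are hypothetical.

    # Hypothetical helper mirroring the regenerate_volumes condition above.
    def needs_volume_regeneration(data, node, old_cluster_id):
        """Return True if roles, pending roles or cluster membership changed."""
        return any((
            "roles" in data and set(data["roles"]) != set(node.roles),
            "pending_roles" in data and
            set(data["pending_roles"]) != set(node.pending_roles),
            node.cluster_id != old_cluster_id,
        ))

    # Tiny usage example with a stand-in node object.
    from collections import namedtuple
    FakeNode = namedtuple("FakeNode", ["roles", "pending_roles", "cluster_id"])
    node = FakeNode(roles=["compute"], pending_roles=[], cluster_id=1)
    print(needs_volume_regeneration({"roles": ["controller"]}, node, 1))  # True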
Example #5
    def PUT(self, node_id):
        node = self.get_object_or_404(Node, node_id)
        if not node.attributes:
            node.attributes = NodeAttributes(node_id=node.id)

        data = self.checked_data(self.validator.validate_update)

        network_manager = NetworkManager()

        for key, value in data.iteritems():
            # updating "id" explicitly is not allowed
            if key == "id":
                continue
            setattr(node, key, value)
            if key == 'cluster_id':
                if value:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node.id
                    )
                    network_manager.assign_networks_to_main_interface(node.id)
                else:
                    network_manager.clear_assigned_networks(node.id)
                    network_manager.clear_all_allowed_networks(node.id)
        if node.status not in ('provisioning', 'deploying') \
                and ("role" in data or "cluster_id" in data):
            try:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
            except Exception as exc:
                msg = (
                    u"Failed to generate volumes "
                    "info for node '{0}': '{1}'"
                ).format(
                    node.name or data.get("mac") or data.get("id"),
                    str(exc) or "see logs for details"
                )
                logger.warning(traceback.format_exc())
                notifier.notify("error", msg, node_id=node.id)
        db().commit()
        return self.render(node)
Example #6
    def PUT(self, cluster_id):
        """:returns: JSONized Cluster object.
        :http: * 200 (OK)
               * 400 (invalid cluster data specified)
               * 404 (cluster not found in db)
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)
        data = self.checked_data(cluster_id=cluster_id)
        network_manager = NetworkManager()

        for key, value in data.iteritems():
            if key == "nodes":
                # TODO(NAME): separate nodes
                # for deletion and addition using set().
                new_nodes = db().query(Node).filter(
                    Node.id.in_(value)
                )
                nodes_to_remove = [n for n in cluster.nodes
                                   if n not in new_nodes]
                nodes_to_add = [n for n in new_nodes
                                if n not in cluster.nodes]
                for node in nodes_to_add:
                    if not node.online:
                        raise web.badrequest(
                            "Can not add offline node to cluster")
                map(cluster.nodes.remove, nodes_to_remove)
                map(cluster.nodes.append, nodes_to_add)
                for node in nodes_to_remove:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                for node in nodes_to_add:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)
                    network_manager.assign_networks_by_default(node)
            else:
                setattr(cluster, key, value)
        db().commit()
        return self.render(cluster)
Example #7
    def PUT(self, cluster_id):
        """:returns: JSONized Cluster object.
        :http: * 200 (OK)
               * 400 (invalid cluster data specified)
               * 404 (cluster not found in db)
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)
        data = self.checked_data(cluster_id=cluster_id)
        network_manager = NetworkManager()

        for key, value in data.iteritems():
            if key == "nodes":
                # TODO(NAME): separate nodes
                # for deletion and addition using set().
                new_nodes = db().query(Node).filter(Node.id.in_(value))
                nodes_to_remove = [
                    n for n in cluster.nodes if n not in new_nodes
                ]
                nodes_to_add = [n for n in new_nodes if n not in cluster.nodes]
                for node in nodes_to_add:
                    if not node.online:
                        raise web.badrequest(
                            "Can not add offline node to cluster")
                map(cluster.nodes.remove, nodes_to_remove)
                map(cluster.nodes.append, nodes_to_add)
                for node in nodes_to_remove:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                for node in nodes_to_add:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)
                    network_manager.assign_networks_by_default(node)
            else:
                setattr(cluster, key, value)
        db().commit()
        return self.render(cluster)
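
The TODO in Examples #1, #2, #6 and #7 asks for the added and removed nodes to be computed with set(). A possible sketch of that refactoring is below; it assumes Node instances hash by object identity, as ordinary SQLAlchemy mapped objects do, so the query results and cluster.nodes refer to the same identity-mapped instances.

    # Sketch of the set()-based separation the TODO comment asks for.
    new_nodes = set(db().query(Node).filter(Node.id.in_(value)))
    current_nodes = set(cluster.nodes)
    nodes_to_remove = current_nodes - new_nodes
    nodes_to_add = new_nodes - current_nodes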
Example #8
    def PUT(self, node_id):
        node = self.get_object_or_404(Node, node_id)
        if not node.attributes:
            node.attributes = NodeAttributes(node_id=node.id)

        data = self.checked_data(self.validator.validate_update)

        network_manager = NetworkManager()

        for key, value in data.iteritems():
            # updating "id" explicitly is not allowed
            if key == "id":
                continue
            setattr(node, key, value)
            if key == 'cluster_id':
                if value:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node.id)
                    network_manager.assign_networks_to_main_interface(node.id)
                else:
                    network_manager.clear_assigned_networks(node.id)
                    network_manager.clear_all_allowed_networks(node.id)
        if node.status not in ('provisioning', 'deploying') \
                and ("role" in data or "cluster_id" in data):
            try:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
            except Exception as exc:
                msg = (u"Failed to generate volumes "
                       "info for node '{0}': '{1}'").format(
                           node.name or data.get("mac") or data.get("id"),
                           str(exc) or "see logs for details")
                logger.warning(traceback.format_exc())
                notifier.notify("error", msg, node_id=node.id)
        db().commit()
        return self.render(node)
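
Examples #5 and #8 are an older, simpler node handler: setting cluster_id allows network assignment on all interfaces and attaches networks to the main interface, while clearing it wipes all assignments. A request body that would hit the first branch might look like the following; the concrete cluster id and role are illustrative only.

    # Illustrative request body for the node PUT handler above. Attaching the
    # node to cluster 1 triggers the 'cluster_id' network-assignment branch and,
    # because "role" is present, a volume regeneration attempt.
    node_update = {"cluster_id": 1, "role": "compute"}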
Example #9
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(
            self.validator.validate_collection_update
        )

        network_manager = NetworkManager()
        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            is_agent = nd.pop("is_agent") if "is_agent" in nd else False
            node = None
            if "mac" in nd:
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])
            if is_agent:
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()
            old_cluster_id = node.cluster_id

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                if nd["cluster_id"] is None and node.cluster:
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                node.cluster_id = nd["cluster_id"]
            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status == "provisioning":
                    # We don't update provisioning back to discover
                    logger.debug(
                        "Node is already provisioning - "
                        "status not updated by agent"
                    )
                    continue
                if key == "meta":
                    node.update_meta(value)
                else:
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                variants = (
                    "disks" in node.meta and
                    len(node.meta["disks"]) != len(
                        filter(
                            lambda d: d["type"] == "disk",
                            node.attributes.volumes
                        )
                    ),
                    "roles" in nd,
                    "cluster_id" in nd
                )
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes(
                                "disks",
                                node_id=node.id
                            )
                    except Exception as exc:
                        msg = (
                            "Failed to generate volumes "
                            "info for node '{0}': '{1}'"
                        ).format(
                            node.name or nd.get("mac") or nd.get("id"),
                            str(exc) or "see logs for details"
                        )
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()
            if is_agent:
                # Update node's NICs.
                if node.meta and 'interfaces' in node.meta:
                    # we won't update interfaces if data is invalid
                    network_manager.update_interfaces_info(node.id)

            nodes_updated.append(node)
            db().commit()
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if nd['cluster_id']:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)
                    network_manager.assign_networks_to_main_interface(node)

        # we need to eager-load everything that is used in render
        nodes = db().query(Node).options(
            joinedload('cluster'),
            joinedload('interfaces'),
            joinedload('interfaces.assigned_networks')).\
            filter(Node.id.in_([n.id for n in nodes_updated])).all()
        return self.render(nodes)
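
The collection handler in Example #9 is also what the discovery agent posts to: an entry carrying is_agent refreshes the node's timestamp, may flip it back online with a notification, and triggers a re-read of interface data from meta. A rough sketch of such an agent-style payload is below; the MAC address, disk and interface details are made up for the example.

    # Illustrative agent-style entry for the collection PUT handler above.
    agent_update = [{
        "mac": "00:11:22:33:44:55",
        "is_agent": True,
        "status": "discover",
        "meta": {
            "disks": [{"type": "disk", "name": "sda", "size": 500107862016}],
            "interfaces": [{"name": "eth0", "mac": "00:11:22:33:44:55"}],
        },
    }]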
Example #10
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(self.validator.validate_collection_update)

        network_manager = NetworkManager()
        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            is_agent = nd.pop("is_agent") if "is_agent" in nd else False
            node = None
            if "mac" in nd:
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])
            if is_agent:
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()
            old_cluster_id = node.cluster_id

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                if nd["cluster_id"] is None and node.cluster:
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                node.cluster_id = nd["cluster_id"]
            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status == "provisioning":
                    # We don't update provisioning back to discover
                    logger.debug("Node is already provisioning - "
                                 "status not updated by agent")
                    continue
                if key == "meta":
                    node.update_meta(value)
                else:
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                variants = (
                    "disks" in node.meta and
                    len(node.meta["disks"]) != len(
                        filter(lambda d: d["type"] == "disk",
                               node.attributes.volumes)),
                    "roles" in nd,
                    "cluster_id" in nd
                )
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes("disks",
                                                             node_id=node.id)
                    except Exception as exc:
                        msg = ("Failed to generate volumes "
                               "info for node '{0}': '{1}'").format(
                                   node.name or nd.get("mac")
                                   or nd.get("id"),
                                   str(exc) or "see logs for details")
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()
            if is_agent:
                # Update node's NICs.
                if node.meta and 'interfaces' in node.meta:
                    # we won't update interfaces if data is invalid
                    network_manager.update_interfaces_info(node.id)

            nodes_updated.append(node)
            db().commit()
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node.id)
                    network_manager.clear_all_allowed_networks(node.id)
                if nd['cluster_id']:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node.id)
                    network_manager.assign_networks_to_main_interface(node.id)
        return map(NodeHandler.render, nodes_updated)
Example #11
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(self.validator.validate_collection_update)

        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            is_agent = nd.pop("is_agent") if "is_agent" in nd else False
            node = None
            if "mac" in nd:
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])
            if is_agent:
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()
            old_cluster_id = node.cluster_id

            # Choosing network manager
            if nd.get('cluster_id') is not None:
                cluster = db().query(Cluster).get(nd['cluster_id'])
            else:
                cluster = node.cluster

            if cluster and cluster.net_provider == "nova_network":
                network_manager = NetworkManager()
            elif cluster and cluster.net_provider == "neutron":
                network_manager = NeutronManager()
            # essential rollback - we can't avoid it now
            elif not cluster:
                network_manager = NetworkManager()
            # /Choosing network manager

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                if nd["cluster_id"] is None and node.cluster:
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                node.cluster_id = nd["cluster_id"]

            regenerate_volumes = any((
                'roles' in nd and set(nd['roles']) != set(node.roles),
                'pending_roles' in nd and
                set(nd['pending_roles']) != set(node.pending_roles),
                node.cluster_id != old_cluster_id,
            ))

            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status == "provisioning":
                    # We don't update provisioning back to discover
                    logger.debug("Node is already provisioning - "
                                 "status not updated by agent")
                    continue
                if key == "meta":
                    node.update_meta(value)
                else:
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                variants = (
                    "disks" in node.meta and
                    len(node.meta["disks"]) != len(
                        filter(lambda d: d["type"] == "disk",
                               node.attributes.volumes)),
                    regenerate_volumes
                )
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes("disks",
                                                             node_id=node.id)
                    except Exception as exc:
                        msg = ("Failed to generate volumes "
                               "info for node '{0}': '{1}'").format(
                                   node.name or nd.get("mac")
                                   or nd.get("id"),
                                   str(exc) or "see logs for details")
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()
            if is_agent:
                # Update node's NICs.
                network_manager.update_interfaces_info(node)

            nodes_updated.append(node)
            db().commit()
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if nd['cluster_id']:
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)

        # we need to eager-load everything that is used in render
        nodes = db().query(Node).options(
            joinedload('cluster'),
            joinedload('interfaces'),
            joinedload('interfaces.assigned_networks')).\
            filter(Node.id.in_([n.id for n in nodes_updated])).all()
        return self.render(nodes)
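
Example #11 differs from Example #9 mainly in how it picks the network manager from the cluster's net_provider ("nova_network" vs "neutron"), falling back to NetworkManager when the node has no cluster. Factored out, that selection could read as the hypothetical helper below, which mirrors the branches above, including the fallback the comment calls an "essential rollback".

    # Hypothetical helper extracting the network-manager selection above.
    def get_network_manager(cluster):
        if cluster and cluster.net_provider == "neutron":
            return NeutronManager()
        # nova_network clusters and nodes without a cluster both use the
        # default manager.
        return NetworkManager()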