Example 1
    @classmethod
    def update(cls, instance, data):
        """Update Node instance with specified parameters in DB.

        This includes:

        * adding node to Cluster (if cluster_id is not None in data) \
        (see :func:`add_into_cluster`)
        * updating roles for Node if it belongs to Cluster \
        (see :func:`update_roles` and :func:`update_pending_roles`)
        * removing node from Cluster (if cluster_id is None in data) \
        (see :func:`remove_from_cluster`)
        * updating interfaces for Node in DB (see :func:`update_interfaces`)
        * creating default Node attributes (see :func:`create_attributes`)

        :param instance: Node instance to update
        :param data: dictionary of key-value pairs as object fields
        :returns: Node instance
        """
        data.pop("id", None)
        data.pop("network_data", None)

        roles = data.pop("roles", None)
        pending_roles = data.pop("pending_roles", None)
        new_meta = data.pop("meta", None)

        update_by_agent = data.pop("is_agent", False)

        disks_changed = None
        if new_meta and "disks" in new_meta and "disks" in instance.meta:
            key = operator.itemgetter("name")

            new_disks = sorted(new_meta["disks"], key=key)
            old_disks = sorted(instance.meta["disks"], key=key)

            disks_changed = (new_disks != old_disks)

        # TODO(enchantner): fix this temporary hack in clients
        if "cluster_id" not in data and "cluster" in data:
            cluster_id = data.pop("cluster", None)
            data["cluster_id"] = cluster_id

        if new_meta:
            instance.update_meta(new_meta)
            # The call to update_interfaces will execute a select query for
            # the current instance. This appears to overwrite the object in the
            # current session and we lose the meta changes.
            db().flush()
            if cls.hardware_info_locked(instance):
                logger.debug("Interfaces are locked for update on node %s",
                             instance.human_readable_name)
            else:
                instance.ip = data.pop("ip", None) or instance.ip
                instance.mac = data.pop("mac", None) or instance.mac
                db().flush()
                cls.update_interfaces(instance, update_by_agent)

        cluster_changed = False
        add_to_cluster = False
        if "cluster_id" in data:
            new_cluster_id = data.pop("cluster_id")
            if instance.cluster_id:
                if new_cluster_id is None:
                    # removing node from cluster
                    cluster_changed = True
                    cls.remove_from_cluster(instance)
                elif new_cluster_id != instance.cluster_id:
                    # changing node cluster to another
                    # (is currently not allowed)
                    raise errors.CannotUpdate(
                        u"Changing cluster on the fly is not allowed"
                    )
            else:
                if new_cluster_id is not None:
                    # assigning node to cluster
                    cluster_changed = True
                    add_to_cluster = True
                    instance.cluster_id = new_cluster_id

        if "group_id" in data:
            new_group_id = data.pop("group_id")
            if instance.group_id != new_group_id:
                nm = Cluster.get_network_manager(instance.cluster)
                nm.clear_assigned_networks(instance)
                nm.clear_bond_configuration(instance)
            instance.group_id = new_group_id
            add_to_cluster = True

        # calculating flags
        roles_changed = (
            roles is not None and set(roles) != set(instance.roles)
        )
        pending_roles_changed = (
            pending_roles is not None and
            set(pending_roles) != set(instance.pending_roles)
        )

        super(Node, cls).update(instance, data)

        if roles_changed:
            cls.update_roles(instance, roles)
        if pending_roles_changed:
            cls.update_pending_roles(instance, pending_roles)

        if add_to_cluster:
            cls.add_into_cluster(instance, instance.cluster_id)

        if any((
            roles_changed,
            pending_roles_changed,
            cluster_changed,
            disks_changed,
        )) and instance.status not in (
            consts.NODE_STATUSES.provisioning,
            consts.NODE_STATUSES.deploying
        ):
            # TODO(eli): we somehow should move this
            # condition into extension, in order to do
            # that probably we will have to create separate
            # table to keep disks which were used to create
            # volumes mapping.
            # Should be solved as a part of blueprint
            # https://blueprints.launchpad.net/fuel/+spec
            #                                 /volume-manager-refactoring
            fire_callback_on_node_update(instance)

        return instance
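A minimal usage sketch (not part of the source above): assuming node is an already-fetched Node instance and update is exposed as a classmethod on a Node objects wrapper, a call site might look like the following. The get_by_uid lookup is an assumed helper; the keys shown mirror the ones the method pops from data.

# Hypothetical call site: assign the node to cluster 1 and request roles.
# Keys the method does not pop explicitly (e.g. "hostname") fall through to
# the generic super(Node, cls).update(instance, data) call.
node = Node.get_by_uid("42")  # assumed lookup helper
Node.update(node, {
    "cluster_id": 1,
    "roles": ["controller"],
    "pending_roles": ["ceph-osd"],
    "hostname": "node-42",
})

# Re-assigning a node that already belongs to a different cluster is rejected:
# the method raises errors.CannotUpdate("Changing cluster on the fly is not allowed").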
Example 2
    # get_m is injected by a mock.patch decorator applied to this test
    # (the decorator itself is not shown in this snippet).
    def test_fire_callback_on_node_update(self, get_m):
        node = mock.MagicMock()
        fire_callback_on_node_update(node)

        for ext in get_m.return_value:
            ext.on_node_update.assert_called_once_with(node)
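The test above only exercises the consumer side of the callback. A minimal sketch of a dispatcher consistent with this test, assuming a get_extensions() helper (a hypothetical name here; in the test it is the patched callable bound to get_m) that returns the loaded extension objects:

def fire_callback_on_node_update(node):
    # Hand the updated node to every loaded extension; each extension's
    # on_node_update hook is invoked exactly once with the node, matching
    # the assert_called_once_with(node) expectation in the test above.
    for extension in get_extensions():  # hypothetical helper, patched as get_m
        extension.on_node_update(node)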
Example 3
    @classmethod
    def update(cls, instance, data):
        """Update Node instance with specified parameters in DB.
        This includes:

        * adding node to Cluster (if cluster_id is not None in data) \
        (see :func:`add_into_cluster`)
        * updating roles for Node if it belongs to Cluster \
        (see :func:`update_roles` and :func:`update_pending_roles`)
        * removing node from Cluster (if cluster_id is None in data) \
        (see :func:`remove_from_cluster`)
        * updating interfaces for Node in DB (see :func:`update_interfaces`)
        * creating default Node attributes (see :func:`create_attributes`)

        :param instance: Node instance to update
        :param data: dictionary of key-value pairs as object fields
        :returns: Node instance
        """
        data.pop("id", None)
        data.pop("network_data", None)

        roles = data.pop("roles", None)
        pending_roles = data.pop("pending_roles", None)
        new_meta = data.pop("meta", None)

        update_by_agent = data.pop("is_agent", False)

        disks_changed = None
        if new_meta and "disks" in new_meta and "disks" in instance.meta:
            key = operator.itemgetter("name")

            new_disks = sorted(new_meta["disks"], key=key)
            old_disks = sorted(instance.meta["disks"], key=key)

            disks_changed = (new_disks != old_disks)

        # TODO(enchantner): fix this temporary hack in clients
        if "cluster_id" not in data and "cluster" in data:
            cluster_id = data.pop("cluster", None)
            data["cluster_id"] = cluster_id

        if new_meta:
            instance.update_meta(new_meta)
            # The call to update_interfaces will execute a select query for
            # the current instance. This appears to overwrite the object in the
            # current session and we lose the meta changes.
            db().flush()
            if cls.hardware_info_locked(instance):
                logger.info("Interfaces are locked for update on node %s",
                            instance.human_readable_name)
            else:
                cls.update_interfaces(instance, update_by_agent)

        cluster_changed = False
        if "cluster_id" in data:
            new_cluster_id = data.pop("cluster_id")
            if instance.cluster_id:
                if new_cluster_id is None:
                    # removing node from cluster
                    cluster_changed = True
                    cls.remove_from_cluster(instance)
                elif new_cluster_id != instance.cluster_id:
                    # changing node cluster to another
                    # (is currently not allowed)
                    raise errors.CannotUpdate(
                        u"Changing cluster on the fly is not allowed")
            else:
                if new_cluster_id is not None:
                    # assigning node to cluster
                    cluster_changed = True
                    cls.add_into_cluster(instance, new_cluster_id)

        if "group_id" in data:
            new_group_id = data.pop("group_id")
            if instance.group_id != new_group_id:
                nm = Cluster.get_network_manager(instance.cluster)
                nm.clear_assigned_networks(instance)
                nm.clear_bond_configuration(instance)
            instance.group_id = new_group_id
            cls.add_into_cluster(instance, instance.cluster_id)

        # calculating flags
        roles_changed = (roles is not None
                         and set(roles) != set(instance.roles))
        pending_roles_changed = (
            pending_roles is not None
            and set(pending_roles) != set(instance.pending_roles))

        super(Node, cls).update(instance, data)

        if roles_changed:
            cls.update_roles(instance, roles)
        if pending_roles_changed:
            cls.update_pending_roles(instance, pending_roles)

        if any((
                roles_changed,
                pending_roles_changed,
                cluster_changed,
                disks_changed,
        )) and instance.status not in (consts.NODE_STATUSES.provisioning,
                                       consts.NODE_STATUSES.deploying):
            # TODO(eli): we somehow should move this
            # condition into extension, in order to do
            # that probably we will have to create separate
            # table to keep disks which were used to create
            # volumes mapping.
            # Should be solved as a part of blueprint
            # https://blueprints.launchpad.net/fuel/+spec
            #                                 /volume-manager-refactoring
            fire_callback_on_node_update(instance)

        return instance
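Both versions of update detect disk changes by sorting the disk dictionaries by name before comparing them, so a mere reordering of the agent-reported disks does not count as a change, while a changed attribute does. A self-contained illustration with hypothetical disk data:

import operator

# The stored metadata lists the disks in a different order than the new report.
old_disks = [{"name": "sdb", "size": 100}, {"name": "sda", "size": 50}]
new_disks = [{"name": "sda", "size": 50}, {"name": "sdb", "size": 100}]

key = operator.itemgetter("name")
print(sorted(new_disks, key=key) != sorted(old_disks, key=key))  # False: only order differs

# An actual change (sdb grew to 200) is detected after sorting.
new_disks[1]["size"] = 200
print(sorted(new_disks, key=key) != sorted(old_disks, key=key))  # True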