Example #1
    def mellanox_settings(cls, node_attrs, cluster, networks):
        """Mellanox settings

        Serialize mellanox node attrs, then it will be
        merged with common attributes, if mellanox plugin or iSER storage
        enabled.
        """
        # Get Mellanox data
        neutron_mellanox_data = Cluster.get_attributes(cluster).editable.get("neutron_mellanox", {})

        # Get storage data
        storage_data = Cluster.get_attributes(cluster).editable.get("storage", {})

        # Get network manager
        nm = Cluster.get_network_manager(cluster)

        # Init mellanox dict
        node_attrs["neutron_mellanox"] = {}

        # Find Physical port for VFs generation
        if "plugin" in neutron_mellanox_data and neutron_mellanox_data["plugin"]["value"] == "ethernet":
            node_attrs = cls.set_mellanox_ml2_config(node_attrs, nm, networks)

        # Fix network scheme to have physical port for RDMA if iSER enabled
        if "iser" in storage_data and storage_data["iser"]["value"]:
            node_attrs = cls.fix_iser_port(node_attrs, nm, networks)

        return node_attrs
Example #2
    def update_pending_roles(cls, instance, new_pending_roles):
        """Update pending_roles for Node instance.
        Logs a warning if node doesn't belong to Cluster

        :param instance: Node instance
        :param new_pending_roles: list of new pending role names
        :returns: None
        """
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign pending roles to node "
                u"'{0}' which isn't added to cluster".format(instance.name or instance.id)
            )
            return

        logger.debug(u"Updating pending roles for node {0}: {1}".format(instance.id, new_pending_roles))

        if new_pending_roles == []:
            instance.pending_role_list = []
            # TODO(enchantner): research why the hell we need this
            Cluster.clear_pending_changes(instance.cluster, node_id=instance.id)
        else:
            instance.pending_role_list = (
                db()
                .query(models.Role)
                .filter_by(release_id=instance.cluster.release_id)
                .filter(models.Role.name.in_(new_pending_roles))
                .all()
            )

        db().flush()
        db().refresh(instance)
Example #3
    def mellanox_settings(cls, node_attrs, node):
        """Serialize mellanox node attrs, then it will be
        merged with common attributes, if mellanox plugin or iSER storage
        enabled.
        """
        # Get Mellanox data
        neutron_mellanox_data = \
            Cluster.get_attributes(node.cluster).editable \
            .get('neutron_mellanox', {})

        # Get storage data
        storage_data = \
            Cluster.get_attributes(node.cluster).editable.get('storage', {})

        # Get network manager
        nm = objects.Node.get_network_manager(node)

        # Init mellanox dict
        node_attrs['neutron_mellanox'] = {}

        # Find Physical port for VFs generation
        if 'plugin' in neutron_mellanox_data and \
           neutron_mellanox_data['plugin']['value'] == 'ethernet':
            node_attrs = cls.set_mellanox_ml2_config(node_attrs, node, nm)

        # Fix network scheme to have physical port for RDMA if iSER enabled
        if 'iser' in storage_data and storage_data['iser']['value']:
            node_attrs = cls.fix_iser_port(node_attrs, node, nm)

        return node_attrs
Example #4
    def remove_from_cluster(cls, instance):
        """Remove Node from Cluster.

        Also drops networks assignment for Node and clears both
        roles and pending roles

        :param instance: Node instance
        :returns: None
        """
        if instance.cluster:
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )
            netmanager = Cluster.get_network_manager(
                instance.cluster
            )
            netmanager.clear_assigned_networks(instance)
            netmanager.clear_bond_configuration(instance)
        cls.update_roles(instance, [])
        cls.update_pending_roles(instance, [])
        cls.remove_replaced_params(instance)
        instance.cluster_id = None
        instance.group_id = None
        instance.kernel_params = None
        instance.primary_roles = []
        instance.hostname = cls.default_slave_name(instance)

        from nailgun.objects import OpenstackConfig
        OpenstackConfig.disable_by_nodes([instance])

        db().flush()
        db().refresh(instance)
Example #5
    def test_get_auth_credentials(self):
        expected_username = "******"
        expected_password = "******"
        expected_tenant = "test"
        expected_auth_host = "0.0.0.0"
        expected_auth_url = "http://{0}:{1}/{2}/".format(
            expected_auth_host, settings.AUTH_PORT,
            settings.OPENSTACK_API_VERSION["keystone"])

        expected = (expected_username, expected_password, expected_tenant,
                    expected_auth_url)

        cluster = self.env.create_cluster(api=False)
        updated_attributes = {
            "editable": {
                "workloads_collector": {
                    "username": {"value": expected_username},
                    "password": {"value": expected_password},
                    "tenant": {"value": expected_tenant}
                }
            }
        }
        Cluster.update_attributes(cluster, updated_attributes)

        get_host_for_auth_path = ("nailgun.statistics.utils."
                                  "get_mgmt_ip_of_cluster_controller")

        with patch(get_host_for_auth_path,
                   return_value=expected_auth_host):
            client_provider = helpers.ClientProvider(cluster)
            creds = client_provider.credentials

            self.assertEqual(expected, creds)
Example #6
    def _generate_external_network(cls, cluster):
        public_cidr, public_gw = db().query(
            NetworkGroup.cidr,
            NetworkGroup.gateway
        ).filter_by(
            group_id=Cluster.get_default_group(cluster).id,
            name='public'
        ).first()

        return {
            "L3": {
                "subnet": public_cidr,
                "gateway": public_gw,
                "nameservers": [],
                "floating": utils.join_range(
                    cluster.network_config.floating_ranges[0]),
                "enable_dhcp": False
            },
            "L2": {
                "network_type": "flat",
                "segment_id": None,
                "router_ext": True,
                "physnet": "physnet1"
            },
            "tenant": Cluster.get_creds(cluster)['tenant']['value'],
            "shared": False
        }
Example #7
    def create_cluster(self, api=True, exclude=None, **kwargs):
        cluster_data = {
            'name': 'cluster-api-' + str(randint(0, 1000000)),
        }

        if kwargs:
            cluster_data.update(kwargs)

        if 'release_id' not in cluster_data:
            cluster_data['release_id'] = self.create_release(api=False).id

        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del cluster_data[ex]
                except KeyError as err:
                    logger.warning(err)
        if api:
            resp = self.app.post(
                reverse('ClusterCollectionHandler'),
                jsonutils.dumps(cluster_data),
                headers=self.default_headers,
                expect_errors=True
            )
            self.tester.assertEqual(resp.status_code, 201)
            cluster = resp.json_body
            self.clusters.append(
                Cluster.get_by_uid(cluster['id'])
            )
        else:
            cluster = Cluster.create(cluster_data)
            db().commit()
            self.clusters.append(cluster)

        return cluster
Example #8
    def create(cls, data):
        new_group = super(NodeGroup, cls).create(data)
        try:
            cluster = Cluster.get_by_uid(new_group.cluster_id)
            nm = Cluster.get_network_manager(cluster)
            nst = cluster.network_config.segmentation_type
            # We have two node groups here when user adds the first custom
            # node group.
            if NodeGroupCollection.get_by_cluster_id(cluster.id).count() == 2:
                nm.ensure_gateways_present_in_default_node_group(cluster)
            nm.create_network_groups(
                cluster, neutron_segment_type=nst, node_group_id=new_group.id,
                set_all_gateways=True)
            nm.create_admin_network_group(new_group.cluster_id, new_group.id)
        except (
            errors.OutOfVLANs,
            errors.OutOfIPs,
            errors.NoSuitableCIDR
        ) as exc:
            db().delete(new_group)
            raise errors.CannotCreate(exc.message)

        db().flush()
        db().refresh(cluster)
        return new_group
Example #9
    def update_pending_roles(cls, instance, new_pending_roles):
        """Update pending_roles for Node instance.

        Logs a warning if node doesn't belong to Cluster

        :param instance: Node instance
        :param new_pending_roles: list of new pending role names
        :returns: None
        """
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign pending roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.full_name))
            return

        logger.debug(
            u"Updating pending roles for node {0}: {1}".format(
                instance.full_name,
                new_pending_roles))

        if new_pending_roles == []:
            # TODO(enchantner): research why the hell we need this
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )

        instance.pending_roles = new_pending_roles
        db().flush()
Example #10
    def update_pending_roles(cls, instance, new_pending_roles):
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign pending roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.name or instance.id
                )
            )
            return

        logger.debug(
            u"Updating pending roles for node {0}: {1}".format(
                instance.id,
                new_pending_roles
            )
        )

        if new_pending_roles == []:
            instance.pending_role_list = []
            # research why the hell we need this
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )
        else:
            instance.pending_role_list = db().query(models.Role).filter_by(
                release_id=instance.cluster.release_id,
            ).filter(
                models.Role.name.in_(new_pending_roles)
            ).all()

        db().flush()
        db().refresh(instance)
Example #11
    def update_volumes(cls, instance):
        attrs = instance.attributes
        if not attrs:
            attrs = cls.create_attributes(instance)

        try:
            attrs.volumes = instance.volume_manager.gen_volumes_info()
        except Exception as exc:
            msg = (
                u"Failed to generate volumes "
                u"info for node '{0}': '{1}'"
            ).format(
                instance.name or instance.mac or instance.id,
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            Notification.create({
                "topic": "error",
                "message": msg,
                "node_id": instance.id
            })

        if instance.cluster_id:
            Cluster.add_pending_changes(
                instance.cluster,
                "disks",
                node_id=instance.id
            )

        db().add(attrs)
        db().flush()
Example #12
    def create_cluster(self, api=True, exclude=None, **kwargs):
        cluster_data = {"name": "cluster-api-" + str(randint(0, 1000000))}
        editable_attributes = kwargs.pop("editable_attributes", None)

        if kwargs:
            cluster_data.update(kwargs)

        if "release_id" not in cluster_data:
            cluster_data["release_id"] = self.create_release(api=False).id

        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del cluster_data[ex]
                except KeyError as err:
                    logger.warning(err)
        if api:
            resp = self.app.post(
                reverse("ClusterCollectionHandler"),
                jsonutils.dumps(cluster_data),
                headers=self.default_headers,
                expect_errors=True,
            )
            self.tester.assertEqual(resp.status_code, 201, resp.body)
            cluster = resp.json_body
            cluster_db = Cluster.get_by_uid(cluster["id"])
        else:
            cluster = Cluster.create(cluster_data)
            cluster_db = cluster
            db().commit()
        self.clusters.append(cluster_db)

        if editable_attributes:
            Cluster.patch_attributes(cluster_db, {"editable": editable_attributes})
        return cluster
Example #13
    def remove_from_cluster(cls, instance):
        """Remove Node from Cluster.
        Also drops networks assignment for Node and clears both
        roles and pending roles

        :param instance: Node instance
        :returns: None
        """
        if instance.cluster:
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )
            netmanager = Cluster.get_network_manager(
                instance.cluster
            )
            netmanager.clear_assigned_networks(instance)
            netmanager.clear_bond_configuration(instance)
        cls.update_roles(instance, [])
        cls.update_pending_roles(instance, [])
        cls.remove_replaced_params(instance)
        instance.cluster_id = None
        instance.group_id = None
        instance.kernel_params = None
        instance.reset_name_to_default()
        db().flush()
        db().refresh(instance)
Example #14
        def check_creds(updated_attrs, expected_creds):
            Cluster.update_attributes(cluster, updated_attrs)
            with patch(get_host_for_auth_path,
                       return_value=expected_auth_host):
                client_provider = helpers.ClientProvider(cluster)
                creds = client_provider.credentials

                self.assertEqual(expected_creds, creds)
Example #15
 def fn():
     # try to commit wrong data
     Cluster.create(
         {
             "id": "500",
             "release_id": "500"
         }
     )
     self.db.commit()
Example #16
 def remove_tag(cls, instance, tag_name):
     from nailgun.objects import Cluster
     cls.remove_tag_from_roles(instance, tag_name)
     res = instance.tags_metadata.pop(tag_name, None)
     for cluster in instance.clusters:
         if tag_name not in cluster.tags_metadata:
             Cluster.remove_tag_from_roles(cluster, tag_name)
             Cluster.remove_primary_tag(cluster, tag_name)
     return bool(res)
Example #17
    def add_pending_change(cls, instance, change):
        """Add pending change into Cluster.

        :param instance: Node instance
        :param change: string value of cluster change
        :returns: None
        """
        if instance.cluster:
            Cluster.add_pending_changes(instance.cluster, change, node_id=instance.id)
Example #18
    def generate_network_metadata(cls, cluster):
        nodes = dict()
        nm = Cluster.get_network_manager(cluster)

        for n in Cluster.get_nodes_not_for_deletion(cluster):
            name = Node.get_slave_name(n)
            node_roles = Node.all_roles(n)

            ip_by_net = {"fuelweb_admin": None, "storage": None, "management": None, "public": None}
            for net in ip_by_net:
                netgroup = nm.get_node_network_by_netname(n, net)
                if netgroup.get("ip"):
                    ip_by_net[net] = netgroup["ip"].split("/")[0]

            netw_roles = {
                "admin/pxe": ip_by_net["fuelweb_admin"],
                "fw-admin": ip_by_net["fuelweb_admin"],
                "keystone/api": ip_by_net["management"],
                "swift/api": ip_by_net["management"],
                "sahara/api": ip_by_net["management"],
                "ceilometer/api": ip_by_net["management"],
                "cinder/api": ip_by_net["management"],
                "glance/api": ip_by_net["management"],
                "heat/api": ip_by_net["management"],
                "nova/api": ip_by_net["management"],
                "murano/api": ip_by_net["management"],
                "horizon": ip_by_net["management"],
                "management": ip_by_net["management"],
                "mgmt/api": ip_by_net["management"],
                "mgmt/database": ip_by_net["management"],
                "mgmt/messaging": ip_by_net["management"],
                "mgmt/corosync": ip_by_net["management"],
                "mgmt/memcache": ip_by_net["management"],
                "mgmt/vip": ip_by_net["management"],
                "mongo/db": ip_by_net["management"],
                "ceph/public": ip_by_net["management"],
                "storage": ip_by_net["storage"],
                "ceph/replication": ip_by_net["storage"],
                "swift/replication": ip_by_net["storage"],
                "cinder/iscsi": ip_by_net["storage"],
                "ex": ip_by_net["public"],
                "public/vip": ip_by_net["public"],
                "ceph/radosgw": ip_by_net["public"],
            }

            nodes[name] = {
                "uid": n.uid,
                "fqdn": n.fqdn,
                "name": name,
                "user_node_name": n.name,
                "swift_zone": n.uid,
                "node_roles": node_roles,
                "network_roles": netw_roles,
            }

        return dict(nodes=nodes, vips=nm.assign_vips_for_net_groups(cluster))
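
For reference, each value in the returned nodes dict carries the per-node metadata assembled above. A sketch of one entry, with hypothetical slave name, FQDN and addresses (only a few of the network-role keys are shown):

    example_entry = {
        "uid": "1",
        "fqdn": "node-1.example.com",        # hypothetical
        "name": "node-1",
        "user_node_name": "Controller",      # the user-visible node name
        "swift_zone": "1",                   # same as uid
        "node_roles": ["controller"],
        "network_roles": {
            "admin/pxe": "10.20.0.3",        # ip_by_net["fuelweb_admin"]
            "management": "192.168.0.3",     # ip_by_net["management"]
            "storage": "192.168.1.3",        # ip_by_net["storage"]
            "ex": "172.16.0.3",              # ip_by_net["public"]
            # ...the remaining keys map onto the same four addresses
        },
    }
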
Example #19
    def _update_cluster_data(cls, instance):
        cluster = instance.cluster

        if instance.name == "deploy":
            if instance.status == "ready":
                # If for some reason the orchestrator
                # didn't send ready status for node
                # we should set it explicitly
                for n in cluster.nodes:
                    if n.status == "deploying":
                        n.status = "ready"
                        n.progress = 100

                cls.__update_cluster_status(cluster, "operational")

                Cluster.clear_pending_changes(cluster)

            elif instance.status == "error" and not TaskHelper.before_deployment_error(instance):
                # We don't want to set cluster status to
                # error because we don't want to lock
                # settings if cluster wasn't deployed

                cls.__update_cluster_status(cluster, "error")

        elif instance.name == "deployment" and instance.status == "error":
            cls.__update_cluster_status(cluster, "error")

            q_nodes_to_error = TaskHelper.get_nodes_to_deployment_error(cluster)

            cls.__update_nodes_to_error(q_nodes_to_error, error_type="deploy")

        elif instance.name == "provision" and instance.status == "error":
            cls.__update_cluster_status(cluster, "error")

            q_nodes_to_error = TaskHelper.get_nodes_to_provisioning_error(cluster)

            cls.__update_nodes_to_error(q_nodes_to_error, error_type="provision")

        elif instance.name == "stop_deployment":
            if instance.status == "error":
                cls.__update_cluster_status(cluster, "error")
            else:
                cls.__update_cluster_status(cluster, "stopped")

        elif instance.name == consts.TASK_NAMES.update:
            if instance.status == consts.TASK_STATUSES.error:
                cls.__update_cluster_status(cluster, consts.CLUSTER_STATUSES.update_error)

                q_nodes_to_error = TaskHelper.get_nodes_to_deployment_error(cluster)
                cls.__update_nodes_to_error(q_nodes_to_error, error_type=consts.NODE_ERRORS.deploy)

            elif instance.status == consts.TASK_STATUSES.ready:
                cls.__update_cluster_status(cluster, consts.CLUSTER_STATUSES.operational)
                cluster.release_id = cluster.pending_release_id
                cluster.pending_release_id = None
Example #20
 def remove_from_cluster(cls, instance):
     Cluster.clear_pending_changes(
         instance.cluster,
         node_id=instance.id
     )
     Cluster.get_network_manager(
         instance.cluster
     ).clear_assigned_networks(instance)
     instance.cluster_id = None
     instance.roles = instance.pending_roles = []
     instance.reset_name_to_default()
     db().flush()
     db().refresh(instance)
Example #21
    def create(cls, data):
        new_group = super(NodeGroup, cls).create(data)
        try:
            cluster = Cluster.get_by_uid(new_group.cluster_id)
            nm = Cluster.get_network_manager(cluster)
            nst = cluster.network_config.segmentation_type
            nm.create_network_groups(cluster, nst, gid=new_group.id)
            nm.create_admin_network_group(new_group.cluster_id, new_group.id)
        except (errors.OutOfVLANs, errors.OutOfIPs, errors.NoSuitableCIDR, errors.InvalidNetworkPool) as exc:
            db().delete(new_group)
            raise errors.CannotCreate(exc.message)

        db().flush()
        return new_group
Example #22
    def test_clear_data_for_removed_cluster(self, get_info_mock, *_):
        cls_id, res_data = self.collect_for_operational_cluster(get_info_mock)

        cls = Cluster.get_by_uid(cls_id)
        Cluster.delete(cls)

        oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
        last = OpenStackWorkloadStats.get_last_by(
            cls_id, consts.OSWL_RESOURCE_TYPES.vm)
        removed = dict(self.vms_info[0])
        removed['time'] = last.updated_time.isoformat()
        res_data.update({'removed': [removed], 'current': []})
        # current data is cleared when cluster is deleted
        self.assertEqual(last.resource_data, res_data)
Example #23
    def create(cls, data):
        new_group = super(NodeGroup, cls).create(data)
        try:
            cluster = Cluster.get_by_uid(new_group.cluster_id)
            nm = Cluster.get_network_manager(cluster)
            nst = cluster.network_config.segmentation_type
            nm.create_network_groups(cluster, nst, gid=new_group.id)
            nm.create_admin_network_group(new_group.cluster_id, new_group.id)
        except (errors.OutOfVLANs, errors.OutOfIPs, errors.NoSuitableCIDR,
                errors.InvalidNetworkPool) as exc:
            db().delete(new_group)
            raise errors.CannotCreate(exc.message)

        db().flush()
        return new_group
Example #24
    def process_deployment_for_cluster(cls, cluster, cluster_data):
        from cluster_upgrade.objects.relations import UpgradeRelationObject

        relation = UpgradeRelationObject.get_cluster_relation(cluster.id)

        orig_env = Cluster.get_by_uid(relation.orig_cluster_id)
        seed_env = Cluster.get_by_uid(relation.seed_cluster_id)

        cluster_data['upgrade'] = {
            'relation_info': {
                'orig_cluster_id': relation.orig_cluster_id,
                'seed_cluster_id': relation.seed_cluster_id,
                'orig_cluster_version': orig_env.release.environment_version,
                'seed_cluster_version': seed_env.release.environment_version
            }
        }
Example #25
    def generate_l2(cls, cluster):
        res = {
            "base_mac": cluster.network_config.base_mac,
            "segmentation_type": cluster.network_config.segmentation_type,
            "phys_nets": {
                "physnet1": {
                    "bridge": "br-ex",
                    "vlan_range": None
                }
            }
        }
        if cluster.network_config.segmentation_type in \
                (consts.NEUTRON_SEGMENT_TYPES.gre,
                 consts.NEUTRON_SEGMENT_TYPES.tun):
            res["tunnel_id_ranges"] = utils.join_range(
                cluster.network_config.gre_id_range)
        elif cluster.network_config.segmentation_type == \
                consts.NEUTRON_SEGMENT_TYPES.vlan:
            res["phys_nets"]["physnet2"] = {
                "bridge": "br-prv",
                "vlan_range":
                utils.join_range(cluster.network_config.vlan_range)
            }

        # Set non-default ml2 configurations
        attrs = Cluster.get_attributes(cluster).editable
        if 'neutron_mellanox' in attrs and \
                attrs['neutron_mellanox']['plugin']['value'] == 'ethernet':
            res['mechanism_drivers'] = 'mlnx,openvswitch'
            seg_type = cluster.network_config.segmentation_type
            res['tenant_network_types'] = seg_type
            res['type_drivers'] = '{0},flat,local'.format(seg_type)

        return res
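
As a rough illustration, for a cluster with VLAN segmentation and no Mellanox plugin the method above returns a dict of this shape (the MAC and VLAN range values are made up):

    example_l2 = {
        "base_mac": "fa:16:3e:00:00:00",   # cluster.network_config.base_mac
        "segmentation_type": "vlan",
        "phys_nets": {
            "physnet1": {"bridge": "br-ex", "vlan_range": None},
            # added only for the vlan segmentation type
            "physnet2": {"bridge": "br-prv", "vlan_range": "1000:1030"},
        },
    }
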
Example #26
    def get_kernel_params(cls, instance):
        """Get kernel params

        Return cluster kernel_params if they weren't replaced by custom params.
        """
        return (instance.kernel_params
                or Cluster.get_default_kernel_params(instance.cluster))
Example #27
    def add_into_cluster(cls, instance, cluster_id):
        """Adds Node to Cluster by its ID.
        Also assigns networks by default for Node.

        :param instance: Node instance
        :param cluster_id: Cluster ID
        :returns: None
        """
        instance.cluster_id = cluster_id
        db().flush()
        db().refresh(instance)
        instance.kernel_params = Cluster.get_default_kernel_params(
            instance.cluster)
        db().flush()
        network_manager = Cluster.get_network_manager(instance.cluster)
        network_manager.assign_networks_by_default(instance)
Example #28
 def _generate_external_network(cls, cluster):
     public_cidr, public_gw = db().query(
         NetworkGroup.cidr,
         NetworkGroup.gateway
     ).filter_by(
         group_id=Cluster.get_default_group(cluster).id,
         name='public'
     ).first()
     join_range = lambda r: (":".join(map(str, r)) if r else None)
     return {
         "L3": {
             "subnet": public_cidr,
             "gateway": public_gw,
             "nameservers": [],
             "floating": join_range(
                 cluster.network_config.floating_ranges[0]),
             "enable_dhcp": False
         },
         "L2": {
             "network_type": "flat",
             "segment_id": None,
             "router_ext": True,
             "physnet": "physnet1"
         },
         "tenant": objects.Cluster.get_creds(cluster)['tenant']['value'],
         "shared": False
     }
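
The inline join_range helper renders a two-element range as a colon-separated string and returns None for an empty or missing range. A quick check of its behavior:

    join_range = lambda r: (":".join(map(str, r)) if r else None)

    assert join_range(["172.16.0.130", "172.16.0.254"]) == "172.16.0.130:172.16.0.254"
    assert join_range([]) is None
    assert join_range(None) is None
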
Example #29
    def generate_l2(cls, cluster):
        join_range = lambda r: (":".join(map(str, r)) if r else None)
        res = {
            "base_mac": cluster.network_config.base_mac,
            "segmentation_type": cluster.network_config.segmentation_type,
            "phys_nets": {
                "physnet1": {
                    "bridge": "br-ex",
                    "vlan_range": None
                }
            }
        }
        if cluster.network_config.segmentation_type == 'gre':
            res["tunnel_id_ranges"] = join_range(
                cluster.network_config.gre_id_range)
        elif cluster.network_config.segmentation_type == 'vlan':
            res["phys_nets"]["physnet2"] = {
                "bridge": "br-prv",
                "vlan_range": join_range(cluster.network_config.vlan_range)
            }

        # Set non-default ml2 configurations
        attrs = Cluster.get_attributes(cluster).editable
        if 'neutron_mellanox' in attrs and \
                attrs['neutron_mellanox']['plugin']['value'] == 'ethernet':
            res['mechanism_drivers'] = 'mlnx,openvswitch'
            seg_type = cluster.network_config.segmentation_type
            res['tenant_network_types'] = seg_type
            res['type_drivers'] = '{0},flat,local'.format(seg_type)

        return res
Example #30
    def __update_cluster_status(cls, cluster, status):
        logger.debug(
            "Updating cluster (%s) status: from %s to %s",
            cluster.full_name, cluster.status, status)

        data = {'status': status}

        # FIXME(aroma): remove updating of 'deployed_before'
        # when stop action is reworked. 'deployed_before'
        # flag identifies whether stop action is allowed for the
        # cluster. Please, refer to [1] for more details.
        # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
        if status == consts.CLUSTER_STATUSES.operational:
            Cluster.set_deployed_before_flag(cluster, value=True)

        Cluster.update(cluster, data)
Example #31
    def get_common_attrs(cls, cluster, attrs):
        """Cluster network attributes."""
        common = cls.network_provider_cluster_attrs(cluster)
        common.update(cls.network_ranges(
            Cluster.get_default_group(cluster).id))
        common.update({'master_ip': settings.MASTER_IP})
        common['nodes'] = deepcopy(attrs['nodes'])

        # Addresses
        for node in get_nodes_not_for_deletion(cluster):
            netw_data = node.network_data
            addresses = {}
            for net in node.cluster.network_groups:
                if net.name == 'public' and \
                        not objects.Node.should_have_public(node):
                    continue
                if net.meta.get('render_addr_mask'):
                    addresses.update(
                        cls.get_addr_mask(netw_data, net.name,
                                          net.meta.get('render_addr_mask')))

            [
                n.update(addresses) for n in common['nodes']
                if n['uid'] == str(node.uid)
            ]
        return common
Example #32
    def checkout(self, instance):
        fetch_file = os.path.join(
            const.REPOS_DIR,
            instance.repo_name,
            '.git/FETCH_HEAD'
        )
        if os.path.exists(fetch_file):
            current_ts = time.time()
            cluster = Cluster.get_by_uid(instance.env_id)
            last_fetch = os.stat(fetch_file).st_mtime
            if cluster.status != CLUSTER_STATUSES.deployment and \
                current_ts - last_fetch < const.REPO_TTL:
                return

        logger.debug("Repo TTL exceeded. Fetching code...")
        ssh_cmd = self._get_ssh_cmd(instance.repo_name)

        if not os.path.exists(self._get_key_path(instance.repo_name)):
            logger.debug('Key file does not exist. Creating...')
            self._create_key_file(instance.repo_name)

        with instance.repo.git.custom_environment(GIT_SSH=ssh_cmd):
            commit = instance.repo.remotes.origin.fetch(refspec=instance.ref)
            commit = commit[0].commit
            instance.repo.head.reference = commit
            instance.repo.head.reset(index=True, working_tree=True)
Example #33
    def generate_l2(cls, cluster):
        res = {
            "base_mac": cluster.network_config.base_mac,
            "segmentation_type": cluster.network_config.segmentation_type,
            "phys_nets": {
                "physnet1": {
                    "bridge": "br-ex",
                    "vlan_range": None
                }
            }
        }
        if cluster.network_config.segmentation_type in \
                (consts.NEUTRON_SEGMENT_TYPES.gre,
                 consts.NEUTRON_SEGMENT_TYPES.tun):
            res["tunnel_id_ranges"] = utils.join_range(
                cluster.network_config.gre_id_range)
        elif cluster.network_config.segmentation_type == \
                consts.NEUTRON_SEGMENT_TYPES.vlan:
            res["phys_nets"]["physnet2"] = {
                "bridge": "br-prv",
                "vlan_range": utils.join_range(
                    cluster.network_config.vlan_range)
            }

        # Set non-default ml2 configurations
        attrs = Cluster.get_attributes(cluster).editable
        if 'neutron_mellanox' in attrs and \
                attrs['neutron_mellanox']['plugin']['value'] == 'ethernet':
            res['mechanism_drivers'] = 'mlnx,openvswitch'
            seg_type = cluster.network_config.segmentation_type
            res['tenant_network_types'] = seg_type
            res['type_drivers'] = '{0},flat,local'.format(seg_type)

        return res
Example #34
    def get_kernel_params(cls, instance):
        """Get kernel params

        Return cluster kernel_params if they weren't replaced by custom params.
        """
        return (instance.kernel_params or
                Cluster.get_default_kernel_params(instance.cluster))
Example #35
    def reset_to_discover(cls, instance):
        """Flush database objects which is not consistent with actual node
           configuration in the event of resetting node to discover state

        :param instance: Node database object
        :returns: None
        """
        node_data = {
            "online": False,
            "status": consts.NODE_STATUSES.discover,
            "pending_addition": True,
            "pending_deletion": False,
        }
        cls.update_volumes(instance)
        cls.update(instance, node_data)
        cls.move_roles_to_pending_roles(instance)
        # when node is reset to discover:
        # - cobbler system is deleted
        # - mac to ip mapping from dnsmasq.conf is deleted
        # imho we need to revert node to original state, as it was
        # added to cluster (without any additional state in database)
        # Cluster is the imported objects/Cluster class
        netmanager = Cluster.get_network_manager()
        netmanager.clear_assigned_ips(instance)
        db().flush()
Example #36
    def reset_to_discover(cls, instance):
        """Flush database objects which is not consistent with actual node

        configuration in the event of resetting node to discover state

        :param instance: Node database object
        :returns: None
        """
        node_data = {
            "online": False,
            "status": consts.NODE_STATUSES.discover,
            "pending_addition": True,
            "pending_deletion": False,
        }
        cls.update(instance, node_data)
        cls.move_roles_to_pending_roles(instance)
        # when node is reset to discover:
        # - cobbler system is deleted
        # - mac to ip mapping from dnsmasq.conf is deleted
        # imho we need to revert node to original state, as it was
        # added to cluster (without any additional state in database)
        netmanager = Cluster.get_network_manager()
        netmanager.clear_assigned_ips(instance)
        fire_callback_on_node_reset(instance)
        db().flush()
Example #37
 def prepare_for_provisioning(cls, instances):
     """Prepare environment for provisioning,
     update fqdns, assign admin IPs
     """
     cls.update_slave_nodes_fqdn(instances)
     netmanager = Cluster.get_network_manager()
     netmanager.assign_admin_ips(instances)
Example #38
    def generate_l2(cls, cluster):
        join_range = lambda r: (":".join(map(str, r)) if r else None)
        res = {
            "base_mac": cluster.network_config.base_mac,
            "segmentation_type": cluster.network_config.segmentation_type,
            "phys_nets": {}
        }
        if cluster.release.version not in ("2014.1.1-5.1", ):
            res["phys_nets"]["physnet1"] = {
                "bridge": "br-ex",
                "vlan_range": None
            }
        if cluster.network_config.segmentation_type == 'gre':
            res["tunnel_id_ranges"] = join_range(
                cluster.network_config.gre_id_range)
        elif cluster.network_config.segmentation_type == 'vlan':
            res["phys_nets"]["physnet2"] = {
                "bridge": "br-prv",
                "vlan_range": join_range(cluster.network_config.vlan_range)
            }

        # Set non-default ml2 configurations
        attrs = Cluster.get_attributes(cluster).editable
        if 'neutron_mellanox' in attrs and \
                attrs['neutron_mellanox']['plugin']['value'] == 'ethernet':
            res['mechanism_drivers'] = 'mlnx,openvswitch'
            seg_type = cluster.network_config.segmentation_type
            res['tenant_network_types'] = seg_type
            res['type_drivers'] = '{0},flat,local'.format(seg_type)

        return res
Example #39
    def get_common_attrs(cls, cluster, attrs):
        """Cluster network attributes."""
        common = cls.network_provider_cluster_attrs(cluster)
        common.update(
            cls.network_ranges(Cluster.get_default_group(cluster).id))
        common.update({'master_ip': settings.MASTER_IP})
        common['nodes'] = deepcopy(attrs['nodes'])

        # Addresses
        for node in get_nodes_not_for_deletion(cluster):
            netw_data = node.network_data
            addresses = {}
            for net in node.cluster.network_groups:
                if net.name == 'public' and \
                        not objects.Node.should_have_public(node):
                    continue
                if net.meta.get('render_addr_mask'):
                    addresses.update(cls.get_addr_mask(
                        netw_data,
                        net.name,
                        net.meta.get('render_addr_mask')))

            [n.update(addresses) for n in common['nodes']
             if n['uid'] == str(node.uid)]
        return common
Example #40
 def prepare_for_provisioning(cls, instances):
     """Prepare environment for provisioning,
     update fqdns, assign admin IPs
     """
     cls.update_slave_nodes_fqdn(instances)
     netmanager = Cluster.get_network_manager()
     netmanager.assign_admin_ips(instances)
Example #41
    def _reassign_template_networks(cls, instance):
        cluster = instance.nodegroup.cluster
        if not cluster.network_config.configuration_template:
            return

        nm = Cluster.get_network_manager(cluster)
        for node in cluster.nodes:
            nm.assign_networks_by_template(node)
Example #42
    def should_have_public_with_ip(cls, instance):
        """Returns True if node should have IP belonging to Public network

        :param instance: Node DB instance
        :returns: True when node has Public network
        """
        if Cluster.should_assign_public_to_all_nodes(instance.cluster):
            return True

        roles = itertools.chain(instance.roles, instance.pending_roles)
        roles_metadata = Cluster.get_roles(instance.cluster)

        for role in roles:
            if roles_metadata.get(role, {}).get('public_ip_required'):
                return True

        return False
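
The check above relies only on the public_ip_required flag inside each role's metadata. A hypothetical roles_metadata dict under which a node with the controller role would get a public IP:

    roles_metadata = {
        "controller": {"public_ip_required": True},  # triggers the early True
        "compute": {},                               # no flag, contributes nothing
    }
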
Example #43
    def should_have_public_with_ip(cls, instance):
        """Determine whether this node should be connected to Public network
        with an IP address assigned from that network

        :param instance: Node DB instance
        :returns: True when node has Public network
        """
        if Cluster.should_assign_public_to_all_nodes(instance.cluster):
            return True

        roles = itertools.chain(instance.roles, instance.pending_roles)
        roles_metadata = Cluster.get_roles(instance.cluster)

        for role in roles:
            if roles_metadata.get(role, {}).get('public_ip_required'):
                return True

        return False
Example #44
    def _do_test_attributes_in_white_list(self, release, expected_attributes):
        cluster_data = self.env.create_cluster(release_id=release.id)
        cluster = Cluster.get_by_uid(cluster_data['id'])
        editable = cluster.attributes.editable

        info = InstallationInfo()
        actual_attributes = info.get_attributes(editable,
                                                info.attributes_white_list)
        self.assertEqual(set(expected_attributes),
                         set(actual_attributes.keys()))
Example #45
    def generate_l3(cls, cluster):
        l3 = {"use_namespaces": True}
        attrs = Cluster.get_attributes(cluster).editable
        if 'nsx_plugin' in attrs and \
                attrs['nsx_plugin']['metadata']['enabled']:
            dhcp_attrs = l3.setdefault('dhcp_agent', {})
            dhcp_attrs['enable_isolated_metadata'] = True
            dhcp_attrs['enable_metadata_network'] = True

        return l3
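
Both branches are visible in the code, so the possible results can be written out directly; with the nsx_plugin attribute present and enabled the method returns:

    l3_with_nsx = {
        "use_namespaces": True,
        "dhcp_agent": {
            "enable_isolated_metadata": True,
            "enable_metadata_network": True,
        },
    }
    # and without it, just {"use_namespaces": True}
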
Example #46
 def test_get_attributes_centos(self):
     self.env.upload_fixtures(['openstack'])
     info = InstallationInfo()
     release = ReleaseCollection.filter_by(None, operating_system='CentOS')
     cluster_data = self.env.create_cluster(release_id=release[0].id)
     cluster = Cluster.get_by_uid(cluster_data['id'])
     editable = cluster.attributes.editable
     attr_key_list = [a[1] for a in info.attributes_white_list]
     attrs_dict = info.get_attributes(editable, info.attributes_white_list)
     self.assertEqual(set(attr_key_list), set(attrs_dict.keys()))
Example #47
 def get_clusters_info(self):
     clusters = ClusterCollection.all()
     clusters_info = []
     for cluster in clusters:
         release = cluster.release
         nodes_num = NodeCollection.filter_by(
             None, cluster_id=cluster.id).count()
         vmware_attributes_editable = None
         if cluster.vmware_attributes:
             vmware_attributes_editable = cluster.vmware_attributes.editable
         cluster_info = {
             'id': cluster.id,
             'nodes_num': nodes_num,
             'release': {
                 'os': release.operating_system,
                 'name': release.name,
                 'version': release.version
             },
             'mode': cluster.mode,
             'nodes': self.get_nodes_info(cluster.nodes),
             'node_groups': self.get_node_groups_info(cluster.node_groups),
             'status': cluster.status,
             'extensions': cluster.extensions,
             'attributes': self.get_attributes(
                 Cluster.get_editable_attributes(cluster),
                 self.attributes_white_list),
             'vmware_attributes': self.get_attributes(
                 vmware_attributes_editable,
                 self.vmware_attributes_white_list),
             'plugin_links': self.get_plugin_links(cluster.plugin_links),
             'net_provider': cluster.net_provider,
             'fuel_version': cluster.fuel_version,
             'is_customized': cluster.is_customized,
             'network_configuration':
                 self.get_network_configuration_info(cluster),
             'installed_plugins': self.get_cluster_plugins_info(cluster),
             'components': cluster.components,
             'cluster_plugins': cluster.cluster_plugins
         }
         clusters_info.append(cluster_info)
     return clusters_info
Example #48
    def get_vips_by_cluster_id(cls,
                               cluster_id,
                               network_id=None,
                               network_role=None):
        """Get VIP filtered by cluster ID.

        VIP is determined by not NULL vip_name field of IPAddr model.

        :param cluster_id: cluster identifier or None to get all records
        :type cluster_id: int|None
        :param network_id: network identifier
        :type network_id: int
        :param network_role: network role
        :type network_role: str
        :return: vips query
        :rtype: SQLAlchemy Query
        """
        query = cls.get_by_cluster_id(cluster_id)\
            .filter(models.IPAddr.vip_name.isnot(None))

        if network_id:
            query = query.filter(models.IPAddr.network == network_id)

        if network_role:
            # Get all network_roles for cluster and gain vip names from it,
            # then bound query to this names.
            # See network_roles.yaml in plugin examples for the details of
            # input structure.
            cluster_obj = Cluster.get_by_uid(cluster_id)
            vips = []

            for cluster_network_role in Cluster.get_network_roles(cluster_obj):
                if cluster_network_role.get('id') == network_role:
                    vips.extend(
                        cluster_network_role.get('properties',
                                                 {}).get('vip', []))

            vip_names = (vip['name'] for vip in vips)
            unique_vip_names = list(set(vip_names))
            query = query.filter(models.IPAddr.vip_name.in_(unique_vip_names))

        return query
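
The role lookup above reads only the 'id' key and the 'vip' list under 'properties' of each network role description. A minimal entry that would match looks roughly like this (the identifiers are hypothetical, shaped only by the accessors used in the code):

    example_network_role = {
        "id": "public/vip",
        "properties": {
            "vip": [
                {"name": "public_vip"},   # only "name" is read when collecting vip_names
            ],
        },
    }
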
Example #49
 def get_kernel_params(cls, instance):
     """Return cluster kernel_params if they wasnot replaced by
        custom params.
     """
     # return (instance.kernel_params or
     #         Cluster.get_default_kernel_params(instance.cluster))
     if instance.cluster:
         return (instance.kernel_params
                 or Cluster.get_default_kernel_params(instance.cluster))
     else:
         return "console=ttyS0,9600 console=tty0 biosdevname=0 crashkernel=none rootdelay=90 nomodeset"
Example #50
    def get_common_attrs(cls, cluster, attrs):
        """Cluster network attributes."""
        common = cls.network_provider_cluster_attrs(cluster)
        common.update(cls.network_ranges(
            Cluster.get_default_group(cluster).id))
        common.update({'master_ip': settings.MASTER_IP})

        common['nodes'] = deepcopy(attrs['nodes'])
        common['nodes'] = cls.update_nodes_net_info(cluster, common['nodes'])

        return common
Example #51
    def remove_from_cluster(cls, instance):
        """Remove Node from Cluster.
        Also drops networks assignment for Node and clears both
        roles and pending roles

        :param instance: Node instance
        :returns: None
        """
        if instance.cluster:
            Cluster.clear_pending_changes(instance.cluster,
                                          node_id=instance.id)
            Cluster.get_network_manager(
                instance.cluster).clear_assigned_networks(instance)
        cls.update_roles(instance, [])
        cls.update_pending_roles(instance, [])
        instance.cluster_id = None
        instance.kernel_params = None
        instance.reset_name_to_default()
        db().flush()
        db().refresh(instance)
Example #52
    def generate_network_metadata(cls, cluster):
        nodes = dict()
        nm = Cluster.get_network_manager(cluster)

        for node in Cluster.get_nodes_not_for_deletion(cluster):
            name = Node.get_slave_name(node)
            node_roles = Node.all_roles(node)
            network_roles = cls.get_network_role_mapping_to_ip(node)

            nodes[name] = {
                "uid": node.uid,
                "fqdn": Node.get_node_fqdn(node),
                "name": name,
                "user_node_name": node.name,
                "swift_zone": node.uid,
                "node_roles": node_roles,
                "network_roles": network_roles
            }

        return dict(nodes=nodes, vips=nm.assign_vips_for_net_groups(cluster))
Example #53
    def add_vlan_interfaces(cls, node):
        """Assign fixed_interfaces and vlan_interface.
        They should be equal.
        """
        net_manager = Cluster.get_network_manager(node.cluster)
        fixed_interface = net_manager._get_interface_by_network_name(
            node.id, 'fixed')

        attrs = {'fixed_interface': fixed_interface.name,
                 'vlan_interface': fixed_interface.name}
        return attrs
Example #54
    def _get_network_role_mapping(cls, node, mapping):
        """Aggregates common logic for methods 'get_network_role_mapping_to_ip'
        and 'get_network_role_mapping_to_interfaces'.
        """
        roles = dict()
        for role in Cluster.get_network_roles(node.cluster):
            default_mapping = mapping.get(role['default_mapping'])
            if default_mapping:
                roles[role['id']] = default_mapping

        return roles
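
A small worked example of the filtering logic in _get_network_role_mapping, with hypothetical role metadata and an endpoint mapping that covers only one of the roles:

    roles_meta = [
        {"id": "mgmt/vip", "default_mapping": "management"},
        {"id": "public/vip", "default_mapping": "public"},
    ]
    mapping = {"management": "192.168.0.3"}  # no entry for "public"

    roles = {}
    for role in roles_meta:
        default_mapping = mapping.get(role["default_mapping"])
        if default_mapping:
            roles[role["id"]] = default_mapping

    assert roles == {"mgmt/vip": "192.168.0.3"}  # unmapped roles are dropped
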
Example #55
    def get_network_to_ip_mapping(cls, node):
        nm = Cluster.get_network_manager(node.cluster)

        mapping = dict()
        networks = nm.get_node_networks(node)
        for net in cls.get_default_network_to_endpoint_mapping(node):
            netgroup = nm.get_network_by_netname(net, networks)
            if netgroup.get('ip'):
                mapping[net] = netgroup['ip'].split('/')[0]

        return mapping
Example #56
 def network_provider_node_attrs(cls, cluster, node):
     """Serialize node, then it will be
     merged with common attributes
     """
     nm = Cluster.get_network_manager(cluster)
     networks = nm.get_node_networks(node)
     node_attrs = {
         'network_scheme': cls.generate_network_scheme(node, networks),
     }
     node_attrs = cls.mellanox_settings(node_attrs, cluster, networks)
     return node_attrs
Example #57
    def update_by_agent(cls, instance, data):
        """Update Node instance with some specific cases for agent.

        * don't update provisioning or error state back to discover
        * don't update volume information if disks array is empty

        :param data: dictionary of key-value pairs as object fields
        :returns: Node instance
        """
        # don't update provisioning and error back to discover
        if instance.status in ('provisioning', 'error'):
            if data.get('status', 'discover') == 'discover':
                logger.debug(u"Node {0} has provisioning or error status - "
                             u"status not updated by agent".format(
                                 instance.human_readable_name))

                data['status'] = instance.status

        # don't update volume information, if agent has sent an empty array
        meta = data.get('meta', {})
        if meta and len(meta.get('disks', [])) == 0 \
                and instance.meta.get('disks'):

            logger.warning(u'Node {0} has received an empty disks array - '
                           u'volume information will not be updated'.format(
                               instance.human_readable_name))
            meta['disks'] = instance.meta['disks']

        #(dshulyak) change this verification to NODE_STATUSES.deploying
        # after we will reuse ips from dhcp range
        netmanager = Cluster.get_network_manager()
        admin_ng = netmanager.get_admin_network_group(instance.id)
        if data.get('ip') and not netmanager.is_same_network(
                data['ip'], admin_ng.cidr):
            logger.debug('Corrupted network data %s, skipping update',
                         instance.id)
            return instance
        # called on environment reset and on environment deletion
        # data contains status, agent_checksum, ip, etc., but not name

        data["power_ip"] = data["ip"]

        data["ip"] = instance.ip
        # the ip inside meta also needs to be updated
        if 'meta' in data:
            instance.create_meta(meta, instance.ip, instance.mac)
            db().flush()
            cls.update_interfaces(instance)
            phymachine = cls.get_by_mac_phymachine(meta['interfaces'])
            if phymachine:
                if "name" not in data:
                    data["name"] = "{0}".format(phymachine.name)
        logger.info(data)
        return cls.update(instance, data)
Example #58
    def create(cls, data):
        new_group = super(NodeGroup, cls).create(data)
        cluster = Cluster.get_by_uid(new_group.cluster_id)
        try:
            fire_callback_on_nodegroup_create(new_group)
        except errors.CannotCreate:
            db().delete(new_group)

        db().flush()
        db().refresh(cluster)
        return new_group
Example #59
    def _update_cluster_status(cls, cluster, status, expected_node_status):
        logger.debug("Updating cluster (%s) status: from %s to %s",
                     cluster.full_name, cluster.status, status)
        if expected_node_status is not None:
            remaining = Cluster.get_nodes_count_unmet_status(
                cluster, expected_node_status)
            if remaining > 0:
                logger.debug("Detect that cluster '%s' is partially deployed.",
                             cluster.id)
                status = consts.CLUSTER_STATUSES.partially_deployed

        # FIXME(aroma): remove updating of 'deployed_before'
        # when stop action is reworked. 'deployed_before'
        # flag identifies whether stop action is allowed for the
        # cluster. Please, refer to [1] for more details.
        # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
        if status == consts.CLUSTER_STATUSES.operational:
            Cluster.set_deployed_before_flag(cluster, value=True)

        Cluster.update(cluster, {'status': status})
Example #60
 def update_nodes_net_info(cls, cluster, nodes):
     """Adds information about networks to each node.
     This info is deprecated in 7.0 and should be removed in a later version.
     """
     nm = Cluster.get_network_manager(cluster)
     for node in Cluster.get_nodes_not_for_deletion(cluster):
         netw_data = []
         for name, data in six.iteritems(
                 nm.get_node_networks_with_ips(node)):
             data['name'] = name
             netw_data.append(data)
         addresses = {}
         for net in netw_data:
             render_addr_mask = net['meta'].get('render_addr_mask')
             if render_addr_mask:
                 addresses.update(
                     cls.get_addr_mask(netw_data, net['name'],
                                       render_addr_mask))
         [n.update(addresses) for n in nodes if n['uid'] == str(node.uid)]
     return nodes