def test_get_network_id(self, mock_clients):
        fake_name = "fake_network"
        fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_networks.return_value = {
            'networks': [{
                'id': fake_id,
                'name': fake_name,
                'router:external': True
            }]
        }

        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        network_id = neutron.get_network_id(self.context, fake_name)

        self.assertEqual(fake_id, network_id)
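For context, the helper under test resolves an external network name to its UUID through Neutron. Below is a minimal sketch of how such a helper could be implemented (hypothetical; the real Magnum helper may differ, e.g. in its error handling):

# Assumed imports, matching modules used elsewhere in these snippets:
# from magnum.common import clients, exception

def get_network_id(context, network_name):
    # Hypothetical sketch of the helper exercised by the test above.
    nclient = clients.OpenStackClients(context).neutron()
    networks = nclient.list_networks().get('networks', [])
    matches = [n['id'] for n in networks if n.get('name') == network_name]
    if len(matches) != 1:
        raise exception.InvalidParameterValue(
            'Expected exactly one network named %s' % network_name)
    return matches[0]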
Example #3
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['discovery_url'] = \
            self.get_discovery_url(cluster, cluster_template=cluster_template)
        osc = self.get_osc(context)
        extra_params['magnum_url'] = osc.magnum_url()

        if cluster_template.tls_disabled:
            extra_params['loadbalancing_protocol'] = 'HTTP'
            extra_params['kubernetes_port'] = 8080

        extra_params['octavia_enabled'] = keystone.is_octavia_enabled()

        # NOTE(lxkong): Convert the external network name to a UUID; the
        # template field name is misleading. If external_network_id is not
        # specified in the cluster template, use 'public' as the default,
        # which matches the previous default in the Heat templates.
        ext_net = cluster_template.external_network_id or "public"
        if not uuidutils.is_uuid_like(ext_net):
            ext_net_id = neutron.get_network_id(context, ext_net)
            extra_params['external_network'] = ext_net_id
        else:
            extra_params['external_network'] = ext_net

        label_list = ['flannel_network_cidr', 'flannel_backend',
                      'flannel_network_subnetlen',
                      'system_pods_initial_delay',
                      'system_pods_timeout',
                      'admission_control_list',
                      'prometheus_monitoring',
                      'grafana_admin_passwd',
                      'kube_dashboard_enabled',
                      'etcd_volume_size',
                      'cert_manager_api',
                      'ingress_controller_role',
                      'octavia_ingress_controller_tag',
                      'kubelet_options',
                      'kubeapi_options',
                      'kubeproxy_options',
                      'kubecontroller_options',
                      'kubescheduler_options',
                      'influx_grafana_dashboard_enabled']

        for label in label_list:
            extra_params[label] = cluster.labels.get(label)

        ingress_controller = cluster.labels.get('ingress_controller',
                                                '').lower()
        if (ingress_controller == 'octavia'
                and not extra_params['octavia_enabled']):
            raise exception.InvalidParameterValue(
                'The Octavia service must be deployed in order to use the '
                'octavia ingress controller.')
        extra_params["ingress_controller"] = ingress_controller

        cluster_ip_range = cluster.labels.get('service_cluster_ip_range')
        if cluster_ip_range:
            extra_params['portal_network_cidr'] = cluster_ip_range

        if cluster_template.registry_enabled:
            extra_params['swift_region'] = CONF.docker_registry.swift_region
            extra_params['registry_container'] = (
                CONF.docker_registry.swift_registry_container)

        return super(K8sTemplateDefinition,
                     self).get_params(context, cluster_template, cluster,
                                      extra_params=extra_params,
                                      **kwargs)
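As a quick illustration of the normalization in get_params above (values are made up), oslo.utils' uuidutils.is_uuid_like decides whether the configured value is already a UUID or still needs to be resolved by name:

from oslo_utils import uuidutils

uuidutils.is_uuid_like("24fe5da0-1ac0-11e9-84cd-00224d6b7bc1")  # True
uuidutils.is_uuid_like("public")  # False -> resolved via neutron.get_network_id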
Example #4
    def create_cluster(self, context, cluster, cluster_create_timeout):
        LOG.info("Starting to create cluster %s", cluster.uuid)

        cluster_template = conductor_utils.retrieve_cluster_template(
            context,
            cluster
        )

        cluster_service_ip_range = cluster.labels.get(
            'service_cluster_ip_range', '10.97.0.0/16'
        )
        if cluster_template.network_driver == 'flannel':
            cluster_pod_ip_range = cluster.labels.get(
                'flannel_network_cidr', '10.100.0.0/16'
            )
        elif cluster_template.network_driver == 'calico':
            cluster_pod_ip_range = cluster.labels.get(
                'calico_ipv4pool', '192.168.0.0/16'
            )
        else:
            # Fall back to the flannel default so the params dict below
            # never references an undefined name for other network drivers.
            cluster_pod_ip_range = '10.100.0.0/16'

        port_info = self._create_vip_port(context, cluster, cluster_template)

        # This address should be an internal IP that other services can use
        # to communicate with the apiserver.
        self.apiserver_address = port_info["private_ip"]
        external_apiserver_address = port_info.get("public_ip",
                                                   port_info["private_ip"])

        # The cluster API address uses the external (floating) IP when one
        # exists, otherwise it falls back to the private VIP address.
        cluster.api_address = 'https://%s:6443' % external_apiserver_address
        master_ng = cluster.default_ng_master
        setattr(master_ng, "node_addresses", [self.apiserver_address])
        master_ng.save()

        self.public_network_id = (
            cluster_template.external_network_id or "public")
        if not uuidutils.is_uuid_like(self.public_network_id):
            self.public_network_id = neutron.get_network_id(
                context,
                self.public_network_id
            )

        ca_cert = cert_manager.get_cluster_ca_certificate(
            cluster,
            context=context
        )
        ca_cert_encoded = base64.b64encode(ca_cert.get_certificate())
        ca_key_encoded = base64.b64encode(ca_cert.get_decrypted_private_key())

        cloud_provider_enabled = strutils.bool_from_string(
            cluster.labels.get("cloud_provider_enabled", "true")
        )
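        # strutils.bool_from_string (oslo.utils) treats "1", "t", "true",
        # "on", "y" and "yes" (case-insensitive) as True; in non-strict
        # mode any other value falls back to the default, False.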

        ca_cert_encoded_str = ca_cert_encoded.decode('utf-8')
        ca_cert_encoded_str = ca_cert_encoded_str.replace("'", "")
        ca_key_encoded_str = ca_key_encoded.decode('utf-8')
        ca_key_encoded_str = ca_key_encoded_str.replace("'", "")

        params = {
            "namespace": cluster.uuid,
            "vip_port_ip": self.apiserver_address,
            "vip_external_ip": external_apiserver_address,
            "vip_port_id": port_info["port_id"],
            "service_ip_range": cluser_service_ip_range,
            "pod_ip_range": cluser_pod_ip_range,
            "ca_cert": ca_cert_encoded_str,
            "ca_key": ca_key_encoded_str,
            "subnet_id": cluster_template.fixed_subnet,
            "public_network_id": self.public_network_id,
            "cloud_provider_enabled": cloud_provider_enabled,
            "kube_version": cluster.labels.get("kube_tag", "v1.14.3"),
            "cloud_provider_tag": cluster.labels.get("cloud_provider_tag",
                                                     "v1.15.0")
        }

        # Keystone related info.
        osc = clients.OpenStackClients(context)
        params['trustee_user_id'] = cluster.trustee_user_id
        params['trustee_password'] = cluster.trustee_password
        if CONF.trust.cluster_user_trust:
            params['trust_id'] = cluster.trust_id
        else:
            params['trust_id'] = ""
        kwargs = {
            'service_type': 'identity',
            'interface': CONF.trust.trustee_keystone_interface,
            'version': 3
        }
        if CONF.trust.trustee_keystone_region_name:
            kwargs['region_name'] = CONF.trust.trustee_keystone_region_name
        params['auth_url'] = osc.url_for(**kwargs).rstrip('/')

        _apply_manifest = functools.partial(self._apply_manifest, params)

        LOG.info("Creating namespace for cluster %s", cluster.uuid)
        _apply_manifest('namespace.yaml.j2')

        # Create Secrets for the new cluster CA and the kube services; the
        # CA can be referenced by various cluster components.
        LOG.info("Creating Secrets for cluster %s", cluster.uuid)
        _apply_manifest('secrets.yaml.j2')
        # TODO: Wait until all the certificates are ready.

        # etcd Service and StatefulSet
        LOG.info("Creating etcd service for cluster %s", cluster.uuid)
        _apply_manifest('etcd.yaml.j2')

        # apiserver Service and Deployment
        LOG.info("Creating kube-apiserver for cluster %s", cluster.uuid)
        _apply_manifest('kube-apiserver.yaml.j2')

        # Deploy kube-controller-manager
        LOG.info("Creating kube-controller-manager for cluster %s",
                 cluster.uuid)
        _apply_manifest('kube-controllermgr.yaml.j2')

        # Deploy kube-scheduler
        LOG.info("Creating kube-scheduler for cluster %s", cluster.uuid)
        _apply_manifest('kube-scheduler.yaml.j2')

        kubeconfig_path = self._get_kubeconfig(
            context, cluster,
            ca_cert_encoded=ca_cert_encoded
        )
        LOG.info(
            "Kubeconfig created for cluster %s, path: %s",
            cluster.uuid, kubeconfig_path
        )

        cluster_kubectl = kubectl.KubeCtl(
            bin="/usr/bin/kubectl",
            global_flags="--kubeconfig %s" % kubeconfig_path
        )

        LOG.info(
            "Waiting for all the components to be up and running for "
            "cluster %s", cluster.uuid
        )
        self._wait_for_apiserver(cluster.uuid, cluster_kubectl)

        if cloud_provider_enabled:
            # Deploy openstack-cloud-controller-manager
            LOG.info("Creating openstack-cloud-controller-manager for "
                     "cluster %s", cluster.uuid)
            # Create RBAC for openstack-cloud-controller-manager in the
            # cluster.
            _apply_manifest(
                "openstack-cloud-controller-manager-in-cluster.yaml.j2",
                cluster_kubectl
            )
            _apply_manifest('openstack-cloud-controller-manager.yaml.j2')

        # Create the bootstrap token and bootstrap RBAC in the new cluster.
        LOG.info(
            "Creating bootstrap token and RBAC in the cluster %s",
            cluster.uuid
        )
        expiration = timeutils.utcnow() + datetime.timedelta(days=1)
        # For bootstrap token, refer to
        # https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/
        token_id = self._generate_random_string(6)
        token_secret = self._generate_random_string(16)
        bootstrap_params = {
            "token_id": token_id,
            "token_secret": token_secret,
            "expiration": expiration.strftime('%Y-%m-%dT%H:%M:%SZ'),
        }
        bootstrap_template = self.jinja_env.get_template('bootstrap.yaml.j2')
        bootstrap_body = bootstrap_template.render(bootstrap_params)
        cluster_kubectl.apply(definition=bootstrap_body)

        self.bootstrap_token = "%s.%s" % (token_id, token_secret)

        # Grant privileges to the 'kubernetes' user so that the apiserver
        # can access kubelet for operations like logs, exec, etc. The user
        # name here must match the apiserver CN in secrets.yaml.j2.
        cluster_kubectl.execute(
            "create clusterrolebinding kube-apiserver --clusterrole "
            "cluster-admin --user kubernetes"
        )

        # Start creating VMs and bootstrapping kubelet.
        LOG.info("Creating worker nodes for cluster %s", cluster.uuid)
        super(Driver, self).create_cluster(
            context, cluster, cluster_create_timeout
        )
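For reference, a minimal sketch of what a _wait_for_apiserver helper could look like (hypothetical; the real method's signature and timeout handling may differ):

import time

def _wait_for_apiserver(cluster_uuid, cluster_kubectl, timeout=600):
    # Poll the new cluster until kubectl can reach the healthz endpoint.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            cluster_kubectl.execute("get --raw /healthz")
            return
        except Exception:
            time.sleep(5)
    raise Exception("Timed out waiting for the kube-apiserver of cluster %s"
                    % cluster_uuid)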
Example #5
    def _create_vip_port(self, context, cluster, cluster_template):
        """Create port for kube-apiserver load balancer.

        This method should be called before creating apiserver, because:
        1. We need an IP address to generate kube-apiserver certificates.
        2. We need to specify the port to create Service for kube-apiserver.
        """
        ext_net_id = cluster_template.external_network_id or "public"
        if not uuidutils.is_uuid_like(ext_net_id):
            ext_net_id = neutron.get_network_id(context, ext_net_id)

        network_client = clients.OpenStackClients(context).neutron()
        vip_port = None
        fip = None
        port_info = {}

        try:
            body = {
                'port': {
                    'name': "magnum_%s_vip" % cluster.uuid,
                    'admin_state_up': True,
                }
            }
            if cluster_template.fixed_network:
                body["port"].update(
                    {"network_id": cluster_template.fixed_network}
                )
            if cluster_template.fixed_subnet:
                body['port'].update(
                    {
                        'fixed_ips': [
                            {'subnet_id': cluster_template.fixed_subnet}
                        ]
                    }
                )
            port = network_client.create_port(body)
            vip_port = port['port']
            LOG.info(
                "port %s created for cluster %s", vip_port["id"], cluster.uuid
            )

            port_info["port_id"] = vip_port["id"]
            port_info["private_ip"] = vip_port["fixed_ips"][0]["ip_address"]

            # NOTE: Neutron tags have a length limit
            tag_info = {"magnum": cluster.uuid}
            tags_body = {"tags": [jsonutils.dumps(tag_info)]}
            network_client.replace_tag("ports", vip_port["id"], tags_body)

            if self._master_lb_fip_enabled(cluster, cluster_template):
                fip = network_client.create_floatingip(
                    body={
                        'floatingip': {
                            'floating_network_id': ext_net_id,
                            'port_id': vip_port["id"],
                            'description': ('Load balancer VIP for Magnum '
                                            'cluster %s' % cluster.uuid)
                        }
                    }
                )['floatingip']
                LOG.info(
                    "floating IP %s created for cluster %s",
                    fip["floating_ip_address"], cluster.uuid
                )

                port_info["public_ip"] = fip["floating_ip_address"]
        except neutron_exceptions.NeutronClientException as e:
            LOG.exception('Failed to create VIP port for the apiserver.')
            raise exception.NetworkResourceCreationFailed(
                cluster_uuid=cluster.uuid,
                msg=str(e)
            )

        return port_info
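The returned port_info dict therefore has the following shape (values are illustrative):

# port_info = {
#     "port_id": "0b9f...",        # Neutron port UUID
#     "private_ip": "10.0.0.5",    # fixed IP of the VIP port
#     "public_ip": "172.24.4.10",  # only present when a floating IP is created
# }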