Example #1
    def test_generate_csr_and_key(self, mock_generate_private_key,
                                  mock_default_backend):
        mock_generate_private_key.return_value = mock.MagicMock()
        csr_keys = operations.generate_csr_and_key(u"Test")
        self.assertIsNotNone(csr_keys)
        self.assertTrue("public_key" in csr_keys)
        self.assertTrue("private_key" in csr_keys)
Example #3
    def rotate_ca_certificate(self, context, cluster):
        cluster_template = conductor_utils.retrieve_cluster_template(context,
                                                                     cluster)
        if cluster_template.cluster_distro not in ["fedora-coreos"]:
            raise exception.NotSupported("Rotating the CA certificate is "
                                         "not supported for cluster with "
                                         "cluster_distro: %s." %
                                         cluster_template.cluster_distro)
        osc = clients.OpenStackClients(context)
        rollback = True
        heat_params = {}

        csr_keys = x509.generate_csr_and_key(u"Kubernetes Service Account")

        heat_params['kube_service_account_key'] = \
            csr_keys["public_key"].replace("\n", "\\n")
        heat_params['kube_service_account_private_key'] = \
            csr_keys["private_key"].replace("\n", "\\n")

        fields = {
            'existing': True,
            'parameters': heat_params,
            'disable_rollback': not rollback
        }
        osc.heat().stacks.update(cluster.stack_id, **fields)
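Both rotate_ca_certificate above and the get_params variants below escape the newlines in the PEM output of generate_csr_and_key before handing it on as a single string parameter. A minimal sketch of that transformation, using a made-up placeholder instead of real key material:

# The PEM value below is a placeholder, not a real key.
pem = ("-----BEGIN PUBLIC KEY-----\n"
       "MIIBIjANBgkqhkiG9w0BAQEFAAOC...\n"
       "-----END PUBLIC KEY-----\n")

# Replace literal newlines with the two-character sequence "\n" so the
# whole multi-line key fits into one parameter value.
heat_safe = pem.replace("\n", "\\n")
print(heat_safe)
# -----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOC...\n-----END PUBLIC KEY-----\n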
Example #4
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['username'] = context.user_name
        osc = self.get_osc(context)
        extra_params['region_name'] = osc.cinder_region_name()

        # set docker_volume_type
        # use the configuration default if None provided
        docker_volume_type = cluster.labels.get(
            'docker_volume_type', CONF.cinder.default_docker_volume_type)
        extra_params['docker_volume_type'] = docker_volume_type

        extra_params['nodes_affinity_policy'] = \
            CONF.cluster.nodes_affinity_policy

        if cluster_template.network_driver == 'flannel':
            extra_params["pods_network_cidr"] = \
                cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
        if cluster_template.network_driver == 'calico':
            extra_params["pods_network_cidr"] = \
                cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')

        # check cloud provider and cinder options. If cinder is selected,
        # the cloud provider needs to be enabled.
        cloud_provider_enabled = cluster.labels.get('cloud_provider_enabled',
                                                    'true').lower()
        if (cluster_template.volume_driver == 'cinder'
                and cloud_provider_enabled == 'false'):
            raise exception.InvalidParameterValue(
                _('"cinder" volume driver needs "cloud_provider_enabled" label '
                  'to be true or unset.'))

        label_list = [
            'kube_tag', 'container_infra_prefix', 'availability_zone',
            'cgroup_driver', 'calico_tag', 'calico_cni_tag',
            'calico_kube_controllers_tag', 'calico_ipv4pool', 'etcd_tag',
            'flannel_tag', 'cloud_provider_enabled', 'cloud_provider_tag',
            'prometheus_tag', 'grafana_tag', 'heat_container_agent_tag',
            'keystone_auth_enabled', 'k8s_keystone_auth_tag', 'tiller_enabled',
            'tiller_tag', 'tiller_namespace'
        ]

        for label in label_list:
            label_value = cluster.labels.get(label)
            if label_value:
                extra_params[label] = label_value

        csr_keys = x509.generate_csr_and_key(u"Kubernetes Service Account")

        extra_params['kube_service_account_key'] = \
            csr_keys["public_key"].replace("\n", "\\n")
        extra_params['kube_service_account_private_key'] = \
            csr_keys["private_key"].replace("\n", "\\n")

        cert_manager_api = cluster.labels.get('cert_manager_api')
        if strutils.bool_from_string(cert_manager_api):
            extra_params['cert_manager_api'] = cert_manager_api
            ca_cert = cert_manager.get_cluster_ca_certificate(cluster)
            if six.PY3 and isinstance(ca_cert.get_private_key_passphrase(),
                                      six.text_type):
                extra_params['ca_key'] = x509.decrypt_key(
                    ca_cert.get_private_key(),
                    ca_cert.get_private_key_passphrase().encode()).decode(
                    ).replace("\n", "\\n")
            else:
                extra_params['ca_key'] = x509.decrypt_key(
                    ca_cert.get_private_key(),
                    ca_cert.get_private_key_passphrase()).replace("\n", "\\n")

        extra_params['project_id'] = cluster.project_id

        return super(K8sFedoraTemplateDefinition,
                     self).get_params(context,
                                      cluster_template,
                                      cluster,
                                      extra_params=extra_params,
                                      **kwargs)
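The label handling in get_params above copies a cluster label into the template parameters only when it is present and non-empty. A self-contained sketch of that pattern, with made-up label values standing in for cluster.labels:

label_list = ['kube_tag', 'availability_zone', 'etcd_tag']

# Stand-in for cluster.labels: 'availability_zone' is set but empty and
# 'etcd_tag' is missing entirely.
cluster_labels = {'kube_tag': 'v1.18.2', 'availability_zone': ''}

extra_params = {}
for label in label_list:
    label_value = cluster_labels.get(label)
    if label_value:  # skips missing labels and empty-string values
        extra_params[label] = label_value

print(extra_params)  # {'kube_tag': 'v1.18.2'}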
Example #5
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['username'] = context.user_name
        osc = self.get_osc(context)
        extra_params['region_name'] = osc.cinder_region_name()

        # set docker_volume_type
        # use the configuration default if None provided
        docker_volume_type = cluster.labels.get(
            'docker_volume_type', CONF.cinder.default_docker_volume_type)
        extra_params['docker_volume_type'] = docker_volume_type

        extra_params['nodes_affinity_policy'] = \
            CONF.cluster.nodes_affinity_policy

        if cluster_template.network_driver == 'flannel':
            extra_params["pods_network_cidr"] = \
                cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
        if cluster_template.network_driver == 'calico':
            extra_params["pods_network_cidr"] = \
                cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')

        # check cloud provider and cinder options. If cinder is selected,
        # the cloud provider needs to be enabled.
        cloud_provider_enabled = cluster.labels.get('cloud_provider_enabled',
                                                    'true').lower()
        if (cluster_template.volume_driver == 'cinder'
                and cloud_provider_enabled == 'false'):
            raise exception.InvalidParameterValue(
                _('"cinder" volume driver needs "cloud_provider_enabled" label '
                  'to be true or unset.'))

        extra_params['master_image'] = cluster_template.image_id
        extra_params['minion_image'] = cluster_template.image_id

        label_list = [
            'coredns_tag', 'kube_tag', 'container_infra_prefix',
            'availability_zone', 'cgroup_driver', 'calico_tag',
            'calico_cni_tag', 'calico_kube_controllers_tag', 'calico_ipv4pool',
            'etcd_tag', 'flannel_tag', 'flannel_cni_tag',
            'cloud_provider_enabled', 'cloud_provider_tag', 'prometheus_tag',
            'grafana_tag', 'heat_container_agent_tag', 'keystone_auth_enabled',
            'k8s_keystone_auth_tag', 'monitoring_enabled', 'tiller_enabled',
            'tiller_tag', 'tiller_namespace', 'traefik_ingress_controller_tag',
            'node_problem_detector_tag', 'nginx_ingress_controller_tag',
            'auto_healing_enabled', 'auto_scaling_enabled', 'draino_tag',
            'autoscaler_tag', 'min_node_count', 'max_node_count'
        ]

        for label in label_list:
            label_value = cluster.labels.get(label)
            if label_value:
                extra_params[label] = label_value

        csr_keys = x509.generate_csr_and_key(u"Kubernetes Service Account")

        extra_params['kube_service_account_key'] = \
            csr_keys["public_key"].replace("\n", "\\n")
        extra_params['kube_service_account_private_key'] = \
            csr_keys["private_key"].replace("\n", "\\n")

        extra_params['project_id'] = cluster.project_id

        if not extra_params.get('max_node_count'):
            extra_params['max_node_count'] = cluster.node_count + 1

        self._set_cert_manager_params(cluster, extra_params)
        self._get_keystone_auth_default_policy(extra_params)

        return super(K8sFedoraTemplateDefinition,
                     self).get_params(context,
                                      cluster_template,
                                      cluster,
                                      extra_params=extra_params,
                                      **kwargs)
Example #6
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['username'] = context.user_name
        osc = self.get_osc(context)
        extra_params['region_name'] = osc.cinder_region_name()

        self._set_volumes(context, cluster, extra_params)

        extra_params['nodes_affinity_policy'] = \
            CONF.cluster.nodes_affinity_policy

        if cluster_template.network_driver == 'flannel':
            extra_params["pods_network_cidr"] = \
                cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
        if cluster_template.network_driver == 'calico':
            extra_params["pods_network_cidr"] = \
                cluster.labels.get('calico_ipv4pool', '10.100.0.0/16')

        # check cloud provider and cinder options. If cinder is selected,
        # the cloud provider needs to be enabled.
        cloud_provider_enabled = cluster.labels.get(
            'cloud_provider_enabled',
            'true' if CONF.trust.cluster_user_trust else 'false')
        if (not CONF.trust.cluster_user_trust
                and cloud_provider_enabled.lower() == 'true'):
            raise exception.InvalidParameterValue(
                _('"cluster_user_trust" must be set to True in magnum.conf when '
                  '"cloud_provider_enabled" label is set to true.'))
        if (cluster_template.volume_driver == 'cinder'
                and cloud_provider_enabled.lower() == 'false'):
            raise exception.InvalidParameterValue(
                _('"cinder" volume driver needs "cloud_provider_enabled" label '
                  'to be true or unset.'))
        extra_params['cloud_provider_enabled'] = cloud_provider_enabled

        label_list = [
            'coredns_tag', 'kube_tag', 'container_infra_prefix',
            'availability_zone', 'cgroup_driver', 'container_runtime',
            'containerd_version', 'containerd_tarball_url',
            'containerd_tarball_sha256', 'calico_tag',
            'calico_kube_controllers_tag', 'calico_ipv4pool',
            'calico_ipv4pool_ipip', 'cinder_csi_enabled',
            'cinder_csi_plugin_tag', 'csi_attacher_tag', 'csi_provisioner_tag',
            'csi_snapshotter_tag', 'csi_resizer_tag',
            'csi_node_driver_registrar_tag', 'etcd_tag', 'flannel_tag',
            'flannel_cni_tag', 'cloud_provider_tag', 'prometheus_tag',
            'grafana_tag', 'heat_container_agent_tag', 'keystone_auth_enabled',
            'k8s_keystone_auth_tag', 'heapster_enabled',
            'metrics_server_enabled', 'metrics_server_chart_tag',
            'monitoring_enabled', 'prometheus_operator_chart_tag',
            'prometheus_adapter_enabled', 'prometheus_adapter_chart_tag',
            'prometheus_adapter_configmap', 'selinux_mode', 'tiller_enabled',
            'tiller_tag', 'tiller_namespace', 'traefik_ingress_controller_tag',
            'node_problem_detector_tag', 'nginx_ingress_controller_tag',
            'nginx_ingress_controller_chart_tag', 'auto_healing_enabled',
            'auto_scaling_enabled', 'auto_healing_controller',
            'magnum_auto_healer_tag', 'draino_tag', 'autoscaler_tag',
            'min_node_count', 'max_node_count', 'npd_enabled', 'ostree_remote',
            'ostree_commit', 'use_podman', 'kube_image_digest'
        ]

        labels = self._get_relevant_labels(cluster, kwargs)

        for label in label_list:
            label_value = labels.get(label)
            if label_value:
                extra_params[label] = label_value

        csr_keys = x509.generate_csr_and_key(u"Kubernetes Service Account")

        extra_params['kube_service_account_key'] = \
            csr_keys["public_key"].replace("\n", "\\n")
        extra_params['kube_service_account_private_key'] = \
            csr_keys["private_key"].replace("\n", "\\n")

        extra_params['project_id'] = cluster.project_id
        extra_params['post_install_manifest_url'] = \
            CONF.kubernetes.post_install_manifest_url

        if not extra_params.get('max_node_count'):
            extra_params['max_node_count'] = cluster.node_count + 1

        self._set_cert_manager_params(context, cluster, extra_params)
        self._get_keystone_auth_default_policy(extra_params)
        self._set_volumes(context, cluster, extra_params)

        return super(K8sFedoraTemplateDefinition,
                     self).get_params(context,
                                      cluster_template,
                                      cluster,
                                      extra_params=extra_params,
                                      **kwargs)
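This variant derives the cloud_provider_enabled default from CONF.trust.cluster_user_trust and rejects inconsistent combinations. A reduced sketch of that validation, with plain variables and ValueError standing in for the config option and the Magnum exception type:

def check_cloud_provider(labels, cluster_user_trust, volume_driver):
    # Default mirrors the logic above: enabled only when the trust is enabled.
    cloud_provider_enabled = labels.get(
        'cloud_provider_enabled',
        'true' if cluster_user_trust else 'false')

    if not cluster_user_trust and cloud_provider_enabled.lower() == 'true':
        raise ValueError('"cluster_user_trust" must be set to True in '
                         'magnum.conf when "cloud_provider_enabled" label '
                         'is set to true.')
    if volume_driver == 'cinder' and cloud_provider_enabled.lower() == 'false':
        raise ValueError('"cinder" volume driver needs '
                         '"cloud_provider_enabled" label to be true or unset.')
    return cloud_provider_enabled


print(check_cloud_provider({}, cluster_user_trust=True, volume_driver='cinder'))
# true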
Example #7
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['username'] = context.user_name
        osc = self.get_osc(context)
        extra_params['region_name'] = osc.cinder_region_name()

        # set docker_volume_type
        # use the configuration default if None provided
        docker_volume_type = cluster.labels.get(
            'docker_volume_type', CONF.cinder.default_docker_volume_type)
        extra_params['docker_volume_type'] = docker_volume_type

        extra_params['nodes_affinity_policy'] = \
            CONF.cluster.nodes_affinity_policy

        if cluster_template.network_driver == 'flannel':
            extra_params["pods_network_cidr"] = \
                cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
        if cluster_template.network_driver == 'calico':
            extra_params["pods_network_cidr"] = \
                cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')

        # check cloud provider and cinder options. If cinder is selected,
        # the cloud provider needs to be enabled.
        cloud_provider_enabled = cluster.labels.get(
            'cloud_provider_enabled', 'true').lower()
        if (cluster_template.volume_driver == 'cinder'
                and cloud_provider_enabled == 'false'):
            raise exception.InvalidParameterValue(_(
                '"cinder" volume driver needs "cloud_provider_enabled" label '
                'to be true or unset.'))

        label_list = ['coredns_tag',
                      'kube_tag', 'container_infra_prefix',
                      'availability_zone',
                      'cgroup_driver',
                      'calico_tag', 'calico_cni_tag',
                      'calico_kube_controllers_tag', 'calico_ipv4pool',
                      'etcd_tag', 'flannel_tag', 'flannel_cni_tag',
                      'cloud_provider_enabled', 'cloud_provider_tag',
                      'prometheus_tag', 'grafana_tag',
                      'heat_container_agent_tag',
                      'keystone_auth_enabled', 'k8s_keystone_auth_tag',
                      'monitoring_enabled',
                      'tiller_enabled',
                      'tiller_tag',
                      'tiller_namespace',
                      'traefik_ingress_controller_tag',
                      'node_problem_detector_tag',
                      'nginx_ingress_controller_tag',
                      'auto_healing_enabled', 'auto_scaling_enabled',
                      'draino_tag', 'autoscaler_tag',
                      'min_node_count', 'max_node_count']

        for label in label_list:
            label_value = cluster.labels.get(label)
            if label_value:
                extra_params[label] = label_value

        csr_keys = x509.generate_csr_and_key(u"Kubernetes Service Account")

        extra_params['kube_service_account_key'] = \
            csr_keys["public_key"].replace("\n", "\\n")
        extra_params['kube_service_account_private_key'] = \
            csr_keys["private_key"].replace("\n", "\\n")

        extra_params['project_id'] = cluster.project_id

        if not extra_params.get('max_node_count'):
            extra_params['max_node_count'] = cluster.node_count + 1

        self._set_cert_manager_params(cluster, extra_params)

        return super(K8sFedoraTemplateDefinition,
                     self).get_params(context, cluster_template, cluster,
                                      extra_params=extra_params,
                                      **kwargs)
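When the max_node_count label is not set, the newer get_params variants above fall back to the current node count plus one. A small sketch of that defaulting step with a made-up node count:

extra_params = {}          # no 'max_node_count' label was copied in
cluster_node_count = 3     # stand-in for cluster.node_count

if not extra_params.get('max_node_count'):
    extra_params['max_node_count'] = cluster_node_count + 1

print(extra_params['max_node_count'])  # 4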
Example #8
    def test_generate_csr_and_key(self):
        csr_keys = operations.generate_csr_and_key(u"Test")
        self.assertIsNotNone(csr_keys)
        self.assertTrue("public_key" in csr_keys)
        self.assertTrue("private_key" in csr_keys)