Example #1
def add_lb_env_file(env_files, cluster_template):
    if cluster_template.master_lb_enabled:
        if keystone.is_octavia_enabled():
            env_files.append(COMMON_ENV_PATH + 'with_master_lb_octavia.yaml')
        else:
            env_files.append(COMMON_ENV_PATH + 'with_master_lb.yaml')
    else:
        env_files.append(COMMON_ENV_PATH + 'no_master_lb.yaml')
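The helper above only appends the Octavia-specific environment file when the cluster template enables the master load balancer and keystone.is_octavia_enabled() returns True. A minimal test sketch of that branch, assuming add_lb_env_file, keystone and COMMON_ENV_PATH are importable from the module under test (the names and layout here are placeholders, not the project's actual test suite):

from unittest import mock


def test_add_lb_env_file_prefers_octavia_env():
    # Hypothetical test: master LB enabled and Octavia reported available
    # should select the Octavia-specific environment file.
    cluster_template = mock.Mock(master_lb_enabled=True)
    env_files = []
    with mock.patch.object(keystone, 'is_octavia_enabled', return_value=True):
        add_lb_env_file(env_files, cluster_template)
    assert env_files == [COMMON_ENV_PATH + 'with_master_lb_octavia.yaml']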
Example #3
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})
        scale_mgr = kwargs.pop('scale_manager', None)
        if scale_mgr:
            hosts = self.get_output('kube_minions_private')
            extra_params['minions_to_remove'] = (
                scale_mgr.get_removal_nodes(hosts))

        extra_params['discovery_url'] = self.get_discovery_url(cluster)
        osc = self.get_osc(context)
        extra_params['magnum_url'] = osc.magnum_url()

        if cluster_template.tls_disabled:
            extra_params['loadbalancing_protocol'] = 'HTTP'
            extra_params['kubernetes_port'] = 8080

        extra_params['octavia_enabled'] = keystone.is_octavia_enabled()

        label_list = ['flannel_network_cidr', 'flannel_backend',
                      'flannel_network_subnetlen',
                      'system_pods_initial_delay',
                      'system_pods_timeout',
                      'admission_control_list',
                      'prometheus_monitoring',
                      'grafana_admin_passwd',
                      'kube_dashboard_enabled',
                      'etcd_volume_size',
                      'cert_manager_api',
                      'ingress_controller',
                      'ingress_controller_role',
                      'kubelet_options',
                      'kubeapi_options',
                      'kubeproxy_options',
                      'kubecontroller_options',
                      'kubescheduler_options',
                      'influx_grafana_dashboard_enabled']

        for label in label_list:
            extra_params[label] = cluster.labels.get(label)

        if cluster_template.registry_enabled:
            extra_params['swift_region'] = CONF.docker_registry.swift_region
            extra_params['registry_container'] = (
                CONF.docker_registry.swift_registry_container)

        return super(K8sTemplateDefinition,
                     self).get_params(context, cluster_template, cluster,
                                      extra_params=extra_params,
                                      **kwargs)

    def get_env_files(self, cluster_template, cluster):
        env_files = []

        if (cluster.fixed_network or cluster_template.fixed_network):
            env_files.append(COMMON_ENV_PATH + 'no_private_network.yaml')
        else:
            env_files.append(COMMON_ENV_PATH + 'with_private_network.yaml')

        # if int(cluster_template.labels.get('etcd_volume_size', 0)) < 1:
        env_files.append(COMMON_ENV_PATH + 'no_etcd_volume.yaml')
        # else:
        #     env_files.append(COMMON_ENV_PATH + 'with_etcd_volume.yaml')

        if cluster.docker_volume_size is None:
            env_files.append(COMMON_ENV_PATH + 'no_volume.yaml')
        else:
            env_files.append(COMMON_ENV_PATH + 'with_volume.yaml')

        if cluster_template.master_lb_enabled:
            if keystone.is_octavia_enabled():
                env_files.append(COMMON_ENV_PATH +
                                 'with_master_lb_octavia.yaml')
            else:
                env_files.append(COMMON_ENV_PATH + 'with_master_lb.yaml')
        else:
            env_files.append(COMMON_ENV_PATH + 'no_master_lb.yaml')

        lb_fip_enabled = cluster.labels.get(
            "master_lb_floating_ip_enabled",
            cluster_template.floating_ip_enabled)
        master_lb_fip_enabled = strutils.bool_from_string(lb_fip_enabled)

        if cluster.floating_ip_enabled:
            env_files.append(COMMON_ENV_PATH + 'enable_floating_ip.yaml')
        else:
            env_files.append(COMMON_ENV_PATH + 'disable_floating_ip.yaml')

        if cluster_template.master_lb_enabled and master_lb_fip_enabled:
            env_files.append(COMMON_ENV_PATH + 'enable_lb_floating_ip.yaml')
        else:
            env_files.append(COMMON_ENV_PATH + 'disable_lb_floating_ip.yaml')

        return env_files
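The master_lb_floating_ip_enabled label used above arrives as a string and falls back to the template's floating_ip_enabled flag; strutils.bool_from_string from oslo.utils normalizes the usual spellings. A quick illustration:

from oslo_utils import strutils

# Labels are stored as strings, so common truthy/falsy spellings are
# normalized to booleans before the env file is chosen.
strutils.bool_from_string("True")   # True
strutils.bool_from_string("yes")    # True
strutils.bool_from_string("0")      # False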
Example #5
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['discovery_url'] = self.get_discovery_url(cluster)
        osc = self.get_osc(context)
        extra_params['magnum_url'] = osc.magnum_url()

        if cluster_template.tls_disabled:
            extra_params['loadbalancing_protocol'] = 'HTTP'
            extra_params['kubernetes_port'] = 8080

        extra_params['octavia_enabled'] = keystone.is_octavia_enabled()

        net_params = self.get_net_params(context, cluster_template, cluster)
        extra_params.update(net_params)

        label_list = [
            'flannel_network_cidr', 'flannel_backend',
            'flannel_network_subnetlen', 'system_pods_initial_delay',
            'system_pods_timeout', 'admission_control_list',
            'prometheus_monitoring', 'grafana_admin_passwd',
            'kube_dashboard_enabled', 'etcd_volume_size', 'cert_manager_api',
            'ingress_controller_role', 'octavia_ingress_controller_tag',
            'kubelet_options', 'kubeapi_options', 'kubeproxy_options',
            'kubecontroller_options', 'kubescheduler_options',
            'influx_grafana_dashboard_enabled', 'master_lb_allowed_cidrs'
        ]

        labels = self._get_relevant_labels(cluster, kwargs)

        for label in label_list:
            extra_params[label] = labels.get(label)

        ingress_controller = cluster.labels.get('ingress_controller',
                                                '').lower()
        if (ingress_controller == 'octavia'
                and not extra_params['octavia_enabled']):
            raise exception.InvalidParameterValue(
                'Octavia service needs to be deployed for octavia ingress '
                'controller.')
        extra_params["ingress_controller"] = ingress_controller

        cluster_ip_range = cluster.labels.get('service_cluster_ip_range')
        if cluster_ip_range:
            extra_params['portal_network_cidr'] = cluster_ip_range

        if cluster_template.registry_enabled:
            extra_params['swift_region'] = CONF.docker_registry.swift_region
            extra_params['registry_container'] = (
                CONF.docker_registry.swift_registry_container)

        kube_tag = (labels.get("kube_tag")
                    or cluster_template.labels.get("kube_tag"))
        if kube_tag:
            extra_params['kube_version'] = kube_tag
            extra_params['master_kube_tag'] = kube_tag
            extra_params['minion_kube_tag'] = kube_tag

        self._set_master_lb_allowed_cidrs(context, cluster, extra_params)

        return super(K8sTemplateDefinition,
                     self).get_params(context,
                                      cluster_template,
                                      cluster,
                                      extra_params=extra_params,
                                      **kwargs)
Example #6
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['discovery_url'] = \
            self.get_discovery_url(cluster, cluster_template=cluster_template)
        osc = self.get_osc(context)
        extra_params['magnum_url'] = osc.magnum_url()

        if cluster_template.tls_disabled:
            extra_params['loadbalancing_protocol'] = 'HTTP'
            extra_params['kubernetes_port'] = 8080

        extra_params['octavia_enabled'] = keystone.is_octavia_enabled()

        # NOTE(lxkong): Convert the external network name to a UUID; the
        # template field name is confusing. If external_network_id is not
        # specified in the cluster template, use 'public' as the default
        # value, which matches the previous default in the heat template.
        ext_net = cluster_template.external_network_id or "public"
        if not uuidutils.is_uuid_like(ext_net):
            ext_net_id = neutron.get_network_id(context, ext_net)
            extra_params['external_network'] = ext_net_id
        else:
            extra_params['external_network'] = ext_net

        label_list = ['flannel_network_cidr', 'flannel_backend',
                      'flannel_network_subnetlen',
                      'system_pods_initial_delay',
                      'system_pods_timeout',
                      'admission_control_list',
                      'prometheus_monitoring',
                      'grafana_admin_passwd',
                      'kube_dashboard_enabled',
                      'etcd_volume_size',
                      'cert_manager_api',
                      'ingress_controller_role',
                      'octavia_ingress_controller_tag',
                      'kubelet_options',
                      'kubeapi_options',
                      'kubeproxy_options',
                      'kubecontroller_options',
                      'kubescheduler_options',
                      'influx_grafana_dashboard_enabled']

        for label in label_list:
            extra_params[label] = cluster.labels.get(label)

        ingress_controller = cluster.labels.get('ingress_controller',
                                                '').lower()
        if (ingress_controller == 'octavia'
                and not extra_params['octavia_enabled']):
            raise exception.InvalidParameterValue(
                'Octavia service needs to be deployed for octavia ingress '
                'controller.')
        extra_params["ingress_controller"] = ingress_controller

        cluster_ip_range = cluster.labels.get('service_cluster_ip_range')
        if cluster_ip_range:
            extra_params['portal_network_cidr'] = cluster_ip_range

        if cluster_template.registry_enabled:
            extra_params['swift_region'] = CONF.docker_registry.swift_region
            extra_params['registry_container'] = (
                CONF.docker_registry.swift_registry_container)

        return super(K8sTemplateDefinition,
                     self).get_params(context, cluster_template, cluster,
                                      extra_params=extra_params,
                                      **kwargs)
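The external network handling above only asks Neutron to resolve the value when it does not already look like a UUID; oslo.utils' uuidutils.is_uuid_like performs that format check. For example (the UUID below is made up):

from oslo_utils import uuidutils

uuidutils.is_uuid_like('public')
# False -> resolve the name through neutron.get_network_id()
uuidutils.is_uuid_like('1e2f3a4b-5c6d-7e8f-9a0b-1c2d3e4f5a6b')
# True -> use the value as the network id directly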
Example #7
    def pre_delete_cluster(self, context, cluster):
        """Delete cloud resources before deleting the cluster."""
        if keystone.is_octavia_enabled():
            LOG.info("Starting to delete loadbalancers for cluster %s",
                     cluster.uuid)
            octavia.delete_loadbalancers(context, cluster)
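A test sketch for the gate above: when is_octavia_enabled() reports False, no load balancer cleanup should be attempted. The driver class name here is a placeholder, and the module-level keystone/octavia names are assumed to be the ones imported by the driver:

from unittest import mock


def test_pre_delete_cluster_skips_lb_cleanup_without_octavia():
    driver = SomeClusterDriver()  # placeholder for the class defining the method
    cluster = mock.Mock(uuid='fake-uuid')
    with mock.patch.object(keystone, 'is_octavia_enabled', return_value=False), \
            mock.patch.object(octavia, 'delete_loadbalancers') as delete_lbs:
        driver.pre_delete_cluster(mock.Mock(), cluster)
    delete_lbs.assert_not_called()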
Example #8
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['discovery_url'] = self.get_discovery_url(cluster)
        osc = self.get_osc(context)
        # NOTE: Sometimes version discovery fails when Magnum cannot talk to
        # Keystone via the magnum_client.endpoint_type intended for cluster
        # instances, either because that endpoint is not reachable from the
        # controller or because CA certs are missing for a TLS-enabled
        # interface. The returned auth_url may then lack the /v1 suffix, so
        # append it here so that instances can still talk to Magnum.
        magnum_url = osc.magnum_url()
        extra_params['magnum_url'] = magnum_url + (
            '' if magnum_url.endswith('/v1') else '/v1')

        if cluster_template.tls_disabled:
            extra_params['loadbalancing_protocol'] = 'HTTP'
            extra_params['kubernetes_port'] = 8080

        extra_params['octavia_enabled'] = keystone.is_octavia_enabled()

        net_params = self.get_net_params(context, cluster_template, cluster)
        extra_params.update(net_params)

        label_list = [
            'flannel_network_cidr', 'flannel_backend',
            'flannel_network_subnetlen', 'system_pods_initial_delay',
            'system_pods_timeout', 'admission_control_list',
            'prometheus_monitoring', 'grafana_admin_passwd',
            'kube_dashboard_enabled', 'etcd_volume_size', 'cert_manager_api',
            'ingress_controller_role', 'octavia_ingress_controller_tag',
            'kubelet_options', 'kubeapi_options', 'kubeproxy_options',
            'kubecontroller_options', 'kubescheduler_options',
            'influx_grafana_dashboard_enabled', 'master_lb_allowed_cidrs',
            'octavia_provider', 'octavia_lb_algorithm',
            'octavia_lb_healthcheck'
        ]

        labels = self._get_relevant_labels(cluster, kwargs)

        for label in label_list:
            extra_params[label] = labels.get(label)

        ingress_controller = cluster.labels.get('ingress_controller',
                                                '').lower()
        if (ingress_controller == 'octavia'
                and not extra_params['octavia_enabled']):
            raise exception.InvalidParameterValue(
                'Octavia service needs to be deployed for octavia ingress '
                'controller.')
        extra_params["ingress_controller"] = ingress_controller

        cluster_ip_range = cluster.labels.get('service_cluster_ip_range')
        if cluster_ip_range:
            extra_params['portal_network_cidr'] = cluster_ip_range

        if cluster_template.registry_enabled:
            extra_params['swift_region'] = CONF.docker_registry.swift_region
            extra_params['registry_container'] = (
                CONF.docker_registry.swift_registry_container)

        kube_tag = (labels.get("kube_tag")
                    or cluster_template.labels.get("kube_tag"))
        if kube_tag:
            extra_params['kube_version'] = kube_tag
            extra_params['master_kube_tag'] = kube_tag
            extra_params['minion_kube_tag'] = kube_tag

        self._set_master_lb_allowed_cidrs(context, cluster, extra_params)

        return super(K8sTemplateDefinition,
                     self).get_params(context,
                                      cluster_template,
                                      cluster,
                                      extra_params=extra_params,
                                      **kwargs)
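The magnum_url handling above appends the API version only when discovery returned an unversioned endpoint. A quick illustration with a made-up endpoint:

magnum_url = 'http://controller:9511'        # hypothetical unversioned endpoint
magnum_url + ('' if magnum_url.endswith('/v1') else '/v1')
# -> 'http://controller:9511/v1'

magnum_url = 'http://controller:9511/v1'     # already versioned, left untouched
magnum_url + ('' if magnum_url.endswith('/v1') else '/v1')
# -> 'http://controller:9511/v1'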
Example #10
    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})

        extra_params['discovery_url'] = self.get_discovery_url(cluster)
        osc = self.get_osc(context)
        extra_params['magnum_url'] = osc.magnum_url()

        if cluster_template.tls_disabled:
            extra_params['loadbalancing_protocol'] = 'HTTP'
            extra_params['kubernetes_port'] = 8080

        extra_params['octavia_enabled'] = keystone.is_octavia_enabled()

        # NOTE(lxkong): Convert the external network name to a UUID; the
        # template field name is confusing. If external_network_id is not
        # specified in the cluster template, use 'public' as the default
        # value, which matches the previous default in the heat template.
        external_network = cluster_template.external_network_id or "public"
        extra_params['external_network'] = \
            neutron.get_external_network_id(context, external_network)

        # NOTE(brtknr): Convert fixed network UUID to name if the given network
        # name is UUID like because OpenStack Cloud Controller Manager only
        # accepts a name as an argument to internal-network-name in the
        # cloud-config file provided to it. The default fixed network name is
        # the same as that defined in the heat template.
        fixed_network = (cluster.fixed_network
                         or cluster_template.fixed_network or "private")

        extra_params['fixed_network_name'] = \
            neutron.get_fixed_network_name(context, fixed_network)

        label_list = [
            'flannel_network_cidr', 'flannel_backend',
            'flannel_network_subnetlen', 'system_pods_initial_delay',
            'system_pods_timeout', 'admission_control_list',
            'prometheus_monitoring', 'grafana_admin_passwd',
            'kube_dashboard_enabled', 'etcd_volume_size', 'cert_manager_api',
            'ingress_controller_role', 'octavia_ingress_controller_tag',
            'kubelet_options', 'kubeapi_options', 'kubeproxy_options',
            'kubecontroller_options', 'kubescheduler_options',
            'influx_grafana_dashboard_enabled'
        ]

        for label in label_list:
            extra_params[label] = cluster.labels.get(label)

        ingress_controller = cluster.labels.get('ingress_controller',
                                                '').lower()
        if (ingress_controller == 'octavia'
                and not extra_params['octavia_enabled']):
            raise exception.InvalidParameterValue(
                'Octavia service needs to be deployed for octavia ingress '
                'controller.')
        extra_params["ingress_controller"] = ingress_controller

        cluster_ip_range = cluster.labels.get('service_cluster_ip_range')
        if cluster_ip_range:
            extra_params['portal_network_cidr'] = cluster_ip_range

        if cluster_template.registry_enabled:
            extra_params['swift_region'] = CONF.docker_registry.swift_region
            extra_params['registry_container'] = (
                CONF.docker_registry.swift_registry_container)

        kube_tag = (cluster.labels.get("kube_tag")
                    or cluster_template.labels.get("kube_tag"))
        if kube_tag:
            extra_params['kube_version'] = kube_tag
            extra_params['master_kube_tag'] = kube_tag
            extra_params['minion_kube_tag'] = kube_tag

        return super(K8sTemplateDefinition,
                     self).get_params(context,
                                      cluster_template,
                                      cluster,
                                      extra_params=extra_params,
                                      **kwargs)