Ejemplo n.º 1
0
    def test_parse_kubeconf_data_insecure(self):
        """
        parse_kubeconf_data must follow the current context and report
        TLS verification as disabled when the selected cluster sets
        insecure-skip-tls-verify.
        """
        context2 = {"cluster": "cluster1", "user": "******", "namespace": "namespace1"}
        cluster1 = {"insecure-skip-tls-verify": "true", "server": "server1"}
        kubecfg_data = {
            "current-context": "context2",
            "contexts": [
                {"name": "context1"},
                {"name": "context2", "context": context2},
            ],
            "clusters": [{"name": "cluster1", "cluster": cluster1}],
            "users": [{"name": "user1", "user": {"token": "token1"}}],
        }

        expected = {
            "provider-api": "server1",
            "provider-auth": "token1",
            "namespace": "namespace1",
            "provider-tlsverify": False,
            "provider-cafile": None,
        }
        self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), expected)
Ejemplo n.º 2
0
    def test_parse_kubeconf_data_cafile(self):
        """
        parse_kubeconf_data must follow the current context and surface
        the cluster's certificate-authority path with TLS verification
        left enabled.
        """
        context2 = {"cluster": "cluster1", "user": "******", "namespace": "namespace1"}
        cluster1 = {"certificate-authority": "/foo/bar", "server": "server1"}
        kubecfg_data = {
            "current-context": "context2",
            "contexts": [
                {"name": "context1"},
                {"name": "context2", "context": context2},
            ],
            "clusters": [{"name": "cluster1", "cluster": cluster1}],
            "users": [{"name": "user1", "user": {"token": "token1"}}],
        }

        expected = {
            "provider-api": "server1",
            "provider-auth": "token1",
            "namespace": "namespace1",
            "provider-tlsverify": True,
            "provider-cafile": "/foo/bar",
        }
        self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), expected)
Ejemplo n.º 3
0
    def test_parse_kubeconf_data_insecure(self):
        """
        parse_kubeconf_data must resolve the current context's cluster,
        user and namespace, and report TLS verification as disabled when
        the cluster sets insecure-skip-tls-verify.
        """
        kubecfg_data = {
            'current-context': 'context2',
            'contexts': [
                {'name': 'context1'},
                {'name': 'context2',
                 'context': {'cluster': 'cluster1',
                             'user': '******',
                             'namespace': 'namespace1'}},
            ],
            'clusters': [
                {'name': 'cluster1',
                 'cluster': {'insecure-skip-tls-verify': 'true',
                             'server': 'server1'}},
            ],
            'users': [
                {'name': 'user1', 'user': {'token': 'token1'}},
            ],
        }

        expected = {'providerapi': 'server1',
                    'accesstoken': 'token1',
                    'namespace': 'namespace1',
                    'providertlsverify': False,
                    'providercafile': None}
        self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), expected)
Ejemplo n.º 4
0
    def test_parse_kubeconf_data_cafile(self):
        """
        parse_kubeconf_data must resolve the current context's cluster,
        user and namespace, and surface the cluster's
        certificate-authority path with TLS verification enabled.
        """
        kubecfg_data = {
            'current-context': 'context2',
            'contexts': [
                {'name': 'context1'},
                {'name': 'context2',
                 'context': {'cluster': 'cluster1',
                             'user': '******',
                             'namespace': 'namespace1'}},
            ],
            'clusters': [
                {'name': 'cluster1',
                 'cluster': {'certificate-authority': '/foo/bar',
                             'server': 'server1'}},
            ],
            'users': [
                {'name': 'user1', 'user': {'token': 'token1'}},
            ],
        }

        expected = {'providerapi': 'server1',
                    'accesstoken': 'token1',
                    'namespace': 'namespace1',
                    'providertlsverify': True,
                    'providercafile': '/foo/bar'}
        self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), expected)
Ejemplo n.º 5
0
    def test_parse_kubeconf_data_insecure(self):
        """
        parse_kubeconf_data must pick the current context and disable
        TLS verification when the referenced cluster requests
        insecure-skip-tls-verify.
        """
        selected_context = {
            'cluster': 'cluster1',
            'user': '******',
            'namespace': 'namespace1',
        }
        kubecfg_data = {
            'current-context': 'context2',
            'contexts': [
                {'name': 'context1'},
                {'name': 'context2', 'context': selected_context},
            ],
            'clusters': [{
                'name': 'cluster1',
                'cluster': {'insecure-skip-tls-verify': 'true',
                            'server': 'server1'},
            }],
            'users': [{'name': 'user1', 'user': {'token': 'token1'}}],
        }

        expected = {
            'provider-api': 'server1',
            'provider-auth': 'token1',
            'namespace': 'namespace1',
            'provider-tlsverify': False,
            'provider-cafile': None,
        }
        self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), expected)
Ejemplo n.º 6
0
    def test_parse_kubeconf_data_cafile(self):
        """
        parse_kubeconf_data must pick the current context and pass the
        referenced cluster's certificate-authority through, keeping TLS
        verification enabled.
        """
        selected_context = {
            'cluster': 'cluster1',
            'user': '******',
            'namespace': 'namespace1',
        }
        kubecfg_data = {
            'current-context': 'context2',
            'contexts': [
                {'name': 'context1'},
                {'name': 'context2', 'context': selected_context},
            ],
            'clusters': [{
                'name': 'cluster1',
                'cluster': {'certificate-authority': '/foo/bar',
                            'server': 'server1'},
            }],
            'users': [{'name': 'user1', 'user': {'token': 'token1'}}],
        }

        expected = {
            'provider-api': 'server1',
            'provider-auth': 'token1',
            'namespace': 'namespace1',
            'provider-tlsverify': True,
            'provider-cafile': '/foo/bar',
        }
        self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), expected)
Ejemplo n.º 7
0
    def _set_config_values(self):
        """
        Resolve providerapi, namespace and accesstoken from answers.conf
        and, when a providerconfig file is set, from that file as well.

        A value present in exactly one source wins; identical values in
        both sources are accepted; differing values are a fatal conflict.

        Raises:
            ProviderFailedException: values in providerconfig and answers.conf
                are in conflict
        """

        # Inside an openshift pod (`oc new-app`) everything comes from the
        # environment (files/env vars); only the tls-verify flag may still
        # be overridden via answers.
        if Utils.running_on_openshift():
            self.providerapi = Utils.get_openshift_api_endpoint_from_env()
            self.namespace = os.environ['POD_NAMESPACE']
            self.access_token = os.environ['TOKEN_ENV_VAR']
            self.provider_ca = OPENSHIFT_POD_CA_FILE
            self.provider_tls_verify = \
                self.config.get(PROVIDER_TLS_VERIFY_KEY, True)
            return  # nothing else to resolve

        # Start from the defaults currently held on the instance.
        result = {PROVIDER_API_KEY: self.providerapi,
                  PROVIDER_AUTH_KEY: self.access_token,
                  NAMESPACE_KEY: self.namespace,
                  PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify,
                  PROVIDER_CA_KEY: self.provider_ca}

        # Candidate values from answers.conf (None where unset).
        answers = {key: self.config.get(key) for key in result}

        # Candidate values from providerconfig (all None when no file).
        providerconfig = dict.fromkeys(result)
        if self.config_file:
            providerconfig = KubeConfig.parse_kubeconf(self.config_file)

        # Merge the two sources: one set value wins, equal values agree,
        # a disagreement aborts.
        for key in result:
            from_answers = answers[key]
            from_cfg = providerconfig[key]
            if from_answers is None:
                if from_cfg is not None:
                    result[key] = from_cfg
            elif from_cfg is None or from_answers == from_cfg:
                result[key] = from_answers
            else:
                msg = "There are conflicting values in %s (%s) and %s (%s)"\
                    % (self.config_file, from_cfg, ANSWERS_FILE,
                       from_answers)
                logger.error(msg)
                raise ProviderFailedException(msg)

        logger.debug("config values: %s" % result)

        # These three must have been resolved to something non-None.
        for key in (PROVIDER_API_KEY, PROVIDER_AUTH_KEY, NAMESPACE_KEY):
            if result[key] is None:
                msg = "You need to set %s in %s" % (key, ANSWERS_FILE)
                logger.error(msg)
                raise ProviderFailedException(msg)

        # Publish the resolved values on the instance.
        self.providerapi = result[PROVIDER_API_KEY]
        self.access_token = result[PROVIDER_AUTH_KEY]
        self.namespace = result[NAMESPACE_KEY]
        self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY]
        if result[PROVIDER_CA_KEY]:
            # if we are in container translate path to path on host
            self.provider_ca = Utils.get_real_abspath(result[PROVIDER_CA_KEY])
        else:
            self.provider_ca = None
Ejemplo n.º 8
0
    def _set_config_values(self):
        """
        Reads providerapi, namespace and accesstoken from answers.conf and
        corresponding values from providerconfig (if set).
        Use one that is set, if both are set and have conflicting values raise
        exception.

        Raises:
            ProviderFailedException: values in providerconfig and answers.conf
                are in conflict

        """

        # First things first, if we are running inside of an openshift pod via
        # `oc new-app` then get the config from the environment (files/env vars)
        # NOTE: pick up provider_tls_verify from answers if exists
        if Utils.running_on_openshift():
            self.providerapi = Utils.get_openshift_api_endpoint_from_env()
            self.namespace = os.environ['POD_NAMESPACE']
            self.access_token = os.environ['TOKEN_ENV_VAR']
            self.provider_ca = OPENSHIFT_POD_CA_FILE
            self.provider_tls_verify = \
                self.config.get(PROVIDER_TLS_VERIFY_KEY, True)
            return  # No need to process other information

        # initialize result to default values
        result = {
            PROVIDER_API_KEY: self.providerapi,
            PROVIDER_AUTH_KEY: self.access_token,
            NAMESPACE_KEY: self.namespace,
            PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify,
            PROVIDER_CA_KEY: self.provider_ca
        }

        # values from answers.conf (None for keys it does not define)
        answers = {k: self.config.get(k) for k in result}

        # values from providerconfig (all None when no config file is set)
        providerconfig = dict.fromkeys(result)
        if self.config_file:
            providerconfig = KubeConfig.parse_kubeconf(self.config_file)

        # decide between values from answers.conf and providerconfig
        # if only one is set use that, report if they are in conflict
        # NOTE: the branches are mutually exclusive, so use elif instead of
        # three independent if statements
        for k in result.keys():
            if answers[k] is not None and providerconfig[k] is None:
                result[k] = answers[k]
            elif answers[k] is None and providerconfig[k] is not None:
                result[k] = providerconfig[k]
            elif answers[k] is not None and providerconfig[k] is not None:
                if answers[k] == providerconfig[k]:
                    result[k] = answers[k]
                else:
                    msg = "There are conflicting values in %s (%s) and %s (%s)"\
                        % (self.config_file, providerconfig[k], ANSWERS_FILE,
                           answers[k])
                    logger.error(msg)
                    raise ProviderFailedException(msg)

        logger.debug("config values: %s" % result)

        # these items are required, they have to be not None
        for k in [PROVIDER_API_KEY, PROVIDER_AUTH_KEY, NAMESPACE_KEY]:
            if result[k] is None:
                msg = "You need to set %s in %s" % (k, ANSWERS_FILE)
                logger.error(msg)
                raise ProviderFailedException(msg)

        # set config values
        self.providerapi = result[PROVIDER_API_KEY]
        self.access_token = result[PROVIDER_AUTH_KEY]
        self.namespace = result[NAMESPACE_KEY]
        self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY]
        if result[PROVIDER_CA_KEY]:
            # if we are in container translate path to path on host
            self.provider_ca = Utils.get_real_abspath(result[PROVIDER_CA_KEY])
        else:
            self.provider_ca = None