Example #1
0
    def delete_test_pods(self):
        """Remove any leftover test pods for this chart's release.

        Pods are matched by the chart's wait labels. Deleting them avoids
        test pod name conflicts when the new test pod is created, and also
        serves as general cleanup since the new test pod supersedes them.
        """
        labels = get_wait_labels(self.chart)

        # An empty label set would match every pod in the namespace, so
        # bail out rather than risk deleting another chart's test pods.
        if not labels:
            return

        label_selector = label_selectors(labels)
        namespace = self.chart['namespace']

        pod_list = self.tiller.k8s.client.list_namespaced_pod(
            namespace=namespace,
            label_selector=label_selector,
            timeout_seconds=self.k8s_timeout)
        test_pods = [p for p in pod_list.items if is_test_pod(p)]

        if test_pods:
            LOG.info(
                'Found existing test pods for release with '
                'namespace=%s, labels=(%s)', namespace, label_selector)

        for pod in test_pods:
            name = pod.metadata.name
            LOG.info('Deleting existing test pod: %s', name)
            self.tiller.k8s.delete_pod_action(
                name, namespace, timeout=self.k8s_timeout)
Example #2
0
 def __init__(self, resource_type, chart_wait, labels, get_resources,
              required=True):
     """Hold the wait configuration for a single resource type."""
     self.resource_type = resource_type
     self.chart_wait = chart_wait
     self.required = required
     self.get_resources = get_resources
     # Pre-compute the k8s label selector string once at construction.
     self.label_selector = label_selectors(labels)
Example #3
0
    def rolling_upgrade_pod_deployment(
            self,
            name,
            release_name,
            namespace,
            resource_labels,
            action_type,
            chart,
            disable_hooks,
            values,
            timeout=const.DEFAULT_TILLER_TIMEOUT):
        '''Rolling-update a daemonset by delete-and-recreate.

        :param name: name of the daemonset to update
        :param release_name: release the resource belongs to
        :param namespace: namespace of the resource
        :param resource_labels: labels used to select the resource
        :param action_type: only 'daemonset' is supported
        :param chart: chart providing the updated templates
        :param disable_hooks: whether chart hooks are disabled
        :param values: chart override values
        :param timeout: timeout for the post-update pod deletion
        '''

        if action_type == 'daemonset':

            LOG.info('Updating: %s', action_type)

            label_selector = ''

            if resource_labels is not None:
                label_selector = label_selectors(resource_labels)

            get_daemonset = self.k8s.get_namespace_daemon_set(
                namespace, label_selector=label_selector)

            for ds in get_daemonset.items:
                ds_name = ds.metadata.name
                ds_labels = ds.metadata.labels
                if ds_name == name:
                    LOG.info(
                        "Deleting %s : %s in %s", action_type, ds_name,
                        namespace)
                    self.k8s.delete_daemon_action(ds_name, namespace)

                    # Re-render the daemonset template and re-apply the
                    # original labels so the recreated daemonset matches.
                    template = self.get_chart_templates(
                        ds_name, name, release_name, namespace, chart,
                        disable_hooks, values)
                    template['metadata']['labels'] = ds_labels
                    template['spec']['template']['metadata'][
                        'labels'] = ds_labels

                    self.k8s.create_daemon_action(
                        namespace=namespace, template=template)

                    # Delete the old pods so the new daemonset respawns them.
                    self.delete_resources(
                        'pod',
                        resource_labels,
                        namespace,
                        wait=True,
                        timeout=timeout)

        else:
            # Fixed broken format string: '%' -> '%s' (two args were passed
            # for one placeholder) and the 'exectue' typo.
            LOG.error("Unable to execute name: %s type: %s", name, action_type)
Example #4
0
 def __init__(self, resource_type, chart_wait, labels, get_resources,
              skip_if_none_found=False):
     """Hold the wait configuration for a single resource type."""
     self.resource_type = resource_type
     self.chart_wait = chart_wait
     self.skip_if_none_found = skip_if_none_found
     self.get_resources = get_resources
     # Pre-compute the k8s label selector string once at construction.
     self.label_selector = label_selectors(labels)
Example #5
0
    def wait_until_ready(self,
                         release=None,
                         namespace='default',
                         labels='',
                         timeout=300,
                         sleep=15):
        '''Poll pods matching ``labels`` until all reach a ready state.

        :param release - release name, used only for error reporting
        :param namespace - namespace being waited on (logging only)
        :param labels - labels used to select the pods to wait on
        :param timeout - time before disconnecting stream
        :param sleep - seconds to pause between polling attempts
        '''
        LOG.debug("Wait on %s for %s sec", namespace, timeout)

        label_selector = ''

        if labels:
            label_selector = label_selectors(labels)

        valid_state = ['Succeeded', 'Running']

        # NOTE(review): deadline is ``timeout * 60``, i.e. ``timeout``
        # effectively behaves as minutes here -- preserved as-is, but
        # confirm against callers.
        wait_timeout = time.time() + 60 * timeout

        while True:

            self.is_pods_ready(label_selector=label_selector, timeout=timeout)

            pod_ready = []
            not_ready = []
            for pod in self.client.list_pod_for_all_namespaces(
                    label_selector=label_selector).items:
                p_state = pod.status.phase
                p_name = pod.metadata.name
                if p_state in valid_state:
                    pod_ready.append(True)
                    continue

                pod_ready.append(False)
                not_ready.append(p_name)

                LOG.debug('%s', p_state)

            if all(pod_ready):
                # Every matched pod (or none at all) is in a valid state.
                LOG.debug("Pod States %s", pod_ready)
                break

            if time.time() > wait_timeout:
                # This failure branch was previously unreachable because the
                # combined timeout/ready check broke out of the loop first.
                # Use LOG.error: LOG.exception is only valid inside `except`.
                LOG.error('Failed to bring up release %s: %s', release,
                          not_ready)
                break

            LOG.debug('time: %s pod %s', wait_timeout, pod_ready)
            # Use the previously-unused ``sleep`` parameter to avoid a
            # busy-wait polling loop.
            time.sleep(sleep)
Example #6
0
    def wait_until_ready(self,
                         release=None,
                         namespace='default',
                         labels='',
                         timeout=300,
                         sleep=15,
                         required_successes=3,
                         inter_success_wait=10):
        '''Wait for pods selected by ``labels`` to settle.

        :param release - part of namespace
        :param timeout - time before disconnecting stream
        '''
        label_selector = label_selectors(labels) if labels else ''

        LOG.debug("Wait on %s (%s) for %s sec", namespace, label_selector,
                  timeout)

        deadline = time.time() + timeout

        # NOTE(mark-burnett): Attempt to wait multiple times without
        # modification, in case new pods appear after our watch exits.
        successes = 0
        while successes < required_successes:
            remaining = int(deadline - time.time())
            if remaining <= 0:
                # Overall deadline exhausted before enough quiet attempts.
                return False

            timed_out, modified_pods, unready_pods = self.wait_one_time(
                label_selector, timeout=remaining)

            if timed_out:
                LOG.info('Timed out waiting for pods: %s', unready_pods)
                return False

            if modified_pods:
                # Pods changed under us; restart the success counter.
                successes = 0
                LOG.debug('Continuing to wait, found modified pods: %s',
                          modified_pods)
            else:
                successes += 1
                LOG.debug('Found no modified pods this attempt. successes=%d',
                          successes)

            time.sleep(inter_success_wait)

        return True
Example #7
0
    def delete_resources(self,
                         release_name,
                         resource_name,
                         resource_type,
                         resource_labels,
                         namespace,
                         wait=False):
        '''Delete jobs or pods matching the given labels and namespace.

        :params release_name - release name the specified resource is under
        :params resource_name - name of specific resource
        :params resource_type - type of resource e.g. job, pod, etc.
        :params resource_labels - labels by which to identify the resource
        :params resource_labels - labels by which to identify the resource
        :params namespace - namespace of the resource
        :params wait - for pods, wait for redeployment after deletion

        Apply deletion logic based on type of resource
        '''

        label_selector = ''
        if resource_labels is not None:
            label_selector = label_selectors(resource_labels)
        # Fixed missing space between the two adjacent string literals,
        # which previously logged "...matchingselectors...".
        LOG.debug(
            "Deleting resources in namespace %s matching "
            "selectors %s.", namespace, label_selector)

        # NOTE(review): substring matching is loose -- e.g. 'cronjob' would
        # also hit the 'job' branch. Preserved as-is for caller compat.
        if 'job' in resource_type:
            get_jobs = self.k8s.get_namespace_job(namespace, label_selector)
            for jb in get_jobs.items:
                jb_name = jb.metadata.name
                LOG.info("Deleting %s in namespace: %s", jb_name, namespace)

                self.k8s.delete_job_action(jb_name, namespace)

        elif 'pod' in resource_type:
            release_pods = self.k8s.get_namespace_pod(namespace,
                                                      label_selector)

            for pod in release_pods.items:
                pod_name = pod.metadata.name
                LOG.info("Deleting %s in namespace: %s", pod_name, namespace)
                self.k8s.delete_namespace_pod(pod_name, namespace)
                if wait:
                    # Block until the pod has been rescheduled.
                    self.k8s.wait_for_pod_redeployment(pod_name, namespace)
        else:
            LOG.error("Unable to execute name: %s type: %s ", resource_name,
                      resource_type)
Example #8
0
    def wait_until_ready(self,
                         release=None,
                         namespace='',
                         labels='',
                         timeout=DEFAULT_K8S_TIMEOUT,
                         k8s_wait_attempts=1,
                         k8s_wait_attempt_sleep=1):
        '''
        Wait until all pods become ready given the filters provided by
        ``release``, ``labels`` and ``namespace``.

        :param release: chart release
        :param namespace: the namespace used to filter which pods to wait on
        :param labels: the labels used to filter which pods to wait on
        :param timeout: time before disconnecting ``Watch`` stream
        :param k8s_wait_attempts: The number of times to attempt waiting
            for pods to become ready (minimum 1).
        :param k8s_wait_attempt_sleep: The time in seconds to sleep
            between attempts (minimum 1).
        :returns: True once ``k8s_wait_attempts`` consecutive watches see
            no pod modifications.
        :raises exceptions.KubernetesWatchTimeoutException: if the overall
            deadline expires or a single pod watch times out.
        '''
        timeout = self._check_timeout(timeout)

        # NOTE(MarshM) 'release' is currently unused
        label_selector = label_selectors(labels) if labels else ''

        # Clamp both knobs to a minimum of 1, per the docstring contract.
        wait_attempts = (k8s_wait_attempts if k8s_wait_attempts >= 1 else 1)
        sleep_time = (k8s_wait_attempt_sleep
                      if k8s_wait_attempt_sleep >= 1 else 1)

        LOG.debug(
            "Wait on namespace=(%s) labels=(%s) for %s sec "
            "(k8s wait %s times, sleep %ss)", namespace, label_selector,
            timeout, wait_attempts, sleep_time)

        if not namespace:
            # This shouldn't be reachable
            LOG.warn('"namespace" not specified, waiting across all available '
                     'namespaces is likely to cause unintended consequences.')
        if not label_selector:
            LOG.warn('"label_selector" not specified, waiting with no labels '
                     'may cause unintended consequences.')

        # Track the overall deadline for timing out during waits
        deadline = time.time() + timeout

        # First, we should watch for jobs before checking pods, as a job can
        # still be running even after its current pods look healthy or have
        # been removed and are pending reschedule
        found_jobs = self.get_namespace_job(namespace, label_selector)
        if len(found_jobs.items):
            self._watch_job_completion(namespace, label_selector, timeout)

        # NOTE(mark-burnett): Attempt to wait multiple times without
        # modification, in case new pods appear after our watch exits.

        successes = 0
        while successes < wait_attempts:
            # Each watch only gets the time remaining on the shared deadline.
            deadline_remaining = int(round(deadline - time.time()))
            if deadline_remaining <= 0:
                LOG.info('Timed out while waiting for pods.')
                raise exceptions.KubernetesWatchTimeoutException(
                    'Timed out while waiting on namespace=(%s) labels=(%s)' %
                    (namespace, label_selector))

            timed_out, modified_pods, unready_pods, found_events = (
                self._watch_pod_completions(namespace=namespace,
                                            label_selector=label_selector,
                                            timeout=deadline_remaining))

            # No events usually means the label selector matched nothing --
            # warn, since that often indicates mislabeled chart resources.
            if not found_events:
                LOG.warn(
                    'Saw no install/update events for release=%s, '
                    'namespace=%s, labels=(%s). Are the labels correct?',
                    release, namespace, label_selector)

            if timed_out:
                LOG.info('Timed out waiting for pods: %s',
                         sorted(unready_pods))
                raise exceptions.KubernetesWatchTimeoutException(
                    'Timed out while waiting on namespace=(%s) labels=(%s)' %
                    (namespace, label_selector))

            if modified_pods:
                # Any modification resets the consecutive-success counter.
                successes = 0
                LOG.debug('Continuing to wait, found modified pods: %s',
                          sorted(modified_pods))
            else:
                successes += 1
                LOG.debug('Found no modified pods this attempt. successes=%d',
                          successes)

            time.sleep(sleep_time)

        return True
Example #9
0
    def delete_resources(self,
                         release_name,
                         resource_name,
                         resource_type,
                         resource_labels,
                         namespace,
                         wait=False,
                         timeout=const.DEFAULT_TILLER_TIMEOUT):
        '''
        :param release_name: release name the specified resource is under
        :param resource_name: name of specific resource
        :param resource_type: type of resource e.g. job, pod, etc.
        :param resource_labels: labels by which to identify the resource
        :param namespace: namespace of the resource
        :param wait: for pods, wait for redeployment after deletion
        :param timeout: timeout applied to job deletion

        Apply deletion logic based on type of resource
        '''
        timeout = self._check_timeout(wait, timeout)

        label_selector = ''
        if resource_labels is not None:
            label_selector = label_selectors(resource_labels)
        LOG.debug(
            "Deleting resources in namespace %s matching "
            "selectors (%s).", namespace, label_selector)

        handled = False
        if resource_type == 'job':
            get_jobs = self.k8s.get_namespace_job(
                namespace, label_selector=label_selector)
            for jb in get_jobs.items:
                jb_name = jb.metadata.name

                if self.dry_run:
                    LOG.info(
                        'Skipping delete job during `dry-run`, would '
                        'have deleted job %s in namespace=%s.', jb_name,
                        namespace)
                    continue

                LOG.info("Deleting job %s in namespace: %s", jb_name,
                         namespace)
                self.k8s.delete_job_action(jb_name, namespace, timeout=timeout)
            handled = True

        if resource_type == 'cronjob' or resource_type == 'job':
            get_jobs = self.k8s.get_namespace_cron_job(
                namespace, label_selector=label_selector)
            for jb in get_jobs.items:
                jb_name = jb.metadata.name

                if resource_type == 'job':
                    # TODO: Eventually disallow this, allowing initially since
                    #       some existing clients were expecting this behavior.
                    # NOTE: logger.warn is deprecated; use warning() instead.
                    LOG.warning("Deleting cronjobs via `type: job` is "
                                "deprecated, use `type: cronjob` instead")

                if self.dry_run:
                    LOG.info(
                        'Skipping delete cronjob during `dry-run`, would '
                        'have deleted cronjob %s in namespace=%s.', jb_name,
                        namespace)
                    continue

                LOG.info("Deleting cronjob %s in namespace: %s", jb_name,
                         namespace)
                # NOTE(review): unlike jobs, no timeout is passed here --
                # confirm whether delete_cron_job_action should take one.
                self.k8s.delete_cron_job_action(jb_name, namespace)
            handled = True

        if resource_type == 'pod':
            release_pods = self.k8s.get_namespace_pod(
                namespace, label_selector=label_selector)
            for pod in release_pods.items:
                pod_name = pod.metadata.name

                if self.dry_run:
                    LOG.info(
                        'Skipping delete pod during `dry-run`, would '
                        'have deleted pod %s in namespace=%s.', pod_name,
                        namespace)
                    continue

                LOG.info("Deleting pod %s in namespace: %s", pod_name,
                         namespace)
                self.k8s.delete_pod_action(pod_name, namespace)
                if wait:
                    self.k8s.wait_for_pod_redeployment(pod_name, namespace)
            handled = True

        if not handled:
            LOG.error("Unable to execute name: %s type: %s ", resource_name,
                      resource_type)
Example #10
0
    def wait_until_ready(self,
                         release=None,
                         namespace='',
                         labels='',
                         timeout=DEFAULT_K8S_TIMEOUT,
                         k8s_wait_attempts=1,
                         k8s_wait_attempt_sleep=1):
        '''
        Wait until all pods become ready given the filters provided by
        ``release``, ``labels`` and ``namespace``.

        :param release: chart release
        :param namespace: the namespace used to filter which pods to wait on
        :param labels: the labels used to filter which pods to wait on
        :param timeout: time before disconnecting ``Watch`` stream
        :param k8s_wait_attempts: The number of times to attempt waiting
            for pods to become ready (minimum 1).
        :param k8s_wait_attempt_sleep: The time in seconds to sleep
            between attempts (minimum 1).
        :returns: True once enough consecutive watches see no modified
            pods; False if the overall deadline expires first.
        :raises exceptions.KubernetesWatchTimeoutException: if a pod watch
            times out before all pods become ready.
        '''
        # NOTE(MarshM) 'release' is currently unused
        label_selector = label_selectors(labels) if labels else ''

        # Clamp both knobs to a minimum of 1, per the docstring contract.
        wait_attempts = (k8s_wait_attempts if k8s_wait_attempts >= 1 else 1)
        sleep_time = (k8s_wait_attempt_sleep
                      if k8s_wait_attempt_sleep >= 1 else 1)

        LOG.debug(
            "Wait on namespace=(%s) labels=(%s) for %s sec "
            "(k8s wait %s times, sleep %ss)", namespace, label_selector,
            timeout, wait_attempts, sleep_time)

        # NOTE: logger.warn is deprecated; use warning() instead.
        if not namespace:
            # This shouldn't be reachable
            LOG.warning(
                '"namespace" not specified, waiting across all available '
                'namespaces is likely to cause unintended consequences.')
        if not label_selector:
            LOG.warning(
                '"label_selector" not specified, waiting with no labels '
                'may cause unintended consequences.')

        deadline = time.time() + timeout

        # NOTE(mark-burnett): Attempt to wait multiple times without
        # modification, in case new pods appear after our watch exits.

        successes = 0
        while successes < wait_attempts:
            deadline_remaining = int(round(deadline - time.time()))
            if deadline_remaining <= 0:
                return False
            timed_out, modified_pods, unready_pods = self._wait_one_time(
                namespace=namespace,
                label_selector=label_selector,
                timeout=deadline_remaining)

            if timed_out:
                LOG.info('Timed out waiting for pods: %s',
                         sorted(unready_pods))
                # Removed an unreachable `return False` that previously
                # followed this raise.
                raise exceptions.KubernetesWatchTimeoutException(
                    'Timed out while waiting on namespace=(%s) labels=(%s)' %
                    (namespace, label_selector))

            if modified_pods:
                successes = 0
                LOG.debug('Continuing to wait, found modified pods: %s',
                          sorted(modified_pods))
            else:
                successes += 1
                LOG.debug('Found no modified pods this attempt. successes=%d',
                          successes)

            time.sleep(sleep_time)

        return True
Example #11
0
    def delete_resources(self,
                         resource_type,
                         resource_labels,
                         namespace,
                         wait=False,
                         timeout=const.DEFAULT_TILLER_TIMEOUT):
        '''
        Delete resources matching provided resource type, labels, and
        namespace.

        :param resource_type: type of resource e.g. job, pod, etc.
        :param resource_labels: labels for selecting the resources
        :param namespace: namespace of resources
        :param wait: for pods, wait for redeployment after deletion
        :param timeout: timeout applied to job deletion
        '''
        timeout = self._check_timeout(wait, timeout)

        label_selector = ''
        if resource_labels is not None:
            label_selector = label_selectors(resource_labels)
        LOG.debug(
            "Deleting resources in namespace %s matching "
            "selectors (%s).", namespace, label_selector)

        handled = False
        if resource_type == 'job':
            get_jobs = self.k8s.get_namespace_job(
                namespace, label_selector=label_selector)
            for jb in get_jobs.items:
                jb_name = jb.metadata.name

                if self.dry_run:
                    LOG.info(
                        'Skipping delete job during `dry-run`, would '
                        'have deleted job %s in namespace=%s.', jb_name,
                        namespace)
                    continue

                LOG.info("Deleting job %s in namespace: %s", jb_name,
                         namespace)
                self.k8s.delete_job_action(jb_name, namespace, timeout=timeout)
            handled = True

        # TODO: Remove when v1 doc support is removed.
        chart = get_current_chart()
        schema_info = schema.get_schema_info(chart['schema'])
        job_implies_cronjob = schema_info.version < 2
        implied_cronjob = resource_type == 'job' and job_implies_cronjob

        if resource_type == 'cronjob' or implied_cronjob:
            get_jobs = self.k8s.get_namespace_cron_job(
                namespace, label_selector=label_selector)
            for jb in get_jobs.items:
                jb_name = jb.metadata.name

                # TODO: Remove when v1 doc support is removed.
                if implied_cronjob:
                    # NOTE: logger.warn is deprecated; use warning() instead.
                    LOG.warning("Deleting cronjobs via `type: job` is "
                                "deprecated, use `type: cronjob` instead")

                if self.dry_run:
                    LOG.info(
                        'Skipping delete cronjob during `dry-run`, would '
                        'have deleted cronjob %s in namespace=%s.', jb_name,
                        namespace)
                    continue

                LOG.info("Deleting cronjob %s in namespace: %s", jb_name,
                         namespace)
                self.k8s.delete_cron_job_action(jb_name, namespace)
            handled = True

        if resource_type == 'pod':
            release_pods = self.k8s.get_namespace_pod(
                namespace, label_selector=label_selector)
            for pod in release_pods.items:
                pod_name = pod.metadata.name

                if self.dry_run:
                    LOG.info(
                        'Skipping delete pod during `dry-run`, would '
                        'have deleted pod %s in namespace=%s.', pod_name,
                        namespace)
                    continue

                LOG.info("Deleting pod %s in namespace: %s", pod_name,
                         namespace)
                self.k8s.delete_pod_action(pod_name, namespace)
                if wait:
                    self.k8s.wait_for_pod_redeployment(pod_name, namespace)
            handled = True

        if not handled:
            LOG.error('No resources found with labels=%s type=%s namespace=%s',
                      resource_labels, resource_type, namespace)
Example #12
0
    def delete_resources(self,
                         release_name,
                         resource_name,
                         resource_type,
                         resource_labels,
                         namespace,
                         wait=False,
                         timeout=TILLER_TIMEOUT):
        '''Delete jobs, cronjobs, or pods matching labels in a namespace.

        :params release_name - release name the specified resource is under
        :params resource_name - name of specific resource
        :params resource_type - type of resource e.g. job, pod, etc.
        :params resource_labels - labels by which to identify the resource
        :params namespace - namespace of the resource

        Apply deletion logic based on type of resource
        '''

        selector = ('' if resource_labels is None
                    else label_selectors(resource_labels))
        LOG.debug(
            "Deleting resources in namespace %s matching "
            "selectors %s.", namespace, selector)

        handled = False

        if resource_type == 'job':
            for job in self.k8s.get_namespace_job(namespace, selector).items:
                job_name = job.metadata.name
                LOG.info("Deleting job %s in namespace: %s", job_name,
                         namespace)
                self.k8s.delete_job_action(job_name, namespace,
                                           timeout=timeout)
            handled = True

        if resource_type in ('cronjob', 'job'):
            cron_jobs = self.k8s.get_namespace_cron_job(namespace, selector)
            for cron_job in cron_jobs.items:
                cron_name = cron_job.metadata.name
                LOG.info("Deleting cron job %s in namespace: %s", cron_name,
                         namespace)
                if resource_type == 'job':
                    # TODO: Eventually disallow this, allowing initially since
                    #       some existing clients were expecting this behavior.
                    LOG.warning("Deleting cron jobs via `type: job` is "
                                "deprecated, use `type: cronjob` instead")
                self.k8s.delete_cron_job_action(cron_name, namespace)
            handled = True

        if resource_type == 'pod':
            pods = self.k8s.get_namespace_pod(namespace, selector)
            for pod in pods.items:
                pod_name = pod.metadata.name
                LOG.info("Deleting pod %s in namespace: %s", pod_name,
                         namespace)
                self.k8s.delete_namespace_pod(pod_name, namespace)
                if wait:
                    # Block until the pod has been rescheduled.
                    self.k8s.wait_for_pod_redeployment(pod_name, namespace)
            handled = True

        if not handled:
            LOG.error("Unable to execute name: %s type: %s ", resource_name,
                      resource_type)