Code example #1
File: periodic.py  Project: dragorosson/magnum
    def _get_cluster_stacks(
            self, clusters, sid_to_cluster_mapping, cluster_stack_ids):
        stacks = []

        _clusters = clusters
        _sid_to_cluster_mapping = sid_to_cluster_mapping
        _cluster_stack_ids = cluster_stack_ids

        # Iterate over a copy so that failed clusters can be removed
        # from the original list below without skipping entries.
        for cluster in list(_clusters):
            try:
                # Create client with cluster's trustee user context
                bosc = clients.OpenStackClients(
                    context.make_cluster_context(cluster))
                stack = bosc.heat().stacks.get(cluster.stack_id)
                stacks.append(stack)
            # No need to do anything in this case
            except heat_exc.HTTPNotFound:
                pass
            except Exception as e:
                # Any other exception means we do not perform any
                # action on this cluster in the current sync run, so remove
                # it from all records.
                LOG.warning(
                    _LW("Exception while attempting to retrieve "
                        "Heat stack %(stack_id)s for cluster %(cluster_id)s. "
                        "Traceback follows."),
                    {'stack_id': cluster.stack_id, 'cluster_id': cluster.id})
                LOG.warning(e, exc_info=True)
                _sid_to_cluster_mapping.pop(cluster.stack_id)
                _cluster_stack_ids.remove(cluster.stack_id)
                _clusters.remove(cluster)
        return [stacks, _clusters, _cluster_stack_ids, _sid_to_cluster_mapping]
Code example #2
File: periodic.py  Project: yangkf1985/magnum
    def _get_cluster_stacks(self, clusters, sid_to_cluster_mapping,
                            cluster_stack_ids):
        stacks = []

        _clusters = clusters
        _sid_to_cluster_mapping = sid_to_cluster_mapping
        _cluster_stack_ids = cluster_stack_ids

        # Iterate over a copy so that failed clusters can be removed
        # from the original list below without skipping entries.
        for cluster in list(_clusters):
            try:
                # Create client with cluster's trustee user context
                bosc = clients.OpenStackClients(
                    context.make_cluster_context(cluster))
                stack = bosc.heat().stacks.get(cluster.stack_id)
                stacks.append(stack)
            # No need to do anything in this case
            except heat_exc.HTTPNotFound:
                pass
            except Exception as e:
                # Any other exception means we do not perform any
                # action on this cluster in the current sync run, so remove
                # it from all records.
                LOG.warning(
                    _LW("Exception while attempting to retrieve "
                        "Heat stack %(stack_id)s for cluster %(cluster_id)s. "
                        "Traceback follows."), {
                            'stack_id': cluster.stack_id,
                            'cluster_id': cluster.id
                        })
                LOG.warning(e, exc_info=True)
                _sid_to_cluster_mapping.pop(cluster.stack_id)
                _cluster_stack_ids.remove(cluster.stack_id)
                _clusters.remove(cluster)
        return [stacks, _clusters, _cluster_stack_ids, _sid_to_cluster_mapping]
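Both versions return the four collections as a single list, so a caller must unpack them positionally, in the order the helper builds them. Below is a minimal sketch of such a caller; the cluster fetch and the mapping construction are assumptions for illustration, not Magnum's actual periodic task.

    def sync_cluster_status(self, ctx):
        # Hypothetical fetch of the clusters to reconcile in this run.
        clusters = objects.Cluster.list(ctx)
        sid_to_cluster_mapping = {c.stack_id: c for c in clusters}
        cluster_stack_ids = [c.stack_id for c in clusters]

        # Unpack in the order _get_cluster_stacks builds its return list:
        # [stacks, clusters, stack ids, stack-id-to-cluster mapping].
        stacks, clusters, cluster_stack_ids, sid_to_cluster_mapping = (
            self._get_cluster_stacks(
                clusters, sid_to_cluster_mapping, cluster_stack_ids))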
Code example #3
    def update_cluster_status(self, context, cluster):
        if cluster.stack_id is None:
            # NOTE(mgoddard): During cluster creation it is possible to poll
            # the cluster before its heat stack has been created. See bug
            # 1682058.
            return
        stack_ctx = mag_ctx.make_cluster_context(cluster)
        poller = HeatPoller(clients.OpenStackClients(stack_ctx), context,
                            cluster, self)
        poller.poll_and_check()
Code example #4
File: driver.py  Project: openstack/magnum
    def update_cluster_status(self, context, cluster):
        if cluster.stack_id is None:
            # NOTE(mgoddard): During cluster creation it is possible to poll
            # the cluster before its heat stack has been created. See bug
            # 1682058.
            return
        stack_ctx = mag_ctx.make_cluster_context(cluster)
        poller = HeatPoller(clients.OpenStackClients(stack_ctx), context,
                            cluster, self)
        poller.poll_and_check()
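A hedged sketch of how a conductor-side loop might drive this method is below; the objects API is Magnum's, but the status filter and the driver lookup are assumptions for illustration.

    def sync_cluster_status(self, context):
        for cluster in objects.Cluster.list(context):
            # Only poll clusters with an operation still in flight.
            if not cluster.status.endswith('_IN_PROGRESS'):
                continue
            try:
                self.driver.update_cluster_status(context, cluster)
            except Exception:
                LOG.exception('Status sync failed for cluster %s',
                              cluster.uuid)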
Code example #5
    def update_cluster_status(self, context, cluster):
        stack_ctx = mag_ctx.make_cluster_context(cluster)
        poller = HeatPoller(clients.OpenStackClients(stack_ctx), context,
                            cluster, self)
        poller.poll_and_check()
Code example #6
File: driver.py  Project: charliekang/magnum
    def update_cluster_status(self, context, cluster):
        stack_ctx = mag_ctx.make_cluster_context(cluster)
        poller = HeatPoller(clients.OpenStackClients(stack_ctx), context,
                            cluster, self)
        poller.poll_and_check()
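All four variants delegate the real work to HeatPoller.poll_and_check(). A minimal sketch of that poll-and-check pattern follows; the status mapping here is illustrative, not HeatPoller's actual logic.

class SimpleHeatPoller(object):
    # Illustrative stand-in for magnum's HeatPoller.

    def __init__(self, openstack_client, context, cluster):
        self.openstack_client = openstack_client
        self.context = context
        self.cluster = cluster

    def poll_and_check(self):
        stack = self.openstack_client.heat().stacks.get(
            self.cluster.stack_id, resolve_outputs=False)
        if stack.stack_status != self.cluster.status:
            # Heat stack statuses (CREATE_COMPLETE, CREATE_FAILED, ...)
            # mirror magnum's ClusterStatus values by name, so the
            # cluster record can be synced directly.
            self.cluster.status = stack.stack_status
            self.cluster.status_reason = stack.stack_status_reason
            self.cluster.save()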
Code example #7
    def update_cluster_status(self, context, cluster):
        """Updates the cluster status.

        This method should complete within the periodic interval (10s).

        :param context: Admin context.
        :param cluster: Cluster object.
        """
        if cluster.status == fields.ClusterStatus.CREATE_IN_PROGRESS:
            if cluster.stack_id is None:
                return

            stack_ctx = mag_ctx.make_cluster_context(cluster)
            os_clients = clients.OpenStackClients(stack_ctx)
            stack = os_clients.heat().stacks.get(
                cluster.stack_id,
                resolve_outputs=False
            )

            if stack.stack_status == fields.ClusterStatus.CREATE_COMPLETE:
                stack_ctx = mag_ctx.make_cluster_context(cluster)
                kubeconfig_path = self._get_kubeconfig(stack_ctx, cluster)
                cluster_kubectl = kubectl.KubeCtl(
                    bin="/usr/bin/kubectl",
                    global_flags="--kubeconfig %s" % kubeconfig_path
                )

                ns = self.kubectl.get("namespace %s" % cluster.uuid)
                labels = ns['metadata'].get('labels', {})

                if not labels.get('magnum.k8s.io/status'):
                    self._install_addons(cluster, cluster_kubectl, context)
                    return

                if self._workers_ready(cluster, cluster_kubectl):
                    LOG.info(
                        'Cluster %s is created successfully', cluster.uuid
                    )

                    # Update the worker addresses in the cluster from the Heat
                    # stack output.
                    stack = os_clients.heat().stacks.get(
                        cluster.stack_id,
                        resolve_outputs=True
                    )
                    template_def = self.get_template_definition()
                    c_template = conductor_utils.retrieve_cluster_template(
                        context,
                        cluster
                    )
                    template_def.update_outputs(stack, c_template, cluster)

                    cluster.status = fields.ClusterStatus.CREATE_COMPLETE
                    cluster.save()
            elif stack.stack_status in (
                fields.ClusterStatus.CREATE_FAILED,
                fields.ClusterStatus.DELETE_FAILED,
                fields.ClusterStatus.UPDATE_FAILED,
                fields.ClusterStatus.ROLLBACK_COMPLETE,
                fields.ClusterStatus.ROLLBACK_FAILED
            ):
                self._sync_cluster_status(cluster, stack)
                LOG.error('Failed to create cluster %s', cluster.uuid)

        elif cluster.status == fields.ClusterStatus.DELETE_IN_PROGRESS:
            # Check if the namespace is deleted.
            namespaces = self.kubectl.get('namespace')
            names = [n['metadata']['name'] for n in namespaces]

            if cluster.uuid not in names:
                LOG.debug(
                    "Namespace has been deleted for cluster %s",
                    cluster.uuid
                )
                stack_ctx = mag_ctx.make_cluster_context(cluster)
                os_client = clients.OpenStackClients(stack_ctx)

                try:
                    trust_manager.delete_trustee_and_trust(
                        os_client,
                        context,
                        cluster
                    )
                    cert_manager.delete_certificates_from_cluster(
                        cluster,
                        context=context
                    )
                    cert_manager.delete_client_files(cluster, context=context)
                except exception.ClusterNotFound:
                    LOG.info(
                        'The cluster %s has been deleted by others.',
                        cluster.uuid
                    )

                LOG.info('Cluster %s has been deleted.', cluster.uuid)

                cluster.status = fields.ClusterStatus.DELETE_COMPLETE
                cluster.save()
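Example #7 relies on a kubectl.KubeCtl wrapper whose get() returns parsed API objects. Below is a minimal sketch of such a wrapper, shelling out to the kubectl binary with JSON output; this class is an assumption for illustration, not magnum's actual kubectl module.

import json
import subprocess


class KubeCtl(object):
    # Illustrative wrapper; not magnum's implementation.

    def __init__(self, bin="kubectl", global_flags=""):
        self.bin = bin
        self.global_flags = global_flags

    def get(self, resource):
        # "kubectl get <resource> -o json" yields a single object for
        # "namespace <name>" and a List object for a bare "namespace".
        cmd = "%s %s get %s -o json" % (
            self.bin, self.global_flags, resource)
        result = json.loads(subprocess.check_output(cmd.split()))
        if result.get('kind') == 'List':
            return result['items']
        return result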