Example #1
def delete_complete_cluster(context, cluster_id):
    cluster_obj = objects.Cluster.get_cluster_by_id(context, cluster_id)
    target = {'tenant_id': cluster_obj.project_id}
    policy.check("cluster:delete", context, target)

    # update cluster to deleting
    objects.Cluster.update_cluster_deleting(context, cluster_id)

    # retrieve cluster nodes
    nodes = objects.Node.get_nodes_by_cluster_id(context, cluster_id)

    # create a list of node ids for the delete cluster flow
    node_ids = [node.id for node in nodes]

    # reuse the cluster record fetched for the policy check above
    cluster = cluster_obj

    # prepare and post cluster delete job to backend
    flow_kwargs = {
        'cluster_id': cluster_id,
        'node_ids': node_ids,
        'group_id': cluster.group_id,
    }

    job_args = {
        'context': context.to_dict(),
    }

    job_client = task_flow_client.get_client_instance()
    # TODO(dagnello): might be better to use request_id for job_uuid
    job_uuid = uuidutils.generate_uuid()
    job_client.post(delete_cluster,
                    job_args,
                    flow_kwargs=flow_kwargs,
                    tx_uuid=job_uuid)

    LOG.info(_LI('Delete Cluster Request Cluster ID %(cluster_id)s '
                 'Job ID %(job_id)s'),
             {'cluster_id': cluster_id, 'job_id': job_uuid})
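The first argument to job_client.post() is a flow factory. A minimal sketch of what such a factory might look like, using a hypothetical task as a stand-in for the real teardown logic in cue.taskflow.flow.delete_cluster:

from taskflow.patterns import linear_flow
from taskflow import task


class MarkClusterDeleted(task.Task):
    """Hypothetical task; the real flow tears down nodes and records."""
    def execute(self, cluster_id):
        pass


def delete_cluster(cluster_id, node_ids, group_id):
    # flow_kwargs passed to job_client.post() arrive here as keyword
    # arguments when a conductor claims the job and builds the flow.
    flow = linear_flow.Flow("delete cluster %s" % cluster_id)
    flow.add(MarkClusterDeleted())
    return flow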
Example #2
    def check(self):
        # Attempt to become the active monitor; a non-blocking acquire
        # means at most one monitor posts status-check jobs at a time.
        if not self.lock.acquired:
            self.lock.acquire(blocking=False)

        if self.lock.acquired:

            clusters = get_cluster_id_node_ids()

            taskflow_client_instance = taskflow_client.get_client_instance()
            job_list = taskflow_client_instance.joblist()

            # collect the ids of clusters that already have a
            # status-check job on the board
            cluster_ids = []
            for job in job_list:
                if 'cluster_status_check' in job.details['store']:
                    cluster_ids.append(job.details['store']['cluster_id'])

            # keep only clusters without an outstanding check job
            filtered_clusters = []
            for cluster in clusters:
                if cluster[0] not in cluster_ids:
                    filtered_clusters.append(cluster)

            for cluster in filtered_clusters:
                job_args = {
                    'cluster_status_check': '',
                    'cluster_id': cluster[0],
                    'context': {},
                    'default_rabbit_user': '******',
                    'default_rabbit_pass': cluster[0],
                }
                flow_kwargs = {
                    'cluster_id': cluster[0],
                    'node_ids': cluster[1]
                }
                taskflow_client_instance.post(check_cluster_status,
                                              job_args,
                                              flow_kwargs=flow_kwargs)
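get_cluster_id_node_ids() is not shown in this example; from the cluster[0]/cluster[1] indexing above, it evidently yields one (cluster_id, node_ids) pair per cluster. A hypothetical stub with that shape:

def get_cluster_id_node_ids():
    # Shape only, values hypothetical: one (cluster_id, [node_id, ...])
    # tuple per cluster that should be monitored.
    return [
        ('cluster-uuid-1', ['node-uuid-1', 'node-uuid-2']),
        ('cluster-uuid-2', ['node-uuid-3']),
    ]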
Example #3
    def test_check(self):
        tf_instance = tf_client.get_client_instance()
        start_job_list = tf_instance.joblist()
        start_job_list_length = len(start_job_list)

        # Test while job board is empty
        self.cue_monitor_service.check()

        end_job_list = sorted(tf_instance.joblist())
        end_job_list_length = len(end_job_list)

        self.assertEqual(
            2, end_job_list_length - start_job_list_length,
            "Exactly two status-check jobs (one per cluster) should "
            "have been posted: " + str(tf_instance.joblist()))

        # Test while job board has 2 entries
        self.cue_monitor_service.check()

        # No new jobs should have been added.
        new_end_job_list = sorted(tf_instance.joblist())

        self.assertEqual(end_job_list, new_end_job_list,
                         "Job list should not have changed")
Example #4
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Authors: Davide Agnello <*****@*****.**>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import zake.fake_client as fake_client

import cue.taskflow.client as tf_client

_zk_client = fake_client.FakeClient()
persistence = tf_client.create_persistence(client=_zk_client)
jobboard = tf_client.create_jobboard("test_board",
                                     persistence=persistence,
                                     client=_zk_client)

# note: this rebinds the module name tf_client from the imported module
# to the client instance itself
tf_client = tf_client.get_client_instance(persistence=persistence,
                                          jobboard=jobboard)
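Because the last line rebinds the name tf_client to the client object, anything importing this module gets a ready-made client backed by zake's in-memory ZooKeeper, so jobs can be posted and listed without a real ZooKeeper server. A hypothetical usage (the flow factory is a placeholder):

tf_client.post(check_cluster_status,  # placeholder flow factory
               {'context': {}})
print(len(tf_client.joblist()))  # at least 1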
Example #5
    def post(self, data):
        """Create a new Cluster.

        :param data: cluster parameters within the request body.
        """
        context = pecan.request.context
        request_data = data.as_dict()
        cluster_flavor = request_data['flavor']

        if data.size <= 0:
            raise exception.Invalid(_("Invalid cluster size provided"))
        elif data.size > CONF.api.max_cluster_size:
            raise exception.RequestEntityTooLarge(
                _("Invalid cluster size, max size is: %d") %
                CONF.api.max_cluster_size)

        if len(data.network_id) != 1:
            raise exception.Invalid(_("Invalid number of network_ids, "
                                      "exactly one is required"))

        # validate broker authentication (username/password token)
        if (data.authentication and data.authentication.type
                and data.authentication.token):
            auth_validator = auth_validate.AuthTokenValidator.validate_token(
                auth_type=data.authentication.type,
                token=data.authentication.token)
            if not auth_validator or not auth_validator.validate():
                raise exception.Invalid(
                    _("Invalid broker authentication "
                      "parameter(s)"))
        else:
            raise exception.Invalid(
                _("Missing broker authentication "
                  "parameter(s)"))

        default_rabbit_user = data.authentication.token['username']
        default_rabbit_pass = data.authentication.token['password']

        broker_name = CONF.default_broker_name

        # get the image id of default broker
        image_id = objects.BrokerMetadata.get_image_id_by_broker_name(
            context, broker_name)

        # validate cluster flavor
        self._validate_flavor(image_id, cluster_flavor)

        # convert 'network_id' from list to string type for objects/cluster
        # compatibility
        request_data['network_id'] = request_data['network_id'][0]

        # create new cluster object with required data from user
        new_cluster = objects.Cluster(**request_data)

        # create new cluster with node related data from user
        new_cluster.create(context)

        # retrieve cluster data
        cluster = get_complete_cluster(context, new_cluster.id)

        nodes = objects.Node.get_nodes_by_cluster_id(context, cluster.id)

        # create a list of node ids for the create cluster flow
        node_ids = [node.id for node in nodes]

        # prepare and post cluster create job to backend
        flow_kwargs = {
            'cluster_id': cluster.id,
            'node_ids': node_ids,
            'user_network_id': cluster.network_id[0],
            'management_network_id': CONF.management_network_id,
        }

        # generate unique erlang cookie to be used by all nodes in the new
        # cluster, erlang cookies are strings of up to 255 characters
        erlang_cookie = uuidutils.generate_uuid()

        job_args = {
            'tenant_id': new_cluster.project_id,
            'flavor': cluster.flavor,
            'image': image_id,
            'volume_size': cluster.volume_size,
            'context': context.to_dict(),
            # TODO(sputnik13): this needs to come from the create
            # request and default to a configuration value rather
            # than always using the config value
            'security_groups': [CONF.os_security_group],
            'port': CONF.rabbit_port,
            'key_name': CONF.openstack.os_key_name,
            'erlang_cookie': erlang_cookie,
            'default_rabbit_user': default_rabbit_user,
            'default_rabbit_pass': default_rabbit_pass,
        }
        job_client = task_flow_client.get_client_instance()
        # TODO(dagnello): might be better to use request_id for job_uuid
        job_uuid = uuidutils.generate_uuid()
        job_client.post(create_cluster,
                        job_args,
                        flow_kwargs=flow_kwargs,
                        tx_uuid=job_uuid)

        LOG.info(
            _LI('Create Cluster Request Cluster ID %(cluster_id)s '
                'Cluster size %(size)s network ID %(network_id)s '
                'Job ID %(job_id)s Broker name %(broker_name)s'),
            {
                "cluster_id": cluster.id,
                "size": cluster.size,
                "network_id": cluster.network_id,
                "job_id": job_uuid,
                "broker_name": broker_name
            })

        cluster.additional_information = []
        cluster.additional_information.append(
            dict(def_rabbit_user=default_rabbit_user))
        cluster.additional_information.append(
            dict(def_rabbit_pass=default_rabbit_pass))

        cluster.unset_empty_fields()
        return cluster
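For orientation, the request body this handler expects would look roughly as follows; every value is hypothetical, and the shape is inferred purely from the attribute accesses above:

request_body = {
    'size': 3,                            # 1..CONF.api.max_cluster_size
    'flavor': 'hypothetical-flavor-id',
    'volume_size': 100,
    'network_id': ['user-network-uuid'],  # exactly one entry
    'authentication': {
        'type': 'plain',
        'token': {'username': 'rabbit_user', 'password': 's3cret'},
    },
}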
Example #6
    def setUp(self):
        super(ApiTaskFlowClientTest, self).setUp()
        self.tf_client = tf_client.get_client_instance()
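A test built on this setUp can then drive the shared client directly. A minimal hypothetical case, assuming get_client_instance() returns a cached singleton as the module-level setup in Example #4 suggests:

    def test_client_is_shared(self):
        # Hypothetical test: the instance cached in setUp should be the
        # same object that a second get_client_instance() call returns.
        self.assertIs(self.tf_client, tf_client.get_client_instance())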