Ejemplo n.º 1
0
 def test_cluster_object_to_dict_with_invalid_field(self):
     """test conversion of Cluster object to dict with an invalid field."""
     # Build a Cluster object from the standard fixture, then attach a
     # field that is not part of the Cluster schema.
     fixture = self.get_test_cluster()
     cluster_obj = objects.Cluster(**fixture)
     cluster_obj['endpoint'] = '10.0.0.4:5672'
     # as_dict() must drop the unknown field entirely.
     converted = cluster_obj.as_dict()
     self.assertRaises(KeyError, lambda: converted['endpoint'])
Ejemplo n.º 2
0
    def revert(self, context, cluster_id, cluster_values, **kwargs):
        """Revert UpdateClusterRecord

        This method is executed upon failure of the UpdateClusterRecord or the
        Flow that the Task is part of.  This method will set the cluster status
        to the matching failure status identified by the status_revert_pairs
        mapping.  If a mapping does not exist, the cluster status will be set
        to ERROR.

        :param context: The request context in dict format.
        :type context: dict
        :param cluster_id: Unique ID of the cluster being updated.
        :type cluster_id: string
        :param cluster_values: Cluster field values being written; the
                               'status' entry (if present) is remapped to
                               its failure counterpart.
        :type cluster_values: dict
        :param kwargs: keyword arguments that the task required to execute; the
                       special key `result` will contain the :meth:`execute`
                       results (if any) and the special key `flow_failures`
                       will contain any failure information.
        """
        request_context = context_module.RequestContext.from_dict(context)

        # Map the attempted status to its failure counterpart; anything
        # without a mapping (including a missing status) becomes ERROR.
        # NOTE: use self rather than the class name so subclasses that
        # override status_revert_pairs are honored.
        status = cluster_values.get('status')
        if status in self.status_revert_pairs:
            cluster_values['status'] = self.status_revert_pairs[status]
        else:
            cluster_values['status'] = models.Status.ERROR

        # Extract exception information from the failed flow (if any) so
        # it is persisted with the cluster record.
        if 'flow_failures' in kwargs:
            cluster_values['error_detail'] = '\n'.join(
                [str(value) for value in kwargs['flow_failures'].values()])

        cluster = objects.Cluster(**cluster_values)
        cluster.update(request_context, cluster_id)
Ejemplo n.º 3
0
    def test_create_two_clusters_verify_time_stamps(self):
        """test time stamps times at creation and delete."""
        api_cluster_1 = test_utils.create_api_test_cluster()
        api_cluster_2 = test_utils.create_api_test_cluster()

        # Create two clusters
        data_1 = self.post_json('/clusters',
                                params=api_cluster_1,
                                headers=self.auth_headers,
                                status=202)
        data_2 = self.post_json('/clusters',
                                params=api_cluster_2,
                                headers=self.auth_headers,
                                status=202)

        # retrieve cluster objects
        cluster_1 = objects.Cluster.get_cluster_by_id(self.context,
                                                      data_1.json["id"])
        cluster_2 = objects.Cluster.get_cluster_by_id(self.context,
                                                      data_2.json["id"])

        # verify second cluster was created after first by created_at time
        self.assertTrue(cluster_2.created_at > cluster_1.created_at,
                        "Second cluster was not created after first")

        cluster_1_created_at = cluster_1.created_at

        # issue delete request cluster for cluster_1
        self.delete('/clusters/' + data_1.json["id"],
                    headers=self.auth_headers)

        # retrieve cluster_1
        cluster_1 = objects.Cluster.get_cluster_by_id(self.context,
                                                      data_1.json["id"])

        # verify updated_at time is after created_at
        self.assertTrue(cluster_1.updated_at > cluster_1.created_at,
                        "Cluster updated at time is invalid")
        # verify created_at time did not change
        self.assertEqual(cluster_1_created_at, cluster_1.created_at,
                         "Cluster created_at time has changed")

        # delete cluster_1
        cluster = objects.Cluster(deleted=True, status=models.Status.DELETED)
        cluster.update(self.context, data_1.json["id"])

        # retrieve deleted (soft) cluster
        cluster_query = db_api.model_query(
            self.context, models.Cluster,
            read_deleted=True).filter_by(id=data_1.json["id"])
        cluster_1 = cluster_query.one()

        # verify deleted_at time is after created_at
        self.assertTrue(cluster_1.deleted_at > cluster_1.created_at,
                        "Cluster deleted_at time is invalid")
        # verify updated_at time is after deleted_at
        # (message fixed: this assertion is about updated_at, not deleted_at)
        self.assertTrue(cluster_1.updated_at > cluster_1.deleted_at,
                        "Cluster updated_at time is invalid")
Ejemplo n.º 4
0
    def test_create_cluster_max_retries_multi_node_single_retry(self):
        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": self.valid_network['id'],
            "flavor": "1",
            "size": 3,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        cluster_nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                             new_cluster.id)

        # Todo: Raise the retry count once the fixture timeout issue
        # is resolved
        CONF.flow_options.create_cluster_node_vm_active_retry_count = 1

        # configure custom vm_status list: VMs never leave BUILD
        nova.VmStatusDetails.set_vm_status(['BUILD'] * 4)

        node_ids = [node.id for node in cluster_nodes]

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              self.valid_network['id'],
                              self.management_network['id'])

        try:
            engines.run(flow, store=flow_store)
        except taskflow_exc.WrappedFailure as err:
            # one failure per node, all VmBuildingException
            self.assertEqual(3, len(err._causes))
            observed = set(type(cause.exception) for cause in err._causes)
            self.assertEqual({cue_exceptions.VmBuildingException}, observed)
        except Exception as e:
            self.assertEqual(taskflow_exc.WrappedFailure, type(e))
        else:
            self.fail("Expected taskflow_exc.WrappedFailure exception.")
Ejemplo n.º 5
0
    def test_from_db_object_to_cluster_object(self):
        """test conversion of database object to Cluster object."""
        # seed a DB model with the fixture values, then hydrate an empty
        # Cluster object from it
        fixture = self.get_test_cluster()
        db_cluster = models.Cluster()
        db_cluster.update(fixture)
        cue_cluster = objects.Cluster()

        objects.Cluster._from_db_object(cue_cluster, db_cluster)
        # hydrated object must carry the original fixture values
        test_utils.validate_cluster_values(self, fixture, cue_cluster)
Ejemplo n.º 6
0
def create_object_cluster(context, **kw):
    """Create a test Cluster entry in DB via the objects API.

    :param context: request context used for the DB create.
    :param kw: overriding values for the cluster's attributes.
    :returns: the newly created Cluster object.
    """
    values = get_test_cluster(**kw)
    created = objects.Cluster(**values)
    created.create(context)
    return created
Ejemplo n.º 7
0
 def test_get_object_changes(self):
     """test Cluster object changes by setting a valid field."""
     # mutate one schema field and verify obj_get_changes() reports
     # exactly that field
     obj = objects.Cluster()
     obj['status'] = models.Status.DELETING
     changes = obj.obj_get_changes()
     # the changed field is present with its new value
     self.assertEqual(models.Status.DELETING,
                      changes['status'])
     # an untouched field is absent from the change set
     self.assertRaises(KeyError, lambda: changes['name'])
Ejemplo n.º 8
0
    def test_update_cluster_status_fail(self):
        """Verifies UpdateClusterRecord task failed flow scenario.

        This test simulates a failed flow with UpdateClusterRecord task
        followed by CreatePort task.  The CreateFlow task will be configured to
        fail which will in-turn fail the overall flow.  The failed flow will
        trigger UpdateClusterRecord task's revert method which will set Cluster
        status to ERROR state.
        """

        # retrieve neutron client API class
        neutron_client = client.neutron_client()

        # seed the DB with a cluster for this test
        new_cluster = objects.Cluster(
            project_id=self.context.tenant_id,
            name="RabbitCluster",
            network_id=str(uuid.uuid4()),
            flavor="1",
            size=3,
        )
        new_cluster.create(self.context)

        # a freshly created cluster must start out BUILDING
        self.assertEqual(models.Status.BUILDING, new_cluster.status,
                         "Invalid status")

        # provide the required task inputs via the shared task store
        task_store = UpdateClusterRecordTest.task_store
        task_store['context'] = self.context.to_dict()
        task_store['cluster_id'] = new_cluster.id

        # build a flow whose second task (CreatePort) is expected to fail
        self.flow = linear_flow.Flow(name="update cluster status").add(
            update_cluster_status.UpdateClusterRecord(
                name="get RabbitMQ status",
                inject={'cluster_values': {
                    'status': models.Status.BUILDING
                }}),
            neutron_task.CreatePort(os_client=neutron_client,
                                    provides='neutron_port_id'))

        # running the flow should surface the CreatePort failure
        self.assertRaises(exceptions.NetworkNotFoundClient,
                          engines.run,
                          self.flow,
                          store=task_store)

        # the revert path must have moved the cluster to ERROR
        cluster_after = objects.Cluster.get_cluster_by_id(
            self.context, new_cluster.id)
        self.assertEqual(models.Status.ERROR, cluster_after.status,
                         "Invalid status")
Ejemplo n.º 9
0
    def test_from_db_object_to_cluster_object_invalid_field(self):
        """test conversion of database object to Cluster object with an

        invalid field.
        """
        # seed the DB model with an extra field the Cluster schema lacks
        fixture = self.get_test_cluster()
        fixture['endpoint'] = '10.0.0.4:5672'
        db_cluster = models.Cluster()
        db_cluster.update(fixture)

        # hydration must silently drop the unknown field
        cue_cluster = objects.Cluster()
        objects.Cluster._from_db_object(cue_cluster, db_cluster)
        self.assertRaises(AttributeError, lambda: cue_cluster.endpoint)
Ejemplo n.º 10
0
def set_up_test_clusters(context, status, cluster_id, size):
    """Create a test cluster, then transition it to the given status."""
    new_cluster = objects.Cluster(
        id=cluster_id,
        project_id="test_project_id",
        name="test_cluster",
        network_id="test_uuid",
        flavor="1",
        size=size,
    )
    new_cluster.create(context)

    # move the freshly created cluster into the requested status
    new_cluster.status = status
    new_cluster.update(context, cluster_id)
Ejemplo n.º 11
0
    def test_create_cluster_invalid_user_network(self):
        """Create flow with a bogus user network must fail with Neutron error."""
        invalid_network_id = str(uuid.uuid4())
        cluster_size = 3

        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            'image': self.valid_image.id,
            'flavor': self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": invalid_network_id,
            "flavor": "1",
            "size": cluster_size,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        node_ids = [node.id for node in nodes]

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              invalid_network_id,
                              self.management_network['id'])

        try:
            engines.run(flow, store=flow_store)
        except neutron_exceptions.NeutronClientException as err:
            # When an incorrect user network ID is given, the neutron client
            # returns a NeutronClientException.
            self.assertEqual(err.message,
                             "Network " + str(invalid_network_id) +
                             " could not be found.")
        else:
            # message fixed: this test expects NeutronClientException,
            # not WrappedFailure
            self.fail(
                "Expected neutron_exceptions.NeutronClientException exception.")
Ejemplo n.º 12
0
    def test_create_cluster_anti_affinity(self):
        self.flags(cluster_node_anti_affinity=True, group="taskflow")

        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            'image': self.valid_image.id,
            'flavor': self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 3,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        node_ids = [
            node.id
            for node in objects.Node.get_nodes_by_cluster_id(self.context,
                                                             new_cluster.id)
        ]

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              self.valid_network['id'],
                              self.management_network['id'])

        engines.run(flow, store=flow_store)

        nodes_after = objects.Node.get_nodes_by_cluster_id(self.context,
                                                           new_cluster.id)

        # with anti-affinity enabled, every node must land on a distinct host
        seen_hosts = set()
        for node in nodes_after:
            host_id = self.nova_client.servers.get(node.instance_id).host_id
            self.assertNotIn(host_id, seen_hosts)
            seen_hosts.add(host_id)
Ejemplo n.º 13
0
    def test_create_cluster_size_three(self):
        """Tests create cluster of size three from Cluster objects API."""
        cluster_values = func_utils.get_test_cluster(size=3)
        new_cluster = objects.Cluster(**cluster_values)
        test_utils.validate_cluster_values(self, cluster_values,
                                           new_cluster)
        new_cluster.create(self.context)
        db_cluster = self.dbapi.get_cluster_by_id(self.context, new_cluster.id)
        test_utils.validate_cluster_values(self, new_cluster, db_cluster)

        # every node in the cluster inherits the cluster's flavor and status
        for node in self.dbapi.get_nodes_in_cluster(self.context,
                                                    db_cluster.id):
            self.assertEqual(db_cluster.id, node.cluster_id)
            self.assertEqual(db_cluster.flavor, node.flavor)
            self.assertEqual(db_cluster.status, node.status)
Ejemplo n.º 14
0
    def test_create_cluster_size_one(self):
        """Tests create cluster from Cluster objects API."""
        cluster_values = func_utils.get_test_cluster(size=1)
        new_cluster = objects.Cluster(**cluster_values)
        test_utils.validate_cluster_values(self, cluster_values,
                                           new_cluster)
        new_cluster.create(self.context)
        db_cluster = self.dbapi.get_cluster_by_id(self.context, new_cluster.id)
        test_utils.validate_cluster_values(self, new_cluster, db_cluster)
        nodes = self.dbapi.get_nodes_in_cluster(self.context,
                                                db_cluster.id)

        # exactly one node should exist for a size-one cluster
        self.assertEqual(1, len(nodes))
        only_node = nodes[0]
        self.assertEqual(db_cluster.id, only_node.cluster_id)
        self.assertEqual(db_cluster.flavor, only_node.flavor)
        self.assertEqual(db_cluster.status, only_node.status)
Ejemplo n.º 15
0
    def test_create_cluster_max_retries_single_node(self):
        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": self.valid_network['id'],
            "flavor": "1",
            "size": 1,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        cluster_nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                             new_cluster.id)

        CONF.flow_options.create_cluster_node_vm_active_retry_count = 3

        # configure custom vm_status list: VM never leaves BUILD
        nova.VmStatusDetails.set_vm_status(['BUILD'] * 4)

        node_ids = [node.id for node in cluster_nodes]

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              self.valid_network['id'],
                              self.management_network['id'])

        # retries exhaust while the VM is still building
        self.assertRaises(cue_exceptions.VmBuildingException,
                          engines.run, flow, store=flow_store)
Ejemplo n.º 16
0
    def test_create_cluster_overlimit(self):
        # snapshot existing VMs and ports so rollback can be verified
        vm_list = self.nova_client.servers.list()
        port_list = self.neutron_client.list_ports()

        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            'image': self.valid_image.id,
            'flavor': self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": self.valid_network['id'],
            "flavor": "1",
            "size": 10,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        node_ids = [
            node.id
            for node in objects.Node.get_nodes_by_cluster_id(self.context,
                                                             new_cluster.id)
        ]

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              self.valid_network['id'],
                              self.management_network['id'])

        # the oversized cluster must fail the flow...
        self.assertRaises(taskflow_exc.WrappedFailure, engines.run,
                          flow, store=flow_store)

        # ...and roll back without leaking any VMs or ports
        self.assertEqual(vm_list, self.nova_client.servers.list())
        self.assertEqual(port_list, self.neutron_client.list_ports())
Ejemplo n.º 17
0
    def test_cluster_api_to_object_to_api(self):
        """Tests Cluster api object conversion to Cluster object and back

        to api object.
        """
        # start from a fully populated API cluster representation
        api_cluster = func_utils.create_api_test_cluster_all().as_dict()
        # the objects layer stores a single network id, not a list
        api_cluster['network_id'] = api_cluster['network_id'][0]
        object_cluster = objects.Cluster(**api_cluster).as_dict()
        test_utils.validate_cluster_values(self, api_cluster, object_cluster)

        # restore the list form expected by the API layer and convert back
        object_cluster['network_id'] = [object_cluster['network_id']]
        api_cluster_2 = cluster.Cluster(**object_cluster).as_dict()
        # the round-trip must reproduce the initial API representation
        test_utils.validate_cluster_values(self, api_cluster, api_cluster_2)
Ejemplo n.º 18
0
def create_db_test_cluster_from_objects_api(context, **kw):
    """Create test Cluster entry in DB from objects API and return Cluster

    DB object.  Function to be used to create test Cluster objects in the
    database.

    :param kw: kwargs with overriding values for cluster's attributes.
    :returns: Test Cluster DB object.

    """
    test_cluster = get_test_cluster(**kw)

    new_cluster = objects.Cluster(
        name=test_cluster['name'],
        network_id=test_cluster['network_id'],
        flavor=test_cluster['flavor'],
        size=test_cluster['size'],
        volume_size=test_cluster['volume_size'],
    )
    new_cluster.create(context)

    # attach one AMQP endpoint to every node, plus a second endpoint on
    # every odd-indexed node
    cluster_nodes = objects.Node.get_nodes_by_cluster_id(
        context, new_cluster.id)
    for index, node in enumerate(cluster_nodes):
        endpoint_value = {
            'node_id': node.id,
            'uri': '10.0.0.' + str(index) + ':5672',
            'type': 'AMQP'
        }
        objects.Endpoint(**endpoint_value).create(context)
        if index % 2:
            endpoint_value['uri'] = '10.0.' + str(index + 1) + '.0:5672'
            objects.Endpoint(**endpoint_value).create(context)

    return new_cluster
Ejemplo n.º 19
0
    def execute(self,
                context,
                cluster_id,
                cluster_values,
                project_only=True,
                **kwargs):
        """Main execute method which will update the cluster record in the DB

        :param context: The request context in dict format
        :type context: dict
        :param cluster_id: Unique ID for the cluster
        :type cluster_id: string
        :param cluster_values: Cluster field values to persist
        :type cluster_values: dict
        :param project_only: when True, restrict the update to the caller's
                             project scope
        :type project_only: bool
        :param kwargs: additional keyword arguments forwarded to the cluster
                       update call
        """
        # rebuild a full RequestContext from its serialized (dict) form
        request_context = context_module.RequestContext.from_dict(context)
        cluster = objects.Cluster(**cluster_values)
        cluster.update(request_context,
                       cluster_id,
                       project_only=project_only,
                       **kwargs)
Ejemplo n.º 20
0
    def test_cluster_db_to_object_to_db(self):
        """Tests Cluster db object conversion to Cluster object and back

        to db object.
        """
        # build a soft-deleted DB model and hydrate a Cluster object from it
        db_cluster_object = func_utils.create_db_test_cluster_model_object(
            self.context, deleted_at=timeutils.utcnow(), deleted=True)
        object_cluster = objects.Cluster._from_db_object(
            objects.Cluster(), db_cluster_object)

        test_utils.validate_cluster_values(self, db_cluster_object,
                                           object_cluster)
        self.validate_cluster_deleted_fields(db_cluster_object, object_cluster)

        # round-trip: feed the object's change set back into a fresh DB model
        db_round_trip = models.Cluster()
        db_round_trip.update(object_cluster.obj_get_changes())
        test_utils.validate_cluster_values(self, db_cluster_object,
                                           db_round_trip)
        self.validate_cluster_deleted_fields(db_cluster_object,
                                             db_round_trip)
Ejemplo n.º 21
0
    def test_update_cluster_status(self):
        """Verifies UpdateClusterRecord task directly."""

        # seed the DB with a cluster for this test
        new_cluster = objects.Cluster(
            project_id=self.context.tenant_id,
            name="RabbitCluster",
            network_id=str(uuid.uuid4()),
            flavor="1",
            size=3,
        )
        new_cluster.create(self.context)

        # a freshly created cluster must start out BUILDING
        self.assertEqual(models.Status.BUILDING, new_cluster.status,
                         "Invalid status")

        # provide the required task inputs via the shared task store
        task_store = UpdateClusterRecordTest.task_store
        task_store['context'] = self.context.to_dict()
        task_store['cluster_id'] = new_cluster.id

        # build a flow containing only the "UpdateClusterRecord" task
        self.flow = linear_flow.Flow(name="update cluster status").add(
            update_cluster_status.UpdateClusterRecord(
                name="get RabbitMQ status",
                inject={'cluster_values': {
                    'status': models.Status.ACTIVE
                }}))

        # run the task through the engine
        engines.run(self.flow, store=task_store)

        # the task should have transitioned the cluster to ACTIVE
        cluster_after = objects.Cluster.get_cluster_by_id(
            self.context, new_cluster.id)
        self.assertEqual(models.Status.ACTIVE, cluster_after.status,
                         "Invalid status")
Ejemplo n.º 22
0
 def test_get_object_changes_with_invalid_field(self):
     """test Cluster object changes by setting an invalid field."""
     # setting a field outside the schema must not register as a change
     obj = objects.Cluster()
     obj['endpoint'] = '10.0.0.4:5672'
     changes = obj.obj_get_changes()
     # the change set stays empty
     self.assertFalse(bool(changes))
Ejemplo n.º 23
0
    def test_check_cluster_status_active(self):
        # inputs consumed by the create-cluster flow
        flow_store_create = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }
        # inputs consumed by the status-check flow
        flow_store_check = {
            "context": self.context.to_dict(),
            "default_rabbit_user": "******",
            "default_rabbit_pass": "******"
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 2,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)
        node_ids = []
        for node in nodes:
            node_ids.append(str(node.id))

        # run the create flow first so the cluster reaches ACTIVE
        flow_create = create_cluster(new_cluster.id, node_ids,
                                     self.valid_network['id'],
                                     self.management_network['id'])
        engines.run(flow_create, store=flow_store_create)

        cluster_before = objects.Cluster.get_cluster_by_id(
            self.context, new_cluster.id)
        self.assertEqual(models.Status.ACTIVE, cluster_before.status,
                         "Invalid status for cluster")

        # canned HTTP responses, consumed in order by the status checks.
        # NOTE(review): appears to be two responses per node (status +
        # vhost list), with the second node reporting "not-ok" -- confirm
        # ordering against check_cluster_status before relying on it.
        urllib2_fixture.Urllib2ResultDetails.set_urllib2_result([
            '{"status": "ok"}',
            '[{"name": "/"}]',
            '{"status": "not-ok"}',
            '[{"name": "/"}]',
        ])
        flow_check_status = check_cluster_status(str(new_cluster.id), node_ids)
        result = engines.run(flow_check_status, store=flow_store_check)

        # the overall cluster status remains ACTIVE after the check
        cluster_after = objects.Cluster.get_cluster_by_id(
            self.context, new_cluster.id)
        self.assertEqual(models.Status.ACTIVE, cluster_after.status,
                         "Invalid status for cluster")

        nodes_after = objects.Node.get_nodes_by_cluster_id(
            self.context, new_cluster.id)

        # each node's stored status must match what the check flow reported
        for i, node in enumerate(nodes_after):
            self.assertEqual(result["node_values_%d" % i]["status"],
                             node.status, "Invalid status for node %d" % i)
Ejemplo n.º 24
0
    def post(self, data):
        """Create a new Cluster.

        :param data: cluster parameters within the request body.
        :returns: the created cluster representation (with empty fields
                  unset and broker credentials echoed back).
        :raises exception.Invalid: on bad size, network_id list, or broker
                                   authentication parameters.
        :raises exception.RequestEntityTooLarge: when size exceeds the
                                                 configured maximum.
        """
        context = pecan.request.context
        request_data = data.as_dict()
        cluster_flavor = request_data['flavor']

        if data.size <= 0:
            raise exception.Invalid(_("Invalid cluster size provided"))
        elif data.size > CONF.api.max_cluster_size:
            raise exception.RequestEntityTooLarge(
                _("Invalid cluster size, max size is: %d") %
                CONF.api.max_cluster_size)

        if len(data.network_id) > 1:
            raise exception.Invalid(_("Invalid number of network_id's"))

        # extract username/password
        if (data.authentication and data.authentication.type
                and data.authentication.token):
            auth_validator = auth_validate.AuthTokenValidator.validate_token(
                auth_type=data.authentication.type,
                token=data.authentication.token)
            if not auth_validator or not auth_validator.validate():
                raise exception.Invalid(
                    _("Invalid broker authentication "
                      "parameter(s)"))
        else:
            raise exception.Invalid(
                _("Missing broker authentication "
                  "parameter(s)"))

        default_rabbit_user = data.authentication.token['username']
        default_rabbit_pass = data.authentication.token['password']

        broker_name = CONF.default_broker_name

        # get the image id of default broker
        image_id = objects.BrokerMetadata.get_image_id_by_broker_name(
            context, broker_name)

        # validate cluster flavor
        self._validate_flavor(image_id, cluster_flavor)

        # convert 'network_id' from list to string type for objects/cluster
        # compatibility
        request_data['network_id'] = request_data['network_id'][0]

        # create new cluster object with required data from user
        new_cluster = objects.Cluster(**request_data)

        # create new cluster with node related data from user
        new_cluster.create(context)

        # retrieve cluster data
        cluster = get_complete_cluster(context, new_cluster.id)

        nodes = objects.Node.get_nodes_by_cluster_id(context, cluster.id)

        # create list with node id's for create cluster flow
        node_ids = [node.id for node in nodes]

        # prepare and post cluster create job to backend
        flow_kwargs = {
            'cluster_id': cluster.id,
            'node_ids': node_ids,
            'user_network_id': cluster.network_id[0],
            'management_network_id': CONF.management_network_id,
        }

        # generate unique erlang cookie to be used by all nodes in the new
        # cluster, erlang cookies are strings of up to 255 characters
        erlang_cookie = uuidutils.generate_uuid()

        job_args = {
            'tenant_id': new_cluster.project_id,
            'flavor': cluster.flavor,
            'image': image_id,
            'volume_size': cluster.volume_size,
            'context': context.to_dict(),
            # TODO(sputnik13: this needs to come from the create request
            # and default to a configuration value rather than always using
            # config value
            'security_groups': [CONF.os_security_group],
            # NOTE: there was previously a duplicate 'port' key ('5672')
            # earlier in this dict that was silently overridden; only the
            # configured value is kept.
            'port': CONF.rabbit_port,
            'key_name': CONF.openstack.os_key_name,
            'erlang_cookie': erlang_cookie,
            'default_rabbit_user': default_rabbit_user,
            'default_rabbit_pass': default_rabbit_pass,
        }
        job_client = task_flow_client.get_client_instance()
        # TODO(dagnello): might be better to use request_id for job_uuid
        job_uuid = uuidutils.generate_uuid()
        job_client.post(create_cluster,
                        job_args,
                        flow_kwargs=flow_kwargs,
                        tx_uuid=job_uuid)

        LOG.info(
            _LI('Create Cluster Request Cluster ID %(cluster_id)s '
                'Cluster size %(size)s network ID %(network_id)s '
                'Job ID %(job_id)s Broker name %(broker_name)s') %
            ({
                "cluster_id": cluster.id,
                "size": cluster.size,
                "network_id": cluster.network_id,
                "job_id": job_uuid,
                "broker_name": broker_name
            }))

        # echo broker credentials back to the caller
        cluster.additional_information = []
        cluster.additional_information.append(
            dict(def_rabbit_user=default_rabbit_user))
        cluster.additional_information.append(
            dict(def_rabbit_pass=default_rabbit_pass))

        cluster.unset_empty_fields()
        return cluster
Ejemplo n.º 25
0
 def test_cluster_object_generation(self):
     """Verify a Cluster object built from a dict carries the dict's values."""
     source_values = func_utils.get_test_cluster()
     generated_cluster = objects.Cluster(**source_values)
     # helper asserts every field in source_values matches the object
     test_utils.validate_cluster_values(self, source_values, generated_cluster)
Ejemplo n.º 26
0
 def test_cluster_object_to_dict(self):
     """Check that as_dict() round-trips a Cluster object's values."""
     source_values = self.get_test_cluster()
     cluster_object = objects.Cluster(**source_values)
     round_tripped = cluster_object.as_dict()
     # helper asserts the dict produced by as_dict matches the source values
     test_utils.validate_cluster_values(self, source_values, round_tripped)
Ejemplo n.º 27
0
    def test_delete_cluster(self):
        """Exercise the full create-then-delete cluster flow.

        Creates a three-node cluster through the create flow, checks that
        the cluster, its nodes and their endpoints come up ACTIVE, then
        runs the delete flow and checks the cluster record, node statuses
        and endpoints are all torn down.
        """
        create_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }
        delete_store = {
            "context": self.context.to_dict(),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 3,
        }

        cluster = objects.Cluster(**cluster_values)
        cluster.create(self.context)

        # the flow factories expect node ids as strings
        node_ids = [str(node.id)
                    for node in objects.Node.get_nodes_by_cluster_id(
                        self.context, cluster.id)]

        create_flow = create_cluster(cluster.id,
                                     node_ids,
                                     self.valid_network['id'],
                                     self.management_network['id'])
        result = engines.run(create_flow, store=create_store)

        nodes_after = objects.Node.get_nodes_by_cluster_id(self.context,
                                                           cluster.id)
        cluster_after = objects.Cluster.get_cluster_by_id(self.context,
                                                          cluster.id)

        self.assertEqual(models.Status.ACTIVE, cluster_after.status,
                         "Invalid status for cluster")

        for index, node in enumerate(nodes_after):
            self.assertEqual(models.Status.ACTIVE,
                             result["vm_status_%d" % index])
            # remember the VM for teardown bookkeeping
            self.new_vm_list.append(result["vm_id_%d" % index])
            self.assertEqual(models.Status.ACTIVE, node.status,
                             "Invalid status for node %d" % index)

            endpoints = objects.Endpoint.get_endpoints_by_node_id(
                self.context, node.id)
            self.assertEqual(1, len(endpoints), "invalid number of endpoints "
                                                "received")
            endpoint = endpoints.pop()
            self.assertEqual(node.id, endpoint.node_id, "invalid endpoint node"
                                                        " id reference")

            expected_uri = result['vm_user_ip_' + str(index)] + ':' + self.port
            self.assertEqual(expected_uri, endpoint.uri,
                             "invalid endpoint uri")
            self.assertEqual('AMQP', endpoint.type, "invalid endpoint type")

        delete_flow = delete_cluster(str(cluster.id), node_ids,
                                     cluster_after.group_id)
        result = engines.run(delete_flow, store=delete_store)

        nodes_after = objects.Node.get_nodes_by_cluster_id(self.context,
                                                           cluster.id)

        # the cluster record itself must be gone after the delete flow
        self.assertRaises(exception.NotFound,
                          objects.Cluster.get_cluster_by_id,
                          self.context,
                          cluster.id)

        for index, node in enumerate(nodes_after):
            self.new_vm_list.remove(result["vm_id_%d" % index])
            self.assertEqual(models.Status.DELETED, node.status,
                             "Invalid status for node %d" % index)
            endpoints = objects.Endpoint.get_endpoints_by_node_id(
                self.context, node.id)
            self.assertEqual(0, len(endpoints), "endpoints were not deleted")
Ejemplo n.º 28
0
    def test_delete_cluster_anti_affinity(self):
        """Create/delete cluster flow check with anti-affinity enabled.

        Same shape as the basic delete test but with the taskflow
        anti-affinity option on; additionally verifies the nova server
        group is removed once the cluster is deleted.
        """
        self.flags(cluster_node_anti_affinity=True, group="taskflow")

        create_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }
        delete_store = {
            "context": self.context.to_dict(),
        }

        cluster = objects.Cluster(
            project_id=self.context.tenant_id,
            name="RabbitCluster",
            network_id=str(uuid.uuid4()),
            flavor="1",
            size=3,
        )
        cluster.create(self.context)

        # the flow factories expect node ids as strings
        node_ids = [str(node.id)
                    for node in objects.Node.get_nodes_by_cluster_id(
                        self.context, cluster.id)]

        create_flow = create_cluster(cluster.id,
                                     node_ids,
                                     self.valid_network['id'],
                                     self.management_network['id'])
        engines.run(create_flow, store=create_store)

        cluster_after = objects.Cluster.get_cluster_by_id(self.context,
                                                          cluster.id)
        self.assertEqual(models.Status.ACTIVE, cluster_after.status,
                         "Invalid status for cluster")

        delete_flow = delete_cluster(str(cluster.id), node_ids,
                                     cluster_after.group_id)
        engines.run(delete_flow, store=delete_store)

        self.assertRaises(exception.NotFound,
                          objects.Cluster.get_cluster_by_id,
                          self.context,
                          cluster.id)

        # verify server group is not found
        self.assertRaises(nova_exc.NotFound,
                          self.nova_client.server_groups.get,
                          cluster_after.group_id)