Example no. 1
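These snippets are methods lifted from a single functional test class for Cue's taskflow flows. As a rough orientation, the sketch below lists the kind of module-level setup they appear to rely on; the taskflow, novaclient, neutronclient and oslo.config imports are standard for those libraries, while every cue-specific path and fixture name is only an assumption about the surrounding project and may differ from the real layout.

import uuid

from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exc
from oslo_config import cfg
from taskflow import engines
import taskflow.exceptions as taskflow_exc

# Assumed project-internal imports (names inferred from the snippets,
# not verified against the actual source tree):
# from cue.common import exception
# from cue import exceptions as cue_exceptions
# from cue.db.sqlalchemy import models
# from cue import objects
# from cue.taskflow.flow.check_cluster_status import check_cluster_status
# from cue.taskflow.flow.create_cluster import create_cluster
# from cue.taskflow.flow.delete_cluster import delete_cluster
# from cue.tests.functional.fixtures import nova
# from cue.tests.functional.fixtures import urllib2_fixture

CONF = cfg.CONF  # assumed; the tests adjust CONF.flow_options at runtime

# Each test method also uses fixtures set up by its base test class:
# self.context, self.port, self.valid_network, self.management_network,
# self.valid_image, self.valid_flavor, self.nova_client, self.neutron_client,
# self.new_vm_list, and the self.flags() config helper.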
    def test_create_cluster_max_retries_multi_node_single_retry(self):
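        """A three-node create flow whose VMs never leave BUILD should, with
        only a single retry allowed, fail with a WrappedFailure carrying a
        VmBuildingException for each node.
        """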
        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": self.valid_network['id'],
            "flavor": "1",
            "size": 3,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        # TODO: raise the retry count once the fixture timeout issue
        # is resolved
        CONF.flow_options.create_cluster_node_vm_active_retry_count = 1

        # configure custom vm_status list
        nova.VmStatusDetails.set_vm_status(['BUILD',
                                            'BUILD',
                                            'BUILD',
                                            'BUILD'])

        node_ids = []
        for node in nodes:
            node_ids.append(node.id)

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              self.valid_network['id'],
                              self.management_network['id'])

        try:
            engines.run(flow, store=flow_store)
        except taskflow_exc.WrappedFailure as err:
            self.assertEqual(3, len(err._causes))
            # every cause should be a VmBuildingException
            exc_types = set(type(c.exception) for c in err._causes)
            self.assertEqual({cue_exceptions.VmBuildingException}, exc_types)
        except Exception as e:
            self.assertEqual(taskflow_exc.WrappedFailure, type(e))
        else:
            self.fail("Expected taskflow_exc.WrappedFailure exception.")
Example no. 2
    def test_create_cluster_anti_affinity(self):
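        """With anti-affinity enabled, each node of a three-node cluster
        should be scheduled onto a different hypervisor host.
        """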
        self.flags(cluster_node_anti_affinity=True, group="taskflow")

        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            'image': self.valid_image.id,
            'flavor': self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 3,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        node_ids = []
        for node in nodes:
            node_ids.append(node.id)

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              self.valid_network['id'],
                              self.management_network['id'])

        engines.run(flow, store=flow_store)

        nodes_after = objects.Node.get_nodes_by_cluster_id(self.context,
                                                           new_cluster.id)

        # verify that each cluster node was placed on a different host
        host_ids = []
        for node in nodes_after:
            host_id = self.nova_client.servers.get(node.instance_id).host_id
            self.assertNotIn(host_id, host_ids)
            host_ids.append(host_id)
Example no. 3
    def test_create_cluster_invalid_user_network(self):
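        """Running the create flow against a nonexistent user network should
        surface the neutron client's "could not be found" error.
        """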
        invalid_network_id = str(uuid.uuid4())
        cluster_size = 3

        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            'image': self.valid_image.id,
            'flavor': self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": invalid_network_id,
            "flavor": "1",
            "size": cluster_size,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        node_ids = []
        for node in nodes:
            node_ids.append(node.id)

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              invalid_network_id,
                              self.management_network['id'])

        try:
            engines.run(flow, store=flow_store)
        except neutron_exceptions.NeutronClientException as err:
            # When an invalid user network ID is given, the neutron client
            # raises a NeutronClientException.
            self.assertEqual("Network " + str(invalid_network_id) +
                             " could not be found.",
                             err.message)
        else:
            self.fail("Expected NeutronClientException.")
Example no. 4
    def test_create_cluster_max_retries_single_node(self):
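        """A single-node create flow whose VM never leaves BUILD should raise
        VmBuildingException once the retry count is exhausted.
        """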
        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": self.valid_network['id'],
            "flavor": "1",
            "size": 1,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        CONF.flow_options.create_cluster_node_vm_active_retry_count = 3

        # configure custom vm_status list
        nova.VmStatusDetails.set_vm_status(['BUILD',
                                            'BUILD',
                                            'BUILD',
                                            'BUILD'])

        node_ids = []
        for node in nodes:
            node_ids.append(node.id)

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              self.valid_network['id'],
                              self.management_network['id'])

        self.assertRaises(cue_exceptions.VmBuildingException,
                          engines.run, flow, store=flow_store)
Example no. 5
    def test_create_cluster_overlimit(self):
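        """An oversized ten-node create flow should fail with a WrappedFailure
        and roll back cleanly, leaving the VM and port lists unchanged.
        """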
        vm_list = self.nova_client.servers.list()
        port_list = self.neutron_client.list_ports()

        flow_store = {
            "tenant_id": str(self.valid_network['tenant_id']),
            'image': self.valid_image.id,
            'flavor': self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": self.valid_network['id'],
            "flavor": "1",
            "size": 10,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        node_ids = []
        for node in nodes:
            node_ids.append(node.id)

        flow = create_cluster(new_cluster.id,
                              node_ids,
                              self.valid_network['id'],
                              self.management_network['id'])

        self.assertRaises(taskflow_exc.WrappedFailure, engines.run,
                          flow, store=flow_store)

        self.assertEqual(vm_list, self.nova_client.servers.list())
        self.assertEqual(port_list, self.neutron_client.list_ports())
Example no. 6
    def test_delete_cluster(self):
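        """Create a three-node cluster, verify its nodes and endpoints become
        ACTIVE, then delete it and verify the cluster record, node statuses
        and endpoints are cleaned up.
        """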
        flow_store_create = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }
        flow_store_delete = {
            "context": self.context.to_dict(),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 3,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        node_ids = []
        for node in nodes:
            node_ids.append(str(node.id))

        flow_create = create_cluster(new_cluster.id,
                                     node_ids,
                                     self.valid_network['id'],
                                     self.management_network['id'])

        result = engines.run(flow_create, store=flow_store_create)

        nodes_after = objects.Node.get_nodes_by_cluster_id(self.context,
                                                           new_cluster.id)

        cluster_after = objects.Cluster.get_cluster_by_id(self.context,
                                                          new_cluster.id)

        self.assertEqual(models.Status.ACTIVE, cluster_after.status,
                         "Invalid status for cluster")

        for i, node in enumerate(nodes_after):
            self.assertEqual(models.Status.ACTIVE, result["vm_status_%d" % i])
            self.new_vm_list.append(result["vm_id_%d" % i])
            self.assertEqual(models.Status.ACTIVE, node.status,
                             "Invalid status for node %d" % i)
            endpoints = objects.Endpoint.get_endpoints_by_node_id(self.context,
                                                                  node.id)
            self.assertEqual(1, len(endpoints), "invalid number of endpoints "
                                                "received")
            endpoint = endpoints.pop()
            self.assertEqual(node.id, endpoint.node_id, "invalid endpoint node"
                                                        " id reference")

            uri = result['vm_user_ip_' + str(i)]
            uri += ':' + self.port
            self.assertEqual(uri, endpoint.uri, "invalid endpoint uri")
            self.assertEqual('AMQP', endpoint.type, "invalid endpoint type")

        flow_delete = delete_cluster(str(new_cluster.id), node_ids,
                                     cluster_after.group_id)
        result = engines.run(flow_delete, store=flow_store_delete)

        nodes_after = objects.Node.get_nodes_by_cluster_id(self.context,
                                                           new_cluster.id)

        self.assertRaises(exception.NotFound,
                          objects.Cluster.get_cluster_by_id,
                          self.context,
                          new_cluster.id)

        for i, node in enumerate(nodes_after):
            self.new_vm_list.remove(result["vm_id_%d" % i])
            self.assertEqual(models.Status.DELETED, node.status,
                             "Invalid status for node %d" % i)
            endpoints = objects.Endpoint.get_endpoints_by_node_id(self.context,
                                                                  node.id)
            self.assertEqual(0, len(endpoints), "endpoints were not deleted")
Example no. 7
    def test_delete_cluster_anti_affinity(self):
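        """Create and delete an anti-affinity cluster, verifying that the
        backing nova server group is removed along with the cluster.
        """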
        self.flags(cluster_node_anti_affinity=True, group="taskflow")

        flow_store_create = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }
        flow_store_delete = {
            "context": self.context.to_dict(),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 3,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        node_ids = []
        for node in nodes:
            node_ids.append(str(node.id))

        flow_create = create_cluster(new_cluster.id,
                                     node_ids,
                                     self.valid_network['id'],
                                     self.management_network['id'])

        engines.run(flow_create, store=flow_store_create)

        cluster_after = objects.Cluster.get_cluster_by_id(self.context,
                                                          new_cluster.id)

        self.assertEqual(models.Status.ACTIVE, cluster_after.status,
                         "Invalid status for cluster")

        flow_delete = delete_cluster(str(new_cluster.id), node_ids,
                                     cluster_after.group_id)
        engines.run(flow_delete, store=flow_store_delete)

        self.assertRaises(exception.NotFound,
                          objects.Cluster.get_cluster_by_id,
                          self.context,
                          new_cluster.id)

        # verify server group is not found
        self.assertRaises(nova_exc.NotFound,
                          self.nova_client.server_groups.get,
                          cluster_after.group_id)
Example no. 8
    def test_check_cluster_status_active(self):
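        """Run the check_cluster_status flow against an ACTIVE two-node
        cluster with stubbed HTTP (urllib2) status responses and verify each
        node's recorded status matches the value reported by the flow.
        """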
        flow_store_create = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": '******',
            "default_rabbit_pass": str(uuid.uuid4()),
        }
        flow_store_check = {
            "context": self.context.to_dict(),
            "default_rabbit_user": "******",
            "default_rabbit_pass": "******"
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 2,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)
        node_ids = []
        for node in nodes:
            node_ids.append(str(node.id))

        flow_create = create_cluster(new_cluster.id, node_ids,
                                     self.valid_network['id'],
                                     self.management_network['id'])
        engines.run(flow_create, store=flow_store_create)

        cluster_before = objects.Cluster.get_cluster_by_id(
            self.context, new_cluster.id)
        self.assertEqual(models.Status.ACTIVE, cluster_before.status,
                         "Invalid status for cluster")

        urllib2_fixture.Urllib2ResultDetails.set_urllib2_result([
            '{"status": "ok"}',
            '[{"name": "/"}]',
            '{"status": "not-ok"}',
            '[{"name": "/"}]',
        ])
        flow_check_status = check_cluster_status(str(new_cluster.id), node_ids)
        result = engines.run(flow_check_status, store=flow_store_check)

        cluster_after = objects.Cluster.get_cluster_by_id(
            self.context, new_cluster.id)
        self.assertEqual(models.Status.ACTIVE, cluster_after.status,
                         "Invalid status for cluster")

        nodes_after = objects.Node.get_nodes_by_cluster_id(
            self.context, new_cluster.id)

        for i, node in enumerate(nodes_after):
            self.assertEqual(result["node_values_%d" % i]["status"],
                             node.status, "Invalid status for node %d" % i)