Example #1
    def test_osd_balance(self, es):
        """
        Current pattern is:
            add 6 osds (9 total, 3 nodes)
            add 3 nodes
            add 9 osds (18 total, 6 nodes)
            add 3 nodes
            add 9 osds (27 total, 9 nodes)
        """
        crd_data = templating.load_yaml(constants.OSD_SCALE_BENCHMARK_YAML)
        our_uuid = uuid4().hex
        self.elastic_info = ElasticData(our_uuid, crd_data)
        self.elastic_info.es_connect()
        collect_stats(INITIAL_SETUP, self.elastic_info)
        for cntr in range(0, MAX_TIMES_ADDED):
            num_nodes = len(get_nodes(constants.WORKER_MACHINE))
            osd_incr = 3
            if cntr == 0 and num_nodes == START_NODE_NUM:
                osd_incr = 2
            if osd_incr == 3:
                scale_ocs_node()
                collect_stats("Three nodes have been added", self.elastic_info)
            cntval = 3 * osd_incr
            logging.info(f"Adding {cntval} osds to nodes")
            scale_capacity_with_deviceset(add_deviceset_count=osd_incr,
                                          timeout=900)
            collect_stats("OSD capacity increase", self.elastic_info)
        collect_stats(FINAL_REPORT, self.elastic_info)
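
Taken together with the docstring, the loop adds only 2 devicesets (6 OSDs) on the first pass, while the cluster is still at its starting worker count, and on every later pass first scales out by 3 worker nodes and then adds 3 devicesets (9 OSDs). A minimal standalone sketch of that pattern, assuming the ocs_ci.ocs.scale_lib import path for the two helpers used above and 3 OSDs per deviceset (the default replica count), could look like this:

from ocs_ci.ocs import scale_lib  # assumed import path for the helpers used above


def grow_cluster(iterations=3):
    """Hypothetical driver reproducing the add-nodes / add-devicesets pattern."""
    for cntr in range(iterations):
        if cntr == 0:
            # First pass: cluster is still at its starting size; add only
            # 2 devicesets (2 * 3 = 6 OSDs).
            scale_lib.scale_capacity_with_deviceset(add_deviceset_count=2, timeout=900)
        else:
            # Later passes: add 3 worker nodes, then 3 devicesets (9 OSDs).
            scale_lib.scale_ocs_node(node_count=3)
            scale_lib.scale_capacity_with_deviceset(add_deviceset_count=3, timeout=900)
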
Example #2
    def test_scale_node_and_capacity(self):
        """
        Test for scaling 12 OCS worker nodes to the cluster
        Scale 12*3 = 36 OSDs
        """

        expected_worker_count = 12
        osds_per_node = 3

        try:
            # Gather existing deviceset, OSD and node count in setup
            existing_ocs_worker_list = get_worker_nodes()
            existing_deviceset_count = storage_cluster.get_deviceset_count()
            osd_replication_count = storage_cluster.get_osd_replica_count()
            expected_deviceset_count = (expected_worker_count /
                                        osds_per_node) * osd_replication_count

            # Check existing OCS worker node count and add nodes if required
            if len(existing_ocs_worker_list) < expected_worker_count:
                scale_worker_count = expected_worker_count - len(
                    existing_ocs_worker_list)
                if not scale_lib.scale_ocs_node(node_count=scale_worker_count):
                    raise OCSWorkerScaleFailed(
                        "OCS worker nodes scaling Failed")

            # Check existing OSD count and add OSDs if required
            if existing_deviceset_count < expected_deviceset_count:
                additional_deviceset = int(expected_deviceset_count -
                                           existing_deviceset_count)
                if not scale_lib.scale_capacity_with_deviceset(
                        add_deviceset_count=additional_deviceset, timeout=600):
                    raise OSDScaleFailed("Scaling OSDs Failed")

            # Check ceph health status
            utils.ceph_health_check(tries=30)

        except (OCSWorkerScaleFailed, OSDScaleFailed, Exception) as ex:
            TestAddNode.skip_all = True
            logging.warning(
                f"Due to an exception, setting TestAddNode.skip_all to {TestAddNode.skip_all}"
            )
            logging.error(f"Cluster not in expected state. {ex}")
Example #3
    def test_scale_node_and_capacity(self):
        """
        Test for scaling 12 OCS worker nodes to the cluster
        Scale 12*3 = 36 OSDs
        """

        expected_worker_count = 12
        osds_per_node = 3

        try:
            # Gather existing deviceset, OSD and node count in setup
            existing_ocs_worker_list = get_worker_nodes()
            existing_deviceset_count = storage_cluster.get_deviceset_count()
            osd_replication_count = storage_cluster.get_osd_replica_count()
            expected_deviceset_count = (
                expected_worker_count / osds_per_node
            ) * osd_replication_count

            # Check existing OCS worker node count and add nodes if required
            if len(existing_ocs_worker_list) < expected_worker_count:
                scale_worker_count = expected_worker_count - len(
                    existing_ocs_worker_list
                )
                assert scale_lib.scale_ocs_node(node_count=scale_worker_count)

            # Check existing OSD count and add OSDs if required
            if existing_deviceset_count < expected_deviceset_count:
                add_deviceset_count = (
                    expected_deviceset_count - existing_deviceset_count
                )
                assert scale_lib.scale_capacity_with_deviceset(
                    add_deviceset_count=add_deviceset_count
                )

            # Check ceph health status
            utils.ceph_health_check(tries=30)

        except UnexpectedBehaviour:
            TestAddNode.skip_all = True
            logging.info("Cluster is not in expected state, unexpected behaviour")
            raise
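
With the values used in this example, the deviceset arithmetic works out as follows (a standalone sketch of the same calculation, assuming the default replica count of 3):

expected_worker_count = 12
osds_per_node = 3
osd_replication_count = 3  # assumed default Ceph replica count

# (12 / 3) * 3 = 12 devicesets; at 3 OSDs per deviceset that gives the
# 12 * 3 = 36 OSDs mentioned in the docstring.
expected_deviceset_count = (expected_worker_count / osds_per_node) * osd_replication_count
print(expected_deviceset_count)  # 12.0

The true division yields a float, which is why Example #2 casts the difference to int before passing it to scale_capacity_with_deviceset.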