示例#1
0
    def test_restart_heketi_pod(self):
        """Validate that heketi survives a pod restart.

        Steps:
        - create a heketi volume and snapshot the topology
        - delete the heketi pod (its DC restarts it) and wait for readiness
        - verify the server answers and the topology is unchanged
        - verify a new volume can still be created and deleted
        """
        # Create a heketi volume so there is state to compare after restart
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=1,
                                        json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
        self.addCleanup(heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        vol_info['id'],
                        raise_on_error=False)
        topo_info = heketi_topology_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         json=True)

        # get current heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)

        # delete heketi-pod (the deployment config restarts it)
        oc_delete(self.ocp_master_node[0],
                  'pod',
                  heketi_pod_name,
                  collect_logs=self.heketi_logs_before_delete)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod',
                                  heketi_pod_name)

        # get new heketi-pod name and wait until it is ready
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], heketi_pod_name)

        # check heketi server is running
        self.assertTrue(
            hello_heketi(self.heketi_client_node, self.heketi_server_url),
            "Heketi server %s is not alive" % self.heketi_server_url)

        # topology must be identical before and after the restart
        new_topo_info = heketi_topology_info(self.heketi_client_node,
                                             self.heketi_server_url,
                                             json=True)
        self.assertEqual(
            new_topo_info, topo_info, "topology info is not same,"
            " difference - %s" % diff(topo_info, new_topo_info))

        # create a new volume to prove heketi is fully functional again
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=2,
                                        json=True)
        # Bug fix: message previously claimed "size 20" while size=2
        self.assertTrue(vol_info, "Failed to create heketi volume of size 2")
        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_info['id'])
    def test_heketi_node_states_enable_disable(self):
        """Test node enable and disable functionality.

        Requires at least 3 online nodes. Volume creation must fail while
        one of the 3 remaining online nodes is disabled, and succeed again
        once that node is re-enabled.
        """
        h_client, h_server = self.heketi_client_node, self.heketi_server_url

        # Collect info of all currently-online nodes
        node_list = heketi_ops.heketi_node_list(h_client, h_server)
        online_hosts = []
        for node_id in node_list:
            node_info = heketi_ops.heketi_node_info(h_client,
                                                    h_server,
                                                    node_id,
                                                    json=True)
            if node_info["state"] == "online":
                online_hosts.append(node_info)

        if len(online_hosts) < 3:
            # skipTest() raises unittest.SkipTest itself; the former
            # 'raise self.skipTest(...)' worked only because the 'raise'
            # keyword was never reached.
            self.skipTest(
                "This test can run only if online hosts are more than 2")

        # Disable n-3 nodes, in case we have n nodes
        for node_info in online_hosts[3:]:
            node_id = node_info["id"]
            heketi_ops.heketi_node_disable(h_client, h_server, node_id)
            self.addCleanup(heketi_ops.heketi_node_enable, h_client, h_server,
                            node_id)

        # Create volume when 3 nodes are online
        vol_size = 1
        vol_info = heketi_ops.heketi_volume_create(h_client,
                                                   h_server,
                                                   vol_size,
                                                   json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete, h_client, h_server,
                        vol_info['id'])

        node_id = online_hosts[0]['id']
        try:
            heketi_ops.heketi_node_disable(h_client, h_server, node_id)

            # With only 2 online nodes volume creation should fail; if it
            # unexpectedly succeeds, register cleanup for the stray volume
            with self.assertRaises(AssertionError):
                heketi_volume = heketi_ops.heketi_volume_create(
                    h_client, h_server, vol_size)
                self.addCleanup(heketi_ops.heketi_volume_delete, h_client,
                                h_server, heketi_volume["id"])
        finally:
            # Always re-enable the node, even if the assertion failed
            heketi_ops.heketi_node_enable(h_client, h_server, node_id)

        # Create volume when heketi node is enabled
        vol_info = heketi_ops.heketi_volume_create(h_client,
                                                   h_server,
                                                   vol_size,
                                                   json=True)
        heketi_ops.heketi_volume_delete(h_client, h_server, vol_info['id'])
    def test_restart_heketi_pod(self):
        """Validate restarting heketi pod"""

        # Create a heketi volume so there is state to compare after restart
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=1, json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'], raise_on_error=False)
        topo_info = heketi_topology_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         json=True)

        # get heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)

        # delete heketi-pod (it restarts the pod)
        oc_delete(self.ocp_master_node[0], 'pod', heketi_pod_name)
        wait_for_resource_absence(self.ocp_master_node[0],
                                  'pod', heketi_pod_name)

        # get new heketi-pod name and wait for it to become ready
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0],
                              heketi_pod_name)

        # check heketi server is running
        self.assertTrue(
            hello_heketi(self.heketi_client_node, self.heketi_server_url),
            "Heketi server %s is not alive" % self.heketi_server_url
        )

        # topology must be unchanged across the restart
        new_topo_info = heketi_topology_info(self.heketi_client_node,
                                             self.heketi_server_url,
                                             json=True)
        self.assertEqual(new_topo_info, topo_info, "topology info is not same,"
                         " difference - %s" % diff(topo_info, new_topo_info))

        # create a new volume to prove heketi is fully functional again
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=2, json=True)
        # Bug fix: message previously claimed "size 20" while size=2
        self.assertTrue(vol_info, "Failed to create heketi volume of size 2")
        heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, vol_info['id'])
示例#4
0
    def test_delete_heketidb_volume(self):
        """Method to test heketidb volume deletion via heketi-cli.

        Deleting the internal 'heketidbstorage' volume must be rejected
        (heketi_volume_delete is expected to raise AssertionError).
        """
        # Create a couple of regular volumes so the volume list is non-empty
        # (idiom fix: unused loop variable, range(0, 2) -> range(2))
        for _ in range(2):
            volume_info = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url, 10, json=True)
            self.addCleanup(heketi_ops.heketi_volume_delete,
                            self.heketi_client_node, self.heketi_server_url,
                            volume_info["id"])

        volume_list_info = heketi_ops.heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)

        self.assertTrue(volume_list_info["volumes"],
                        "Heketi volume list empty.")

        # Find the internal DB volume and verify its deletion is refused
        for volume_id in volume_list_info["volumes"]:
            volume_info = heketi_ops.heketi_volume_info(
                self.heketi_client_node,
                self.heketi_server_url,
                volume_id,
                json=True)

            if volume_info["name"] == "heketidbstorage":
                self.assertRaises(AssertionError,
                                  heketi_ops.heketi_volume_delete,
                                  self.heketi_client_node,
                                  self.heketi_server_url, volume_id)
                return
        raise ExecutionError(
            "Warning: heketidbstorage doesn't exist in list of volumes")
    def test_create_vol_and_retrieve_vol_info(self):
        """Validate heketi and gluster volume info"""

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size, json=True)
        self.assertTrue(out, ("Failed to create heketi "
                        "volume of size %s" % self.volume_size))
        # Bug fix: the message used '%' with no '%s' placeholder, so the
        # created-volume payload was silently dropped from the log
        g.log.info("Heketi volume successfully created: %s" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

        g.log.info("Retrieving heketi volume info")
        out = heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url, volume_id,
            json=True)
        self.assertTrue(out, ("Failed to get heketi volume info"))
        g.log.info("Successfully got the heketi volume info")
        name = out["name"]

        # Cross-check that gluster also knows the volume by name
        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get volume info %s" % name)
        g.log.info("Successfully got the volume info %s" % name)
示例#6
0
    def test_volume_inconsistencies(self):
        """Verify heketi reports inconsistencies after out-of-band deletion."""
        consistent_msg = (
            "heketi volume list matches with volume list of all nodes")

        # Precondition: heketi and Gluster must currently agree
        report = heketi_ops.heketi_examine_gluster(
            self.heketi_client_node, self.heketi_server_url)
        if consistent_msg not in report['report']:
            self.skipTest("heketi and Gluster are inconsistent to each other")

        # Create a volume known to heketi
        vol = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol['id'])

        # Remove that volume from gluster directly, behind heketi's back
        for gluster_cmd in (
                "gluster vol stop %s force --mode=script",
                "gluster vol delete %s --mode=script"):
            openshift_ops.cmd_run_on_gluster_pod_or_node(
                self.node, gluster_cmd % vol['name'])

        # The examine report must now flag the mismatch
        report = heketi_ops.heketi_examine_gluster(
            self.heketi_client_node, self.heketi_server_url)
        self.assertNotIn(consistent_msg, report['report'])
示例#7
0
    def test_heketi_volume_snapshot_create(self):
        """Test heketi volume snapshot create operation"""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        snap_name = 'snap_test_heketi_volume_snapshot_create_1'

        # Create a 1-unit heketi volume to snapshot
        vol_info = heketi_volume_create(h_node, h_url, 1, json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info["id"])
        vol_name = vol_info["name"]

        # Take the snapshot from the gluster side
        ret, _, _ = snap_create(
            'auto_get_gluster_endpoint', vol_name, snap_name, timestamp=False)
        self.addCleanup(podcmd.GlustoPod()(snap_delete),
                        "auto_get_gluster_endpoint", snap_name)
        self.assertEqual(
            ret, 0, "Failed to create snapshot {} for heketi volume {}".format(
                snap_name, vol_name))

        # The new snapshot must show up in the snapshot list
        ret, out, _ = snap_list('auto_get_gluster_endpoint')
        self.assertEqual(
            ret, 0,
            "Failed to list snapshot {} for heketi volume".format(snap_name))
        self.assertIn(
            snap_name, out,
            "Heketi volume snapshot {} not found in {}".format(snap_name, out))
示例#8
0
    def test_device_delete_with_bricks(self):
        """Validate device deletion with existing bricks on the device"""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create a volume; its first brick pins the device we try to delete
        vol_info = heketi_volume_create(h_node, h_url, 1, json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])
        brick = vol_info['bricks'][0]
        device_delete_id, node_id = brick['device'], brick['node']
        device_name = heketi_device_info(
            h_node, h_url, device_delete_id, json=True)['name']

        # A device must be disabled before it can be deleted
        heketi_device_disable(h_node, h_url, device_delete_id)
        self.addCleanup(heketi_device_enable, h_node, h_url, device_delete_id)

        # Deleting a device that still hosts bricks must fail; if it ever
        # succeeds unexpectedly, re-add the device during cleanup
        with self.assertRaises(AssertionError):
            heketi_device_delete(h_node, h_url, device_delete_id)
            self.addCleanup(heketi_device_add, h_node, h_url, device_name,
                            node_id)
    def test_heketi_brick_evict(self):
        """Test brick evict basic functionality and verify it replace a brick
        properly
        """
        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Create a small volume and evict its first brick
        size = 1
        vol_info_old = heketi_ops.heketi_volume_create(
            h_node, h_server, size, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, h_node, h_server,
            vol_info_old['id'])
        heketi_ops.heketi_brick_evict(
            h_node, h_server, vol_info_old["bricks"][0]['id'])

        vol_info_new = heketi_ops.heketi_volume_info(
            h_node, h_server, vol_info_old['id'], json=True)

        # Idiom fix: set comprehensions used directly; wrapping them in
        # set(...) was redundant
        bricks_old = {brick['path'] for brick in vol_info_old["bricks"]}
        bricks_new = {brick['path'] for brick in vol_info_new["bricks"]}
        # Exactly one brick path must be new after the evict
        self.assertEqual(
            len(bricks_new - bricks_old), 1,
            "Brick was not replaced with brick evict for vol \n {}".format(
                vol_info_new))

        # Gluster's own view of the bricks must match heketi's
        gvol_info = self._get_gluster_vol_info(vol_info_new['name'])
        gbricks = {
            brick['name'].split(":")[1]
            for brick in gvol_info["bricks"]["brick"]}
        self.assertEqual(
            bricks_new, gbricks, "gluster vol info and heketi vol info "
            "mismatched after brick evict {} \n {}".format(
                gvol_info, vol_info_new))
    def test_volume_inconsistencies(self):
        """Verify heketi notices volumes removed behind its back."""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        match_msg = "heketi volume list matches with volume list of all nodes"

        # Precondition: heketi and Gluster currently agree
        out = heketi_ops.heketi_examine_gluster(h_node, h_url)
        if match_msg not in out['report']:
            self.skipTest(
                "heketi and Gluster are inconsistent to each other")

        # Create a volume known to heketi
        vol = heketi_ops.heketi_volume_create(h_node, h_url, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, h_node, h_url, vol['id'])

        # Stop and delete it straight on the gluster cluster
        openshift_ops.cmd_run_on_gluster_pod_or_node(
            self.node,
            "gluster vol stop %s force --mode=script" % vol['name'])
        openshift_ops.cmd_run_on_gluster_pod_or_node(
            self.node,
            "gluster vol delete %s --mode=script" % vol['name'])

        # Now the examine report must no longer claim consistency
        out = heketi_ops.heketi_examine_gluster(h_node, h_url)
        self.assertNotIn(match_msg, out['report'])
    def test_heketi_with_expand_volume(self):
        """
        Test volume expand and size if updated correctly in heketi-cli info
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create a volume of the configured test size
        vol_info = heketi_volume_create(
            h_node, h_url, self.volume_size, json=True)
        self.assertTrue(vol_info, ("Failed to create heketi volume of size %s"
                                   % self.volume_size))
        self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])
        # The reported size must match what was requested
        self.assertEqual(vol_info['size'], self.volume_size,
                         ("Failed to create volume."
                          "Expected Size: %s, Actual Size: %s"
                          % (self.volume_size, vol_info['size'])))

        # Expand the volume and verify heketi reports the new total size
        volume_id = vol_info["id"]
        expand_size = 2
        ret = heketi_volume_expand(h_node, h_url, volume_id, expand_size)
        self.assertTrue(ret, ("Failed to expand heketi volume of id %s"
                              % volume_id))
        volume_info = heketi_volume_info(h_node, h_url, volume_id, json=True)
        expected_size = self.volume_size + expand_size
        self.assertEqual(volume_info['size'], expected_size,
                         ("Volume Expansion failed Expected Size: %s, Actual "
                          "Size: %s" % (str(expected_size),
                                        str(volume_info['size']))))
示例#12
0
    def test_heketi_volume_snapshot_create_with_one_brick_down(self):
        """
        Test heketi volume snapshot create with one brick down

        Creates a 1-unit heketi volume, kills one of its brick processes
        from the gluster side, then verifies that 'snapshot create' fails
        with the expected error and that the snapshot count is unchanged.
        """
        h_vol_size = 1
        self.node = self.ocp_master_node[0]
        snap_name = 'snap_creation_test_with_one_brick_down'
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create the volume whose brick will be taken down
        h_vol_info = heketi_volume_create(h_node, h_url, h_vol_size, json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url, h_vol_info["id"])
        h_volume_name = h_vol_info["name"]
        # _get_bricks_pids presumably returns pairs like
        # [(brick_host, brick_pid), ...] -- TODO confirm against the helper
        pids_before = self._get_bricks_pids(h_volume_name)
        self.assertTrue(
            pids_before,
            "Failed to get the brick process for volume {}".format(
                h_volume_name))

        # kill only one brick process
        cmd = "kill -9 {}".format(pids_before[0][1])
        cmd_run_on_gluster_pod_or_node(self.node, cmd, pids_before[0][0])
        pids_after = self._get_bricks_pids(h_volume_name)
        self.assertTrue(
            pids_after,
            "Failed to get the brick process for volume {}".format(
                h_volume_name))
        # NOTE(review): this asserts the first entry still reports a truthy
        # pid value after the kill; whether that actually proves the kill
        # succeeded depends on _get_bricks_pids' semantics -- verify.
        self.assertTrue(
            pids_after[0][1],
            "Failed to kill brick process {} on brick {}".format(
                pids_before[0][1], pids_after[0][0]))

        # Get the snapshot list; snap_list returns (ret, out, err) and a
        # falsy ret is treated as success here
        ret, out, err = snap_list('auto_get_gluster_endpoint')
        self.assertFalse(
            ret,
            "Failed to list snapshot from gluster side due to error"
            " {}".format(err))
        snap_list_before = out.split("\n")
        # Snapshot creation is expected to FAIL while a brick is down,
        # hence the truthy (non-zero) 'ret' assertion below
        ret, out, err = snap_create(
            'auto_get_gluster_endpoint', h_volume_name,
            snap_name, timestamp=False)
        exp_err_msg = "Snapshot command failed\n"
        self.assertTrue(
            ret, "Failed to run snapshot create cmd from gluster side "
            "with error {}".format(err))
        self.assertEqual(
            out, exp_err_msg,
            "Expecting error msg {} and {} to match".format(
                out, exp_err_msg))

        # Check for count after snapshot creation; it must be unchanged
        ret, out, err = snap_list('auto_get_gluster_endpoint')
        self.assertFalse(
            ret,
            "Failed to list snapshot from gluster with error {}".format(err))
        snap_list_after = out.split("\n")
        self.assertEqual(
            snap_list_before, snap_list_after,
            "Expecting Snapshot count before {} and after creation {} to be "
            "same".format(snap_list_before, snap_list_after))
示例#13
0
    def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
        """Validate heketi metrics VolumeCount after volume deletion"""
        created_vols = []

        # Create three volumes, confirming each appears in the volume list
        for _ in range(3):
            vol = heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url, 1, json=True)
            self.assertTrue(vol)
            self.addCleanup(
                heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, vol['id'], raise_on_error=False)
            volume_list = heketi_volume_list(
                self.heketi_client_node, self.heketi_server_url)
            self.assertIn(vol['id'], volume_list)
            created_vols.append(vol)

        # Delete each volume, checking the list and the metrics count
        for vol in created_vols:
            heketi_volume_delete(
                self.heketi_client_node, self.heketi_server_url, vol['id'])
            volume_list = heketi_volume_list(
                self.heketi_client_node, self.heketi_server_url)
            self.assertNotIn(vol['id'], volume_list)
            self.verify_volume_count()
    def test_100gb_block_pvc_create_and_delete_twice(self):
        """Validate creation and deletion of blockvoume of size 100GB"""
        # Space needed: 100GB PVC, 103GB hosting volume, 309GB free overall
        size, bhv_size, required_space = 100, 103, 309
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        prefix = 'autotest-pvc-{}'.format(utils.get_random_str(size=5))

        # Skip test if required free space is not available
        free_space = get_total_free_space(
            self.heketi_client_node, self.heketi_server_url)[0]
        if free_space < required_space:
            self.skipTest("Available free space {} is less than the required "
                          "free space {}".format(free_space, required_space))

        # Create the block hosting volume backing the 100GB block PVC
        bhv = heketi_volume_create(
            h_node, h_url, bhv_size, block=True, json=True)['id']
        self.addCleanup(heketi_volume_delete, h_node, h_url, bhv)

        # Create and delete the PVC twice to check reuse of the hosting vol
        for _ in range(2):
            pvc_name = self.create_and_wait_for_pvc(
                pvc_size=size, pvc_name_prefix=prefix)
            match_pvc_and_pv(self.node, prefix)

            oc_delete(self.node, 'pvc', pvc_name)
            wait_for_resource_absence(self.node, 'pvc', pvc_name)
    def test_validate_brick_paths_on_gluster_pods_or_nodes(self):
        """Validate brick paths after creation and deletion of a volume."""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create a 1Gb heketi volume
        vol = heketi_volume_create(h_node, h_url, size=1, json=True)
        self.assertTrue(vol, "Failed to create 1Gb heketi volume")
        vol_id = vol["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, h_node, h_url, vol_id,
            raise_on_error=False)

        # Collect the brick paths of the new volume
        brick_paths = [brick['path'] for brick in vol["bricks"]]

        # While the volume exists its bricks must be present in fstab
        self._find_bricks(brick_paths, present=True)

        # Delete the volume
        out = heketi_volume_delete(h_node, h_url, vol_id)
        self.assertTrue(out, "Failed to delete heketi volume %s" % vol_id)

        # After deletion the brick paths must be gone from fstab
        self._find_bricks(brick_paths, present=False)
示例#16
0
    def test_heketi_with_expand_volume(self):
        """
        Test volume expand and size if updated correctly in heketi-cli info
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create a volume of the configured test size
        vol_info = heketi_volume_create(
            h_node, h_url, self.volume_size, json=True)
        self.assertTrue(
            vol_info,
            ("Failed to create heketi volume of size %s" % self.volume_size))
        self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])
        # The reported size must match what was requested
        self.assertEqual(vol_info['size'], self.volume_size,
                         ("Failed to create volume."
                          "Expected Size: %s, Actual Size: %s" %
                          (self.volume_size, vol_info['size'])))

        # Expand by 2 units and re-read the volume info
        volume_id = vol_info["id"]
        expand_size = 2
        ret = heketi_volume_expand(h_node, h_url, volume_id, expand_size)
        self.assertTrue(
            ret, ("Failed to expand heketi volume of id %s" % volume_id))
        volume_info = heketi_volume_info(h_node, h_url, volume_id, json=True)
        # Total size must now be the original size plus the expansion
        expected_size = self.volume_size + expand_size
        self.assertEqual(volume_info['size'], expected_size,
                         ("Volume Expansion failed Expected Size: %s, Actual "
                          "Size: %s" %
                          (str(expected_size), str(volume_info['size']))))
示例#17
0
    def test_delete_heketi_volume(self):
        """
        Method to test heketi volume deletion and whether it
        frees up used space after deletion
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create a 10 unit volume; cleanup is registered in case the
        # explicit delete below never runs
        volume_info = heketi_ops.heketi_volume_create(
            h_node, h_url, 10, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, h_node, h_url,
            volume_info["id"], raise_on_error=False)

        free_space_after_creation = self.get_free_space_summary_devices()

        # Delete the volume and compare device free space before/after
        heketi_ops.heketi_volume_delete(h_node, h_url, volume_info["id"])

        free_space_after_deletion = self.get_free_space_summary_devices()

        self.assertTrue(
            free_space_after_deletion > free_space_after_creation,
            "Free space is not reclaimed after deletion "
            "of %s" % volume_info["id"])
    def test_validate_brick_paths_on_gluster_pods_or_nodes(self):
        """Validate brick paths after creation and deletion of a volume."""

        # Create a 1Gb heketi volume
        vol = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   size=1,
                                   json=True)
        self.assertTrue(vol, "Failed to create 1Gb heketi volume")
        vol_id = vol["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        vol_id,
                        raise_on_error=False)

        # Collect all brick paths belonging to the volume
        brick_paths = [brick['path'] for brick in vol["bricks"]]

        # Brick paths must be present in fstab while the volume exists
        self._find_bricks(brick_paths, present=True)

        # Remove the volume
        out = heketi_volume_delete(self.heketi_client_node,
                                   self.heketi_server_url, vol_id)
        self.assertTrue(out, "Failed to delete heketi volume %s" % vol_id)

        # Brick paths must be absent from fstab once the volume is gone
        self._find_bricks(brick_paths, present=False)
示例#19
0
    def test_volume_create_replica_2(self):
        """Validate creation of a replica 2 volume"""
        # Create a 1 unit volume with replica count 2
        vol_create_info = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1,
            replica=2, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_create_info["id"],
            raise_on_error=True)

        # Heketi must report the requested replica count
        actual_replica = int(
            vol_create_info["durability"]["replicate"]["replica"])
        self.assertEqual(
            actual_replica, 2, "Volume '%s' has '%s' as value for replica,"
            " expected 2." % (vol_create_info["id"], actual_replica))

        # Gluster must know the volume as well
        vol_name = vol_create_info['name']
        gluster_vol = volume_ops.get_volume_info(
            'auto_get_gluster_endpoint', volname=vol_name)
        self.assertTrue(gluster_vol,
                        "Failed to get volume '%s' info" % vol_name)

        # A replica-2 volume must consist of exactly two bricks
        brick_amount = len(gluster_vol[vol_name]['bricks']['brick'])
        self.assertEqual(
            brick_amount, 2, "Brick amount is expected to be 2. "
            "Actual amount is '%s'" % brick_amount)
示例#20
0
    def test_heketi_volume_create_with_clusterid(self):
        """Validate creation of heketi volume with clusters argument"""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Pick the first cluster id reported by heketi
        creation_cluster_id = heketi_cluster_list(
            h_node, h_url, json=True)['clusters'][0]

        # Create a volume pinned to that cluster
        volume_id = heketi_volume_create(
            h_node, h_url, self.volume_size,
            clusters=creation_cluster_id, json=True)["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, h_node, h_url, volume_id)

        # The volume info must report the same cluster id
        info_cluster_id = heketi_volume_info(
            h_node, h_url, volume_id, json=True)['cluster']
        self.assertEqual(
            info_cluster_id, creation_cluster_id,
            "Volume creation cluster id {} not matching the info cluster id "
            "{}".format(creation_cluster_id, info_cluster_id))
示例#21
0
    def test_create_vol_and_retrieve_vol_info(self):
        """Validate heketi and gluster volume info"""

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size,
                                   json=True)
        self.assertTrue(out, ("Failed to create heketi "
                              "volume of size %s" % self.volume_size))
        # Lazy %-args: the original '"..." % out' had no placeholder, so the
        # volume info was silently dropped from the log message.
        g.log.info("Heketi volume successfully created: %s", out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, volume_id)

        g.log.info("Retrieving heketi volume info")
        out = heketi_volume_info(self.heketi_client_node,
                                 self.heketi_server_url,
                                 volume_id,
                                 json=True)
        self.assertTrue(out, ("Failed to get heketi volume info"))
        g.log.info("Successfully got the heketi volume info")
        name = out["name"]

        # Cross-check: the heketi-reported volume name must be resolvable
        # on the gluster side as well.
        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get volume info %s" % name)
        g.log.info("Successfully got the volume info %s" % name)
示例#22
0
    def test_heketi_volume_provision_after_node_reboot(self):
        """Provision volume before and after node reboot"""
        # Skip test if not able to connect to Cloud Provider
        try:
            node_ops.find_vm_name_by_ip_or_hostname(self.node)
        except (NotImplementedError, exceptions.ConfigError) as e:
            self.skipTest(e)

        h_client, h_server = self.heketi_client_node, self.heketi_server_url
        g_nodes = [
            server["manage"]
            for server in self.gluster_servers_info.values()][2:]

        # Create heketi volume while all nodes are up
        vol_info = heketi_ops.heketi_volume_create(
            h_client, h_server, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, h_client, h_server,
            vol_info['id'])

        # Power off gluster server nodes
        for g_node in g_nodes:
            self.power_off_gluster_node_vm(
                node_ops.find_vm_name_by_ip_or_hostname(g_node), g_node)

        # Volume creation is expected to fail while the nodes are down
        with self.assertRaises(AssertionError):
            vol_info = heketi_ops.heketi_volume_create(
                h_client, h_server, 1, json=True)
            self.addCleanup(
                heketi_ops.heketi_volume_delete, h_client, h_server,
                vol_info['id'])

        # Power on gluster server nodes
        for g_node in g_nodes:
            self.power_on_gluster_node_vm(
                node_ops.find_vm_name_by_ip_or_hostname(g_node), g_node)

        # Volume creation must succeed again after the reboot
        vol_info = heketi_ops.heketi_volume_create(
            h_client, h_server, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, h_client, h_server,
            vol_info['id'])
    def test_brick_evict_on_more_than_three_node_with_one_down(self):
        """Test brick evict basic functionality and verify brick evict
        will success after one node down out of more than three nodes"""

        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Create heketi volume
        vol_info = heketi_ops.heketi_volume_create(
            h_node, h_server, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete,
            h_node, h_server, vol_info.get('id'))

        # Get node on which heketi pod is scheduled
        heketi_pod = openshift_ops.get_pod_name_from_dc(
            self.ocp_client, self.heketi_dc_name)
        heketi_node = openshift_ops.oc_get_custom_resource(
            self.ocp_client, 'pod', '.:spec.nodeName', heketi_pod)[0]

        # Get brick id and glusterfs node which is not the heketi node
        brick_id, hostname = None, None
        for brick in vol_info.get('bricks', {}):
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server, brick.get('node'), json=True)
            hostname = node_info.get('hostnames').get('manage')[0]
            if hostname != heketi_node:
                brick_id = brick.get('id')
                break
        # Fail explicitly instead of hitting a NameError below when every
        # brick happens to live on the heketi node.
        self.assertTrue(
            brick_id, "Failed to find a brick outside the heketi node")

        self._power_off_node_and_wait_node_to_be_not_ready(hostname)

        # Perform brick evict operation
        heketi_ops.heketi_brick_evict(h_node, h_server, brick_id)

        # Get volume info after brick evict operation
        vol_info_new = heketi_ops.heketi_volume_info(
            h_node, h_server, vol_info.get('id'), json=True)

        # Compare previous and new brick sets; exactly one brick must have
        # been replaced.  (The original set(...) wrapper around a set
        # comprehension was redundant.)
        bricks_old = {brick.get('path') for brick in vol_info.get("bricks")}
        bricks_new = {
            brick.get('path') for brick in vol_info_new.get("bricks")}
        self.assertEqual(
            len(bricks_new - bricks_old), 1,
            "Brick was not replaced with brick evict for vol \n {}".format(
                vol_info_new))

        # Get gluster volume info
        g_vol_info = self._get_gluster_vol_info(vol_info_new.get('name'))

        # Validate bricks on gluster volume and heketi volume
        g_bricks = {
            brick.get('name').split(":")[1]
            for brick in g_vol_info.get("bricks", {}).get("brick")}
        self.assertEqual(
            bricks_new, g_bricks, "gluster vol info and heketi vol info "
            "mismatched after brick evict {} \n {}".format(
                g_bricks, g_vol_info))
示例#24
0
    def test_create_vol_and_retrieve_topology_info(self):
        """Validate volume presence and brick counts in heketi topology"""
        volume_names = []
        volume_ids = []

        # Create 3 volumes and make 3rd volume of type distributed replica
        g.log.info("Creating 3 volumes")
        for i in range(3):
            out = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       self.volume_size,
                                       json=True)
            g.log.info("Heketi volume %s successfully created" % out)
            volume_names.append(out["name"])
            volume_ids.append(out["bricks"][0]["volume"])
            # First two volumes are deleted mid-test, so only the third
            # cleanup should raise on error.
            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            volume_ids[i],
                            raise_on_error=(i == 2))
        # Expand the 2nd volume so its brick count doubles
        heketi_volume_expand(self.heketi_client_node, self.heketi_server_url,
                             volume_ids[1], 1)

        # Check if volume is shown in the heketi topology
        topology_volumes = get_heketi_volume_and_brick_count_list(
            self.heketi_client_node, self.heketi_server_url)
        existing_volumes = [v for v, _ in topology_volumes]
        for v in volume_names:
            self.assertIn(v, existing_volumes)
        for v, b_count in topology_volumes:
            expected_bricks_count = 6 if v == volume_names[1] else 3
            self.assertGreaterEqual(
                b_count, expected_bricks_count,
                'Bricks number of the %s volume is %s and it is expected '
                'to be greater or equal to %s' %
                (v, b_count, expected_bricks_count))

        # Delete first 2 volumes and verify their deletion in the topology
        for vol_id in volume_ids[:2]:
            g.log.info("Deleting volume %s" % vol_id)
            heketi_volume_delete(self.heketi_client_node,
                                 self.heketi_server_url, vol_id)
        topology_volumes = get_heketi_volume_and_brick_count_list(
            self.heketi_client_node, self.heketi_server_url)
        existing_volumes = [v for v, _ in topology_volumes]
        for vol_name in volume_names[:2]:
            self.assertNotIn(
                vol_name, existing_volumes,
                ("volume %s shown in the heketi topology after deletion"
                 "\nTopology info:\n%s" % (vol_name, existing_volumes)))

        # Check the existence of third volume.  The failure message used to
        # interpolate volume_ids[2] while the assertion checked the name.
        self.assertIn(
            volume_names[2], existing_volumes, "volume %s not "
            "shown in the heketi topology\nTopology info"
            "\n%s" % (volume_names[2], existing_volumes))
        g.log.info("Sucessfully verified the topology info")
    def test_brick_evict_on_three_node_with_one_down(self):
        """Test brick evict basic functionality and verify brick evict
        will fail after node down if nodes are three"""

        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Disable node if more than 3 so exactly three stay usable
        node_list = heketi_ops.heketi_node_list(h_node, h_server)
        if len(node_list) > 3:
            for node_id in node_list[3:]:
                heketi_ops.heketi_node_disable(h_node, h_server, node_id)
                self.addCleanup(heketi_ops.heketi_node_enable, h_node,
                                h_server, node_id)

        # Create heketi volume
        vol_info = heketi_ops.heketi_volume_create(
            h_node, h_server, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, h_node, h_server,
            vol_info.get('id'))

        # Get node on which heketi pod is scheduled
        heketi_pod = openshift_ops.get_pod_name_from_dc(
            self.ocp_client, self.heketi_dc_name)
        heketi_node = openshift_ops.oc_get_custom_resource(
            self.ocp_client, 'pod', '.:spec.nodeName', heketi_pod)[0]

        # Hostnames of the disabled (4th and later) nodes
        host_list = []
        for node_id in node_list[3:]:
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server, node_id, json=True)
            host_list.append(node_info.get('hostnames').get('manage')[0])

        # Get brick id and glusterfs node which is neither the heketi node
        # nor one of the disabled nodes
        brick_id, hostname = None, None
        for brick in vol_info.get('bricks', {}):
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server, brick.get('node'), json=True)
            hostname = node_info.get('hostnames').get('manage')[0]
            if (hostname != heketi_node) and (hostname not in host_list):
                brick_id = brick.get('id')
                break
        # Guard against a NameError below when no suitable brick exists
        self.assertTrue(brick_id, "Failed to find a suitable brick to evict")

        self._power_off_node_and_wait_node_to_be_not_ready(hostname)

        # Brick evict must fail: with one of three nodes down there is no
        # node left to host the replacement brick.  The original try/except
        # silently passed when the evict unexpectedly succeeded.
        with self.assertRaises(AssertionError) as cm:
            heketi_ops.heketi_brick_evict(h_node, h_server, brick_id)
        self.assertIn('No Replacement was found', six.text_type(cm.exception))
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
            self):
        """Verify that block volume creation fails when we create block
        volume of size greater than the default size of BHV.
        Verify that block volume creation succeed when we create BHV
        of size greater than the default size of BHV.
        """

        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
        # Roughly 2% of a BHV is set aside as reserve
        reserve_size = int(math.ceil(default_bhv_size * 0.02))

        self.verify_free_space(default_bhv_size + reserve_size + 2)

        # A block volume larger than the default BHV size must be rejected
        with self.assertRaises(ExecutionError):
            bvol_info = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                (default_bhv_size + 1), json=True)
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, bvol_info['id'])

        sc_name = self.create_storage_class()

        # The same oversized request via a PVC must end in a
        # ProvisioningFailed warning event
        pvc_name = oc_create_pvc(
            self.node, sc_name, pvc_size=(default_bhv_size + 1))
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='ProvisioningFailed')

        # A BHV big enough to host the block volume lets the PVC bind
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            (default_bhv_size + reserve_size + 2), block=True,
            json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # Re-register PVC cleanups so they run before the BHV is deleted
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        verify_pvc_status_is_bound(self.node, pvc_name)
示例#27
0
    def test_heketi_volume_snapshot_delete(self):
        """Test heketi volume snapshot delete operation"""
        h_volume_size = 1
        snap_name = 'snap_test_heketi_volume_snapshot_create_1'
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        h_volume_info = heketi_volume_create(
            h_node, h_url, h_volume_size, json=True)
        self.addCleanup(
            heketi_volume_delete, h_node, h_url, h_volume_info["id"])

        # Snapshot count before creation; must match the count once our
        # snapshot has been deleted again
        snap_list_before = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list_before,
            "Failed to get the snapshot list {}".format(snap_list_before))

        # Create a snapshot of the heketi volume
        h_volume_name = h_volume_info["name"]
        ret, _, err = snap_create(
            'auto_get_gluster_endpoint', h_volume_name, snap_name,
            timestamp=False)
        self.addCleanup(
            podcmd.GlustoPod()(snap_delete), "auto_get_gluster_endpoint",
            snap_name)
        self.assertFalse(
            ret, "Failed to create snapshot {} for heketi volume {} with"
            " error {}".format(snap_name, h_volume_name, err))

        # The new snapshot must be listed
        snap_list = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list, "Failed to get the snapshot list {}".format(snap_list))
        self.assertIn(
            snap_name, snap_list,
            "Heketi volume snapshot {} not found in {}".format(
                snap_name, snap_list))

        # Delete the snapshot
        ret, _, err = snap_delete('auto_get_gluster_endpoint', snap_name)
        self.assertFalse(
            ret, "Failed to delete snapshot {} for heketi volume with err {}".
            format(snap_name, err))

        # Snapshot count must be back to the pre-test value
        snap_list_after = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list_after,
            "Failed to get the snapshot list {}".format(snap_list_after))
        self.assertEqual(
            snap_list_before, snap_list_after,
            "Expecting Snapshot count before {} and after creation {} to be "
            "same".format(snap_list_before, snap_list_after))
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
            self):
        """Verify that block volume creation fails when we create block
        volume of size greater than the default size of BHV.
        Verify that block volume creation succeed when we create BHV
        of size greater than the default size of BHV.
        """

        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
        # Heketi keeps ~2% of a BHV as reserve
        reserve_size = int(math.ceil(default_bhv_size * 0.02))

        self.verify_free_space(default_bhv_size + reserve_size + 2)

        # Requesting a block volume bigger than the default BHV must fail
        with self.assertRaises(AssertionError):
            bvol_info = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                (default_bhv_size + 1), json=True)
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, bvol_info['id'])

        sc_name = self.create_storage_class()

        # The equivalent PVC must produce a ProvisioningFailed event
        pvc_name = oc_create_pvc(
            self.node, sc_name, pvc_size=(default_bhv_size + 1))
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='ProvisioningFailed')

        # Creating a sufficiently large BHV lets the pending PVC bind
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            (default_bhv_size + reserve_size + 2), block=True,
            json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # Re-register PVC cleanups so they run before the BHV is deleted
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=False)

        verify_pvc_status_is_bound(self.node, pvc_name)
 def test_validate_gluster_voloptions_blockhostvolume(self):
     """Validate gluster volume options which are set for
        block hosting volume"""
     # Options expected on every block hosting volume heketi creates
     options_to_validate = (
         ('performance.quick-read', 'off'),
         ('performance.read-ahead', 'off'),
         ('performance.io-cache', 'off'),
         ('performance.stat-prefetch', 'off'),
         ('performance.open-behind', 'off'),
         ('performance.readdir-ahead', 'off'),
         ('performance.strict-o-direct', 'on'),
         ('network.remote-dio', 'disable'),
         ('cluster.eager-lock', 'enable'),
         ('cluster.quorum-type', 'auto'),
         ('cluster.data-self-heal-algorithm', 'full'),
         ('cluster.locking-scheme', 'granular'),
         ('cluster.shd-max-threads', '8'),
         ('cluster.shd-wait-qlength', '10000'),
         ('features.shard', 'on'),
         ('features.shard-block-size', '64MB'),
         ('user.cifs', 'off'),
         ('server.allow-insecure', 'on'),
     )
     free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                self.heketi_server_url)
     if nodenum < 3:
         # Fixed missing space: the message used to render as
         # "...number ofonline nodes...".
         self.skipTest("Skip the test case since number of "
                       "online nodes is less than 3.")
     free_space_available = int(free_space / nodenum)
     default_bhv_size = get_default_block_hosting_volume_size(
         self.heketi_client_node, self.heketi_dc_name)
     if free_space_available < default_bhv_size:
         # Fixed missing space: the message used to render as "%sis less".
         self.skipTest("Skip the test case since free_space_available %s "
                       "is less than the default_bhv_size %s ." %
                       (free_space_available, default_bhv_size))
     # Create a block hosting volume of the default size
     block_host_create_info = heketi_volume_create(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   default_bhv_size,
                                                   json=True,
                                                   block=True)
     self.addCleanup(heketi_volume_delete,
                     self.heketi_client_node,
                     self.heketi_server_url,
                     block_host_create_info["id"],
                     raise_on_error=True)
     bhv_name = block_host_create_info["name"]
     # Validate that each expected option is set to the expected value
     vol_info = get_volume_info('auto_get_gluster_endpoint',
                                volname=bhv_name)
     self.assertTrue(vol_info, "Failed to get volume info %s" % bhv_name)
     self.assertIn("options", vol_info[bhv_name].keys())
     for k, v in options_to_validate:
         self.assertIn(k, vol_info[bhv_name]["options"].keys())
         self.assertEqual(v, vol_info[bhv_name]["options"][k])
示例#30
0
    def test_to_check_deletion_of_node(self):
        """Validate deletion of a node which contains devices"""

        # Create Heketi volume to make sure we have devices with usages
        heketi_url = self.heketi_server_url
        vol = heketi_volume_create(self.heketi_client_node,
                                   heketi_url,
                                   1,
                                   json=True)
        self.assertTrue(vol, "Failed to create heketi volume.")
        g.log.info("Heketi volume successfully created")
        volume_id = vol["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, volume_id)

        # Pick up suitable node: online, with an online device that has
        # some used space.  The original for/else construct could never
        # fail -- the inner `break` did not exit the outer loop and the
        # loop variable was always truthy in the `else` assertion.
        node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_ids)
        node_id = None
        for candidate_id in node_ids:
            node_info = heketi_node_info(self.heketi_client_node,
                                         heketi_url,
                                         candidate_id,
                                         json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            if any(device['state'].lower() == 'online'
                   and device['storage']['used']
                   for device in node_info['devices']):
                node_id = node_info['id']
                break
        self.assertTrue(
            node_id, "Failed to find online node with online device which "
            "has some usages.")

        # Try to delete the node by its ID
        g.log.info("Trying to delete the node which contains devices in it. "
                   "Expecting failure.")
        self.assertRaises(AssertionError, heketi_node_delete,
                          self.heketi_client_node, heketi_url, node_id)

        # Make sure our node hasn't been deleted
        g.log.info("Listing heketi node list")
        node_list = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_list, ("Failed to list heketi nodes"))
        self.assertIn(node_id, node_list)
        node_info = heketi_node_info(self.heketi_client_node,
                                     heketi_url,
                                     node_id,
                                     json=True)
        self.assertEqual(node_info['state'].lower(), 'online')
    def test_volume_create_as_tag_maching_rule(self):
        """Validate settags operation only on one device in the cluster"""

        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Set tag on any one device in cluster
        node_list = heketi_node_list(h_node, h_server, json=True)
        node_info = heketi_node_info(h_node, h_server, node_list[0], json=True)
        device_id = node_info.get('devices', {})[0].get('id')
        set_tags(h_node, h_server, 'device', device_id, "tier:it")
        self.addCleanup(rm_tags, h_node, h_server, 'device', device_id, 'tier')

        # Volume creation should fail: only a single device carries the
        # matching tag, so the allocation cannot be satisfied
        try:
            vol_info = heketi_volume_create(
                h_node,
                h_server,
                2,
                gluster_volume_options="user.heketi.device-tag-match tier=it",
                json=True)
        except AssertionError as e:
            if ("Failed to allocate new volume" not in six.text_type(e)):
                raise
        else:
            # The original code silently passed (and leaked the volume)
            # when creation unexpectedly succeeded; fail loudly instead.
            self.addCleanup(
                heketi_volume_delete, h_node, h_server, vol_info['id'],
                raise_on_error=False)
            self.fail("Volume creation was expected to fail due to the "
                      "device-tag-match rule, but it succeeded")
    def test_to_check_deletion_of_node(self):
        """Validate deletion of a node which contains devices"""

        # Create Heketi volume to make sure we have devices with usages
        heketi_url = self.heketi_server_url
        vol = heketi_volume_create(
            self.heketi_client_node, heketi_url, 1, json=True)
        self.assertTrue(vol, "Failed to create heketi volume.")
        g.log.info("Heketi volume successfully created")
        volume_id = vol["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

        # Pick up suitable node: online, with an online device that has
        # some used space.  The original for/else could never fail -- the
        # inner `break` did not exit the outer loop and the loop variable
        # stayed truthy in the `else` assertion.
        node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_ids)
        node_id = None
        for candidate_id in node_ids:
            node_info = heketi_node_info(
                self.heketi_client_node, heketi_url, candidate_id, json=True)
            if (node_info['state'].lower() != 'online' or
                    not node_info['devices']):
                continue
            if any(device['state'].lower() == 'online' and
                   device['storage']['used']
                   for device in node_info['devices']):
                node_id = node_info['id']
                break
        self.assertTrue(
            node_id,
            "Failed to find online node with online device which "
            "has some usages.")

        # Try to delete the node by its ID
        g.log.info("Trying to delete the node which contains devices in it. "
                   "Expecting failure.")
        self.assertRaises(
            ExecutionError,
            heketi_node_delete,
            self.heketi_client_node, heketi_url, node_id)

        # Make sure our node hasn't been deleted
        g.log.info("Listing heketi node list")
        node_list = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_list, ("Failed to list heketi nodes"))
        self.assertIn(node_id, node_list)
        node_info = heketi_node_info(
            self.heketi_client_node, heketi_url, node_id, json=True)
        self.assertEqual(node_info['state'].lower(), 'online')
示例#33
0
    def test_to_check_deletion_of_cluster(self):
        """Validate deletion of cluster with volumes"""
        # Make sure the cluster holds at least one volume
        g.log.info("List heketi volumes")
        volumes = heketi_volume_list(self.heketi_client_node,
                                     self.heketi_server_url,
                                     json=True)
        if not volumes["volumes"]:
            g.log.info("Creating heketi volume")
            out = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       self.volume_size,
                                       json=True)
            self.assertTrue(out, ("Failed to create heketi "
                                  "volume of size %s" % self.volume_size))
            # Lazy %-args: the original '"..." % out' had no placeholder,
            # so the volume info was dropped from the log message.
            g.log.info("Heketi volume successfully created: %s", out)
            volume_id = out["bricks"][0]["volume"]
            self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                            self.heketi_server_url, volume_id)

        # List heketi cluster's
        g.log.info("Listing heketi cluster list")
        out = heketi_cluster_list(self.heketi_client_node,
                                  self.heketi_server_url,
                                  json=True)
        self.assertTrue(out, ("Failed to list heketi cluster"))
        g.log.info("All heketi cluster successfully listed")
        cluster_id = out["clusters"][0]

        # Deleting a non-empty heketi cluster must be rejected
        g.log.info("Trying to delete a heketi cluster"
                   " which contains volumes and/or nodes:"
                   " Expected to fail")
        self.assertRaises(
            AssertionError,
            heketi_cluster_delete,
            self.heketi_client_node,
            self.heketi_server_url,
            cluster_id,
        )
        g.log.info("Expected result: Unable to delete cluster %s"
                   " because it contains volumes "
                   " and/or nodes" % cluster_id)

        # To confirm deletion failed, check heketi cluster list
        g.log.info("Listing heketi cluster list")
        out = heketi_cluster_list(self.heketi_client_node,
                                  self.heketi_server_url,
                                  json=True)
        self.assertTrue(out, ("Failed to list heketi cluster"))
        g.log.info("All heketi cluster successfully listed")
    def test_block_host_volume_delete_without_block_volumes(self):
        """Validate deletion of empty block hosting volume"""
        bhv_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True,
            block=True)

        bhv_id = bhv_info["id"]
        # Best-effort cleanup in case the explicit delete below fails
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, bhv_id, raise_on_error=False)

        # Deleting a BHV that hosts no block volumes must succeed
        heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, bhv_id,
            json=True)
    def test_to_check_deletion_of_cluster(self):
        """Validate deletion of cluster with volumes"""
        # Make sure the cluster holds at least one volume
        g.log.info("List heketi volumes")
        volumes = heketi_volume_list(self.heketi_client_node,
                                     self.heketi_server_url,
                                     json=True)
        if not volumes["volumes"]:
            g.log.info("Creating heketi volume")
            out = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       self.volume_size, json=True)
            self.assertTrue(out, ("Failed to create heketi "
                            "volume of size %s" % self.volume_size))
            # Lazy %-args: the original '"..." % out' had no placeholder,
            # so the volume info was dropped from the log message.
            g.log.info("Heketi volume successfully created: %s", out)
            volume_id = out["bricks"][0]["volume"]
            self.addCleanup(
                heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, volume_id)

        # List heketi cluster's
        g.log.info("Listing heketi cluster list")
        out = heketi_cluster_list(self.heketi_client_node,
                                  self.heketi_server_url,
                                  json=True)
        self.assertTrue(out, ("Failed to list heketi cluster"))
        g.log.info("All heketi cluster successfully listed")
        cluster_id = out["clusters"][0]

        # Deleting a non-empty heketi cluster must be rejected
        g.log.info("Trying to delete a heketi cluster"
                   " which contains volumes and/or nodes:"
                   " Expected to fail")
        self.assertRaises(
            ExecutionError,
            heketi_cluster_delete,
            self.heketi_client_node, self.heketi_server_url, cluster_id,
        )
        g.log.info("Expected result: Unable to delete cluster %s"
                   " because it contains volumes "
                   " and/or nodes" % cluster_id)

        # To confirm deletion failed, check heketi cluster list
        g.log.info("Listing heketi cluster list")
        out = heketi_cluster_list(self.heketi_client_node,
                                  self.heketi_server_url,
                                  json=True)
        self.assertTrue(out, ("Failed to list heketi cluster"))
        g.log.info("All heketi cluster successfully listed")
示例#36
0
    def test_heketi_volume_create_mutiple_sizes(self):
        """Validate creation of heketi volumes with different sizes.

        Creates three volumes (15, 50 and 100 GB) one after another,
        skipping the test when the cluster lacks the required free space.
        """
        # 495 GB covers the three volumes plus replication overhead
        sizes, required_space = [15, 50, 100], 495
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Skip test if space is not available
        available_space = get_total_free_space(h_node, h_url)[0]
        if required_space > available_space:
            self.skipTest("Required space {} greater than the available space "
                          "{}".format(required_space, available_space))

        # Create volume 3 times, each time different size; each volume is
        # scheduled for deletion so the test leaves no residue behind
        for size in sizes:
            vol_id = heketi_volume_create(h_node, h_url, size, json=True)['id']
            self.addCleanup(heketi_volume_delete, h_node, h_url, vol_id)
示例#37
0
    def test_create_volume_with_same_name(self):
        """Test create two volumes with the same name and verify that 2nd one
        is failing with the appropriate error.
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        # Random suffix avoids clashing with volumes from earlier runs
        vol_name = "autovol-%s" % utils.get_random_str()

        # Create volume for the first time
        vol_info = heketi_ops.heketi_volume_create(h_node,
                                                   h_url,
                                                   size=1,
                                                   name=vol_name,
                                                   json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete, h_node, h_url,
                        vol_info['id'])

        vol_info_new = None
        try:
            # Try to create volume for 2nd time
            vol_info_new = heketi_ops.heketi_volume_create(h_node,
                                                           h_url,
                                                           size=1,
                                                           name=vol_name,
                                                           json=True)
            # Only reached if heketi unexpectedly allowed the duplicate;
            # clean it up so the failing assert below leaves no residue
            self.addCleanup(heketi_ops.heketi_volume_delete, h_node, h_url,
                            vol_info_new['id'])
        except AssertionError as err:
            # Verify msg in error; re-raise if the failure was caused by
            # anything other than the expected name-in-use message
            msg = "Volume name '%s' already in use" % vol_name
            if msg not in six.text_type(err):
                raise

        # Raise exception if there is no error raised by heketi
        # (vol_info_new stays None only when the 2nd create failed)
        msg = ('Volume %s and %s got created two times with the same name '
               'unexpectedly.' % (vol_info, vol_info_new))
        self.assertFalse(vol_info_new, msg)
    def test_create_block_vol_after_host_vol_creation(self):
        """Validate block-device after manual block hosting volume creation
           using heketi
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Manually create a 5 GB block hosting volume via heketi
        bhv_info = heketi_volume_create(h_node, h_url, 5, json=True,
                                        block=True)
        self.addCleanup(
            heketi_volume_delete, h_node, h_url, bhv_info["id"])

        # A 1 GB block volume should now be creatable on top of it
        block_vol = heketi_blockvolume_create(h_node, h_url, 1, json=True)
        self.addCleanup(
            heketi_blockvolume_delete, h_node, h_url, block_vol["id"])
示例#39
0
    def test_heketi_volume_mount(self):
        """Validate mounting of a heketi volume via glusterfs-fuse.

        Creates a 2 GB volume, fuse-mounts it on the master node, writes
        data through the mount, checks the reported size, and verifies the
        written file is present on every brick of the gluster volume.
        """
        self.node = self.ocp_master_node[0]
        # Fuse mount requires the glusterfs-fuse package on the node
        try:
            cmd_run('rpm -q glusterfs-fuse', self.node)
        except AssertionError:
            self.skipTest("gluster-fuse package is not present on Node %s" %
                          self.node)

        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create volume
        vol_info = heketi_volume_create(h_node, h_url, 2, json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])

        # Mount source is taken from the volume's own mount metadata
        mount_point = vol_info['mount']['glusterfs']['device']
        mount_dir = '/mnt/dir-%s' % utils.get_random_str()
        mount_cmd = 'mount -t glusterfs %s %s' % (mount_point, mount_dir)

        # Create directory to mount volume
        cmd_run('mkdir %s' % mount_dir, self.node)
        self.addCleanup(cmd_run, 'rm -rf %s' % mount_dir, self.node)

        # Mount volume (cleanup unmounts before the dir is removed)
        cmd_run(mount_cmd, self.node)
        self.addCleanup(cmd_run, 'umount %s' % mount_dir, self.node)

        # Run I/O to make sure Mount point works
        _file = 'file'
        run_io_cmd = ('dd if=/dev/urandom of=%s/%s bs=4k count=1000' %
                      (mount_dir, _file))

        # Verify size of volume as reported by df on the mount
        cmd_run(run_io_cmd, self.node)
        size = cmd_run('df -kh --output=size %s | tail -1' % mount_dir,
                       self.node).strip()
        self.assertEqual('2.0G', size)

        # Verify file on gluster vol bricks: the replicated file must be
        # visible under each brick path on its storage host
        for brick in vol_info['bricks']:
            node_id = brick['node']
            node_info = heketi_node_info(h_node, h_url, node_id, json=True)
            brick_host = node_info['hostnames']['storage'][0]
            cmd_run_on_gluster_pod_or_node(self.node,
                                           'ls %s/%s' % (brick['path'], _file),
                                           brick_host)
示例#40
0
    def test_heketi_metrics_heketipod_failure(self):
        """Validate heketi metrics after heketi pod failure"""
        # Scale heketi deployment down to zero pods, restoring one pod
        # on cleanup in case the test fails midway
        scale_dc_pod_amount_and_wait(self.ocp_master_node[0],
                                     self.heketi_dc_name,
                                     pod_amount=0)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        self.ocp_master_node[0],
                        self.heketi_dc_name,
                        pod_amount=1)

        # verify that metrics is not accessable when heketi pod is down
        with self.assertRaises(AssertionError):
            get_heketi_metrics(self.heketi_client_node,
                               self.heketi_server_url,
                               prometheus_format=True)

        # Bring the heketi pod back
        scale_dc_pod_amount_and_wait(self.ocp_master_node[0],
                                     self.heketi_dc_name,
                                     pod_amount=1)

        # NOTE(review): the third positional argument duplicates the dc
        # name; get_pod_name_from_dc's third parameter is presumably a
        # timeout, not a dc name - TODO confirm against its signature
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                        self.heketi_dc_name,
                                        self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)

        # After restart, heketi must still serve requests: create a few
        # volumes and check each one shows up in the volume list
        for i in range(3):
            vol = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       1,
                                       json=True)

            self.assertTrue(vol)

            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            vol['id'],
                            raise_on_error=False)

            vol_list = heketi_volume_list(self.heketi_client_node,
                                          self.heketi_server_url)

            self.assertIn(vol['id'], vol_list)

        # Finally cross-check metrics against the cluster topology
        self.verify_heketi_metrics_with_topology_info()
    def test_heketi_with_default_options(self):
        """
        Test to create volume with default options.
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create a volume passing only the size, relying on heketi defaults
        vol_info = heketi_volume_create(
            h_node, h_url, self.volume_size, json=True)
        self.assertTrue(
            vol_info,
            ("Failed to create heketi volume of size %s"
             % self.volume_size))
        self.addCleanup(
            heketi_volume_delete, h_node, h_url, vol_info['id'])

        # The resulting size must match exactly what was requested
        self.assertEqual(
            vol_info['size'], self.volume_size,
            ("Failed to create volume with default options."
             "Expected Size: %s, Actual Size: %s"
             % (self.volume_size, vol_info['size'])))
    def test_volume_create_and_list_volume(self):
        """Validate heketi and gluster volume list.

        Snapshots both the gluster and heketi volume lists, creates one
        heketi volume, and asserts each list grew by exactly one entry.
        """
        g.log.info("List gluster volumes before Heketi volume creation")
        existing_g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(existing_g_vol_list, ("Unable to get volumes list"))

        g.log.info("List heketi volumes before volume creation")
        existing_h_vol_list = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url,
            json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size, json=True)
        # Fix: original message lacked a '%s' placeholder, so the
        # '% out' interpolation silently dropped the volume info.
        g.log.info("Heketi volume successfully created: %s" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

        g.log.info("List heketi volumes after volume creation")
        h_vol_list = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url,
            json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("List gluster volumes after Heketi volume creation")
        g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(g_vol_list, ("Unable to get volumes list"))
        g.log.info("Successfully got the volumes list")

        # Perform checks: exactly one new volume on both sides
        self.assertEqual(
            len(existing_g_vol_list) + 1, len(g_vol_list),
            "Expected creation of only one volume in Gluster creating "
            "Heketi volume. Here is lists before and after volume creation: "
            "%s \n%s" % (existing_g_vol_list, g_vol_list))
        self.assertEqual(
            len(existing_h_vol_list) + 1, len(h_vol_list),
            "Expected creation of only one volume in Heketi. Here is lists "
            "of Heketi volumes before and after volume creation: %s\n%s" % (
                existing_h_vol_list, h_vol_list))
    def test_delete_heketidb_volume(self):
        """
        Method to test heketidb volume deletion via heketi-cli
        """
        heketidbexists = False
        # Exact error text heketi is expected to return for the db volume
        msg = "Error: Cannot delete volume containing the Heketi database"

        # Create a couple of regular volumes so the volume list is non-empty
        for i in range(0, 2):
            volume_info = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url,
                10, json=True)

            self.addCleanup(
                heketi_ops.heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, volume_info["id"])

        volume_list_info = heketi_ops.heketi_volume_list(
            self.heketi_client_node,
            self.heketi_server_url, json=True)

        if volume_list_info["volumes"] == []:
            raise ExecutionError("Heketi volume list empty")

        # Find the heketi database volume and try to delete it
        for volume_id in volume_list_info["volumes"]:
            volume_info = heketi_ops.heketi_volume_info(
                self.heketi_client_node, self.heketi_server_url,
                volume_id, json=True)

            if volume_info["name"] == "heketidbstorage":
                heketidbexists = True
                # raw_cli_output=True returns (rc, stdout, stderr) instead
                # of raising, so the failure can be inspected directly
                delete_ret, delete_output, delete_error = (
                    heketi_ops.heketi_volume_delete(
                        self.heketi_client_node,
                        self.heketi_server_url, volume_id,
                        raw_cli_output=True))

                # Deletion must fail with the specific db-protection error
                self.assertNotEqual(delete_ret, 0, "Return code not 0")
                self.assertEqual(
                    delete_error.strip(), msg,
                    "Invalid reason for heketidb deletion failure")

        if not heketidbexists:
            raise ExecutionError(
                "Warning: heketidbstorage doesn't exist in list of volumes")
    def test_heketi_metrics_heketipod_failure(self):
        """Validate heketi metrics after heketi pod failure"""
        # Scale heketi deployment down to zero pods, restoring one pod
        # on cleanup in case the test fails midway
        scale_dc_pod_amount_and_wait(
            self.ocp_master_node[0], self.heketi_dc_name, pod_amount=0)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
            self.heketi_dc_name, pod_amount=1)

        # verify that metrics is not accessable when heketi pod is down
        with self.assertRaises(exceptions.ExecutionError):
            get_heketi_metrics(
                self.heketi_client_node,
                self.heketi_server_url,
                prometheus_format=True)

        # Bring the heketi pod back
        scale_dc_pod_amount_and_wait(
            self.ocp_master_node[0], self.heketi_dc_name, pod_amount=1)

        # NOTE(review): the third positional argument duplicates the dc
        # name; get_pod_name_from_dc's third parameter is presumably a
        # timeout, not a dc name - TODO confirm against its signature
        pod_name = get_pod_name_from_dc(
            self.ocp_master_node[0], self.heketi_dc_name, self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)

        # After restart, heketi must still serve requests: create a few
        # volumes and check each one shows up in the volume list
        for i in range(3):
            vol = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url, 1, json=True)

            self.assertTrue(vol)

            self.addCleanup(
                heketi_volume_delete,
                self.heketi_client_node,
                self.heketi_server_url,
                vol['id'],
                raise_on_error=False)

            vol_list = heketi_volume_list(
                self.heketi_client_node,
                self.heketi_server_url)

            self.assertIn(vol['id'], vol_list)

        # Finally cross-check metrics against the cluster topology
        self.verify_heketi_metrics_with_topology_info()
    def test_delete_heketi_volume(self):
        """
        Method to test heketi volume deletion and whether it
        frees up used space after deletion
        """

        creation_output_dict = heketi_ops.heketi_volume_create(
            self.heketi_client_node,
            self.heketi_server_url, 10, json=True)

        # Fix: use the 'id' field directly (as the rest of this suite does)
        # instead of fragilely parsing it out of the generated volume name.
        volume_id = creation_output_dict["id"]

        # Fix: register cleanup so the volume is not leaked when an
        # assertion below fails before the explicit delete; harmless
        # (raise_on_error=False) when the delete already succeeded.
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id, raise_on_error=False)

        free_space_after_creation = self.get_free_space_summary_devices()

        heketi_ops.heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, volume_id)

        free_space_after_deletion = self.get_free_space_summary_devices()

        # Deleting the volume must give back the space it consumed
        self.assertTrue(
            free_space_after_deletion > free_space_after_creation,
            "Free space is not reclaimed after deletion of %s" % volume_id)
    def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
        """Validate heketi metrics VolumeCount after volume deletion"""
        created_vols = []

        # Create three 1 GB volumes, confirming each appears in the list
        for _ in range(3):
            vol = heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url, 1,
                json=True)
            self.assertTrue(vol)
            self.addCleanup(
                heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, vol['id'], raise_on_error=False)

            listing = heketi_volume_list(
                self.heketi_client_node, self.heketi_server_url)
            self.assertIn(vol['id'], listing)
            created_vols.append(vol)

        # Delete them one by one; after each deletion the volume must be
        # gone from the list and the metrics volume count must match
        for vol in created_vols:
            heketi_volume_delete(
                self.heketi_client_node, self.heketi_server_url, vol['id'])
            listing = heketi_volume_list(
                self.heketi_client_node, self.heketi_server_url)
            self.assertNotIn(vol['id'], listing)
            self.verify_volume_count()
    def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_pos(
            self):
        """Verify that block volume creation succeed when we create BHV
        of size greater than the default size of BHV.
        """
        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
        # 2% of the BHV is reserved; round up to whole GB units
        reserve_size = int(math.ceil(default_bhv_size * 0.02))
        bhv_size = default_bhv_size + reserve_size + 2

        # Skip unless the cluster can actually host a BHV this big
        self.verify_free_space(bhv_size)

        # Create a block hosting volume larger than the default BHV size
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, bhv_size,
            block=True, json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # A block PVC bigger than the default BHV size must now succeed
        self.create_and_wait_for_pvc(pvc_size=(default_bhv_size + 1))
    def test_create_volumes_enabling_and_disabling_heketi_devices(self):
        """Validate enable/disable of heketi device.

        Restricts the topology to the first device of the first three
        nodes, then verifies that volume creation fails while a used
        device is disabled and succeeds once it is enabled again.
        """

        # Get nodes info
        node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        node_info_list = []
        for node_id in node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            node_info_list.append(node_info)

        # Disable 4th and other nodes
        if len(node_id_list) > 3:
            for node in node_id_list[3:]:
                # Bug fix: the original passed 'node_id' (stale variable
                # from the loop above, i.e. always the 3rd node) instead
                # of the current 'node' being iterated.
                heketi_ops.heketi_node_disable(
                    self.heketi_client_node, self.heketi_server_url, node)
                self.addCleanup(
                    heketi_ops.heketi_node_enable, self.heketi_client_node,
                    self.heketi_server_url, node)

        # Disable second and other devices on the first 3 nodes
        for node_info in node_info_list[0:3]:
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_info["id"])
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if len(devices) < 2:
                continue
            for device in node_info["devices"][1:]:
                out = heketi_ops.heketi_device_disable(
                    self.heketi_client_node, self.heketi_server_url,
                    device["id"])
                self.assertTrue(
                    out, "Failed to disable the device %s" % device["id"])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    self.heketi_client_node, self.heketi_server_url,
                    device["id"])

        # Create heketi volume; it can only land on the enabled devices
        out = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(out, "Failed to create heketi volume of size 1")
        g.log.info("Successfully created heketi volume of size 1")
        device_id = out["bricks"][0]["device"]
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, out["bricks"][0]["volume"])

        # Disable device hosting a brick of the volume we just created
        g.log.info("Disabling '%s' device" % device_id)
        out = heketi_ops.heketi_device_disable(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertTrue(out, "Failed to disable the device %s" % device_id)
        g.log.info("Successfully disabled device %s" % device_id)

        try:
            # Get device info and make sure it is really offline
            g.log.info("Retrieving '%s' device info" % device_id)
            out = heketi_ops.heketi_device_info(
                self.heketi_client_node, self.heketi_server_url,
                device_id, json=True)
            self.assertTrue(out, "Failed to get device info %s" % device_id)
            g.log.info("Successfully retrieved device info %s" % device_id)
            name = out["name"]
            if out["state"].lower().strip() != "offline":
                raise exceptions.ExecutionError(
                    "Device %s is not in offline state." % name)
            g.log.info("Device %s is now offine" % name)

            # Try to create heketi volume - expected to fail because there
            # are not enough enabled devices left for a replicated volume
            g.log.info("Creating heketi volume: Expected to fail.")
            try:
                out = heketi_ops.heketi_volume_create(
                    self.heketi_client_node, self.heketi_server_url, 1,
                    json=True)
            except exceptions.ExecutionError:
                g.log.info("Volume was not created as expected.")
            else:
                # Unexpected success: schedule deletion, then fail the test
                self.addCleanup(
                    heketi_ops.heketi_volume_delete, self.heketi_client_node,
                    self.heketi_server_url, out["bricks"][0]["volume"])
                msg = "Volume unexpectedly created. Out: %s" % out
                assert False, msg
        finally:
            # Enable the device back
            g.log.info("Enable '%s' device back." % device_id)
            out = heketi_ops.heketi_device_enable(
                self.heketi_client_node, self.heketi_server_url, device_id)
            self.assertTrue(out, "Failed to enable the device %s" % device_id)
            g.log.info("Successfully enabled device %s" % device_id)

        # Get device info and confirm it is back online
        out = heketi_ops.heketi_device_info(
            self.heketi_client_node, self.heketi_server_url, device_id,
            json=True)
        self.assertTrue(out, ("Failed to get device info %s" % device_id))
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        if out["state"] != "online":
            raise exceptions.ExecutionError(
                "Device %s is not in online state." % name)

        # Create heketi volume of size - must succeed again now
        out = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(out, "Failed to create volume of size 1")
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, out["bricks"][0]["volume"])
        g.log.info("Successfully created volume of size 1")
        name = out["name"]

        # Get gluster volume info to confirm gluster agrees it exists
        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
        g.log.info("Successfully got the '%s' volume info." % name)
    def test_volume_expansion_rebalance_brick(self):
        """Validate volume expansion with brick and check rebalance.

        Creates a 10 GB volume, expands it by 5 GB, and verifies that the
        expansion consumes device space, grows the reported size, adds
        exactly 3 bricks, triggers rebalance, and that deleting the volume
        reclaims the space.
        """
        creation_info = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 10, json=True)

        self.assertNotEqual(creation_info, False, "Volume creation failed")

        volume_name = creation_info["name"]
        volume_id = creation_info["id"]

        # Baseline free space, taken after creation but before expansion
        free_space_after_creation = self.get_devices_summary_free_space()

        volume_info_before_expansion = heketi_ops.heketi_volume_info(
            self.heketi_client_node,
            self.heketi_server_url,
            volume_id, json=True)

        self.assertNotEqual(volume_info_before_expansion, False,
                            "Volume info for %s failed" % volume_id)

        heketi_vol_info_size_before_expansion = (
            volume_info_before_expansion["size"])

        self.get_brick_and_volume_status(volume_name)
        num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)

        # Expand the volume by 5 GB
        expansion_info = heketi_ops.heketi_volume_expand(
            self.heketi_client_node,
            self.heketi_server_url,
            volume_id, 5)

        self.assertNotEqual(expansion_info, False,
                            "Volume expansion of %s failed" % volume_id)

        # Expansion must consume additional device space
        free_space_after_expansion = self.get_devices_summary_free_space()
        self.assertTrue(
            free_space_after_creation > free_space_after_expansion,
            "Free space not consumed after expansion of %s" % volume_id)

        volume_info_after_expansion = heketi_ops.heketi_volume_info(
            self.heketi_client_node,
            self.heketi_server_url,
            volume_id, json=True)

        self.assertNotEqual(volume_info_after_expansion, False,
                            "Volume info failed for %s" % volume_id)

        heketi_vol_info_size_after_expansion = (
            volume_info_after_expansion["size"])

        # Reported volume size must have grown
        difference_size = (heketi_vol_info_size_after_expansion -
                           heketi_vol_info_size_before_expansion)

        self.assertTrue(
            difference_size > 0,
            "Size not increased after expansion of %s" % volume_id)

        self.get_brick_and_volume_status(volume_name)
        num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)

        # Expansion is expected to add one replica set of 3 bricks
        num_of_bricks_added = (num_of_bricks_after_expansion -
                               num_of_bricks_before_expansion)

        self.assertEqual(
            num_of_bricks_added, 3,
            "Number of bricks added is not 3 for %s" % volume_id)

        self.get_rebalance_status(volume_name)

        # Delete the volume and check that its space is reclaimed
        deletion_info = heketi_ops.heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url,
            volume_id, json=True)

        self.assertNotEqual(deletion_info, False,
                            "Deletion of volume %s failed" % volume_id)

        free_space_after_deletion = self.get_devices_summary_free_space()

        self.assertTrue(
            free_space_after_deletion > free_space_after_expansion,
            "Free space is not reclaimed after volume deletion of %s"
            % volume_id)
    def test_volume_expansion_no_free_space(self):
        """Validate volume expansion when there is no free space.

        Shrinks the usable topology to one device on each of 3 nodes,
        creates a volume that nearly fills the smallest device, verifies
        expansion fails without spare capacity, then attaches additional
        devices and verifies expansion succeeds with them enabled.
        """

        vol_size, expand_size, additional_devices_attached = None, 10, {}
        h_node, h_server_url = self.heketi_client_node, self.heketi_server_url

        # Get nodes info
        heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
        if len(heketi_node_id_list) < 3:
            self.skipTest("3 Heketi nodes are required.")

        # Disable 4th and other nodes
        for node_id in heketi_node_id_list[3:]:
            heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
            self.addCleanup(
                heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)

        # Prepare first 3 nodes: leave only their first device enabled and
        # attach one additional (initially unused) device to each
        smallest_size = None
        err_msg = ''
        for node_id in heketi_node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server_url, node_id, json=True)

            # Disable second and other devices
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_id)
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            # Track the smallest first-device free size across the 3 nodes;
            # it bounds the largest volume that can be created
            if (smallest_size is None or
                    devices[0]["storage"]["free"] < smallest_size):
                smallest_size = devices[0]["storage"]["free"]
            for device in node_info["devices"][1:]:
                heketi_ops.heketi_device_disable(
                    h_node, h_server_url, device["id"])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    h_node, h_server_url, device["id"])

            # Gather info about additional devices configured for the
            # gluster server that corresponds to this heketi node
            additional_device_name = None
            for gluster_server in self.gluster_servers:
                gluster_server_data = self.gluster_servers_info[gluster_server]
                g_manage = gluster_server_data["manage"]
                g_storage = gluster_server_data["storage"]
                if not (g_manage in node_info["hostnames"]["manage"] or
                        g_storage in node_info["hostnames"]["storage"]):
                    continue
                additional_device_name = ((
                    gluster_server_data.get("additional_devices") or [''])[0])
                break

            if not additional_device_name:
                # Collect the problem instead of failing right away so all
                # misconfigured nodes are reported in a single skip message
                err_msg += ("No 'additional_devices' are configured for "
                            "'%s' node, which has following hostnames and "
                            "IP addresses: %s.\n" % (
                                node_id,
                                ', '.join(node_info["hostnames"]["manage"] +
                                          node_info["hostnames"]["storage"])))
                continue

            heketi_ops.heketi_device_add(
                h_node, h_server_url, additional_device_name, node_id)
            additional_devices_attached.update(
                {node_id: additional_device_name})

        # Schedule cleanup of the added devices
        for node_id in additional_devices_attached.keys():
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server_url, node_id, json=True)
            for device in node_info["devices"]:
                if device["name"] != additional_devices_attached[node_id]:
                    continue
                self.addCleanup(self.detach_devices_attached, device["id"])
                break
            else:
                self.fail("Could not find ID for added device on "
                          "'%s' node." % node_id)

        if err_msg:
            self.skipTest(err_msg)

        # Temporary disable new devices
        self.disable_devices(additional_devices_attached)

        # Create volume and save info about it; size is just under the
        # smallest enabled device (free size reported in KB -> GB)
        vol_size = int(smallest_size / (1024**2)) - 1
        creation_info = heketi_ops.heketi_volume_create(
            h_node, h_server_url, vol_size, json=True)
        volume_name, volume_id = creation_info["name"], creation_info["id"]
        self.addCleanup(
            heketi_ops.heketi_volume_delete,
            h_node, h_server_url, volume_id, raise_on_error=False)

        volume_info_before_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        free_space_before_expansion = self.get_devices_summary_free_space()

        # Try to expand volume with not enough device space
        self.assertRaises(
            ExecutionError, heketi_ops.heketi_volume_expand,
            h_node, h_server_url, volume_id, expand_size)

        # Enable new devices to be able to expand our volume
        self.enable_devices(additional_devices_attached)

        # Expand volume and validate results
        heketi_ops.heketi_volume_expand(
            h_node, h_server_url, volume_id, expand_size, json=True)
        free_space_after_expansion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_before_expansion, free_space_after_expansion,
            "Free space not consumed after expansion of %s" % volume_id)
        num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        volume_info_after_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        self.assertGreater(
            volume_info_after_expansion["size"],
            volume_info_before_expansion["size"],
            "Size of %s not increased" % volume_id)
        # Brick count must grow by whole replica sets
        self.assertGreater(
            num_of_bricks_after_expansion, num_of_bricks_before_expansion)
        self.assertEqual(
            num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)

        # Delete volume and validate release of the used space
        heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
        free_space_after_deletion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_after_deletion, free_space_after_expansion,
            "Free space not reclaimed after deletion of volume %s" % volume_id)
    def test_node_state(self):
        """Validate heketi node enable and disable functionality.

        With n >= 3 online gluster nodes, disable all but 3 of them and
        create a volume - creation should succeed.

        Then disable one of the remaining 3 nodes (leaving only 2 online)
        and try to create a volume - creation should fail, since a
        replica-3 volume needs at least 3 online nodes.

        Finally, re-enable that node and create a volume - creation
        should succeed again.
        """
        g.log.info("Disable node in heketi")
        node_list = heketi_node_list(self.heketi_client_node,
                                     self.heketi_server_url)
        self.assertTrue(node_list, "Failed to list heketi nodes")
        g.log.info("Successfully got the list of nodes")
        online_hosts = self.get_online_nodes(node_list)

        if len(online_hosts) < 3:
            # skipTest() raises unittest.SkipTest itself; no 'raise' needed
            self.skipTest(
                "This test can run only if online hosts are more "
                "than 2")

        # If we have n nodes, disable n-3 so that exactly 3 stay online
        for node_info in online_hosts[3:]:
            node_id = node_info["id"]
            g.log.info("going to disable node id %s", node_id)
            self.disable_node(node_id)
            self.addCleanup(self.enable_node, node_id)

        vol_size = 1
        # Create volume while 3 nodes are online - expected to succeed
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url, vol_size,
                                        json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

        # Disable one more node, leaving only 2 online
        node_id = online_hosts[0]['id']
        g.log.info("going to disable node id %s", node_id)
        self.disable_node(node_id)
        self.addCleanup(self.enable_node, node_id)

        # Try to create a volume; creation should fail with only 2 nodes
        ret, out, err = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            vol_size, raw_cli_output=True)
        if ret == 0:
            # Unexpected success - schedule cleanup so the stray volume
            # does not leak before the assertion below fails the test
            out_json = json.loads(out)
            self.addCleanup(
                heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, out_json["id"])
        self.assertNotEqual(ret, 0,
                            ("Volume creation did not fail ret- %s "
                             "out- %s err- %s" % (ret, out, err)))

        g.log.info("Volume creation failed as expected, err- %s", err)

        # Re-enable the node so that 3 nodes are online again
        self.enable_node(node_id)

        # Create volume with the node re-enabled - expected to succeed
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url, vol_size,
                                        json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])