Example No. 1
    def test_volume_reduce_replica_count(self):
        """
        Test case:
        1) Create a 2x3 replica volume.
        2) Remove bricks in the volume to make it a 2x2 replica volume.
        3) Remove bricks in the volume to make it a distribute volume.
        """

        # Create and start a volume
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")

        # Getting a list of all the bricks.
        g.log.info("Get all the bricks of the volume")
        self.brick_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(self.brick_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Converting 2x3 to 2x2 volume.
        remove_brick_list = [self.brick_list[0], self.brick_list[3]]
        ret, _, _ = remove_brick(self.mnode,
                                 self.volname,
                                 remove_brick_list,
                                 'force',
                                 replica_count=2)
        self.assertEqual(ret, 0, "Failed to start remove brick operation")
        g.log.info("Remove brick operation successfully")

        # Checking if volume is 2x2 or not.
        volume_info = get_volume_info(self.mnode, self.volname)
        brick_count = int(volume_info[self.volname]['brickCount'])
        self.assertEqual(brick_count, 4, "Failed to remove 2 bricks.")
        g.log.info("Successfully removed 2 bricks.")
        type_string = volume_info[self.volname]['typeStr']
        self.assertEqual(type_string, 'Distributed-Replicate',
                         "Conversion to 2x2 failed.")
        g.log.info("Conversion to 2x2 successful.")

        # Converting 2x2 to distribute volume.
        remove_brick_list = [self.brick_list[1], self.brick_list[4]]
        ret, _, _ = remove_brick(self.mnode,
                                 self.volname,
                                 remove_brick_list,
                                 'force',
                                 replica_count=1)
        self.assertEqual(ret, 0, "Failed to start remove brick operation")
        g.log.info("Remove brick operation successfully")

        # Checking if volume is pure distribute or not.
        volume_info = get_volume_info(self.mnode, self.volname)
        brick_count = int(volume_info[self.volname]['brickCount'])
        self.assertEqual(brick_count, 2, "Failed to remove 2 bricks.")
        g.log.info("Successfully removed 2 bricks.")
        type_string = volume_info[self.volname]['typeStr']
        self.assertEqual(type_string, 'Distribute',
                         "Conversion to distributed failed.")
        g.log.info("Conversion to distributed successful.")
Example No. 2
def get_all_bricks(mnode, volname):
    """Get list of all the bricks of the specified volume.

    Args:
        mnode (str): Node on which command has to be executed
        volname (str): Name of the volume

    Returns:
        list: List of all the bricks of the volume on Success.
        NoneType: None on failure.
    """
    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volinfo of %s.", volname)
        return None

    # Get bricks from a volume
    all_bricks = []
    if 'bricks' in volinfo[volname]:
        if 'brick' in volinfo[volname]['bricks']:
            for brick in volinfo[volname]['bricks']['brick']:
                if 'name' in brick:
                    all_bricks.append(brick['name'])
                else:
                    g.log.error("brick %s doesn't have the key 'name' "
                                "for the volume: %s", brick, volname)
                    return None
            return all_bricks
        g.log.error("Bricks not found in Bricks section of volume "
                    "info for the volume %s", volname)
        return None
    g.log.error("Bricks not found for the volume %s", volname)
    return None
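A minimal usage sketch for the helper above, assuming the glustolibs environment it belongs to (glusto's g object and get_volume_info) is available and a cluster is reachable; the host and volume names below are placeholders, not values taken from these examples:

# Hypothetical call; "server0.example.com" and "testvol" are placeholders.
bricks = get_all_bricks("server0.example.com", "testvol")
if bricks is None:
    raise RuntimeError("Could not fetch the brick list for testvol")
for brick in bricks:
    # Each entry is a "<host>:<brick-path>" string taken from the volume info.
    host, path = brick.split(":", 1)
    print(host, path)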
Example No. 3
    def get_brick_and_volume_status(self, volume_name):
        """Status of each brick in a volume for background validation."""

        volume_info = volume_ops.get_volume_info(
            'auto_get_gluster_endpoint', volume_name)
        self.assertIsNotNone(
            volume_info, "'%s' volume info is empty" % volume_name)

        volume_status = volume_ops.get_volume_status(
            'auto_get_gluster_endpoint', volume_name)
        self.assertIsNotNone(
            volume_status, "'%s' volume status is empty" % volume_name)

        self.assertEqual(int(volume_info[volume_name]["status"]), 1,
                         "Volume not up")

        brick_info = []
        for brick_details in volume_info[volume_name]["bricks"]["brick"]:
            brick_info.append(brick_details["name"])
        self.assertTrue(
            brick_info, "Brick details are empty for %s" % volume_name)

        for brick in brick_info:
            brick_data = brick.strip().split(":")
            brick_ip = brick_data[0]
            brick_name = brick_data[1]
            self.assertEqual(int(volume_status[volume_name][brick_ip]
                             [brick_name]["status"]), 1,
                             "Brick %s not up" % brick_name)
    def test_create_vol_and_retrieve_vol_info(self):
        """Validate heketi and gluster volume info"""

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size, json=True)
        self.assertTrue(out, ("Failed to create heketi "
                        "volume of size %s" % self.volume_size))
        g.log.info("Heketi volume successfully created" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

        g.log.info("Retrieving heketi volume info")
        out = heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url, volume_id,
            json=True)
        self.assertTrue(out, ("Failed to get heketi volume info"))
        g.log.info("Successfully got the heketi volume info")
        name = out["name"]

        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get volume info %s" % name)
        g.log.info("Successfully got the volume info %s" % name)
Example No. 5
def get_all_bricks(mnode, volname):
    """Get list of all the bricks of the specified volume.

    Args:
        mnode (str): Node on which command has to be executed
        volname (str): Name of the volume

    Returns:
        list: List of all the bricks of the volume on Success.
        NoneType: None on failure.
    """

    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volinfo of %s.", volname)
        return None

    all_bricks = []
    for bricks in volinfo['subvols']:
        for brick in bricks['bricks']:
            path = []
            path.append(brick['host'])
            path.append(brick['path'])
            brick = ":".join(path)
            all_bricks.append(brick)
    return all_bricks
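This variant expects a pre-processed volume info dict with a 'subvols' key rather than the raw 'bricks'/'brick' layout used by the earlier get_all_bricks. A hedged sketch of that input shape, with invented host names and paths, and the list the loop above would build from it:

# Invented input; only the fields the loop above reads are shown.
volinfo = {
    'subvols': [
        {'bricks': [{'host': 'node1', 'path': '/bricks/brick1/b0'},
                    {'host': 'node2', 'path': '/bricks/brick1/b1'}]},
    ],
}
all_bricks = []
for bricks in volinfo['subvols']:
    for brick in bricks['bricks']:
        all_bricks.append(":".join([brick['host'], brick['path']]))
print(all_bricks)  # -> ['node1:/bricks/brick1/b0', 'node2:/bricks/brick1/b1']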
Example No. 6
def get_volume_type_info(mnode, volname):
    """Returns volume type information for the specified volume.
    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.
    Returns:
        dict : Dict containing the keys and values that define the volume type.
        NoneType: None if the volume does not exist or on any other error.
    """
    volinfo = get_volume_info(mnode, volname)
    if not volinfo:
        g.log.error("Unable to get the volume info for volume %s", volname)
        return None

    volume_type_info = {
        'type': '',
        'replica-count': '',
        'arbiter-count': '',
        'distribute-count': ''
        }
    for key in volume_type_info.keys():
        if key in volinfo:
            volume_type_info[key] = volinfo[key]
        else:
            volume_type_info[key] = None

    return volume_type_info
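A hedged usage sketch; the node and volume names are placeholders and the call assumes the same glustolibs environment as the function above:

# Hypothetical call; "server0.example.com" and "testvol" are placeholders.
vol_type_info = get_volume_type_info("server0.example.com", "testvol")
if vol_type_info is None:
    raise RuntimeError("Volume testvol does not exist or volinfo lookup failed")
# Keys missing from the volume info come back as None.
print(vol_type_info['type'], vol_type_info['replica-count'])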
Example No. 7
def get_subvols(mnode, volname):
    """Gets the subvolumes in the given volume
    Args:
        volname (str): volume name
        mnode (str): Node on which cmd has to be executed.
    Returns:
        dict: Dictionary of subvols; the value of each key is a list of
            lists, each inner list containing the bricks of one subvol.
            If the volume doesn't exist, all values are empty lists.
    Example:
        get_subvols("abc.xyz.com", "testvol")
    """

    bricks_path = []
    subvols = {'volume_subvols':[]}
    volinfo = get_volume_info(mnode, volname)
    if volinfo:
        subvol_info = volinfo['subvols']
        for subvol in subvol_info:
            path = []
            for brick in subvol['bricks']:
                path1 = ':'.join([brick['host'], brick['path']])
                path.append(path1)
            bricks_path.append(path)
            subvols['volume_subvols'] = bricks_path
    return subvols
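A short usage sketch built on the docstring's own example call; it assumes the same glustolibs environment and a reachable node:

# Hypothetical call, mirroring the docstring example.
subvols = get_subvols("abc.xyz.com", "testvol")
for subvol in subvols['volume_subvols']:
    # Each subvol is a list of "<host>:<path>" brick strings.
    print(subvol)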
Example No. 8
    def setUp(self):

        # SettingUp volume and Mounting the volume
        GlusterBaseClass.setUp.im_func(self)
        g.log.info("Starting to SetUp and Mount Volume")
        ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to setup volume %s" % self.volname)
        g.log.info("Volume %s has been setup successfully", self.volname)

        # Enable Shared storage
        g.log.info("Starting to enable shared storage")
        ret = enable_shared_storage(self.mnode)
        if not ret:
            raise ExecutionError("Unexpected: Failed to enable shared storage")
        g.log.info("Successfully enabled shared storage as expected")

        # Validate shared storage mounted
        g.log.info("Starting to validate shared storage mounted")
        ret = is_shared_volume_mounted(self.mnode)
        if not ret:
            raise ExecutionError("Failed to mount shared volume")
        g.log.info("Successfully mounted shared volume")

        # Validate shared storage is enabled
        g.log.info("Starting to validate shared storage volume")
        volinfo = get_volume_info(self.mnode, "gluster_shared_storage")
        if ((volinfo['gluster_shared_storage']['options']
             ['cluster.enable-shared-storage']) != 'enable'):
            raise ExecutionError("Unexpected: shared storage is disabled")
        g.log.info("Shared storage enabled successfully as expected")
    def get_brick_and_volume_status(self, volume_name):
        """Status of each brick in a volume for background validation."""

        volume_info = volume_ops.get_volume_info(
            'auto_get_gluster_endpoint', volume_name)
        self.assertIsNotNone(
            volume_info, "'%s' volume info is empty" % volume_name)

        volume_status = volume_ops.get_volume_status(
            'auto_get_gluster_endpoint', volume_name)
        self.assertIsNotNone(
            volume_status, "'%s' volume status is empty" % volume_name)

        self.assertEqual(int(volume_info[volume_name]["status"]), 1,
                         "Volume not up")

        brick_info = []
        for brick_details in volume_info[volume_name]["bricks"]["brick"]:
            brick_info.append(brick_details["name"])
        self.assertTrue(
            brick_info, "Brick details are empty for %s" % volume_name)

        for brick in brick_info:
            brick_data = brick.strip().split(":")
            brick_ip = brick_data[0]
            brick_name = brick_data[1]
            self.assertEqual(int(volume_status[volume_name][brick_ip]
                             [brick_name]["status"]), 1,
                             "Brick %s not up" % brick_name)
Example No. 10
    def test_create_vol_and_retrieve_vol_info(self):
        """Validate heketi and gluster volume info"""

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size,
                                   json=True)
        self.assertTrue(out, ("Failed to create heketi "
                              "volume of size %s" % self.volume_size))
        g.log.info("Heketi volume successfully created" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, volume_id)

        g.log.info("Retrieving heketi volume info")
        out = heketi_volume_info(self.heketi_client_node,
                                 self.heketi_server_url,
                                 volume_id,
                                 json=True)
        self.assertTrue(out, ("Failed to get heketi volume info"))
        g.log.info("Successfully got the heketi volume info")
        name = out["name"]

        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get volume info %s" % name)
        g.log.info("Successfully got the volume info %s" % name)
Example No. 11
def select_bricks_to_bring_offline(mnode, volname):
    """Randomly selects bricks to bring offline without affecting the cluster

    Args:
        mnode (str): Node on which commands will be executed.
        volname (str): Name of the volume.

    Returns:
        dict: On success returns dict. Value of each key is list of bricks to
            bring offline.
            If volume doesn't exist returns dict with value of each item
            being empty list.
            Example:
                brick_to_bring_offline = {
                    'volume_bricks': []
                    }
    """
    # Defaulting the values to empty list
    bricks_to_bring_offline = {
        'volume_bricks': []
    }

    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volume info for volume %s", volname)
        return bricks_to_bring_offline

    # Select bricks from the volume.
    volume_bricks = select_volume_bricks_to_bring_offline(mnode, volname)
    bricks_to_bring_offline['volume_bricks'] = volume_bricks

    return bricks_to_bring_offline
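A hedged usage sketch; the node and volume names are placeholders, and an empty 'volume_bricks' list is what the function returns when the volume is missing:

# Hypothetical call; "server0.example.com" and "testvol" are placeholders.
selection = select_bricks_to_bring_offline("server0.example.com", "testvol")
bricks = selection['volume_bricks']
if not bricks:
    print("Volume missing or no bricks selected")
else:
    print("Bricks that can safely be brought offline:", bricks)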
Example No. 12
    def test_volume_create_replica_2(self):
        """Validate creation of a replica 2 volume"""
        vol_create_info = heketi_ops.heketi_volume_create(
            self.heketi_client_node,
            self.heketi_server_url,
            1,
            replica=2,
            json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        vol_create_info["id"],
                        raise_on_error=True)
        actual_replica = int(
            vol_create_info["durability"]["replicate"]["replica"])
        self.assertEqual(
            actual_replica, 2, "Volume '%s' has '%s' as value for replica,"
            " expected 2." % (vol_create_info["id"], actual_replica))
        vol_name = vol_create_info['name']

        # Get gluster volume info
        gluster_vol = volume_ops.get_volume_info('auto_get_gluster_endpoint',
                                                 volname=vol_name)
        self.assertTrue(gluster_vol,
                        "Failed to get volume '%s' info" % vol_name)

        # Check amount of bricks
        brick_amount = len(gluster_vol[vol_name]['bricks']['brick'])
        self.assertEqual(
            brick_amount, 2, "Brick amount is expected to be 2. "
            "Actual amount is '%s'" % brick_amount)
Example No. 13
    def tearDownClass(cls,
                      umount_vol=True,
                      cleanup_vol=True,
                      teardown_nfs_ganesha_cluster=True):
        """Teardown the export, mounts and volume.
        """

        # Unmount volume
        if umount_vol:
            _rc = True
            for mount_obj in cls.mounts:
                ret = mount_obj.unmount()
                if not ret:
                    g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Unmount of all mounts are not "
                                     "successful")

        # Cleanup volume
        if cleanup_vol:

            volinfo = get_volume_info(cls.mnode, cls.volname)
            if volinfo is None or cls.volname not in volinfo:
                g.log.info("Volume %s does not exist in %s" %
                           (cls.volname, cls.mnode))
            else:
                # Unexport volume, if it is not unexported already
                vol_option = get_volume_options(cls.mnode,
                                                cls.volname,
                                                option='ganesha.enable')
                if vol_option is None:
                    raise ExecutionError("Failed to get ganesha.enable volume "
                                         " option for %s " % cls.volume)
                if vol_option['ganesha.enable'] != 'off':
                    if is_volume_exported(cls.mnode, cls.volname, "nfs"):
                        ret, out, err = unexport_nfs_ganesha_volume(
                            mnode=cls.mnode, volname=cls.volname)
                        if ret != 0:
                            raise ExecutionError(
                                "Failed to unexport volume %s" % cls.volname)
                        time.sleep(5)
                else:
                    g.log.info("Volume %s is unexported already" % cls.volname)

                _, _, _ = g.run(cls.mnode, "showmount -e")

            ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed", cls.volname)

        # All Volume Info
        volume_info(cls.mnode)

        (NfsGaneshaClusterSetupClass.tearDownClass.im_func(
            cls, delete_nfs_ganesha_cluster=teardown_nfs_ganesha_cluster))
Example No. 14
    def test_arbiter_volume_delete_using_pvc(self):
        """Test Arbiter volume delete using pvc when volume is not mounted
           on app pod
        """
        prefix = "autotest-%s" % utils.get_random_str()

        # Create sc with gluster arbiter info
        sc_name = self.create_storage_class(vol_name_prefix=prefix,
                                            is_arbiter_vol=True)

        # Create PVC and wait for it to be in 'Bound' state
        pvc_name = self.create_and_wait_for_pvc(pvc_name_prefix=prefix,
                                                sc_name=sc_name)

        # Get vol info
        gluster_vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, pvc_name)

        # Verify arbiter volume properties
        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            gluster_vol_info)

        # Get volume ID
        gluster_vol_id = gluster_vol_info["gluster_vol_id"]

        # Delete the pvc
        openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
        openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Check the heketi volume list if pvc is deleted
        g.log.info("List heketi volumes")
        heketi_volumes = heketi_ops.heketi_volume_list(self.heketi_client_node,
                                                       self.heketi_server_url)

        err_msg = "Failed to delete heketi volume by prefix %s" % prefix
        self.assertNotIn(prefix, heketi_volumes, err_msg)

        # Check presence for the gluster volume
        get_gluster_vol_info = volume_ops.get_volume_info(
            "auto_get_gluster_endpoint", gluster_vol_id)
        err_msg = "Failed to delete gluster volume %s" % gluster_vol_id
        self.assertFalse(get_gluster_vol_info, err_msg)

        # Check presence of bricks and lvs
        for brick in gluster_vol_info['bricks']['brick']:
            gluster_node_ip, brick_name = brick["name"].split(":")

            with self.assertRaises(exceptions.ExecutionError):
                cmd = "df %s" % brick_name
                openshift_ops.cmd_run_on_gluster_pod_or_node(
                    self.node, cmd, gluster_node_ip)

            with self.assertRaises(exceptions.ExecutionError):
                lv_match = re.search(BRICK_REGEX, brick["name"])
                if lv_match:
                    cmd = "lvs %s" % lv_match.group(2).strip()
                    openshift_ops.cmd_run_on_gluster_pod_or_node(
                        self.node, cmd, gluster_node_ip)
Example No. 15
    def get_num_of_bricks(self, volume_name):
        """Method to determine number of bricks at present in the volume."""

        volume_info = volume_ops.get_volume_info(
            'auto_get_gluster_endpoint', volume_name)
        self.assertIsNotNone(
            volume_info, "'%s' volume info is None" % volume_name)

        return len([b for b in volume_info[volume_name]["bricks"]["brick"]])
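A standalone restatement of the counting logic above, using an invented volume-info dict in place of the volume_ops call so it can run without a cluster:

# Invented volume info; only the fields the method above touches are shown.
volume_name = "testvol"  # placeholder
volume_info = {volume_name: {"bricks": {"brick": [{"name": "n1:/b0"},
                                                  {"name": "n2:/b1"},
                                                  {"name": "n3:/b2"}]}}}
num_of_bricks = len(volume_info[volume_name]["bricks"]["brick"])
print(num_of_bricks)  # -> 3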
    def test_validate_gluster_voloptions_blockhostvolume(self):
        """Validate gluster volume options which are set for
           block hosting volume"""
        options_to_validate = (
            ('performance.quick-read', 'off'),
            ('performance.read-ahead', 'off'),
            ('performance.io-cache', 'off'),
            ('performance.stat-prefetch', 'off'),
            ('performance.open-behind', 'off'),
            ('performance.readdir-ahead', 'off'),
            ('performance.strict-o-direct', 'on'),
            ('network.remote-dio', 'disable'),
            ('cluster.eager-lock', 'enable'),
            ('cluster.quorum-type', 'auto'),
            ('cluster.data-self-heal-algorithm', 'full'),
            ('cluster.locking-scheme', 'granular'),
            ('cluster.shd-max-threads', '8'),
            ('cluster.shd-wait-qlength', '10000'),
            ('features.shard', 'on'),
            ('features.shard-block-size', '64MB'),
            ('user.cifs', 'off'),
            ('server.allow-insecure', 'on'),
        )
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            self.skipTest("Skip the test case since number of "
                          "online nodes is less than 3.")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        if free_space_available < default_bhv_size:
            self.skipTest("Skip the test case since free_space_available %s "
                          "is less than the default_bhv_size %s." %
                          (free_space_available, default_bhv_size))
        block_host_create_info = heketi_volume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      default_bhv_size,
                                                      json=True,
                                                      block=True)
        self.addCleanup(heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        block_host_create_info["id"],
                        raise_on_error=True)
        bhv_name = block_host_create_info["name"]
        vol_info = get_volume_info('auto_get_gluster_endpoint',
                                   volname=bhv_name)
        self.assertTrue(vol_info, "Failed to get volume info %s" % bhv_name)
        self.assertIn("options", vol_info[bhv_name].keys())
        for k, v in options_to_validate:
            self.assertIn(k, vol_info[bhv_name]["options"].keys())
            self.assertEqual(v, vol_info[bhv_name]["options"][k])
Example No. 18
def get_all_bricks(mnode, volname):
    """Get list of all the bricks of the specified volume.
        If the volume is a 'Tier' volume, the list will contain both
        'hot tier' and 'cold tier' bricks.

    Args:
        mnode (str): Node on which command has to be executed
        volname (str): Name of the volume

    Returns:
        list: List of all the bricks of the volume on Success.
        NoneType: None on failure.
    """
    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volinfo of %s.", volname)
        return None

    if 'Tier' in volinfo[volname]['typeStr']:
        # Get bricks from hot-tier in case of Tier volume
        hot_tier_bricks = get_hot_tier_bricks(mnode, volname)
        if hot_tier_bricks is None:
            return None
        # Get cold-tier bricks in case of Tier volume
        cold_tier_bricks = get_cold_tier_bricks(mnode, volname)
        if cold_tier_bricks is None:
            return None

        return hot_tier_bricks + cold_tier_bricks

    # Get bricks from a non Tier volume
    all_bricks = []
    if 'bricks' in volinfo[volname]:
        if 'brick' in volinfo[volname]['bricks']:
            for brick in volinfo[volname]['bricks']['brick']:
                if 'name' in brick:
                    all_bricks.append(brick['name'])
                else:
                    g.log.error(
                        "brick %s doesn't have the key 'name' "
                        "for the volume: %s", brick, volname)
                    return None
            return all_bricks
        else:
            g.log.error(
                "Bricks not found in Bricks section of volume "
                "info for the volume %s", volname)
            return None
    else:
        g.log.error("Bricks not found for the volume %s", volname)
        return None
Example No. 19
    def setUp(self):
        # Setup Volume to create a replicated volume
        ret = self.setup_volume()
        if not ret:
            raise ExecutionError("Failed to setup volume %s" % self.volname)
        g.log.info("Volume %s has been setup successfully", self.volname)

        # Check if volume is started
        volinfo = get_volume_info(self.mnode, self.volname)
        if volinfo[self.volname]['statusStr'] != "Started":
            raise ExecutionError("Volume has not Started")
        g.log.info("Volume is started.")
        # Calling GlusterBaseClass Setup
        self.get_super_method(self, 'setUp')()
Example No. 20
def volume_exists(mnode, volname):
    """Check if volume already exists
    Args:
        mnode (str): Node on which commands has to be executed
        volname (str): Name of the volume.
    Returns:
        bool : True if volume exists. False Otherwise
    """
    volinfo = get_volume_info(mnode, volname)
    if volinfo:
        g.log.info("Volume %s exists", volname)
        return True

    g.log.error("Volume %s doesnot exist", volname)
    return False
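A hedged usage sketch; the node and volume names are placeholders:

# Hypothetical call; names are placeholders.
if volume_exists("server0.example.com", "testvol"):
    print("testvol is present, safe to run volume operations")
else:
    print("testvol is missing, create it first")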
Example No. 21
    def setUp(self):
        """
        Creating a replicated volume and checking if it is started
        """
        ret = self.setup_volume()
        if not ret:
            raise ExecutionError("Failed to setup volume %s" % self.volname)
        g.log.info("Volume %s has been setup successfully", self.volname)

        # Check if volume is started
        volinfo = get_volume_info(self.mnode, self.volname)
        if volinfo[self.volname]['statusStr'] != "Started":
            raise ExecutionError("Volume has not Started")
        g.log.info("Volume is started")
        # Calling GlusterBaseClass Setup
        GlusterBaseClass.setUp.im_func(self)
Example No. 22
    def setUp(self):
        """
        Creating a replicated volume and checking if it is started
        """
        # Calling GlusterBaseClass Setup
        self.get_super_method(self, 'setUp')()

        ret = self.setup_volume()
        if not ret:
            raise ExecutionError("Failed to setup volume %s" % self.volname)

        # Check if volume is started
        volinfo = get_volume_info(self.mnode, self.volname)
        if volinfo[self.volname]['statusStr'] != "Started":
            raise ExecutionError("Volume has not Started")
        g.log.info("Volume is started")
    def test_uuid_in_volume_info_xml(self):

        # create a two node cluster
        ret = peer_probe_servers(self.servers[0], self.servers[1])
        self.assertTrue(
            ret,
            "Peer probe failed to %s from %s" % (self.mnode, self.servers[1]))

        # create a 2x2 volume
        servers_info_from_two_node_cluster = {}
        for server in self.servers[0:2]:
            servers_info_from_two_node_cluster[server] = self.all_servers_info[
                server]

        self.volume['servers'] = self.servers[0:2]
        self.volume['voltype']['replica_count'] = 2
        self.volume['voltype']['dist_count'] = 2
        ret = setup_volume(self.mnode, servers_info_from_two_node_cluster,
                           self.volume)
        self.assertTrue(ret, ("Failed to create"
                              "and start volume %s" % self.volname))

        # probe a new node from cluster
        ret = peer_probe_servers(self.mnode, self.servers[2])
        self.assertTrue(
            ret,
            "Peer probe failed to %s from %s" % (self.mnode, self.servers[2]))

        # check gluster vol info --xml from newly probed node
        xml_output = get_volume_info(self.servers[2], self.volname)
        self.assertIsNotNone(xml_output,
                             ("Failed to get volume info --xml for "
                              "volume %s from newly probed node %s" %
                              (self.volname, self.servers[2])))

        # volume info --xml should have non zero UUID for host and brick
        uuid_with_zeros = '00000000-0000-0000-0000-000000000000'
        len_of_uuid = len(uuid_with_zeros)
        number_of_bricks = int(xml_output[self.volname]['brickCount'])
        for i in range(number_of_bricks):
            uuid = xml_output[self.volname]['bricks']['brick'][i]['hostUuid']
            self.assertEqual(len(uuid), len_of_uuid, "Invalid uuid length")
            self.assertNotEqual(uuid, uuid_with_zeros,
                                ("Invalid uuid %s" % uuid))
Example No. 24
    def _get_gluster_vol_info(self, file_vol):
        """Get Gluster vol info.

        Args:
            ocp_client (str): Node to execute OCP commands.
            file_vol (str): file volume name.

        Returns:
            dict: Info of the given gluster vol.
        """
        g_vol_info = volume_ops.get_volume_info("auto_get_gluster_endpoint",
                                                file_vol)

        if not g_vol_info:
            raise AssertionError("Failed to get volume info for gluster "
                                 "volume {}".format(file_vol))
        if file_vol in g_vol_info:
            g_vol_info = g_vol_info.get(file_vol)
        return g_vol_info
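As a hedged illustration of what callers typically do with the returned dict (the 'replicaCount' and 'bricks'/'brick' keys are the ones other examples in this collection read; the sample values below are invented):

# Invented sample shaped like the info returned for one gluster volume.
g_vol_info = {
    'replicaCount': '3',
    'bricks': {'brick': [{'name': 'node1:/bricks/brick1/b0'},
                         {'name': 'node2:/bricks/brick1/b1'},
                         {'name': 'node3:/bricks/brick1/b2'}]},
}
replica_count = int(g_vol_info['replicaCount'])
brick_names = [b['name'] for b in g_vol_info['bricks']['brick']]
assert replica_count == len(brick_names)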
Example No. 25
    def test_replica_volume_expand(self):
        """
        Test expansion of a replica volume
        """
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        volume_name = ("autotests-heketi-volume-{}".format(
            utils.get_random_str()))
        volume_size = 10
        creation_info = self.create_heketi_volume_with_name_and_wait(
            volume_name, volume_size, json=True, raise_on_cleanup_error=False)
        volume_id = creation_info["id"]
        volume_info = heketi_ops.heketi_volume_info(h_node,
                                                    h_server,
                                                    volume_id,
                                                    json=True)

        # Get gluster volume info
        gluster_vol = volume_ops.get_volume_info('auto_get_gluster_endpoint',
                                                 volname=volume_name)
        self.assertTrue(gluster_vol,
                        "Failed to get volume {} info".format(volume_name))
        vol_name = gluster_vol[volume_name]
        self.assertEqual(
            vol_name['replicaCount'], "3",
            "Replica count is different for volume {} Actual:{} "
            "Expected : 3".format(vol_name, vol_name['replicaCount']))

        expand_size = 5
        heketi_ops.heketi_volume_expand(h_node, h_server, volume_id,
                                        expand_size)
        volume_info = heketi_ops.heketi_volume_info(h_node,
                                                    h_server,
                                                    volume_id,
                                                    json=True)
        expected_size = volume_size + expand_size
        self.assertEqual(
            volume_info['size'], expected_size,
            "Volume Expansion failed, Expected Size: {}, Actual "
            "Size: {}".format(str(expected_size), str(volume_info['size'])))

        self.get_brick_and_volume_status(volume_name)
        self.get_rebalance_status(volume_name)
Example No. 26
def select_tier_volume_bricks_to_bring_offline(mnode, volname):
    """Randomly selects bricks to bring offline without affecting the cluster
    from a tiered volume.

    Args:
        mnode (str): Node on which commands will be executed.
        volname (str): Name of the volume.

    Returns:
        dict: On success returns dict. Value of each key is list of bricks to
            bring offline.
            If volume doesn't exist or is not a tiered volume returns dict
            with value of each item being empty list.
            Example:
                brick_to_bring_offline = {
                    'hot_tier_bricks': [],
                    'cold_tier_bricks': [],
                    }
    """
    # Defaulting the values to empty list
    bricks_to_bring_offline = {
        'hot_tier_bricks': [],
        'cold_tier_bricks': [],
    }

    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volume info for volume %s", volname)
        return bricks_to_bring_offline

    if is_tiered_volume(mnode, volname):
        # Select bricks from both hot tier and cold tier.
        hot_tier_bricks = (select_hot_tier_bricks_to_bring_offline(
            mnode, volname))
        cold_tier_bricks = (select_cold_tier_bricks_to_bring_offline(
            mnode, volname))
        bricks_to_bring_offline['hot_tier_bricks'] = hot_tier_bricks
        bricks_to_bring_offline['cold_tier_bricks'] = cold_tier_bricks
        return bricks_to_bring_offline
    else:
        return bricks_to_bring_offline
Example No. 27
def get_hot_tier_bricks(mnode, volname):
    """Get list of hot-tier bricks of the specified volume

    Args:
        mnode (str): Node on which command has to be executed
        volname (str): Name of the volume

    Returns:
        list : List of hot-tier bricks of the volume on Success.
        NoneType: None on failure.
    """
    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volinfo of %s.", volname)
        return None

    if 'Tier' not in volinfo[volname]['typeStr']:
        g.log.error("Volume %s is not a tiered volume", volname)
        return None

    hot_tier_bricks = []
    if 'bricks' in volinfo[volname]:
        if 'hotBricks' in volinfo[volname]['bricks']:
            if 'brick' in volinfo[volname]['bricks']['hotBricks']:
                for brick in volinfo[volname]['bricks']['hotBricks']['brick']:
                    if 'name' in brick:
                        hot_tier_bricks.append(brick['name'])
                    else:
                        g.log.error(
                            "brick %s doesn't have the key 'name' "
                            "for the volume: %s", brick, volname)
                        return None
            else:
                g.log.error(
                    "Bricks not found in hotBricks section of volume "
                    "info for the volume %s", volname)
                return None
        return hot_tier_bricks
    else:
        g.log.error("Bricks not found for the volume %s", volname)
        return None
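A hedged usage sketch; it is only meaningful for tiered volumes, and the node and volume names are placeholders:

# Hypothetical call; "server0.example.com" and "tiervol" are placeholders.
hot_bricks = get_hot_tier_bricks("server0.example.com", "tiervol")
if hot_bricks is None:
    print("tiervol is not tiered or its hot-tier bricks could not be read")
else:
    print("Hot tier bricks:", hot_bricks)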
Example No. 28
    def test_volume_create_snapshot_enabled(self):
        """Validate volume creation with snapshot enabled"""
        factor = 1.5
        vol_create_info = heketi_ops.heketi_volume_create(
            self.heketi_client_node,
            self.heketi_server_url,
            1,
            snapshot_factor=factor,
            json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        vol_create_info["id"])
        snap_factor_count = vol_create_info["snapshot"]["factor"]
        self.assertEqual(
            snap_factor_count, factor, "snapshot factor %s is not same as %s" %
            (snap_factor_count, factor))

        vol_name, snap_name = vol_create_info["name"], "snap1"
        try:
            ret, out, err = snap_ops.snap_create('auto_get_gluster_endpoint',
                                                 vol_name,
                                                 snap_name,
                                                 timestamp=False)
            self.assertEqual(ret, 0,
                             "Failed to create snapshot %s" % snap_name)

            # Get gluster volume info
            gluster_vol = volume_ops.get_volume_info(
                'auto_get_gluster_endpoint', volname=vol_name)
            self.assertTrue(gluster_vol,
                            "Failed to get volume '%s' info" % vol_name)
            self.assertEqual(
                gluster_vol[vol_name]['snapshotCount'], "1",
                "Failed to get snapshot count for volume %s" % vol_name)
        finally:
            ret, out, err = snap_ops.snap_delete('auto_get_gluster_endpoint',
                                                 snap_name)
            self.assertEqual(ret, 0,
                             "Failed to delete snapshot %s" % snap_name)
    def tearDown(self):

        # Clean up the volumes created specific for this tests.
        for i in range(5):
            volname = "nfsvol" + str(i)
            volinfo = get_volume_info(self.mnode, volname)
            if volinfo is None or volname not in volinfo:
                g.log.info("Volume %s does not exist in %s", volname,
                           self.mnode)
                continue

            # Unexport volume, if it is not unexported already
            vol_option = get_volume_options(self.mnode,
                                            volname,
                                            option='ganesha.enable')
            if vol_option is None:
                raise ExecutionError("Failed to get ganesha.enable volume "
                                     " option for %s " % volname)
            if vol_option['ganesha.enable'] != 'off':
                if is_volume_exported(self.mnode, volname, "nfs"):
                    ret, _, _ = unexport_nfs_ganesha_volume(mnode=self.mnode,
                                                            volname=volname)
                    if ret != 0:
                        raise ExecutionError("Failed to unexport volume %s " %
                                             volname)
                    time.sleep(5)
            else:
                g.log.info("Volume %s is unexported already", volname)

            _, _, _ = g.run(self.mnode, "showmount -e")

            ret = cleanup_volume(mnode=self.mnode, volname=volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed" % volname)

        NfsGaneshaIOBaseClass.tearDown.im_func(self)
Example No. 30
    def test_create_volumes_enabling_and_disabling_heketi_devices(self):
        """Validate enable/disable of heketi device"""

        # Get nodes info
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   self.heketi_server_url)
        node_info_list = []
        for node_id in node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            node_info_list.append(node_info)

        # Disable 4th and other nodes
        if len(node_id_list) > 3:
            for node_id in node_id_list[3:]:
                heketi_ops.heketi_node_disable(self.heketi_client_node,
                                               self.heketi_server_url, node_id)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node,
                                self.heketi_server_url, node_id)

        # Disable second and other devices on the first 3 nodes
        for node_info in node_info_list[0:3]:
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_info["id"])
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if len(devices) < 2:
                continue
            for device in node_info["devices"][1:]:
                out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                                       self.heketi_server_url,
                                                       device["id"])
                self.assertTrue(
                    out, "Failed to disable the device %s" % device["id"])
                self.addCleanup(heketi_ops.heketi_device_enable,
                                self.heketi_client_node,
                                self.heketi_server_url, device["id"])

        # Create heketi volume
        out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              json=True)
        self.assertTrue(out, "Failed to create heketi volume of size 1")
        g.log.info("Successfully created heketi volume of size 1")
        device_id = out["bricks"][0]["device"]
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        out["bricks"][0]["volume"])

        # Disable device
        g.log.info("Disabling '%s' device" % device_id)
        out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                               self.heketi_server_url,
                                               device_id)
        self.assertTrue(out, "Failed to disable the device %s" % device_id)
        g.log.info("Successfully disabled device %s" % device_id)

        try:
            # Get device info
            g.log.info("Retrieving '%s' device info" % device_id)
            out = heketi_ops.heketi_device_info(self.heketi_client_node,
                                                self.heketi_server_url,
                                                device_id,
                                                json=True)
            self.assertTrue(out, "Failed to get device info %s" % device_id)
            g.log.info("Successfully retrieved device info %s" % device_id)
            name = out["name"]
            self.assertEqual(out["state"].lower().strip(), "offline",
                             "Device %s is not in offline state." % name)
            g.log.info("Device %s is now offine" % name)

            # Try to create heketi volume
            g.log.info("Creating heketi volume: Expected to fail.")
            try:
                out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      1,
                                                      json=True)
            except AssertionError:
                g.log.info("Volume was not created as expected.")
            else:
                self.addCleanup(heketi_ops.heketi_volume_delete,
                                self.heketi_client_node,
                                self.heketi_server_url,
                                out["bricks"][0]["volume"])
                msg = "Volume unexpectedly created. Out: %s" % out
                assert False, msg
        finally:
            # Enable the device back
            g.log.info("Enable '%s' device back." % device_id)
            out = heketi_ops.heketi_device_enable(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  device_id)
            self.assertTrue(out, "Failed to enable the device %s" % device_id)
            g.log.info("Successfully enabled device %s" % device_id)

        # Get device info
        out = heketi_ops.heketi_device_info(self.heketi_client_node,
                                            self.heketi_server_url,
                                            device_id,
                                            json=True)
        self.assertTrue(out, ("Failed to get device info %s" % device_id))
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        self.assertEqual(out["state"], "online",
                         "Device %s is not in online state." % name)

        # Create heketi volume of size
        out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              json=True)
        self.assertTrue(out, "Failed to create volume of size 1")
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        out["bricks"][0]["volume"])
        g.log.info("Successfully created volume of size 1")
        name = out["name"]

        # Get gluster volume info
        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
        g.log.info("Successfully got the '%s' volume info." % name)
Example No. 31
def form_bricks_list(mnode,
                     volname,
                     number_of_bricks,
                     servers,
                     servers_info,
                     dirname=None):
    """Forms bricks list for create-volume/add-brick given the num_of_bricks
        servers and servers_info.

    Args:
        mnode (str): The node on which the command has to be run.
        volname (str): Volume name for which we require brick-list.
        number_of_bricks (int): The number of bricks for which brick list
            has to be created.
        servers (str|list): A server|List of servers from which the bricks
            needs to be selected for creating the brick list.
        servers_info (dict): dict of server info of each servers.

    kwargs:
        dirname (str): Name of the directory for glusterfs brick

    Returns:
        list - List of bricks to use with volume-create/add-brick
        None - if number_of_bricks is greater than unused bricks.

    Example:
        form_bricks_list(g.config['servers'][0], "testvol", 6,
                         g.config['servers'], g.config['servers_info'])
    """
    if not isinstance(servers, list):
        servers = [servers]
    dict_index = 0
    bricks_list = []

    servers_unused_bricks_dict = get_servers_unused_bricks_dict(
        mnode, servers, servers_info)
    num_of_unused_bricks = 0
    for each_server_unused_bricks_list in list(
            servers_unused_bricks_dict.values()):
        num_of_unused_bricks = (num_of_unused_bricks +
                                len(each_server_unused_bricks_list))

    if num_of_unused_bricks < number_of_bricks:
        g.log.error("Not enough bricks available for creating the bricks")
        return None

    brick_index = 0
    vol_info_dict = get_volume_info(mnode, volname)
    if vol_info_dict:
        brick_index = int(vol_info_dict[volname]['brickCount'])

    for num in range(brick_index, brick_index + number_of_bricks):
        # current_server is the server from which brick path will be created
        current_server = list(servers_unused_bricks_dict.keys())[dict_index]
        current_server_unused_bricks_list = (list(
            servers_unused_bricks_dict.values())[dict_index])
        brick_path = ''
        if current_server_unused_bricks_list:
            if dirname and (" " not in dirname):
                brick_path = (
                    "%s:%s/%s_brick%s" %
                    (current_server, current_server_unused_bricks_list[0],
                     dirname, num))
                bricks_list.append(brick_path)
            else:
                brick_path = (
                    "%s:%s/%s_brick%s" %
                    (current_server, current_server_unused_bricks_list[0],
                     volname, num))
                bricks_list.append(brick_path)

            # Remove the added brick from the current_server_unused_bricks_list
            list(servers_unused_bricks_dict.values())[dict_index].pop(0)

        if dict_index < len(servers_unused_bricks_dict) - 1:
            dict_index = dict_index + 1
        else:
            dict_index = 0

    return bricks_list
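A hedged usage sketch built on the docstring example; it assumes a glusto config that provides 'servers' and 'servers_info' entries:

# Hypothetical call, mirroring the docstring example.
bricks = form_bricks_list(g.config['servers'][0], "testvol", 6,
                          g.config['servers'], g.config['servers_info'])
if bricks is None:
    raise RuntimeError("Not enough unused bricks to build the brick list")
# Each entry looks like "<server>:<unused-brick-path>/testvol_brick<N>".
print(bricks)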
    def test_snap_scheduler_behaviour(self):
        """
        Steps:
        1. Create volumes
        2. Enable shared storage
        3. Validate shared storage mounted
        4. Validate shared storage is enabled
        5. Initialise snapshot scheduler on all node
        6. Enable snapshot scheduler
        7. Validate snapshot scheduler status
        8. Disable snapshot scheduler
        9. Validate snapshot scheduler status
        """

        # Enable shared storage
        g.log.info("Enable shared storage")
        ret = enable_shared_storage(self.mnode)
        self.assertTrue(ret, "Failed to enable shared storage")
        g.log.info("Successfully enabled shared storage")

        # Validate shared storage mounted
        g.log.info("Starting to validate shared storage mounted")
        for server in self.servers:
            ret = is_shared_volume_mounted(server)
            self.assertTrue(ret, "Failed to mount shared volume")
        g.log.info("Successfully mounted shared volume")

        # Validate shared storage is enabled
        g.log.info("Starting to validate shared storage volume")
        self.shared = "gluster_shared_storage"
        volinfo = get_volume_info(self.mnode, self.shared)
        self.assertEqual(
            volinfo['gluster_shared_storage']['options']
            ['cluster.enable-shared-storage'], 'enable',
            "shared storage is disabled")
        g.log.info("Shared storage enabled successfully")

        # Initialise snap scheduler
        g.log.info("Initialising snapshot scheduler on all nodes")
        ret = scheduler_init(self.servers)
        self.assertTrue(ret, "Failed to initialize scheduler on all nodes")
        g.log.info("Successfully initialized scheduler on all nodes")

        # Enable snap scheduler
        g.log.info("Starting to enable snapshot scheduler on all nodes")
        ret, _, _ = scheduler_enable(self.mnode)
        self.assertEqual(ret, 0, "Failed to enable scheduler on all servers")
        g.log.info("Successfully enabled scheduler on all nodes")

        # Check snapshot scheduler status
        g.log.info("checking status of snapshot scheduler")
        for server in self.servers:
            count = 0
            while count < 40:
                ret, status, _ = scheduler_status(server)
                if status.strip().split(":")[2] == ' Enabled':
                    break
                time.sleep(2)
                count += 2
        self.assertEqual(status.strip().split(":")[2], ' Enabled',
                         "Failed to check status of scheduler")
        g.log.info("Successfully checked scheduler status")

        # Disable snap scheduler
        g.log.info("Starting to disable snapshot scheduler on all nodes")
        ret, _, _ = scheduler_disable(self.mnode)
        self.assertEqual(
            ret, 0, "Failed to disable scheduler on node"
            " %s" % self.mnode)
        g.log.info("Successfully disabled scheduler on all nodes")

        # Check snapshot scheduler status
        g.log.info("checking status of snapshot scheduler")
        for server in self.servers:
            count = 0
            while count < 40:
                ret, status, _ = scheduler_status(server)
                if status.strip().split(":")[2] == ' Disabled':
                    break
                time.sleep(2)
                count += 2
        self.assertEqual(status.strip().split(":")[2], ' Disabled',
                         "Failed to check status of scheduler")
        g.log.info("Successfully checked scheduler status")
    def test_create_volumes_enabling_and_disabling_heketi_devices(self):
        """Validate enable/disable of heketi device"""

        # Get nodes info
        node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        node_info_list = []
        for node_id in node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            node_info_list.append(node_info)

        # Disable 4th and other nodes
        if len(node_id_list) > 3:
            for node_id in node_id_list[3:]:
                heketi_ops.heketi_node_disable(
                    self.heketi_client_node, self.heketi_server_url, node_id)
                self.addCleanup(
                    heketi_ops.heketi_node_enable, self.heketi_client_node,
                    self.heketi_server_url, node_id)

        # Disable second and other devices on the first 3 nodes
        for node_info in node_info_list[0:3]:
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_info["id"])
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if len(devices) < 2:
                continue
            for device in node_info["devices"][1:]:
                out = heketi_ops.heketi_device_disable(
                    self.heketi_client_node, self.heketi_server_url,
                    device["id"])
                self.assertTrue(
                    out, "Failed to disable the device %s" % device["id"])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    self.heketi_client_node, self.heketi_server_url,
                    device["id"])

        # Create heketi volume
        out = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(out, "Failed to create heketi volume of size 1")
        g.log.info("Successfully created heketi volume of size 1")
        device_id = out["bricks"][0]["device"]
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, out["bricks"][0]["volume"])

        # Disable device
        g.log.info("Disabling '%s' device" % device_id)
        out = heketi_ops.heketi_device_disable(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertTrue(out, "Failed to disable the device %s" % device_id)
        g.log.info("Successfully disabled device %s" % device_id)

        try:
            # Get device info
            g.log.info("Retrieving '%s' device info" % device_id)
            out = heketi_ops.heketi_device_info(
                self.heketi_client_node, self.heketi_server_url,
                device_id, json=True)
            self.assertTrue(out, "Failed to get device info %s" % device_id)
            g.log.info("Successfully retrieved device info %s" % device_id)
            name = out["name"]
            if out["state"].lower().strip() != "offline":
                raise exceptions.ExecutionError(
                    "Device %s is not in offline state." % name)
            g.log.info("Device %s is now offine" % name)

            # Try to create heketi volume
            g.log.info("Creating heketi volume: Expected to fail.")
            try:
                out = heketi_ops.heketi_volume_create(
                    self.heketi_client_node, self.heketi_server_url, 1,
                    json=True)
            except exceptions.ExecutionError:
                g.log.info("Volume was not created as expected.")
            else:
                self.addCleanup(
                    heketi_ops.heketi_volume_delete, self.heketi_client_node,
                    self.heketi_server_url, out["bricks"][0]["volume"])
                msg = "Volume unexpectedly created. Out: %s" % out
                assert False, msg
        finally:
            # Enable the device back
            g.log.info("Enable '%s' device back." % device_id)
            out = heketi_ops.heketi_device_enable(
                self.heketi_client_node, self.heketi_server_url, device_id)
            self.assertTrue(out, "Failed to enable the device %s" % device_id)
            g.log.info("Successfully enabled device %s" % device_id)

        # Get device info
        out = heketi_ops.heketi_device_info(
            self.heketi_client_node, self.heketi_server_url, device_id,
            json=True)
        self.assertTrue(out, ("Failed to get device info %s" % device_id))
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        if out["state"] != "online":
            raise exceptions.ExecutionError(
                "Device %s is not in online state." % name)

        # Create heketi volume of size
        out = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(out, "Failed to create volume of size 1")
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, out["bricks"][0]["volume"])
        g.log.info("Successfully created volume of size 1")
        name = out["name"]

        # Get gluster volume info
        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
        g.log.info("Successfully got the '%s' volume info." % name)
Example No. 34
    def test_create_snap_bricks(self):
        """
        1. get brick list
        2. check all bricks are online
        3. Selecting one brick randomly to bring it offline
        4. get brick list
        5. check all bricks are online
        6. Offline Bricks list
        7. Online Bricks list
        8. Create snapshot of volume
        9. snapshot create should fail
        """

        bricks_list = []
        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # check all bricks are online
        g.log.info("Verifying all bricks are online or not.....")
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, ("Not all bricks are online"))
        g.log.info("All bricks are online.")

        # Selecting one brick randomly to bring it offline
        g.log.info("Selecting one brick randomly to bring it offline")
        brick_to_bring_offline = random.choice(bricks_list)
        g.log.info("Brick to bring offline:%s " % brick_to_bring_offline)
        ret = bring_bricks_offline(self.volname, brick_to_bring_offline, None)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Randomly Selected brick: %s" % brick_to_bring_offline)

        # get brick list
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # check that not all bricks are online after bringing one offline
        g.log.info("Verifying that not all bricks are online.....")
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertFalse(ret, ("All bricks are online even after bringing "
                               "one brick offline"))
        g.log.info("Not all bricks are online, as expected.")

        # get the bricks for the volume
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # Offline Bricks list
        offbricks = get_offline_bricks_list(self.mnode, self.volname)
        g.log.info("Bricks Offline: %s" % offbricks)

        # Online Bricks list
        onbricks = get_online_bricks_list(self.mnode, self.volname)
        g.log.info("Bricks Online: %s" % onbricks)

        # Create snapshot of volume
        ret = snap_create(self.mnode, self.volname, "snap1", False,
                          "Description with $p3c1al characters!")
        self.assertTrue(ret, ("Failed to create snapshot snap1"))
        g.log.info("Snapshot snap1 of volume %s created Successfully" %
                   (self.volname))

        # Volume info
        ret = get_volume_info(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to perform gluster volume "
                              "info on volume %s" % self.volname))
        g.log.info("Gluster volume info on volume %s is successful" %
                   self.volname)
        # snapshot list
        ret = snap_list(self.mnode)
        self.assertTrue(
            ret, ("Failed to list snapshot of volume %s" % self.volname))
        g.log.info("Snapshot list command for volume %s was successful" %
                   self.volname)