def tearDown(self):
    """Delete all snapshots, reset snap config, unmount and delete both
    clone volumes, then unmount and clean up the parent volume.

    Raises:
        ExecutionError: if any cleanup step fails.
    """
    # Calling GlusterBaseClass teardown
    self.get_super_method(self, 'tearDown')()

    # delete created snapshots
    g.log.info("starting to delete all created snapshots")
    ret, _, _ = snap_delete_all(self.mnode)
    # Raise ExecutionError instead of self.assertEqual: teardown failures
    # should surface as errors, consistent with the rest of this method.
    if ret != 0:
        raise ExecutionError("Failed to delete all snapshots")
    g.log.info("Successfully deleted all snapshots")

    # Disable Activate on create
    option = {'activate-on-create': 'disable'}
    ret, _, _ = set_snap_config(self.mnode, option)
    if ret != 0:
        # Space added so the concatenated message reads correctly.
        raise ExecutionError("Failed to set activateOnCreate "
                             "config option")
    g.log.info("ActivateOnCreate config option Successfully set")

    # umount clone volume
    g.log.info("Unmounting clone volume")
    ret, _, _ = umount_volume(self.clients[0], self.mpoint1)
    if ret != 0:
        raise ExecutionError("Failed to unmount clone "
                             "volume %s" % self.clone1)
    g.log.info("Successfully unmounted clone volume %s", self.clone1)

    ret, _, _ = umount_volume(self.clients[0], self.mpoint2)
    if ret != 0:
        raise ExecutionError("Failed to unmount clone "
                             "volume %s" % self.clone2)
    g.log.info("Successfully unmounted clone volume %s", self.clone2)

    # cleanup volume
    g.log.info("starting to cleanup volume")
    ret1 = cleanup_volume(self.mnode, self.clone1)
    ret2 = cleanup_volume(self.mnode, self.clone2)
    if not ret1:
        raise ExecutionError("Failed to cleanup %s clone "
                             "volume" % self.clone1)
    if not ret2:
        raise ExecutionError("Failed to cleanup %s clone "
                             "volume" % self.clone2)
    g.log.info("Successfully cleanedup cloned volumes")

    # Unmount and cleanup-volume
    g.log.info("Starting to Unmount and cleanup-volume")
    ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
    if not ret:
        raise ExecutionError("Failed to Unmount and Cleanup Volume")
    g.log.info("Successful in Unmount Volume and Cleanup Volume")
def tearDown(self):
    """Restore glusterd and peer state, delete the extra volume, then
    unmount/clean up the original volume and run the base teardown."""
    # Restart glusterd when it is found not running on the servers.
    if is_glusterd_running(self.servers):
        if not start_glusterd(self.servers):
            raise ExecutionError("Failed to start glusterd on %s"
                                 % self.servers)
        g.log.info("Glusterd started successfully on %s", self.servers)

    # Checking for peer status from every node
    for node in self.servers:
        if not wait_for_peers_to_connect(node, self.servers):
            raise ExecutionError("Servers are not in peer probed state")

    if not cleanup_volume(self.mnode, self.volname_2):
        raise ExecutionError("Unable to delete volume % s" % self.volname_2)

    # Unmount and cleanup original volume
    if not self.unmount_volume_and_cleanup_volume(mounts=self.mounts):
        raise ExecutionError("Failed to umount the vol & cleanup Volume")

    # Calling GlusterBaseClass tearDown
    self.get_super_method(self, 'tearDown')()
def tearDownClass(cls, umount_vol=True, cleanup_vol=True):
    """Teardown the mounts and volume.

    Args:
        umount_vol (bool): unmount every mount in cls.mounts when True.
        cleanup_vol (bool): stop and delete cls.volname when True.

    Raises:
        ExecutionError: when any unmount or the volume cleanup fails.
    """
    # py2-only `im_func` access replaced with the py3-safe super helper
    # already used elsewhere in this file.
    cls.get_super_method(cls, 'tearDownClass')()

    # Unmount volume
    if umount_vol:
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.unmount()
            if not ret:
                g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Unmount of all mounts are not "
                                 "successful")

    # Cleanup volume
    if cleanup_vol:
        ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
        if not ret:
            # Original passed cls.volname as a second positional arg
            # (comma, not %), so it never formatted into the message.
            raise ExecutionError("cleanup volume %s failed" % cls.volname)

    # All Volume Info
    volume_info(cls.mnode)
def tearDown(self):
    """Unmount the volume, clean up every volume (waiting for bricks to
    come online first), and re-probe detached peers.

    Raises:
        ExecutionError: if unmount, cleanup or peer probe fails.
    """
    # UnMount Volume
    g.log.info("Starting to Unmount Volume %s", self.volname)
    # umount_volume returns (retcode, out, err); the original asserted on
    # the whole tuple, which is always truthy, so failures went unnoticed.
    ret, _, _ = umount_volume(self.mounts[0].client_system,
                              self.mounts[0].mountpoint,
                              mtype=self.mount_type)
    if ret:
        raise ExecutionError("Failed to Unmount Volume %s" % self.volname)
    g.log.info("Successfully Unmounted Volume %s", self.volname)

    # Clean up all volumes and peer probe to form cluster
    vol_list = get_volume_list(self.mnode)
    if vol_list is not None:
        for volume in vol_list:
            # check all bricks are online
            ret = wait_for_bricks_to_be_online(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to bring bricks online"
                                     "for volume %s" % volume)
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup volume")
            g.log.info("Volume deleted successfully : %s", volume)

    # Peer probe detached servers
    pool = nodes_from_pool_list(self.mnode)
    for node in pool:
        peer_detach(self.mnode, node)
    ret = peer_probe_servers(self.mnode, self.servers)
    if not ret:
        raise ExecutionError("Failed to probe peer "
                             "servers %s" % self.servers)
    g.log.info("Peer probe success for detached "
               "servers %s", self.servers)

    self.get_super_method(self, 'tearDown')()
def tearDownClass(cls):
    """
    Clean up the volume and umount volume from client

    Raises:
        ExecutionError: if an umount, directory removal or volume
            cleanup fails.
    """
    # umount all volumes
    for mount_obj in cls.mounts:
        ret, _, _ = umount_volume(
            mount_obj.client_system, mount_obj.mountpoint)
        if ret:
            raise ExecutionError(
                "Failed to umount on volume %s " % cls.volname)
        g.log.info("Successfully umounted %s on client %s", cls.volname,
                   mount_obj.client_system)
        ret = rmdir(mount_obj.client_system, mount_obj.mountpoint)
        if not ret:
            # Original passed the boolean `ret` as an extra first
            # argument, turning the exception message into a tuple.
            raise ExecutionError(
                "Failed to remove directory mount directory.")
        g.log.info("Mount directory is removed successfully")

    # stopping all volumes
    g.log.info("Starting to Cleanup all Volumes")
    volume_list = get_volume_list(cls.mnode)
    for volume in volume_list:
        ret = cleanup_volume(cls.mnode, volume)
        if not ret:
            raise ExecutionError("Failed to cleanup Volume %s" % volume)
        g.log.info("Volume: %s cleanup is done", volume)
    g.log.info("Successfully Cleanedup all Volumes")

    # calling GlusterBaseClass tearDownClass
    cls.get_super_method(cls, 'tearDownClass')()
def tearDown(self):
    """Re-probe detached peers, delete every volume, then run the
    base-class teardown.

    Raises:
        ExecutionError: if peer probe or a volume cleanup fails.
    """
    # Peer probe detached servers
    pool = nodes_from_pool_list(self.mnode)
    for node in pool:
        peer_detach(self.mnode, node)
    ret = peer_probe_servers(self.mnode, self.servers)
    if not ret:
        raise ExecutionError("Failed to probe detached "
                             "servers %s" % self.servers)
    g.log.info("Peer probe success for detached "
               "servers %s", self.servers)

    # clean up all volumes and detaches peers from cluster
    vol_list = get_volume_list(self.mnode)
    for volume in vol_list:
        ret = cleanup_volume(self.mnode, volume)
        if not ret:
            raise ExecutionError("Failed to Cleanup the "
                                 "Volume %s" % volume)
        g.log.info("Volume deleted successfully : %s", volume)

    # Calling GlusterBaseClass tearDown
    # py2-only `im_func` replaced with the py3-safe super helper.
    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """
    tearDown for every test: delete all volumes and wipe the leftover
    brick directories, then run the base-class teardown.
    """
    # clean up all volumes
    volumes = get_volume_list(self.mnode)
    if volumes is None:
        raise ExecutionError("Failed to get the volume list")
    for vol in volumes:
        if not cleanup_volume(self.mnode, vol):
            raise ExecutionError("Unable to delete volume % s" % vol)
        g.log.info("Volume deleted successfully : %s", vol)

    # Cleaning the deleted volume bricks
    for brick in self.brick_list:
        host, path = brick.split(":")
        rc, _, _ = g.run(host, "rm -rf %s" % path)
        if rc:
            raise ExecutionError("Failed to delete the brick "
                                 "dir's of deleted volume")

    # Calling GlusterBaseClass tearDown
    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Restart glusterd on the stopped node, wait for glusterd and
    peers to come back, then stop and delete the volume.

    Raises:
        ExecutionError: if glusterd/peers do not recover or the volume
            cleanup fails.
    """
    # Starting glusterd on node where stopped.
    ret = start_glusterd(self.servers[self.random_server])
    # start_glusterd returns True on success (see sibling teardowns);
    # the original tested the inverted condition AND constructed
    # ExecutionError without raising it, so failures were silent.
    if not ret:
        raise ExecutionError("Failed to start glusterd.")
    g.log.info("Successfully started glusterd.")

    ret = wait_for_glusterd_to_start(self.servers)
    if not ret:
        raise ExecutionError("glusterd is not running on %s"
                             % self.servers)
    g.log.info("Glusterd start on the nodes succeeded")

    # Checking if peer is connected.
    ret = wait_for_peers_to_connect(self.mnode, self.servers)
    if not ret:
        raise ExecutionError("Peer is not in connected state.")
    g.log.info("Peers is in connected state.")

    # Stopping and deleting volume.
    ret = cleanup_volume(self.mnode, self.volname)
    if not ret:
        raise ExecutionError("Unable to delete volume % s" % self.volname)
    g.log.info("Volume deleted successfully : %s", self.volname)

    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Unmount the volume, delete every volume and wipe the leftover
    brick directories before the base-class teardown."""
    # Unmounting the volume.
    rc, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                             mpoint=self.mounts[0].mountpoint)
    if rc:
        raise ExecutionError("Volume %s is not unmounted" % self.volname)
    g.log.info("Volume unmounted successfully : %s", self.volname)

    # clean up all volumes
    volumes = get_volume_list(self.mnode)
    if not volumes:
        raise ExecutionError("Failed to get the volume list")
    for vol in volumes:
        if not cleanup_volume(self.mnode, vol):
            raise ExecutionError("Unable to delete volume % s" % vol)
        g.log.info("Volume deleted successfully : %s", vol)

    # Cleaning the deleted volume bricks
    for brick in self.brick_list:
        host, path = brick.split(":")
        rc, _, _ = g.run(host, "rm -rf %s" % path)
        if rc:
            raise ExecutionError("Failed to delete the brick "
                                 "dir's of deleted volume")

    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Restart glusterd on the stopped node, poll for peer connectivity,
    then stop and delete the volume.

    Raises:
        ExecutionError: if glusterd/peers do not recover or the volume
            cleanup fails.
    """
    # Starting glusterd on node where stopped.
    ret = start_glusterd(self.servers[self.random_server])
    # start_glusterd returns True on success; the original tested the
    # inverted condition and also never raised the ExecutionError it
    # constructed, so failures were silent.
    if not ret:
        raise ExecutionError("Failed to start glusterd.")
    g.log.info("Successfully started glusterd.")

    # Checking if peer is connected: poll up to 30 times, 3s apart.
    counter = 0
    while counter < 30:
        ret = is_peer_connected(self.mnode, self.servers)
        counter += 1
        if ret:
            break
        sleep(3)
    if not ret:
        raise ExecutionError("Peer is not in connected state.")
    g.log.info("Peers is in connected state.")

    # Stopping and deleting volume.
    ret = cleanup_volume(self.mnode, self.volname)
    if not ret:
        raise ExecutionError("Unable to delete volume % s" % self.volname)
    g.log.info("Volume deleted successfully : %s", self.volname)

    # py2-only `im_func` replaced with the py3-safe super helper.
    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Unmount the volume, delete all volumes and re-probe detached
    peers before the base-class teardown.

    Raises:
        ExecutionError: if unmount, cleanup or peer probe fails.
    """
    # Cleanup and umount volume
    g.log.info("Starting to Unmount Volume %s", self.volname)
    # umount_volume returns (retcode, out, err); asserting on the whole
    # tuple (as the original did) always passes, hiding umount failures.
    ret, _, _ = umount_volume(self.mounts[0].client_system,
                              self.mounts[0].mountpoint,
                              mtype=self.mount_type)
    if ret:
        raise ExecutionError("Failed to Unmount Volume %s" % self.volname)
    g.log.info("Successfully Unmounted Volume %s", self.volname)

    # Clean up all volumes and peer probe to form cluster
    vol_list = get_volume_list(self.mnode)
    if vol_list is not None:
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup volume")
            g.log.info("Volume deleted successfully : %s", volume)
    g.log.info("Successful in umounting the volume and Cleanup")

    # Peer probe detached servers
    pool = nodes_from_pool_list(self.mnode)
    for node in pool:
        peer_detach(self.mnode, node)
    ret = peer_probe_servers(self.mnode, self.servers)
    if not ret:
        raise ExecutionError("Failed to probe detached "
                             "servers %s" % self.servers)
    g.log.info("Peer probe success for detached "
               "servers %s", self.servers)

    # py2-only `im_func` replaced with the py3-safe super helper.
    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Base teardown first, then disable USS on the clone, unmount and
    delete it, and finally unmount/clean up the parent volume."""
    # Calling GlusterBaseClass tearDown
    self.get_super_method(self, 'tearDown')()

    # Disable USS on cloned volume
    rc, _, _ = disable_uss(self.mnode, self.clone_vol1)
    if rc:
        raise ExecutionError("Failed to disable USS on cloned volume")
    g.log.info("Successfully disabled USS on Cloned volume")

    # Cleanup cloned volume
    if not unmount_mounts(self.mount1):
        raise ExecutionError("Failed to unmount cloned volume")
    if not cleanup_volume(self.mnode, self.clone_vol1):
        raise ExecutionError("Failed to unmount and cleanup cloned volume")
    g.log.info("Successfully umounted and cleanup cloned volume")

    # Unmount and cleanup-volume
    if not self.unmount_volume_and_cleanup_volume(mounts=self.mounts):
        raise ExecutionError("Failed to Unmount and Cleanup Volume")
    g.log.info("Successful in Unmount Volume and Cleanup Volume")
def tearDown(self):
    """Delete all volumes, restore the quorum ratio, re-probe any
    detached peers, then run the base-class teardown."""
    volumes = get_volume_list(self.mnode)
    if volumes is None:
        raise ExecutionError("Failed to get volume list")
    for vol in volumes:
        if not cleanup_volume(self.mnode, vol):
            raise ExecutionError("Failed Cleanup the volume")
        g.log.info("Volume deleted successfully %s", vol)

    # Setting quorum ratio to 51%
    if not set_volume_options(self.mnode, 'all',
                              {'cluster.server-quorum-ratio': '51%'}):
        raise ExecutionError("Failed to set server quorum ratio on %s"
                             % self.volname)

    # Peer probe servers since we are doing peer detach in setUpClass
    for server in self.servers:
        if not is_peer_connected(server, self.servers):
            if not peer_probe_servers(server, self.servers):
                raise ExecutionError(
                    "Peer probe failed to one of the node")
            g.log.info("Peer probe successful")

    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Start glusterd everywhere, restore peer connections, delete all
    volumes, then run the base-class teardown.

    Raises:
        ExecutionError: if glusterd/peers cannot be restored or a
            volume cleanup fails.
    """
    # start glusterd on all servers
    ret = start_glusterd(self.servers)
    if not ret:
        raise ExecutionError("Failed to start glusterd on all servers")

    for server in self.servers:
        ret = is_peer_connected(server, self.servers)
        if not ret:
            ret = peer_probe_servers(server, self.servers)
            if not ret:
                raise ExecutionError("Failed to peer probe all "
                                     "the servers")

    # clean up all volumes
    vol_list = get_volume_list(self.mnode)
    if vol_list is None:
        raise ExecutionError("Failed to get the volume list")
    for volume in vol_list:
        ret = cleanup_volume(self.mnode, volume)
        if not ret:
            raise ExecutionError("Unable to delete volume % s" % volume)
        # Lazy %-args instead of eager formatting in the log call.
        g.log.info("Volume deleted successfully : %s", volume)

    # py2-only `im_func` replaced with the py3-safe super helper.
    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Restart the volume and glusterd, reconnect peers, and delete all
    volumes before the base-class teardown.

    Raises:
        ExecutionError: if glusterd/peers cannot be restored or a
            volume cleanup fails.
    """
    # start the volume, it should succeed
    ret, _, _ = volume_start(self.mnode, self.volname)
    # Original message read "Volume stop failed" for a start operation.
    self.assertEqual(ret, 0, "Volume start failed")

    # start glusterd on all servers
    ret = start_glusterd(self.servers)
    if not ret:
        raise ExecutionError("Failed to start glusterd on all servers")

    for server in self.servers:
        ret = wait_for_peers_to_connect(server, self.servers)
        if not ret:
            ret = peer_probe_servers(server, self.servers)
            if not ret:
                raise ExecutionError("Failed to peer probe all "
                                     "the servers")

    # clean up all volumes
    vol_list = get_volume_list(self.mnode)
    if vol_list is None:
        raise ExecutionError("Failed to get the volume list")
    for volume in vol_list:
        ret = cleanup_volume(self.mnode, volume)
        if not ret:
            raise ExecutionError("Unable to delete volume % s" % volume)
        g.log.info("Volume deleted successfully : %s", volume)

    self.get_super_method(self, 'tearDown')()
def tearDownClass(cls):
    """Stop and delete the volume created for this class."""
    # stopping the volume and Cleaning up the volume
    if not cleanup_volume(cls.mnode, cls.volname):
        raise ExecutionError("Failed Cleanup the Volume %s"
                             % cls.volname)
    g.log.info("Volume deleted successfully : %s", cls.volname)
def tearDownClass(cls):
    """
    Clean up the volume and umount volume from client

    Raises:
        ExecutionError: if a volume cleanup or an umount fails.
    """
    # stopping all volumes
    g.log.info("Starting to Cleanup all Volumes")
    volume_list = get_volume_list(cls.mnode)
    for volume in volume_list:
        ret = cleanup_volume(cls.mnode, volume)
        if not ret:
            raise ExecutionError("Failed to cleanup Volume %s" % volume)
        g.log.info("Volume: %s cleanup is done", volume)
    g.log.info("Successfully Cleanedup all Volumes")

    # umount all volumes
    for mount_point in cls.mount_points:
        ret, _, _ = umount_volume(cls.client, mount_point)
        if ret:
            raise ExecutionError("Failed to umount on volume %s "
                                 % cls.volname)
        g.log.info("Successfully umounted %s on client %s",
                   cls.volname, cls.client)

    # calling GlusterBaseClass tearDownClass
    # py2-only `im_func` replaced with the py3-safe super helper.
    cls.get_super_method(cls, 'tearDownClass')()
def tearDown(self):
    """Unmount and delete both cloned volumes, then unmount and clean
    up the parent volume.

    Raises:
        ExecutionError: if an unmount or a cleanup fails.
    """
    # Unmount Cloned volume
    g.log.info("Starting to Unmount Cloned volume")
    for count in range(0, 2):
        self.mpoint = "/mnt/clone%s" % count
        ret, _, _ = umount_volume(self.clients[0], self.mpoint,
                                  self.mount_type)
        # Any non-zero exit status is a failure; the original only
        # treated an exit status of exactly 1 as an error.
        if ret:
            raise ExecutionError("Unmounting the mount point %s failed"
                                 % self.mpoint)
        g.log.info("Mount point %s deleted successfully", self.mpoint)
    g.log.info("Unmount Volume Successful")

    # Cleanup Cloned Volumes
    g.log.info("Starting to cleanup cloned volumes")
    for clone_count in range(0, 2):
        ret = cleanup_volume(self.mnode, "clone%s" % clone_count)
        if not ret:
            raise ExecutionError("Failed to cleanup clone%s volume"
                                 % clone_count)
        g.log.info("Successful in clone%s volume cleanup", clone_count)

    # Unmount and cleanup-volume
    g.log.info("Starting to Unmount and cleanup-volume")
    ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
    if not ret:
        raise ExecutionError("Failed to Unmount and Cleanup Volume")
    g.log.info("Cleanup Volume Successfully")
def tearDown(self):
    """Base teardown first, then restore glusterd/peer state and delete
    every volume."""
    # stopping the volume and Cleaning up the volume
    self.get_super_method(self, 'tearDown')()

    if is_glusterd_running(self.servers):
        if not start_glusterd(self.servers):
            raise ExecutionError("Failed to start glusterd on %s"
                                 % self.servers)
        # Takes 5 seconds to restart glusterd into peer connected state
        sleep(5)
        g.log.info("Glusterd started successfully on %s", self.servers)

    # checking for peer status from every node
    if not is_peer_connected(self.mnode, self.servers):
        if not peer_probe_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to peer probe failed in "
                                 "servers %s" % self.servers)
    g.log.info("All peers are in connected state")

    volumes = get_volume_list(self.mnode)
    if volumes is None:
        raise ExecutionError("Failed to get the volume list")
    for vol in volumes:
        if not cleanup_volume(self.mnode, vol):
            raise ExecutionError("Failed Cleanup the Volume")
        g.log.info("Volume deleted successfully")
def tearDown(self):
    """Unmount the volume, delete every volume, re-probe all servers,
    then run the base-class teardown.

    Raises:
        ExecutionError: if the volume list is unavailable, a cleanup
            fails, or peer probe fails.
    """
    # unmount the volume
    ret = self.unmount_volume(self.mounts)
    self.assertTrue(ret, "Volume unmount failed for %s" % self.volname)

    # get volumes list and clean up all the volumes
    vol_list = get_volume_list(self.mnode)
    if vol_list is None:
        raise ExecutionError("Error while getting vol list")
    else:
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if ret is True:
                g.log.info("Volume deleted successfully : %s", volume)
            else:
                raise ExecutionError("Failed Cleanup the"
                                     " Volume %s" % volume)

    # peer probe all the servers
    ret = peer_probe_servers(self.mnode, self.servers)
    if not ret:
        raise ExecutionError("Peer probe failed to all the servers from "
                             "the node.")

    # py2-only `im_func` replaced with the py3-safe super helper.
    self.get_super_method(self, 'tearDown')()
def tearDownClass(cls, umount_vol=True, cleanup_vol=True,
                  teardown_nfs_ganesha_cluster=True):
    """Teardown the export, mounts and volume.

    Args:
        umount_vol (bool): unmount every mount in cls.mounts.
        cleanup_vol (bool): unexport (when still exported) and delete
            cls.volname.
        teardown_nfs_ganesha_cluster (bool): forwarded to the
            nfs-ganesha base-class teardown.

    Raises:
        ExecutionError: on unmount, unexport or cleanup failure.
    """
    # Unmount volume
    if umount_vol:
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.unmount()
            if not ret:
                g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Unmount of all mounts are not "
                                 "successful")

    # Cleanup volume
    if cleanup_vol:
        volinfo = get_volume_info(cls.mnode, cls.volname)
        if volinfo is None or cls.volname not in volinfo:
            g.log.info("Volume %s does not exist in %s",
                       cls.volname, cls.mnode)
        else:
            # Unexport volume, if it is not unexported already
            vol_option = get_volume_options(cls.mnode, cls.volname,
                                            option='ganesha.enable')
            if vol_option is None:
                # `cls.volume` in the original raised AttributeError;
                # the class attribute is `volname`.
                raise ExecutionError("Failed to get ganesha.enable volume "
                                     " option for %s " % cls.volname)
            if vol_option['ganesha.enable'] != 'off':
                if is_volume_exported(cls.mnode, cls.volname, "nfs"):
                    ret, out, err = unexport_nfs_ganesha_volume(
                        mnode=cls.mnode, volname=cls.volname)
                    if ret != 0:
                        raise ExecutionError(
                            "Failed to unexport volume %s" % cls.volname)
                    # Give ganesha a moment to drop the export.
                    time.sleep(5)
            else:
                g.log.info("Volume %s is unexported already", cls.volname)

            _, _, _ = g.run(cls.mnode, "showmount -e")

        ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
        if not ret:
            # Original used a comma instead of %, so the volume name was
            # never formatted into the message.
            raise ExecutionError("cleanup volume %s failed" % cls.volname)

    # All Volume Info
    volume_info(cls.mnode)

    # py2-only `im_func` replaced with the py3-safe super helper.
    cls.get_super_method(cls, 'tearDownClass')(
        delete_nfs_ganesha_cluster=teardown_nfs_ganesha_cluster)
def tearDown(self):
    """
    tearDown for every test

    Restarts glusterd if the test stopped it, waits for peers to
    reconnect, reverts the quorum ratio and deletes the test volumes.

    Raises:
        ExecutionError: if glusterd/peers cannot be restored or a
            cleanup step fails.
    """
    if not self.glusterd_service:
        ret = start_glusterd(self.servers[1])
        if not ret:
            raise ExecutionError("Failed to start glusterd services "
                                 "for : %s" % self.servers[1])

    # Checking glusterd service running or not
    ret = is_glusterd_running(self.servers[1])
    if ret == 0:
        g.log.info("glusterd running on :%s", self.servers[1])
    else:
        raise ExecutionError("glusterd not running on :%s"
                             % self.servers[1])

    # In this test case performing quorum operations,
    # deleting volumes immediately after glusterd services start, volume
    # deletions are failing with quorum not met,
    # that's the reason verifying peers are connected or not before
    # deleting volumes
    peers_not_connected = True
    count = 0
    while count < 10:
        ret = self.validate_peers_are_connected()
        if ret:
            peers_not_connected = False
            break
        count += 1
        sleep(5)
    if peers_not_connected:
        raise ExecutionError("Servers are not in peer probed state")

    # Reverting back the quorum ratio to 51%
    self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'}
    ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
    if not ret:
        # Original passed `ret` as an extra first argument, turning the
        # exception message into a tuple.
        raise ExecutionError("gluster volume set all cluster"
                             ".server-quorum- ratio percentage Failed"
                             " :%s" % self.servers)
    g.log.info(
        "gluster volume set all cluster.server-quorum-ratio 51"
        "percentage enabled successfully :%s", self.servers)

    # stopping the volume and Cleaning up the volume
    for volume in self.volume_list:
        ret = cleanup_volume(self.mnode, volume)
        if not ret:
            raise ExecutionError("Failed to Cleanup the "
                                 "Volume %s" % volume)
        g.log.info("Volume deleted successfully : %s", volume)

    # Calling GlusterBaseClass tearDown
    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Run the base-class teardown, then stop and delete the volume.

    Raises:
        ExecutionError: if the volume cleanup fails.
    """
    # Calling GlusterBaseClass tearDown
    # py2-only `im_func` replaced with the py3-safe super helper.
    self.get_super_method(self, 'tearDown')()

    # stopping the volume and Cleaning up the volume
    ret = cleanup_volume(self.mnode, self.volname)
    if not ret:
        raise ExecutionError("Failed Cleanup the Volume")
    g.log.info("Volume deleted successfully : %s", self.volname)
def tearDown(self):
    """Delete the test volume, then run the base-class teardown."""
    # Stopping the volume and Cleaning up the volume
    if not cleanup_volume(self.mnode, self.volname):
        raise ExecutionError("Failed to cleanup volume")
    g.log.info("Volume deleted successfully : %s", self.volname)

    # Calling GlusterBaseClass tearDown
    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Reset snapshot config, unmount and delete both clone volumes,
    then unmount and clean up the parent volume.

    Raises:
        ExecutionError: if any cleanup step fails.
    """
    # Calling GlusterBaseClass teardown
    # py2-only `im_func` replaced with the py3-safe super helper.
    self.get_super_method(self, 'tearDown')()

    # Disable Activate on create
    option = {'activate-on-create': 'disable'}
    ret, _, _ = set_snap_config(self.mnode, option)
    if ret != 0:
        # Space added so the concatenated message reads correctly.
        raise ExecutionError("Failed to set activateOnCreate "
                             "config option")
    g.log.info("ActivateOnCreate config option Successfully set")

    # umount clone volume
    g.log.info("Unmounting clone volume")
    ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint1)
    if ret != 0:
        raise ExecutionError("Failed to unmount clone "
                             "volume %s" % self.clone1)
    g.log.info("Successfully unmounted clone volume %s", self.clone1)

    ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint2)
    if ret != 0:
        raise ExecutionError("Failed to unmount clone "
                             "volume %s" % self.clone2)
    g.log.info("Successfully unmounted clone volume %s", self.clone2)

    # cleanup volume
    g.log.info("starting to cleanup volume")
    ret1 = cleanup_volume(self.mnode, self.clone1)
    ret2 = cleanup_volume(self.mnode, self.clone2)
    if not ret1:
        raise ExecutionError("Failed to cleanup %s clone "
                             "volume" % self.clone1)
    if not ret2:
        raise ExecutionError("Failed to cleanup %s clone "
                             "volume" % self.clone2)
    g.log.info("Successfully cleanedup cloned volumes")

    # Unmount and cleanup-volume
    g.log.info("Starting to Unmount and cleanup-volume")
    ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
    if not ret:
        raise ExecutionError("Failed to Unmount and Cleanup Volume")
    g.log.info("Successful in Unmount Volume and Cleanup Volume")
def tearDown(self):
    """
    TearDown for Volume
    Volume Cleanup
    """
    if not cleanup_volume(self.mnode, self.volname):
        raise ExecutionError("Failed to Cleanup the "
                             "Volume %s" % self.volname)
    g.log.info("Volume deleted successfully "
               ": %s", self.volname)
def tearDown(self):
    '''
    clean up all volumes and detaches peers from cluster
    '''
    vol_list = get_volume_list(self.mnode)
    for volume in vol_list:
        ret = cleanup_volume(self.mnode, volume)
        self.assertTrue(ret, "Failed to Cleanup the Volume %s" % volume)
        # Lazy %-args instead of eager string formatting in the log call.
        g.log.info("Volume deleted successfully : %s", volume)

    # py2-only `im_func` replaced with the py3-safe super helper.
    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """Run the base teardown first, then delete the cloned volume."""
    # Calling GlusterBaseClass teardown
    self.get_super_method(self, 'tearDown')()

    # Cleanup cloned volume
    g.log.info("Starting to delete cloned volume")
    if not cleanup_volume(self.mnode, self.clone):
        raise ExecutionError("Failed to delete the cloned volume")
    g.log.info("Successful in deleting Cloned volume")
def tearDown(self):
    '''
    clean up all volumes and detaches peers from cluster
    '''
    for vol in get_volume_list(self.mnode):
        self.assertTrue(cleanup_volume(self.mnode, vol),
                        "Failed to Cleanup the Volume %s" % vol)
        g.log.info("Volume deleted successfully : %s", vol)

    self.get_super_method(self, 'tearDown')()
def tearDown(self):
    """tearDown for every test: delete the volume created by the test."""
    # Cleanup all successfully created volumes.
    if not cleanup_volume(self.mnode, self.volname):
        raise ExecutionError("Failed to Cleanup the "
                             "Volume %s" % self.volname)
    g.log.info("Volume deleted successfully "
               ": %s", self.volname)

    # Calling GlusterBaseClass tearDown
    self.get_super_method(self, 'tearDown')()