Example #1
    def tearDown(self):

        status_info = get_remove_brick_status(
            self.mnode, self.volname, bricks_list=self.remove_brick_list)
        status = status_info['aggregate']['statusStr']
        if 'in progress' in status:
            # Stop the in-progress remove-brick (volume shrink) operation
            g.log.info("Vol %s: Stop remove brick", self.volname)
            ret, _, _ = remove_brick(self.mnode, self.volname,
                                     self.remove_brick_list, "stop")
            if ret:
                raise ExecutionError("Failed to stop remove-brick on volume"
                                     " %s" % self.volname)
            g.log.info("Volume %s shrink stopped", self.volname)

        # Unmount Volume and Cleanup Volume
        g.log.info("Starting to Unmount Volume and Cleanup Volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
        g.log.info("Successful in Unmount Volume and Cleanup Volume")

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
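
For reference, the stop-if-pending pattern from this tearDown also works as a standalone helper. A minimal sketch, assuming the stock glusto-tests import locations (the excerpt above does not show its imports):

from glusto.core import Glusto as g
from glustolibs.gluster.brick_ops import remove_brick
from glustolibs.gluster.rebalance_ops import get_remove_brick_status


def stop_pending_remove_brick(mnode, volname, bricks):
    """Stop a remove-brick operation if it is still 'in progress'.

    Sketch only: the helper name and import paths are assumptions,
    not part of the excerpt above.
    """
    status_info = get_remove_brick_status(mnode, volname, bricks_list=bricks)
    if status_info and 'in progress' in status_info['aggregate']['statusStr']:
        ret, _, err = remove_brick(mnode, volname, bricks, 'stop')
        if ret:
            g.log.error("Failed to stop remove-brick on %s: %s", volname, err)
            return False
        g.log.info("Stopped pending remove-brick on %s", volname)
    return True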
Example #2
    def test_kill_brick_with_remove_brick(self):
        """
        Test case:
        1. Create a volume, start it and mount it.
        2. Create some data on the volume.
        3. Start remove-brick on the volume.
        4. When remove-brick is in progress, kill the brick process of a
           brick which is being removed.
        5. Remove-brick should complete without any failures.
        """
        # Start I/O from clients on the volume
        counter = 1
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d --dir-depth 2 "
                   "--dir-length 10 --max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, counter, mount_obj.mountpoint))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Failed to create datat on volume")
            counter += 10

        # Collect arequal checksum before ops
        arequal_checksum_before = collect_mounts_arequal(self.mounts[0])

        # Start remove-brick on the volume
        brick_list = form_bricks_list_to_remove_brick(self.mnode, self.volname)
        self.assertIsNotNone(brick_list, "Brick list is empty")

        ret, _, _ = remove_brick(self.mnode, self.volname, brick_list, 'start')
        self.assertFalse(ret, "Failed to start remove-brick on volume")
        g.log.info("Successfully started remove-brick on volume")

        # Check that the remove-brick rebalance is in progress
        status_info = get_remove_brick_status(self.mnode, self.volname,
                                              brick_list)
        self.assertIsNotNone(status_info,
                             "Failed to get remove-brick status")
        status = status_info['aggregate']['statusStr']
        self.assertEqual(status, "in progress",
                         "Remove-brick rebalance is not in 'in progress' "
                         "state; it either completed already or never started")

        # Kill the brick process of a brick which is being removed
        # (killing by name takes down every glusterfsd on that node)
        brick = choice(brick_list)
        node, _ = brick.split(":")
        ret = kill_process(node, process_names="glusterfsd")
        self.assertTrue(ret,
                        "Failed to kill brick process of brick %s" % brick)

        # Wait for remove-brick to complete on the volume
        ret = wait_for_remove_brick_to_complete(self.mnode,
                                                self.volname,
                                                brick_list,
                                                timeout=1200)
        self.assertTrue(ret, "Remove-brick didn't complete")
        g.log.info("Remove brick completed successfully")

        # Check for data loss by comparing arequal before and after ops
        arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
        self.assertEqual(arequal_checksum_before, arequal_checksum_after,
                         "arequal checksums do not match: possible data loss")
        g.log.info("arequal checksums match")