Example #1
    def test_validate_profile_for_inodelk(self):
        """
        Test Steps:
        1) Create an ecvolume and mount it
        2) Set the eagerlock option
        3) Create a 1GB file
        4) View the profile of the volume for INODELK count must be about
           2-10 locks for each brick.
        5) check backend bricks for trusted.ec.dirty xattr must be non-zero
        6) Disable the eagerlock option
        7) Repeat steps 3-5 and now dirty xattr must be zero and
           INODELK count in range of 100-5k.
        """

        # Enable EagerLock
        ret = set_volume_options(self.mnode, self.volname, {
            'disperse.eager-lock': 'on',
            'disperse.eager-lock-timeout': '10'
        })
        self.assertTrue(ret, "Failed to turn on eagerlock"
                        "on %s" % self.volname)

        # Start profile on volume.
        ret, _, _ = profile_start(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to start profile on volume: %s" % self.volname)

        # Test behavior with EagerLock on
        filename = self._file_create_and_profile_info("on")
        self.assertIsNotNone(filename, "Failed to get filename")

        # Test dirty bit with EagerLock on
        ret = self._check_dirty_xattr(filename)
        self.assertEqual(
            ret, '0x00000000000000010000000000000001',
            "Unexpected dirty xattr value is %s on %s" % (ret, filename))

        # Disable EagerLock
        ret = set_volume_options(self.mnode, self.volname,
                                 {'disperse.eager-lock': 'off'})
        self.assertTrue(ret, "Failed to turn off eagerlock "
                        "on %s" % self.volname)

        # Test behavior with EagerLock off
        filename = self._file_create_and_profile_info("off")
        self.assertIsNotNone(filename, "Failed to get filename")

        # Test dirty bit with EagerLock off
        ret = self._check_dirty_xattr(filename)
        self.assertEqual(
            ret, '0x00000000000000000000000000000000',
            "Unexpected dirty xattr value is %s on %s" % (ret, filename))

        # Stop profile on volume.
        ret, _, _ = profile_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to stop profile on volume: %s" % self.volname)
    def test_profile_operations_with_one_node_down(self):
        # pylint: disable=too-many-statements
        """
        Test Case:
        1) Create a volume and start it.
        2) Mount volume on client and start IO.
        3) Start profile info on the volume.
        4) Stop glusterd on one node.
        5) Run profile info with different parameters
           and see if all bricks are present or not.
        6) Stop profile on the volume.
        """

        # Start IO on mount points.
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        counter = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dir-depth 4 "
                   "--dirname-start-num %d "
                   "--dir-length 6 "
                   "--max-num-of-dirs 3 "
                   "--num-of-files 5 %s" % (
                       self.script_upload_path,
                       counter, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            counter += 1

        # Start profile on volume.
        ret, _, _ = profile_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start profile on volume: %s"
                         % self.volname)
        g.log.info("Successfully started profile on volume: %s",
                   self.volname)

        # Picking a random server index from the list (index 0 is skipped).
        self.random_server = randint(1, len(self.servers)-1)

        # Stopping glusterd on one node.
        ret = stop_glusterd(self.servers[self.random_server])
        self.assertTrue(ret, "Failed to stop glusterd on one node.")
        g.log.info("Successfully stopped glusterd on one node.")
        ret = wait_for_glusterd_to_start(self.servers[self.random_server])
        self.assertFalse(ret, "glusterd is still running on %s"
                         % self.servers[self.random_server])
        g.log.info("Glusterd stop on the nodes : %s "
                   "succeeded", self.servers[self.random_server])

        # Getting and checking output of profile info.
        ret, out, _ = profile_info(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to run profile info on volume: %s"
                         % self.volname)
        g.log.info("Successfully executed profile info on volume: %s",
                   self.volname)

        # Checking if all bricks are present in profile info.
        brick_list = get_online_bricks_list(self.mnode, self.volname)
        for brick in brick_list:
            self.assertTrue(brick in out,
                            "Brick %s not a part of profile info output."
                            % brick)
            g.log.info("Brick %s showing in profile info output.",
                       brick)

        # Running profile info with different profile options.
        profile_options = ['peek', 'incremental', 'clear', 'incremental peek',
                           'cumulative']
        for option in profile_options:

            # Getting and checking output of profile info.
            ret, out, _ = profile_info(self.mnode, self.volname,
                                       options=option)
            self.assertEqual(ret, 0,
                             "Failed to run profile info %s on volume: %s"
                             % (option, self.volname))
            g.log.info("Successfully executed profile info %s on volume: %s",
                       option, self.volname)

            # Checking if all bricks are present in the profile info output.
            for brick in brick_list:
                self.assertTrue(brick in out,
                                "Brick %s not a part of profile"
                                " info %s output."
                                % (brick, option))
                g.log.info("Brick %s showing in profile info %s output.",
                           brick, option)

        # Starting glusterd on the node where it was stopped.
        ret = start_glusterd(self.servers[self.random_server])
        self.assertTrue(ret, "Failed to start glusterd.")
        g.log.info("Successfully started glusterd.")

        # Checking if peer is connected
        ret = wait_for_peers_to_connect(self.mnode, self.servers)
        self.assertTrue(ret, "Peers are not in connected state.")
        g.log.info("Peers are in connected state.")

        # Stop profile on volume.
        ret, _, _ = profile_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to stop profile on volume: %s"
                         % self.volname)
        g.log.info("Successfully stopped profile on volume: %s", self.volname)

        # Validate IO
        self.assertTrue(
            validate_io_procs(self.all_mounts_procs, self.mounts),
            "IO failed on some of the clients"
        )
        g.log.info("IO validation complete.")
Example #3
    def test_profile_start_with_quorum_not_met(self):
        # pylint: disable=too-many-statements
        """
        1. Create a volume
        2. Set the quorum type to server and ratio to 90
        3. Stop glusterd randomly on one of the nodes
        4. Start profile on the volume
        5. Start glusterd on the node where it was stopped
        6. Start profile on the volume
        7. Stop profile on the volume where it was started
        """

        # Enabling server quorum
        self.quorum_options = {'cluster.server-quorum-type': 'server'}
        ret = set_volume_options(self.mnode, self.volname, self.quorum_options)
        self.assertTrue(ret, "gluster volume set %s cluster.server-quorum-type"
                             " server Failed" % self.volname)
        g.log.info("gluster volume set %s cluster.server-quorum-type server "
                   "enabled successfully", self.volname)

        # Setting quorum ratio to 90%
        self.quorum_perecent = {'cluster.server-quorum-ratio': '90%'}
        ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
        self.assertTrue(ret, "Failed to set cluster.server-quorum-ratio to "
                             "90%% on %s" % self.servers)
        g.log.info("Successfully set cluster.server-quorum-ratio to 90%% "
                   "on %s", self.servers)

        # Stop glusterd on one of the nodes randomly
        self.node_on_glusterd_to_stop = choice(self.servers)
        ret = stop_glusterd(self.node_on_glusterd_to_stop)
        self.assertTrue(ret, "glusterd stop on the node failed")
        g.log.info("glusterd stop on the node: % "
                   "succeeded", self.node_on_glusterd_to_stop)

        # checking whether peers are connected or not
        count = 0
        while count < 5:
            ret = self.validate_peers_are_connected()
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertFalse(ret, "Peers are in connected state even after "
                              "stopping glusterd on one node")

        # Starting volume profile when quorum is not met
        self.new_servers = self.servers[:]
        self.new_servers.remove(self.node_on_glusterd_to_stop)
        ret, _, _ = profile_start(choice(self.new_servers),
                                  self.volname)
        self.assertNotEqual(ret, 0, "Expected: Should not be able to start "
                                    "volume profile. Acutal: Able to start "
                                    "the volume profile start")
        g.log.info("gluster vol profile start is failed as expected")

        # Start glusterd on the node where it was stopped
        ret = start_glusterd(self.node_on_glusterd_to_stop)
        self.assertTrue(ret, "glusterd start on the node failed")
        g.log.info("glusterd start succeeded")

        # checking whether peers are connected or not
        count = 0
        while count < 5:
            ret = self.validate_peers_are_connected()
            if ret:
                break
            sleep(5)
            count += 1
        self.assertTrue(ret, "Peer are not in connected state ")

        # Starting profile when volume quorum is met
        ret, _, _ = profile_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Expected: Should be able to start the"
                                 "volume profile start. Acutal: Not able"
                                 " to start the volume profile start")
        g.log.info("gluster vol profile start is successful")

        # Stop the profile
        ret, _, _ = profile_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Expected: Should be able to stop the "
                                 "profile stop. Acutal: Not able to stop"
                                 " the profile stop")
        g.log.info("gluster volume profile stop is successful")
Example #4
    def test_eagerlock_while_io_in_progress(self):
        '''
        Create a replica volume and mount it. Once the volume is
        mounted successfully on the client, start running IOs on the
        mount point, then run the "gluster volume profile <volname> info"
        command on random cluster nodes.
        Then check whether the IOs completed successfully on the mount
        point and whether the files on the mount point are listed
        properly.
        Check that the release directory value is less than or equal
        to '4'.
        '''

        status_on = "on"
        validate_profiles = ('cluster.eager-lock',
                             'diagnostics.count-fop-hits',
                             'diagnostics.latency-measurement')

        ret, _, _ = profile_start(random.choice(self.servers), self.volname)
        self.assertEqual(
            ret, 0,
            ("Volume profile failed to start for volume %s" % self.volname))

        for validate_profile in validate_profiles:
            out = get_volume_options(random.choice(self.servers),
                                     self.volname,
                                     option=(validate_profile))
            self.assertIsNotNone(
                out, "Volume get failed for volume "
                "%s" % self.volname)
            self.assertEqual(out[validate_profile], status_on, "Failed to "
                             "match profile information")

        # Mounting a volume
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, "Volume mount failed for %s" % self.volname)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 15 "
                "--max-num-of-dirs 5 "
                "--num-of-files 25 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter = self.counter + 10
        self.io_validation_complete = False

        # This command should not hang while IO is in progress
        # pylint: disable=unused-variable
        for i in range(20):
            ret, _, _ = profile_info(random.choice(self.servers), self.volname)
            self.assertEqual(ret, 0, ("Volume profile info failed on "
                                      "volume %s" % self.volname))

        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")

        # List all files and dirs created
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        volume_profile_info = "gluster v profile %s info"
        _, out, _ = g.run(
            random.choice(self.servers),
            volume_profile_info % self.volname + " | grep"
            "OPENDIR | awk '{print$8}'")
        self.assertIsNotNone(
            out, "Failed to get volume %s profile info" % self.volname)
        out.strip().split('\n')
        for value in out:
            self.assertLessEqual(
                value, '4', "Failed to Validate profile"
                " on volume %s" % self.volname)

    def test_profile_operations(self):
        """
        Test Case:
        1) Create a volume and start it.
        2) Mount volume on client and start IO.
        3) Start profile on the volume.
        4) Run profile info and see if all bricks are present or not.
        5) Create another volume.
        6) Run profile info without starting the profile.
        7) Run profile info with all possible options without starting
           the profile.
        """
        # Timestamp of current test case of start time
        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # Start IO on mount points.
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        counter = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dir-depth 4 "
                   "--dir-length 6 "
                   "--dirname-start-num %d "
                   "--max-num-of-dirs 3 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, counter, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            counter += 1

        # Start profile on volume.
        ret, _, _ = profile_start(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to start profile on volume: %s" % self.volname)
        g.log.info("Successfully started profile on volume: %s", self.volname)

        # Getting and checking output of profile info.
        ret, out, _ = profile_info(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to run profile info on volume: %s" % self.volname)
        g.log.info("Successfully executed profile info on volume: %s",
                   self.volname)

        # Checking if all bricks are present in profile info.
        brick_list = get_all_bricks(self.mnode, self.volname)
        for brick in brick_list:
            self.assertTrue(
                brick in out,
                "Brick %s not a part of profile info output." % brick)
            g.log.info("Brick %s showing in profile info output.", brick)

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        g.log.info("IO validation complete.")

        # Create and start a volume
        self.volume['name'] = "volume_2"
        self.volname = "volume_2"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")
        g.log.info("Successfully created and started volume_2")

        # Check profile info on volume without starting profile
        ret, _, _ = profile_info(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Unexpected: Successfully ran profile info"
            " on volume: %s" % self.volname)
        g.log.info("Expected: Failed to run profile info on volume: %s",
                   self.volname)

        # Running profile info with different profile options.
        profile_options = ('peek', 'incremental', 'clear', 'incremental peek',
                           'cumulative')
        for option in profile_options:
            # Getting and checking output of profile info.
            ret, _, _ = profile_info(self.mnode, self.volname, options=option)
            self.assertNotEqual(
                ret, 0, "Unexpected: Successfully ran profile info"
                " %s on volume: %s" % (option, self.volname))
            g.log.info(
                "Expected: Failed to execute profile info %s on"
                " volume: %s", option, self.volname)

        # Checking for core files.
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "glusterd service should not crash")
        g.log.info("No core file found, glusterd service running "
                   "successfully")

        # Checking whether glusterd is running or not
        ret = is_glusterd_running(self.servers)
        self.assertEqual(ret, 0, "Glusterd has crashed on nodes.")
        g.log.info("No glusterd crashes observed.")
Example #6
    def test_profile_simultaneously_on_different_nodes(self):
        """
        Test Case:
        1) Create a volume and start it.
        2) Mount volume on client and start IO.
        3) Start profile on the volume.
        4) Create another volume.
        5) Start profile on the volume.
        6) Run volume status in a loop on one of the nodes.
        7) Run profile info for the new volume on one of the other nodes.
        8) Run profile info for the new volume in a loop 100 times on
           another node.
        """
        # Timestamp of current test case of start time
        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # Start IO on mount points.
        self.all_mounts_procs = []
        counter = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dir-depth 4 "
                   "--dir-length 6 "
                   "--dirname-start-num %d "
                   "--max-num-of-dirs 3 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, counter, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            counter += 1

        # Start profile on volume.
        ret, _, _ = profile_start(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to start profile on volume: %s" % self.volname)
        g.log.info("Successfully started profile on volume: %s", self.volname)

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        g.log.info("IO validation complete.")

        # Create and start a volume
        self.volume['name'] = "volume_2"
        self.volname = "volume_2"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")
        g.log.info("Successfully created and started volume_2")

        # Start profile on volume.
        ret, _, _ = profile_start(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to start profile on volume: %s" % self.volname)
        g.log.info("Successfully started profile on volume: %s", self.volname)

        # Run volume status in a loop on one of the nodes
        cmd = "for i in `seq 1 100`;do gluster v status;done"
        proc1 = g.run_async(self.servers[1], cmd)

        # Check profile info on one of the other nodes
        cmd = "gluster v profile %s info" % self.volname
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(
            ret, 0, "Failed to run profile info on volume: %s"
            " on node %s" % (self.volname, self.mnode))
        g.log.info("Successfully run pofile info on volume: %s on node %s",
                   self.volname, self.mnode)

        # Run volume profile info in a loop on one of the other nodes
        cmd = """for i in `seq 1 100`;do gluster v profile %s info;
              done""" % self.volname
        proc2 = g.run_async(self.servers[3], cmd)

        ret1, _, _ = proc1.async_communicate()
        ret2, _, _ = proc2.async_communicate()

        self.assertEqual(
            ret1, 0, "Failed to run volume status in a loop"
            " on node %s" % self.servers[1])
        g.log.info(
            "Successfully ran volume status in a loop on node"
            " %s", self.servers[1])

        self.assertEqual(
            ret2, 0, "Failed to run profile info in a loop"
            " on node %s" % self.servers[3])
        g.log.info(
            "Successfully ran profile info in a loop on node"
            " %s", self.servers[3])

        # Checking for core files.
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "glusterd service should not crash")
        g.log.info("No core file found, glusterd service running "
                   "successfully")

        # Checking whether glusterd is running or not
        ret = is_glusterd_running(self.servers)
        self.assertEqual(ret, 0, "Glusterd has crashed on nodes.")
        g.log.info("No glusterd crashes observed.")