    def test_op_version(self):
        '''
        -> Create a volume
        -> Get the current op-version
        -> Get the max supported op-version
        -> Verify that the vol info file exists on all servers
        -> Get the version number from the vol info file
        -> If the current op-version is less than the max-op-version,
           set the current op-version to the max-op-version
        -> After the vol set operation, verify that the version number
           in the vol info file increased by one
        -> Verify that the current op-version and max-op-version match
        '''

        # Getting current op-version
        vol_dict = get_volume_options(self.mnode, 'all', 'cluster.op-version')
        current_op_version = int(vol_dict['cluster.op-version'])
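        # get_volume_options() returns a dict keyed by option name, e.g.
        # {'cluster.op-version': '70200'} (the value shown is illustrative)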

        # Getting max op-version
        all_dict = get_volume_options(self.mnode, 'all')
        max_op_version = int(all_dict['cluster.max-op-version'])

        # file_path: path of the vol info file
        # Check that the vol info file exists on all servers
        file_path = '/var/lib/glusterd/vols/' + self.volname + '/info'
        for server in self.servers:
            conn = g.rpyc_get_connection(server)
            ret = conn.modules.os.path.isfile(file_path)
            self.assertTrue(ret, "Vol file not found on server %s" % server)
            g.log.info("Vol file found on server %s", server)
        g.rpyc_close_deployed_servers()

        # Getting the version number from the vol info file
        ret, out, _ = g.run(self.mnode, "grep '^version' %s" % file_path)
        version_list = out.split('=')
        version_no = int(version_list[1]) + 1
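        # Illustration: with out == "version=2\n", version_no becomes 3,
        # the value the info file should hold after one successful vol set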

        # Comparing current op-version and max op-version
        if current_op_version < max_op_version:

            # Set max-op-version
            ret = set_volume_options(self.mnode, 'all',
                                     {'cluster.op-version': max_op_version})
            self.assertTrue(ret, "Failed to set max op-version for cluster")
            g.log.info("Setting up max-op-version is successful for cluster")
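
            # Note: a successful vol set bumps the 'version' field in the
            # info file by one, which the re-grep below verifies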

            # Grepping version number from vol info file after
            # vol set operation
            ret, out, _ = g.run(self.mnode, "grep '^version' %s" % file_path)
            version_list = out.split('=')
            after_version_no = int(version_list[1])

            # Comparing version number before and after vol set operations
            self.assertEqual(
                version_no, after_version_no,
                "Version number did not increase by one "
                "after the volume set operation")
            g.log.info("Version number increased by one after the "
                       "volume set operation")

            # Getting current op-version
            vol_dict = get_volume_options(self.mnode, 'all',
                                          'cluster.op-version')
            current_op_version = int(vol_dict['cluster.op-version'])

        # Checking current-op-version and max-op-version equal or not
        self.assertEqual(
            current_op_version, max_op_version,
            "Current op-version and max op-version are not the same")
        g.log.info("Current op-version and max op-version of the cluster "
                   "are the same")

    def test_snap_info_from_detached_node(self):
        # pylint: disable=too-many-statements
        """
        Create a volume with a single brick
        Create a snapshot
        Activate the created snapshot
        Enable USS on the volume
        Validate snap info on all the nodes
        Peer-detach one node
        Validate /var/lib/glusterd/snaps on the detached node
        Probe the detached node again
        """

        # Creating volume with single brick on one node
        servers_info_single_node = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        bricks_list = form_bricks_list(self.mnode, self.volname, 1,
                                       self.servers[0],
                                       servers_info_single_node)
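        # form_bricks_list() returns 'host:brick-path' entries, e.g.
        # ['<server>:/bricks/brick0/testvol_brick0'] (path is illustrative)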
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Creating a snapshot before the volume is started should fail
        self.snapname = "snap1"
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertNotEqual(ret, 0,
                            "Snapshot created without starting the volume")
        g.log.info("Snapshot creation failed as expected")

        # Start the volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to start the volume %s" % self.volname)
        g.log.info("Volume start succeeded")

        # Create a snapshot of the volume after volume start
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertEqual(
            ret, 0, "Snapshot creation failed on the volume %s" % self.volname)
        g.log.info("Snapshot create succeeded")

        # Activate snapshot created
        ret, _, err = snap_activate(self.mnode, self.snapname)
        self.assertEqual(
            ret, 0, "Snapshot activate failed with error: %s" % err)
        g.log.info("Snapshot activated successfully")

        # Enable uss
        self.vol_options['features.uss'] = 'enable'
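        # USS (User Serviceable Snapshots) exposes activated snapshots to
        # clients through a hidden .snaps directory on the mount point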
        ret = set_volume_options(self.mnode, self.volname, self.vol_options)
        self.assertTrue(
            ret, "gluster volume set %s features.uss "
            "enable failed" % self.volname)
        g.log.info("gluster volume set %s features.uss "
                   "enable succeeded", self.volname)

        # Validate that /var/lib/glusterd/snaps/<snapname> exists on all servers
        self.pathname = "/var/lib/glusterd/snaps/%s" % self.snapname
        for server in self.servers:
            conn = g.rpyc_get_connection(server)
            ret = conn.modules.os.path.isdir(self.pathname)
            self.assertTrue(
                ret, "%s directory doesn't exist on node %s" %
                (self.pathname, server))
            g.log.info("%s path exists on node %s", self.pathname, server)
        g.rpyc_close_deployed_servers()

        # Peer detach one node
        self.random_node_peer_detach = random.choice(self.servers[1:])
        ret = peer_detach_servers(self.mnode,
                                  self.random_node_peer_detach,
                                  validate=True)
        self.assertTrue(
            ret,
            "Peer detach of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer detach succeeded")

        # /var/lib/glusterd/snaps/<snapname> should not be present on the
        # detached node
        conn = g.rpyc_get_connection(self.random_node_peer_detach)
        ret = conn.modules.os.path.isdir(self.pathname)
        self.assertFalse(
            ret, "%s directory should not exist on node %s, "
            "which was detached from the cluster" %
            (self.pathname, self.random_node_peer_detach))
        g.log.info("Expected: %s path doesn't exist on peer detached node %s",
                   self.pathname, self.random_node_peer_detach)
        g.rpyc_close_deployed_servers()

        # Peer probe the detached node
        ret, _, _ = peer_probe(self.mnode, self.random_node_peer_detach)
        self.assertEqual(
            ret, 0,
            "Peer probe of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer probe succeeded")

        # Validating peers are in connected state
        count = 0
        while count < 10:
            sleep(2)
            ret = self.validate_peers_are_connected()
            if ret:
                break
            count += 1
        self.assertTrue(ret, "Peers are not in connected state")
        g.log.info("Peer are in connected state")

    @classmethod
    def tearDownClass(cls):
        """unittest tearDownClass override"""
        print("Tearing Down Class: %s" % cls.__name__)

        # rpyc should do this on script exit, but for cleanliness sake...
        g.rpyc_close_deployed_servers()