def gluster_basic_test():
    """
    Creates, starts and mounts a volume, copies some data onto the mount
    and then stops and deletes the volume.

    Returns True on success and False on failure.
    """
    tc.logger.info("Testing gluster volume create and mounting")
    volname = tc.config_data['VOLNAME']
    mount_type = tc.config_data['MOUNT_TYPE']
    mountpoint = tc.config_data['MOUNTPOINT']
    mnode = tc.nodes[0]
    client = tc.clients[0]
    _rc = True
    ret = setup_vol()
    if not ret:
        tc.logger.error("Unable to setup the volume %s" % volname)
        return False
    tc.run(mnode, "gluster volume status %s" % volname)
    ret, _, _ = mount_volume(volname, mount_type, mountpoint, mclient=client)
    if ret != 0:
        tc.logger.error("mounting volume %s failed" % volname)
        _rc = False
    else:
        ret, _, _ = tc.run(client, "cp -r /etc %s" % mountpoint)
        if ret != 0:
            tc.logger.error("cp failed on the mountpoint")
            _rc = False
        umount_volume(client, mountpoint)
    ret = stop_volume(volname)
    if not ret:
        _rc = False
    ret = delete_volume(volname)
    if not ret:
        _rc = False
    return _rc
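
# Illustrative only: a minimal sketch of the config values gluster_basic_test()
# reads from tc.config_data. The dict name and the example values below are
# hypothetical, shown here for documentation; they are not part of the framework.
_EXAMPLE_BASIC_TEST_CONFIG = {
    'VOLNAME': 'testvol',          # volume name created by setup_vol()
    'MOUNT_TYPE': 'glusterfs',     # mount protocol passed to mount_volume()
    'MOUNTPOINT': '/mnt/testvol',  # client-side mount path used for I/O
}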
def check_geo_arequal_status(master, mastervol, slave, slavevol,
                             timeout=600, mclient='', sclient=''):
    """
    Checks the arequal checksum of master and slave.

    Returns True if the arequal checksum matches between master and slave.
    Returns False if the arequal checksum differs between master and slave.
    """
    if mclient == '':
        mclient = tc.clients[0]
    if sclient == '':
        sclient = tc.clients[0]
    master_mount = '/mnt/master'
    slave_mount = '/mnt/slave'
    retm, _, _ = mount_volume(mastervol, mpoint=master_mount, mserver=master,
                              mclient=mclient)
    rets, _, _ = mount_volume(slavevol, mpoint=slave_mount, mserver=slave,
                              mclient=sclient)
    if retm != 0 or rets != 0:
        tc.logger.error("Failed to mount the master or slave volume")
        return False
    rc = False
    while timeout >= 0:
        retm = tc.run_async(mclient, "/usr/local/bin/arequal-checksum -p %s"
                            % master_mount)
        rets = tc.run_async(sclient, "/usr/local/bin/arequal-checksum -p %s"
                            % slave_mount)
        retm.wait()
        rets.wait()
        retm = retm.value()
        rets = rets.value()
        tc.logger.debug("The arequal-checksum of master is %s" % retm[1])
        tc.logger.debug("The arequal-checksum of slave is %s" % rets[1])
        if retm[0] != 0 or rets[0] != 0:
            tc.logger.error("arequal returned error. Check glusterfs logs")
        elif retm[1] != rets[1]:
            tc.logger.debug("arequal-checksum does not match between master "
                            "and slave")
        else:
            tc.logger.info("arequal-checksum of master and slave match")
            rc = True
            break
        time.sleep(120)
        timeout = timeout - 120
    umount_volume(mclient, master_mount)
    umount_volume(sclient, slave_mount)
    return rc
def check_geo_filecount_status(master, mastervol, slave, slavevol,
                               timeout=1200, mclient='', sclient=''):
    """
    Checks the number of files in master and slave.

    Returns True if the number of files is the same in master and slave.
    Returns False if the number of files differs between master and slave.
    """
    if mclient == '':
        mclient = tc.clients[0]
    if sclient == '':
        sclient = tc.clients[0]
    master_mount = '/mnt/master'
    slave_mount = '/mnt/slave'
    retm, _, _ = mount_volume(mastervol, mpoint=master_mount,
                              mserver=master, mclient=mclient)
    rets, _, _ = mount_volume(slavevol, mpoint=slave_mount,
                              mserver=slave, mclient=sclient)
    if retm != 0 or rets != 0:
        tc.logger.error("Failed to mount the master or slave volume")
        return False
    rc = False
    while timeout >= 0:
        retm = tc.run_async(mclient, "find %s | wc -l" % master_mount)
        rets = tc.run_async(sclient, "find %s | wc -l" % slave_mount)
        retm.wait()
        rets.wait()
        retm = retm.value()
        rets = rets.value()
        tc.logger.debug("The number of files in master is %s" % int(retm[1]))
        tc.logger.debug("The number of files in slave is %s" % int(rets[1]))
        if retm[0] != 0 or rets[0] != 0:
            tc.logger.error("find returned error. Please check glusterfs logs")
        elif int(retm[1]) != int(rets[1]):
            tc.logger.debug("filecount doesn't match between master and slave")
        else:
            tc.logger.info("filecount of master and slave match")
            rc = True
            break
        time.sleep(120)
        timeout = timeout - 120
    umount_volume(mclient, master_mount)
    umount_volume(sclient, slave_mount)
    return rc
def run(self):
    """
    Mounts the volume, copies /etc onto the mount and cleans up.
    """
    _rc = True
    client = self.clients[0]
    tc.run(self.mnode, "gluster volume status %s" % self.volname)
    ret, _, _ = mount_volume(self.volname, self.mount_proto,
                             self.mountpoint, mclient=client)
    if ret != 0:
        tc.logger.error("Unable to mount the volume %s on %s. "
                        "Please check the logs" % (self.volname, client))
        return False
    ret, _, _ = tc.run(client, "cp -r /etc %s" % self.mountpoint)
    if ret != 0:
        tc.logger.error("cp failed in %s. Please check the logs" % client)
        _rc = False
    tc.run(client, "rm -rf %s/etc" % self.mountpoint)
    umount_volume(client, self.mountpoint)
    return _rc
def setup_meta_vol(servers=''):
    """
    Creates, starts and mounts the gluster meta-volume on the servers
    specified.
    """
    if servers == '':
        servers = tc.nodes
    meta_volname = 'gluster_shared_storage'
    mount_point = '/var/run/gluster/shared_storage'
    metav_dist = int(tc.config_data['META_VOL_DIST_COUNT'])
    metav_rep = int(tc.config_data['META_VOL_REP_COUNT'])
    _num_bricks = metav_dist * metav_rep
    repc = ''
    if metav_rep > 1:
        repc = "replica %d" % metav_rep
    bricks = ''
    brick_root = "/bricks"
    _n = 0
    for i in range(0, _num_bricks):
        bricks = "%s %s:%s/%s_brick%d" % (bricks, servers[_n], brick_root,
                                          meta_volname, i)
        if _n < len(servers) - 1:
            _n = _n + 1
        else:
            _n = 0
    gluster_cmd = "gluster volume create %s %s %s force" \
                  % (meta_volname, repc, bricks)
    ret = tc.run(servers[0], gluster_cmd)
    if ret[0] != 0:
        tc.logger.error("Unable to create meta volume")
        return False
    ret = start_volume(meta_volname, servers[0])
    if not ret:
        tc.logger.error("Unable to start the meta volume")
        return False
    time.sleep(5)
    for server in servers:
        ret = mount_volume(meta_volname, 'glusterfs', mount_point, server,
                           server)
        if ret[0] != 0:
            tc.logger.error("Unable to mount meta volume on %s" % server)
            return False
    return True
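
# Illustrative only: with META_VOL_DIST_COUNT=2, META_VOL_REP_COUNT=2 and two
# servers ['server1', 'server2'] (hypothetical names), the round-robin loop in
# setup_meta_vol() above ends up running a command of this shape:
#
#   gluster volume create gluster_shared_storage replica 2 \
#       server1:/bricks/gluster_shared_storage_brick0 \
#       server2:/bricks/gluster_shared_storage_brick1 \
#       server1:/bricks/gluster_shared_storage_brick2 \
#       server2:/bricks/gluster_shared_storage_brick3 force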
def geo_rep_basic_test(fop, cd="changelog", history=False):
    """
    Basic geo-rep test: creates data on the master with the given fop and
    verifies that it syncs to the slave using the given change detector.
    """
    mountbroker = ""
    guser = tc.config_data["GEO_USER"]
    if tc.config_data["MOUNTBROKER"] == "True":
        mountbroker = "%s@" % guser
    if not hasattr(tc, 'geo_rep_setup'):
        tc.geo_rep_setup = False
    if not tc.geo_rep_setup:
        ret = setup_geo_rep_session()
        if not ret:
            tc.logger.error("Unable to create geo_rep session. Skipping test")
            return False
    mastervol = tc.config_data["MASTERVOL"]
    mountpoint = tc.config_data["MOUNTPOINT"]
    mnode = tc.gm_nodes[0]
    snode = tc.gs_nodes[0]
    slavevol = tc.config_data["SLAVEVOL"]
    if len(tc.clients) >= 2:
        mclient = tc.clients[0]
        sclient = tc.clients[1]
    else:
        mclient = tc.clients[0]
        sclient = tc.clients[0]
    ret = set_change_detector(mastervol, slavevol, cd)
    if not ret:
        tc.logger.error("change detector set failed. Marking test case FAIL")
        return False
    if history:
        ret = tc.run(mnode, "gluster volume geo-replication %s %s%s::%s stop"
                     % (mastervol, mountbroker, snode, slavevol))
        if ret[0] != 0:
            tc.logger.error("Unable to stop geo-rep session in history tests")
            return False
    ret, _, _ = mount_volume(mastervol, tc.config_data["MOUNT_TYPE"],
                             mountpoint, mnode, mclient)
    if ret != 0:
        tc.logger.error("Unable to mount the volume. Marking the test as FAIL")
        return False
    ret = create_geo_rep_data(mclient, mountpoint, fop, guser)
    if not ret:
        tc.logger.error("Data creation failed. Marking the test as FAIL")
        return False
    if history:
        ret = tc.run(mnode, "gluster volume geo-replication %s %s%s::%s start"
                     % (mastervol, mountbroker, snode, slavevol))
        if ret[0] != 0:
            tc.logger.error("Unable to start geo-rep session in history tests")
            return False
    ret = check_geo_filecount_status(mnode, mastervol, snode, slavevol,
                                     mclient=mclient, sclient=sclient)
    if not ret:
        tc.logger.error("filecount does not match. Marking testcase as FAIL")
        return False
    ret = check_geo_arequal_status(mnode, mastervol, snode, slavevol,
                                   mclient=mclient, sclient=sclient)
    if not ret:
        tc.logger.error("arequal checksums do not match. Marking the test FAIL")
        return False
    return True
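
# Illustrative only: a hypothetical wrapper showing how geo_rep_basic_test()
# is typically driven by an individual test case. 'create' is an example fop
# name; the set of valid fops is defined by create_geo_rep_data(), not here.
def example_georep_create_test():
    """Sketch: exercise geo-rep syncing of file creation via changelog."""
    return geo_rep_basic_test("create", cd="changelog")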