def write_data(self, share_name, num_of_files=100):
    # mount the share on the first client
    mnt_point = '/mnt/' + utils.lower_generator(8)
    mnt = self.call_mount(share_name, mnt_point)
    # write num_of_files files of 10MB random data each, fsync'd to disk
    for i in range(num_of_files):
        file_name = mnt_point + '/' + utils.lower_generator(8) + str(i)
        ctx.clients[0].execute(['dd', 'if=/dev/urandom', 'of=%s' % file_name,
                                'bs=1M', 'count=10', 'conv=fsync'])
    return mnt
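# Usage sketch (illustrative only, mirroring how the rebalance test below uses it):
# write_data returns the mount point so the caller can verify and then clean up
# with call_umount, e.g.:
#
#     mnt = self.write_data(share.name, num_of_files=50)
#     ctx.cluster.shares[share.name].verify_location()
#     self.call_umount(mnt)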
def test_poc_script_phase_5(self):
    def getconn():
        ssh_conn = SshConn(address=addr, username='******', password='******')
        econn = ExtendedConn(ssh_conn)
        return econn

    hypervisor = ctx.hypervisor
    with self.step('clone VM and boot'):
        vm, addr = self._clone_vm_on_vvol(hypervisor)
    with self.step('take snapshot'):
        vm.take_snapshot(self._SNAPSHOT_NAME, self._SNAPSHOT_DESC)
        snapshot = vm.get_snapshot(self._SNAPSHOT_NAME)
        assert snapshot, 'no snapshot created'
    with self.step('make a change to the VM'):
        randfile = '/tmp/' + utils.lower_generator(8)
        conn = getconn()
        assert not conn.exists(randfile), 'file already exists'
        # the redirection relies on the command being run through a shell on the remote side
        cmd = ['echo', 'hello', '>', randfile]
        conn.execute(cmd)
        assert conn.exists(randfile), 'file does not exist'
    with self.step('revert to snapshot'):
        vm.revert_to_snapshot(self._SNAPSHOT_NAME)
    with self.step('verify that the vm has reverted'):
        conn = getconn()
        assert not conn.exists(randfile), 'file still exists'
def test_019_susp_src_pd_ds_csm_during_rebalance1(self):
    # at least three PD-DSs should be in the system, plus a share
    assert TestDSFailure.pd_ds_count(self) >= 3
    TestDSFailure.check_ds_state_all(self)
    ds_list_used = []
    # pick two PD-DSs with the same label
    dst1_obs_obj, dst2_obs_obj = TestDSFailure.pick_two_pd_ds_with_same_label(self)
    ds_list_used.append(dst1_obs_obj)
    ds_list_used.append(dst2_obs_obj)
    # look for a DS with a different label than the two above
    src_obs_obj = TestDSFailure.pick_pd_ds(self, label=dst1_obs_obj.label,
                                           ds_list_used=ds_list_used)
    ds_list_used.append(src_obs_obj)
    src_obs_server = src_obs_obj.ds_share.server
    share = (ctx.cluster.cli.share_list())[0]
    src_dp = utils.lower_generator(8)
    dst_dp = utils.lower_generator(8)
    ctx.cluster.cli.data_profile_create(src_dp, [src_obs_obj.label.name])
    ctx.cluster.cli.data_profile_create(dst_dp, [dst1_obs_obj.label.name,
                                                 dst2_obs_obj.label.name])
    src_cls = utils.lower_generator(8)
    dst_cls = utils.lower_generator(8)
    ctx.cluster.cli.classifier_create(src_cls, ['Active,NOOP,%s,CREATE' % src_dp])
    ctx.cluster.cli.classifier_create(dst_cls, ['Active,NOOP,%s,CREATE' % dst_dp])
    ctx.cluster.cli.share_classifier_set('/', name=share.name, classifier_name=src_cls)
    # write 50 files
    mnt = TestDSFailure.write_data(self, share.name, 50)
    ctx.cluster.cli.share_classifier_set('/', name=share.name, classifier_name=dst_cls)
    ctx.cluster.cli.share_rebalance(share.name)
    # let the rebalance start
    sleep(5)
    src_obs_server.hw.power_off()
    ctx.cluster.wait_for_ds_state(src_obs_obj, expected_state=DsState.SUSPECTED,
                                  attempt=12, interval=10)
    # TODO: change to a random sleep of less than 30 minutes
    sleep(60)
    src_obs_server.hw.power_on()
    ctx.cluster.wait_for_ds_state(src_obs_obj, expected_state=DsState.ONLINE,
                                  attempt=50, interval=10)
    # TODO: replace the sleep below once PD-4273 is fixed (rebalance-finished event)
    sleep(120)
    ctx.cluster.shares[share.name].verify_location()
    self.call_umount(mnt)
def _clone_vm_on_vvol(self, hypervisor, vm_name=None):
    vm_name = vm_name or self._VMPREFIX.format(utils.lower_generator(5))
    vvol_ds = hypervisor._get_datastore_by_name(self._VVOL_NAME)
    folder = hypervisor.get_folder_by_name(self._CLIENTS_DIR)
    self._logger.info('Cloning template {!r} to folder {!r} '
                      'with name {!r} in datastore {!r}'.format(
                          self._TEMPLATE, self._CLIENTS_DIR,
                          vm_name, self._VVOL_NAME))
    clonetask = hypervisor.clone_vm(self._TEMPLATE, vm_name,
                                    datastore=vvol_ds, folder=folder)
    client = hypervisor.get_first_vm(vm_name)
    self._logger.debug('Clone succeeded, powering on VM')
    client.poweron()
    self._logger.debug('Waiting for IP')
    address = client.wait_for_ip()
    return client, address
def test_poc_script_phase_1(self):
    pd_dss = ctx.data_stores.filter(NodeType.PD)
    other_dss = ctx.data_stores - pd_dss
    for ds in pd_dss:
        with self.step('pd-ds node-add'):
            ds_name = ds.get_hostname()
            un_node = ctx.cluster.unauthenticated_nodes[ds_name]
            ctx.cluster.cli._node_add(ds, name=un_node.name,
                                      clish_username='******',
                                      clish_password='******')
        with self.step('pd-ds volume-add'):
            lv = ds.get_free_export()
            ctx.cluster.cli.ds_add(node_name=ds_name,
                                   logical_volume_name=lv.name,
                                   clish_username='******',
                                   clish_password='******')
    for ds in other_dss:
        ds_name = 'poc_{0}'.format(utils.lower_generator(5))
        lv = ds.create_share(join(choice(ds.get_fses()).name, ds_name))
        with self.step('3party node-add'):
            ctx.cluster.cli._node_add(ds, name=ds_name,
                                      clish_username='******',
                                      clish_password='******')
        with self.step('3party ds-add'):
            ctx.cluster.cli.ds_add(node_name=ds_name,
                                   logical_volume_name=lv.name,
                                   clish_username='******',
                                   clish_password='******')
    with self.step('share-create'):
        share = ctx.cluster.cli.share_create(name='poc', path='/poc',
                                             export_option='*,RW,no-root-squash',
                                             clish_username='******',
                                             clish_password='******')
    for dp in ctx.data_portals:
        with self.step('data-portal node-add'):
            dp_name = dp.get_hostname()
            un_node = ctx.cluster.unauthenticated_nodes[dp_name]
            ctx.cluster.cli._node_add(dp, name=un_node.name,
                                      clish_username='******',
                                      clish_password='******')
    secs = 60
    self._logger.debug('Sleeping for {} s'.format(secs))
    sleep(secs)
    with self.step('Register vasa provider'):
        hypervisor = ctx.hypervisors[0]
        vasa_address = ctx.data_director.address
        vasa_username, vasa_password = ctx.data_director.auth
        hypervisor.register_vasa_provider(address=vasa_address,
                                          prov_username=vasa_username,
                                          prov_password=vasa_password,
                                          name=_POC_VASA_NAME)
    sleep(secs)
    with self.step('Add vvol datastore'):
        hosts = [host.name for host in hypervisor._get_hosts()]
        hypervisor.add_vvol_datastore(self._VVOL_NAME, share.name,
                                      hosts_list=hosts)
    sleep(secs * 3)
    with self.step('Create vm on VVOL'):
        client, address = self._clone_vm_on_vvol(hypervisor)
        ctx.clients += Client(address=address, username='******',
                              password='******',
                              hw_mgmt=(hypervisor._address,
                                       hypervisor._username,
                                       hypervisor._password,
                                       client.name))
    sleep(secs * 3)
def node_add_netapp_cmode(self, name_size=12, cert=None, verify=True):
    name = utils.lower_generator(size=name_size)
    node_add(NodeType.NETAPP_CMODE, name, cert=cert)
    if verify:
        self.verification(node_name=name, node_type=NodeType.NETAPP_CMODE)
def node_add_nexenta(self, name_size=12, cert=None, verify=True):
    name = utils.lower_generator(size=name_size)
    node_add(NodeType.NEXENTA, name, cert=cert)
    if verify:
        self.verification(node_name=name, node_type=NodeType.NEXENTA)
def node_add_isilon(self, name_size=12, cert=None, verify=True):
    name = utils.lower_generator(size=name_size)
    node_add(NodeType.EMC_ISILON, name, cert=cert)
    if verify:
        self.verification(node_name=name, node_type=NodeType.EMC_ISILON)
def add_shares(self, amount, nodes):
    for i in xrange(amount):
        # generate a distinct name per iteration and create that share on every node
        share_name = utils.lower_generator(10)
        for node in nodes:
            node.create_share(share_name)
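# Usage sketch (hypothetical values, reusing the filtered node collections from the
# phase-1 test above): create five uniquely named shares on every PD data store, e.g.:
#
#     self.add_shares(5, ctx.data_stores.filter(NodeType.PD))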