def UseFileInMountPoint(volume):
    output = getoutput('mount | grep %s | awk \'{print $3}\'' \
            %(volume['mountPoint']))
    mount_point = output[0].strip('\n')
    exe_vdb = executeVdbenchFile(volume, VdbFile)
    # sleep before trying to umount the volume on which vdbench is running
    time.sleep(20)
    logging.info('Now unmounting the mountPoint which is in use')
    check_vdbench = is_vdbench_alive(volume['name'])
    if check_vdbench:
        umount_output = executeCmd('umount mount/%s' %(volume['mountPoint']))
        if umount_output[0] == 'FAILED':
            print 'expected result : %s' %(umount_output[1])
            logging.debug('expected result: %s', umount_output[1])
            umountFail = True
            return ['PASSED', umountFail]
        else:
            print 'Unexpected Result: Unmount happened when file in '\
                    'mountPoint is in use'
            kill_process(volume['name'])
            return ['FAILED', 'Unmount happened when file in mountPoint is in use']
    else:
        return ['FAILED', 'Vdbench stopped, MountPoint not in use']
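# The mount-point lookup above shells out through `mount | grep | awk`. Below is
# a minimal sketch of the same lookup done with the standard library instead
# (find_mount_dir is a hypothetical helper, not part of this suite; it assumes
# the export path appears in the source field of `mount` output).
import subprocess

def find_mount_dir(export_path):
    """Return the local directory where export_path is mounted, or None."""
    for line in subprocess.check_output(['mount']).decode().splitlines():
        fields = line.split()
        # typical `mount` line: <source> on <dir> type <fstype> (<options>)
        if len(fields) >= 3 and export_path in fields[0]:
            return fields[2]
    return None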
Example #2
    logging.debug('Added nfs client "ALL" to the volume')

    volume = {'TSMIPAddress' : tsmIP, 'mountPoint': vol_mntPoint,\
     'name' : volname}

    nfsMount = mountNFS(volume)
    if nfsMount[0] == 'FAILED':
        endTime = ctime()
        msg = 'failed to mount NFS share "%s"' % (volume['name'])
        logAndresult(testcase, 'FAILED', msg, startTime, endTime)
    logging.info('Mounted Nfs Share "%s" successfully', volname)

    mount_point =  getoutput('mount | grep %s | awk \'{print $3}\'' \
          %(volume['mountPoint']))
    mount_point = mount_point[0].strip('\n')

    executeVdbenchFile(volume, 'filesystem_nfs')
    check_vdbench = is_vdbench_alive(volname)
    time.sleep(1)
    while True:
        mountDetails = mountPointDetails('-m', mount_point)
        Used = mountDetails[2]
        if int(Used) >= 1000:
            logging.debug('vdbench has successfully created a 1 GB file')
            logging.debug('going to stop vdbench after 10 seconds...')
            time.sleep(10)
            break
        check_vdbench = is_vdbench_alive(volname)
        if check_vdbench:
            continue
        else:
            logging.debug('vdbench has stopped unexpectedly....')
            break
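# mountPointDetails('-m', ...) above is assumed to wrap `df -m` and return the
# used size (MB) as its third element. A minimal sketch of an equivalent
# used-space check via os.statvfs (used_mb is a hypothetical helper, shown only
# to illustrate the 1 GB polling condition):
import os

def used_mb(path):
    """Return the space consumed on the filesystem holding `path`, in MB."""
    st = os.statvfs(path)
    return (st.f_blocks - st.f_bfree) * st.f_frsize // (1024 * 1024)

# e.g. the loop condition above could be written as: used_mb(mount_point) >= 1000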
Example #3
iqn = verify_iqn(iqn)
logging.debug('iqn for discovered iSCSI LUN... %s', iqn)

login_result = iscsi_login_logout(iqn, VSM_IP, 'login')
verify_iscsi_operation(login_result, volumeDict['name'], 'login')
time.sleep(2)
result = getDiskAllocatedToISCSI(VSM_IP, mnt_point)
iscsi_device = verify_getDiskAllocatedToISCSI(result, mnt_point)
result = execute_mkfs(iscsi_device, 'ext3')
verify_execute_mkfs(result)
mount_result = mount_iscsi(iscsi_device, volumeDict['name'])
verify_mount(mount_result)
#mount_dir = 'mount/%s' %(volumeDict['name'])
mount_dir = {'name': volumeDict['name'], 'mountPoint': volumeDict['name']}
logging.info('...executing vdbench....')
executeVdbenchFile(mount_dir, 'filesystem_iscsi')
time.sleep(20)
logging.info('verifying the IOPS before Node goes to reset...')
iops_datapath = poolName + '/' + accName + tsm_name + '/' + volumeDict['name']
verify_IOPS(NODE1_IP, PASSWD, iops_datapath)
logging.debug('going to move node to reset...')
getControllerInfo(NODE1_IP, PASSWD, 'reboot', 'reboot.txt')

logging.debug('verifying pool import at peer Node: %s', NODE2_IP)
verify_pool_import(poolName, NODE2_IP, PASSWD)
logging.debug(
    'before verifying IOPS after reset the Node, sleeping for 10secs')
time.sleep(10)
logging.debug('verifying IOPS at peer Node IP: %s', NODE2_IP)
verify_IOPS_afterHA(NODE2_IP, PASSWD, iops_datapath)
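# iscsi_login_logout() used above is an external helper. A minimal sketch of how
# such a login/logout could be driven with the standard open-iscsi CLI
# (iscsi_session is a hypothetical name; assumes open-iscsi is installed and the
# target portal listens on the default port 3260):
import subprocess

def iscsi_session(iqn, portal_ip, action):
    """Run iscsiadm login/logout against the target; action is 'login' or 'logout'."""
    cmd = ['iscsiadm', '--mode', 'node', '--targetname', iqn,
           '--portal', '%s:3260' % portal_ip, '--%s' % action]
    return subprocess.call(cmd)  # 0 on success, non-zero on failure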
Example #4
else:
    endTime = ctime()
    print 'Not able to list TSM "%s" due to: %s' % (tsmIP, tsmList[1])
    logAndresult(testcase, 'BLOCKED', tsmList[1], startTime, endTime)


vol1 = {'name': 'nfsDataset1', 'tsmid': tsmID, \
    'datasetid': datasetID, 'protocoltype': 'NFS', 'iops': 200}

#creating volume and mounting the same
final_result1 = addVol_client_Mount(vol1, stdurl, tsmID, tsmName, ClientIP)

#running vdbench on the first volume
logging.info('Running vdbench using a parameter file')
exe1 = executeVdbenchFile(final_result1[0], VdbFile)
logging.info('waiting for 30s for vdbench to run on first dataset')
time.sleep(30)
check_vdbench1 = is_vdbench_alive(vol1['name'])

if check_vdbench1:
    logging.info('vdbench is running on first dataset....')
    logging.info('Adding one more NFS dataset to the same VSM and '\
            'mounting it on the same client and running vdbench on it')

    #while Ios are running creating another volume to the same tenant
    vol2 = {'name': 'nfsDataset2', 'tsmid': tsmID, \
        'datasetid': datasetID, 'protocoltype': 'NFS', 'iops': 200}
    final_result2 = addVol_client_Mount(vol2, stdurl, tsmID, tsmName, ClientIP)
    time.sleep(2)
    logging.info('Checking whether vdbench is running on the first dataset '
            'after adding the second dataset')
Example #5
volume = {'TSMIPAddress' : tsmIP, 'mountPoint': vol_mntPoint,\
                'name' : volname}

startTime = ctime()
logging.info("Mounting NFS Share '%s'", volname)
nfsMount = mountNFS(volume)
if nfsMount[0] == 'PASSED':
    logging.info('Mounted Nfs Share "%s" successfully', volume['name'])
else:
    endTime = ctime()
    msg = 'failed to mount NFS share "%s"' % volume['name']
    logAndresult(testcase, 'FAILED', msg, startTime, endTime)

logging.info('Running vdbench using a parameter file')
exe = executeVdbenchFile(volume, VdbFile)
check_vdbench = is_vdbench_alive(volname)
if check_vdbench:
    logging.info('waiting for 2mins for vdbench to run....')
    time.sleep(120)  #waiting for 2mins for vdbench to run
else:
    logging.debug("vdbench didn't start")

startTime = ctime()
check_vdbench = is_vdbench_alive(volname)
if check_vdbench:
    logging.info('vdbench is running')
    logging.info('While IOs are running adding client to exports')
    startTime = ctime()
    setalldir = allDirEdit(volid, ClientIP)
    if setalldir[0] == 'FAILED':
Example #6
mountResult = mount_iscsi(device, VOL_NAME)
if mountResult[0] == 'PASSED':
    CLIENT_MOUNT_PNT = mountResult[1]
    print 'volume is mounted successfully at %s' % (CLIENT_MOUNT_PNT)
else:
    print "Volume is not mounted"
    logging.error('Testcase AMSx-WLAx-NSWx-ISCSI-IORx-TC_CONTINUOUS_ISTGTCONTROL_REFRESH is blocked due to' \
            ': %s', mountResult)
    is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)

# Creating Dictionary for vdbench use
dicForVdbenchUse = {'name': VOL_NAME, 'mountPoint': CLIENT_MOUNT_PNT}

# Executing vdbench
logging.info('...executing vdbench....')
executeVdbenchFile(dicForVdbenchUse, 'filesystem_nfs')
dataSize = 1000
# wait till vdbench complete seeding...
time.sleep(20)  # sleep according to data size given for seeding

# verify IOPS are running or not
iops = get_iops_by_api(volId, STDURL)
if iops[0] == 'FAILED':
    logging.error('Testcase AMSx-WLAx-NSWx-ISCSI-IORx-TC_CONTINUOUS_ISTGTCONTROL_REFRESH is blocked due to' \
            ': %s', iops[1])
    is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)

iops = iops[1]
if iops > 0:
    print 'iops are running fine...'
    logging.info('IOPS are running fine after starting vdbench...')
Example #7
logging.info('listing volume...')
volumes = listVolumeWithTSMId_new(STDURL, tsm_id)
volumes = verify_list_volumes(volumes)
vol_id, account_id, mnt_point = get_vol_id(volumes, volumeDict['name'])
logging.debug('volume_id: %s, account_id: %s, and mountpoint: %s', vol_id, \
        account_id, mnt_point)
volume_dir = {'mountPoint': mnt_point, 'TSMIPAddress': VSM_IP, 'name': \
        volumeDict['name']}
add_client_result = addNFSclient(STDURL, vol_id, 'ALL')
verify_ddNFSclient(add_client_result, 'ALL', volumeDict['name'])
mount_result = mountNFS(volume_dir)
verify_mountNFS(mount_result, volume_dir)
mount_dir = 'mount/%s' %(mnt_point)
mount_dir2 = {'name': volumeDict['name'], 'mountPoint': volumeDict['name']}
logging.info('...executing vdbench....')
executeVdbenchFile(mount_dir2, 'filesystem_nfs')
time.sleep(20)
logging.info('verifying the IOPS before Node goes to reset...')
iops_datapath = poolName+'/'+accName+tsm_name+'/'+volumeDict['name']
verify_IOPS(NODE1_IP, PASSWD, iops_datapath)
logging.debug('going to move node to reset...')
getControllerInfo(NODE1_IP, PASSWD, 'reboot', 'reboot.txt')

logging.debug('verifying pool import at peer Node: %s', NODE2_IP)
verify_pool_import(poolName, NODE2_IP, PASSWD)
logging.debug('before verifying IOPS after reset the Node, sleeping for 10secs')
time.sleep(10)
logging.debug('verifying IOPS at peer Node IP: %s', NODE2_IP)
verify_IOPS_afterHA(NODE2_IP, PASSWD, iops_datapath)
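# verify_pool_import() used above is an external helper. One way such a check
# could be scripted is to list imported pools on the peer node over SSH and look
# for the pool name (pool_is_imported is a hypothetical name; assumes key-based
# root SSH access to the node):
import subprocess

def pool_is_imported(pool_name, node_ip):
    """Return True if pool_name shows up in `zpool list` on the given node."""
    out = subprocess.check_output(
            ['ssh', 'root@%s' % node_ip, 'zpool', 'list', '-H', '-o', 'name'])
    return pool_name in out.decode().split()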

Example #8
#Wait till Node come up and move it to available state
mnt_iscsi = iscsi_mount_flow(volname, tsmIP, vol_iqn, vol_mntPoint, 'ext3')
endTime = ctime()
if mnt_iscsi[0] == 'FAILED':
    logAndresult(tcName, 'BLOCKED', mnt_iscsi[1], startTime, endTime)
logging.debug('%s', mnt_iscsi[1])
device, iqn = mnt_iscsi[3], mnt_iscsi[2]
#******************************************************************************

logging.info('Iops details of pool and Volume is as follows:')
logging.info('Pool iops : %s, volume iops : %s, remaining pool iops : %s', \
        pool_iops, volIops, (pool_iops - volIops))

#***************Operations during Vdbench Execution****************************

####----------------Vdbench Exection---------------------------------------####
executeVdbenchFile(volume, 'filesystem_iscsi')
check_vdbench = is_vdbench_alive(volname)
time.sleep(1)
startTime = ctime()
#####--------Running Vdbench for certain time before enabling Grace--------####
logging.info('Running vdbench for certain time....')
for x in range(1, 4):
    time.sleep(60)
    check_vdbench = is_vdbench_alive(volname)
    if not check_vdbench:
        endTime = ctime()
        msg = 'Vdbench has stopped, hence cannot validate Grace feature'
        logAndresult(tcName, 'BLOCKED', msg, startTime, endTime)
x1 = x * 60
time.sleep(30)
x1 = x1 + 30
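# is_vdbench_alive() polled in the loop above is an external helper. A minimal
# sketch of one way such a liveness check could work, assuming the vdbench java
# process carries the volume name on its command line (vdbench_running is a
# hypothetical name, not the suite's actual implementation):
import subprocess

def vdbench_running(volume_name):
    """Return True if a vdbench process mentioning volume_name is running."""
    # pgrep -f matches the full command line; exit status 0 means at least one match
    return subprocess.call(['pgrep', '-f', 'vdbench.*%s' % volume_name]) == 0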
Example #9
    logging.error('Testcase Multiple_Sighups_During_File_IO is blocked due to' \
            ': %s', result[1])
    is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)

volDir = {'mountPoint': vol_mnt_pt, 'TSMIPAddress': VSM_IP, 'name': \
        VOL_NAME}
mountResult = mountNFS_new(volDir)
verify_mount(mountResult)
volMntPoint = 'mount/%s' % (vol_mnt_pt)

# Creating Dictionary for vdbench use
dic_for_vdbench_use = {'name': VOL_NAME, 'mountPoint': volMntPoint}

# Executing vdbench
logging.info('...executing vdbench....')
executeVdbenchFile(dic_for_vdbench_use, 'filesystem_nfs')
data_size = 1000
# wait till vdbench complete seeding...
time.sleep(60)  # sleep according to data size given for seeding

# verify IOPS are running or not
iops = get_iops_by_api(vol_id, STDURL)
if iops[0] == 'FAILED':
    logging.error('Testcase Multiple_Sighups_During_File_IO is blocked due to' \
            ': %s', iops[1])
    is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)

iops = iops[1]
if iops > 0:
    print 'iops are running fine...'
    logging.info('IOPS are running fine after starting vdbench...')
Example #10
            %(tsmIP) + tsmList[1]
    logAndresult(testcase, 'BLOCKED', tsmList[1], startTime, endTime)

# creating volume and mounting the same
vol1 = {'name': 'nfsSmallVol3', 'tsmid': tsmID, 'quotasize': '25G', \
    'datasetid': datasetID, 'protocoltype': 'NFS', 'iops': 200}

final_result1 = addVol_client_Mount(vol1, stdurl, tsmID, tsmName, ClientIP)

# creating another volume and mounting the same
vol2 = {'name': 'nfsSmallVol4', 'tsmid': tsmID, 'quotasize': '25G', \
        'datasetid': datasetID, 'protocoltype': 'NFS', 'iops': 200}
final_result2 = addVol_client_Mount(vol2, stdurl, tsmID, tsmName, ClientIP)

# executing vdbench
exe = executeVdbenchFile(final_result1[0], VdbFile)
check_vdbench = is_vdbench_alive(vol1['name'])

mount_point1 = getoutput('mount | grep %s | awk \'{print $3}\'' \
                %(final_result1[0]['mountPoint']))
mount_point1 = mount_point1[0].strip('\n')

startTime = ctime()
while True:
    mountDetails = mountPointDetails('-m', mount_point1)
    Used = mountDetails[2]
    if int(Used) >= 20480:
        logging.info('Vdbench has filled a large file of 20G, waiting for 30s')
        time.sleep(30)
        break
    else: