def verify_IOPS(NODE1_IP, PASSWD, iops_datapath):
    # Confirm that I/O is actively running against the given dataset by
    # sampling the controller's QoS stats twice (the remote command itself
    # samples twice, 2s apart; a further 3s separates the two captures)
    # and diffing the two output files: identical captures mean the
    # counters are static, i.e. no IOPS.  Blocks the test case via
    # is_blocked() instead of returning on failure.
    cmd = 'reng stats access dataset %s qos | head -n 4 ; sleep 2 ;\
 echo "-----------------"; reng stats access dataset %s qos |\
 head -n 4' % (iops_datapath, iops_datapath)
    logging.debug('executing the command %s in controller', str(cmd))
    # First sample of the QoS counters.
    iops_res = getControllerInfo(NODE1_IP, PASSWD, cmd, 'beforeHAIO1.txt')
    print iops_res
    logging.debug('iops result is %s', (iops_res))
    logging.debug('sleeping for 3 seconds before fatching new IOPS value...')
    time.sleep(3)
    logging.debug('executing the command %s in controller', str(cmd))
    # Second sample, written to a different file so the two can be diffed.
    iops_res = getControllerInfo(NODE1_IP, PASSWD, cmd, 'beforeHAIO2.txt')
    print iops_res
    logging.debug('iops result is %s', (iops_res))
    # NOTE(review): usage implies executeCmdNegative reports 'FAILED' when
    # diff finds NO difference (counters unchanged) -- confirm its return
    # convention.
    io_output = executeCmdNegative('diff beforeHAIO1.txt beforeHAIO2.txt')
    if io_output[0] == 'FAILED':
        msg = 'IOPS are not running, please make sure to run IOPS properly'
        logging.debug('Compared result: %s, Error: %s', io_output[0], msg)
        logging.debug('IOPS Error: Not going to reset the Node...')
        # startTime / FOOTER_MSG / BLOCKED_MSG are module-level globals.
        is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)
    elif io_output[0] == 'PASSED':
        msg = "Iops are running fine"
        logging.debug('Compared result: %s', msg)
        print msg
        return
    else:
        print "problem in comparing files"
        logging.error('problem in comparing files')
        is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)
def node_upgrade(node_ip, node_user, node_pwd, patchnum, patchpath): patch_dir_prepare(node_ip, node_user, node_pwd, patchnum, patchpath) patch_type = 'nodeha' apply_patch(patch_type, node_ip, node_user, node_pwd, patchnum) passCmdToPanic(node_ip, node_pwd, 'sysctl kern.coredump_on_panic=0; sysctl debug.kdb.panic=1') time.sleep(300) node_up_status = bring_up_check(node_ip) if node_up_status == True: patch_type = 'kernel' apply_patch(patch_type, node_ip, node_user, node_pwd, patchnum) getControllerInfo(node_ip, node_pwd, 'reboot', 'reboot.txt') time.sleep(300) node_up_status = bring_up_check(node_ip) if node_up_status == True: time.sleep(30) #Grace period before state change available_state = change_node_state(STDURL, node_ip, 'available') if available_state == 'FAILED': print "failed to move node to available state, unable to proceed" exit() else: print "Node is available. Patch application on this node is Complete" else: print "box did not come up after 20 min, unable to proceed" exit() else: print "box did not come up after 20 min, unable to proceed" exit()
def verify_IOPS_afterHA(NODE2_IP, PASSWD, iops_datapath):
    # Verify that I/O resumed on the peer node after an ungraceful HA
    # failover, by diffing two QoS stat samples (same technique as
    # verify_IOPS); records the verdict via resultCollection instead of
    # blocking, except for an unexpected comparison result.
    # NOTE(review): endTime is captured BEFORE the sampling runs, so the
    # recorded window ends before the checks happened -- confirm intent.
    endTime = ctime()
    cmd = 'reng stats access dataset %s qos | head -n 4 ; sleep 2 ;\
 echo "-----------------"; reng stats access dataset %s qos |\
 head -n 4' % (iops_datapath, iops_datapath)
    logging.debug('executing the command %s in controller', str(cmd))
    # First sample of the QoS counters on the peer node.
    iops_res = getControllerInfo(NODE2_IP, PASSWD, cmd, 'afterHAIO1.txt')
    print iops_res
    logging.debug('iops result is %s', (iops_res))
    logging.debug('sleeping for 3 seconds before fatching new IOPS value...')
    time.sleep(3)
    logging.debug('executing the command %s in controller', str(cmd))
    iops_res = getControllerInfo(NODE2_IP, PASSWD, cmd, 'afterHAIO2.txt')
    print iops_res
    logging.debug('iops result is %s', (iops_res))
    # Identical samples ('FAILED' diff) mean the counters are static.
    io_output = executeCmdNegative('diff afterHAIO1.txt afterHAIO2.txt')
    if io_output[0] == 'FAILED':
        msg = 'IOPS are not running after HA failover...'
        logging.debug('Compared result: %s, Error: %s', io_output[0], msg)
        logging.debug('IOPS Error: ungracefull HA test case(iSCSI) failed...')
        resultCollection('Ungracefull HA test case(iSCSI) failed', \
            ['FAILED', ''], startTime, endTime)
    elif io_output[0] == 'PASSED':
        msg = "IOPS are running fine after ungracefull HA"
        logging.debug('Compared result: %s', msg)
        resultCollection('Ungracefull HA test case(iSCSI) passed', \
            ['PASSED', ''], startTime, endTime)
        print msg
    else:
        print "problem in comparing files"
        logging.error('problem in comparing files')
        is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)
def standalone_devman_upgrade(ec_ip, ec_user, ec_pwd, patchnum, patchpath): patch_dir_prepare(ec_ip, ec_user, ec_pwd, patchnum, patchpath) patch_type = 'ec' apply_patch(patch_type, ec_ip, ec_user, ec_pwd, patchnum) getControllerInfo(node_ip, node_pwd, 'reboot', 'reboot.txt') time.sleep(300) ec_up_status = bring_up_check(ec_ip) if ec_up_status == True: print "Devman patched and brought up" else: print "Could't bring up devman after patching, unable to proceed" exit()
def getting_qos_data_from_node(node_ip, passwd, datapath, outFile):
    """Fetch two back-to-back QoS stat samples (2s apart) for *datapath*
    from the node and return the raw command output, which is also saved
    into *outFile* by getControllerInfo."""
    qos_cmd = 'reng stats access dataset %s qos | head -n 4 ; sleep 2 ;\
 echo "-----------------"; reng stats access dataset %s qos |\
 head -n 4' % (datapath, datapath)
    logging.debug('executing the command %s in controller', str(qos_cmd))
    qos_result = getControllerInfo(node_ip, passwd, qos_cmd, outFile)
    logging.debug('iops result is %s', (qos_result))
    return qos_result
def poolfunction(): poolcmd = 'echo > dbfile' pooluuidnull=getControllerInfo(devip, devpasswd,poolcmd,"output.txt") poolcmd1 = 'mysql -uroot -ptest cloud -e "select name,uuid from cb_pool where removed is NULL" > dbfile' allpooluuid = getControllerInfo(devip, devpasswd,poolcmd1,"output.txt") poolcmd2 = 'cat dbfile | grep -w %s |awk \'{print $2}\'' % (poolname) pooluuid = getControllerInfo(devip, devpasswd,poolcmd2,"output.txt") print pooluuid poolrengcmd1 =' reng list | grep -A 9 %s | grep \'Renegade IO remainder\' | awk \'{print$4}\' ' %(pooluuid[0:-1]) print poolrengcmd1 global poolrengiops poolrengiops= getControllerInfo(nodeip, nodepasswd,poolrengcmd1,"output.txt") print "poolrengremainediops= "+ poolrengiops poolrengcmd2 = " reng list | grep -A 9 %s | grep 'Renegade throughput remainder' | awk '{print$4}'" %(pooluuid[0:-1]) print poolrengcmd2 global poolrengthroughput poolrengthroughput= getControllerInfo(nodeip, nodepasswd,poolrengcmd2,"output.txt") print "poolrengremained throughput="+ poolrengthroughput
def create_snapshot(stdurl, vol_id, vol_name, snp_name, node_ip, passwd):
    # Create a storage snapshot through the management API, then confirm
    # on the controller (via 'zfs list -t snapshot') that vol_name@snp_name
    # really exists.  Returns ['PASSED', ''] or ['FAILED', ''].
    logging.debug('Inside create_snapshot method...')
    querycommand = 'command=createStorageSnapshot&id=%s&name=%s' \
        %(vol_id, snp_name)
    resp_create_snapshot = sendrequest(stdurl, querycommand)
    createSnpResp = json.loads(resp_create_snapshot.text)
    # The API reports failures via an 'errortext' field in the response.
    if 'errortext' in str(createSnpResp):
        errormsg = createSnpResp['createStorageSnapshotResponse'].get(
            'errortext')
        logging.debug('Not able create snapshot %s, %s', snp_name, errormsg)
        return ['FAILED', '']
    logging.debug('createStorageSnapshot executed successfully')
    # Cross-check on the node: grep the snapshot listing for vol@snap.
    getControllerInfo(node_ip, passwd, 'zfs list -t snapshot | grep %s' \
        %(snp_name), 'snp_result.txt')
    out = executeCmd('cat snp_result.txt | grep %s@%s > snp_result2.txt' \
        %(vol_name, snp_name))
    if out[0] == 'FAILED':
        logging.debug('Snapshot %s is not created at Controller', snp_name)
        return ['FAILED', '']
    logging.debug('snapshot %s created successfully at Controller', snp_name)
    return ['PASSED', '']
def checkMetaSize(ip, passwd, PoolName): type = "meta" executeCmd("python enableDedup.py") executeCmd("python copyData.py %s %s %s" % (sys.argv[1], PoolName, 'testfile')) time.sleep(60) poolUsedSize1 = getControllerInfo( ip, passwd, "zpool iostat -v %s | grep %s | awk '{print $2}'" % (PoolName, PoolName), "output.txt") metaUsedSize1 = getControllerInfo( ip, passwd, "zpool iostat -v %s | grep -A 1 %s | grep raidz1 | awk '{print $2}'" % (PoolName, type), "output.txt") print poolUsedSize1.split('M')[0] print metaUsedSize1.split('M')[0] #for x in range (1, 40): for x in range(1, 11): executeCmd("python copyData.py %s %s %s" % (sys.argv[1], PoolName, 'testfile%s' % (x))) time.sleep(60) poolUsedSize2 = getControllerInfo( ip, passwd, "zpool iostat -v %s | grep %s | awk '{print $2}'" % (PoolName, PoolName), "output.txt") metaUsedSize2 = getControllerInfo( ip, passwd, "zpool iostat -v %s | grep -A 1 %s | grep raidz1 | awk '{print $2}'" % (PoolName, type), "output.txt") print poolUsedSize2.split('M')[0] print metaUsedSize2.split('M')[0] if ((float(poolUsedSize1.split('M')[0]) == float( poolUsedSize2.split('M')[0])) and (float(metaUsedSize2.split('M')[0]) > float( metaUsedSize1.split('M')[0]))): return ("PASSED", "") else: return ("FAILED", "")
def verify_pool_import(poolName, NODE2_IP, PASSWD):
    """Wait for *poolName* to be imported on the peer node after failover.

    Polls 'zpool list' on NODE2_IP up to three times -- after 25s, then
    after two further 10s waits (~25s/35s/45s past the reboot, the same
    schedule the original copy-pasted three times).  Returns on success;
    blocks the test case via is_blocked() if the pool never appears.
    """
    logging.debug('pool import at peer Node would take some time, '\
                  'sleeping for 25 seconds')
    waited = 0
    for pause in (25, 10, 10):
        time.sleep(pause)
        waited += pause
        logging.debug('executing zpool list at Node:%s', NODE2_IP)
        getControllerInfo(NODE2_IP, PASSWD, 'zpool list', 'listpool.txt')
        import_result = executeCmd('cat listpool.txt | grep %s' % (poolName))
        if import_result[0] == 'PASSED':
            logging.debug('pool %s imported successfully at peer Node:%s', \
                          poolName, NODE2_IP)
            return
        if waited < 45:
            # Same messages the unrolled version logged at 25s and 35s.
            logging.debug('pool is not imported till %s seconds after '
                          'reboot the Node', waited)
            logging.debug('sleeping for another 10 seconds')
    logging.error('pool %s import failed at peer Node:%s', poolName, NODE2_IP)
    is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)
import json
import requests
from hashlib import md5
import fileinput
import subprocess
import time
from cbrequest import sendrequest, filesave1, timetrack, queryAsyncJobResult, configFile, executeCmd, createSFTPConnection, getControllerInfo, getoutput

# Capture 'zpool status -v Pool1' from the controller into a local log
# file, then initialise the counters that the parsing loop below fills in.
file = 'logs/if_config_output'  # NOTE(review): shadows the builtin 'file'
IP = "20.10.57.103"
passwd = "test"
command = "zpool status -v Pool1"
t = getControllerInfo(IP, passwd, command, file)
# Pool-layout counters, populated while scanning the zpool status output.
poolType = ''
poolDisks = 0
noDiskGroups = 0
noOfSpares = 0
noOfCaches = 0
noOfLogs = 0
noOfMetaDisks = 0
noOfMetaDiskGroups = 0
noCacheDisks = 0
noSpareDisks = 0
noLogDisks = 0
noLogMirrDisks = 0
poolName = 'Pool1'
#### Function(s) Declartion Ends
output = []
# NOTE(review): 'list' shadows the builtin list type from here on.
list = getoutput('cat %s' % (file))
# Parsing loop; its body continues beyond this chunk.
for st in list:
# For every configured NFS volume: remount it cleanly, then measure the
# dataset's used space and compression ratio before and after copying a
# test file, so compression effectiveness can be compared.
for x in range(1, int(config['Number_of_NFSVolumes'])+1):
    startTime = ctime()
    # Tear down any previous mount and recreate the local mount directory.
    r1=executeCmd(' rm -rf mount1/%s/* '%(config['volMountpoint%d' %(x)]))
    sleep(5)
    r2=executeCmd('umount mount1/%s'%(config['volMountpoint%d' %(x)]))
    r3=executeCmd (' rm -rf mount1/%s'%(config['volMountpoint%d' %(x)]))
    r4=executeCmd('mkdir -p mount1/%s ' %(config['volMountpoint%d' %(x)]))
    ############### Mount ###############
    executeCmd('mount -t nfs %s:/%s mount1/%s' %(config['volIPAddress%d' %(x)], config['volMountpoint%d' %(x)], config['volMountpoint%d' %(x)]))
    output=executeCmd('mount | grep %s' %(config['volMountpoint%d' %(x)]))
    endTime = ctime()
    resultCollection("Mount of NFS Volume %s" %(config['volDatasetname%d' %(x)]), output,startTime, endTime)
    print "mounting NFS volume %s is " %(config['volMountpoint%d' %(x)]),output[0]
    ############### Before Copy ##############
    # Raw ('-Hp') used-bytes value of the dataset before the copy.
    cmd1 = " zfs get -Hp all | grep %s | grep -w used | awk \'{print $3}\'"%(config['volDatasetname%d' %(x)])
    size1 = getControllerInfo(IP,password, cmd1, "backend_output_bc0.txt")
    print " Before copy used space of NFS volume %s is "%(config['volDatasetname%d' %(x)])
    print size1
    cmd2 = " zfs get -Hp all | grep %s | grep -w compressratio | awk \'{print $3}\'"%(config['volDatasetname%d' %(x)])
    ratio1 = getControllerInfo(IP,password, cmd2, "backend_output_cr0.txt")
    print " Before copy Compresion Ratio of volume %s is "%(config['volDatasetname%d' %(x)]),
    print ratio1
    # [0:-2] strips the trailing 'x' and newline from e.g. '1.00x\n'.
    cr1 = float(ratio1[0:-2])
    print cr1
    ############### After Copy ################
    if output[0] == "PASSED":
        copyResult=executeCmd('cp -v %s mount1/%s' %(testfile,config['volMountpoint%d' %(x)]))
        print copyResult[0]
        sleep(5)
        cmd1 = " zfs get -Hp all | grep %s | grep -w used | awk \'{print $3}\'"%(config['volDatasetname%d' %(x)])
        # NOTE(review): the password is hard-coded as 'test' here while the
        # 'password' variable is used above -- confirm which is intended.
        # (Loop body continues beyond this chunk.)
        size2= getControllerInfo(IP,'test', cmd1, "backend_output0.txt")
import json import sys from time import ctime from cbrequest import configFile, resultCollection, executeCmd, getControllerInfo, createSFTPConnection, putFileToController config = configFile(sys.argv) IP = "ESXIP" passwd = "ESXPASSWORD" for x in range(1, int(config['Number_of_VMs']) + 1): executeCmd('> temp/createrdm.sh') executeCmd('cat sample/createrdm.sh >> temp/createrdm.sh') vmname = "%s" % (config['vmName%d' % (x)]) datastore = "%s" % (config['datastoreName%d' % (x)]) vmpassword = "******" % (config['vmPassword%d' % (x)]) executeCmd('sed -i s/VMNAME/%s/g temp/createrdm.sh' % vmname) executeCmd('sed -i s/DATASTORE/%s/g temp/createrdm.sh' % datastore) executeCmd('sed -i s/VMLFILE/vmlfile%d/g temp/createrdm.sh' % (x)) output = putFileToController( IP, passwd, "temp/createrdm.sh", "/vmfs/volumes/%s/%s/createrdm.sh" % (datastore, vmname)) print output cmd = getControllerInfo( IP, passwd, "cp /autofolder/vmlfile%d /vmfs/volumes/%s/%s/vmlfile%d" % (x, datastore, vmname, x), "vmlfile.txt") print cmd cmd = getControllerInfo( IP, passwd, "sh -x /vmfs/volumes/%s/%s/createrdm.sh" % (datastore, vmname), "rdmfile.txt") print cmd
# Collect the pools hosted on this controller, record the state-change
# result, then verify pool visibility matches the requested mode:
# in Maintenance the pools must be exported (invisible), otherwise all
# pools must be visible and healthy.
for y in range(1, int(config['Number_of_Pools'])+1):
    if ctrl_name == "%s" %(config['poolNodeName%d' %(y)]):
        pool = "%s" %(config['poolName%d' %(y)])
        zpoolList1.append(pool)
health = list();
# rstatus is produced by the (out-of-view) state-change call above.
if rstatus[0] == 'PASSED':
    endTime = ctime()
    resultCollection(" %s changing state to %s mode" %(ctrl_name,mode), rstatus, startTime, endTime)
else:
    endTime = ctime()
    resultCollection(" %s changing state to %s mode, further conditions skipped" %(ctrl_name,mode), ['FAILED', ''], startTime, endTime)
    exit()
time.sleep(5)
if mode == 'Maintenance':
    # Pools should be exported: 'zpool list' must not show them.
    for x in range(0, len(zpoolList1)):
        value = getControllerInfo(IP1, passwd1, "zpool list | grep %s | awk '{print $7}'" %(zpoolList1[x]), "zpoolList.txt")
        endTime = ctime()
        if value == '':
            print 'No pool available with this name %s, Node status is: %s' %(zpoolList1[x], mode)
            resultCollection('Pool export passed, not able to see pools: ', ['PASSED', ''], startTime, endTime)
        else:
            print 'Able to list pool with name %s, even node status is: %s' %(zpoolList1[x], mode)
            resultCollection('Pool export failed, able to see pool', ['FAILED', ''], startTime, endTime)
            print zpoolList1[x]+" "+value
else:
    # Column 7 of 'zpool list' is the pool HEALTH; all_same() later checks
    # every pool reports ONLINE.
    for x in range(0, len(zpoolList1)):
        value = getControllerInfo(IP1, passwd1, "zpool list | grep %s | awk '{print $7}' " %zpoolList1[x],"zpoolList.txt")
        print zpoolList1[x]+" "+value
        health.append(value)
    flag = all_same(health)
    print flag
querycommand = 'command=addLagg&name=lagg%s&clusterid=%s&protocoltype=%s&portslist=%s' % ( config['laggTag%d' % (x)], cluster_id, config['laggType%d' % (x)], nic_id) resp_addLagg = sendrequest(stdurl, querycommand) filesave("logs/addLagg.txt", "w", resp_addLagg) data = json.loads(resp_addLagg.text) if not "errortext" in str(data): print "Lagg added successfully" resultCollection( "Lagg Addition %s Verification from Devman" % (config['laggTag%d' % (x)]), ["PASSED", ""]) else: print "Lagg addition Failed " errorstatus = str(data['addLaggResponse']['errortext']) resultCollection( "Lagg Addition %s Verification from Devman" % (config['LaggTag%d' % (x)]), ["FAILED", errorstatus]) routput = getControllerInfo( IP, Password, "ifconfig lagg%s| grep status | awk '{print $2}'" % (config['laggTag%d' % (x)]), "logs/test") if "active" in routput: resultCollection( "Lagg Addition lagg%s Verification from Node" % (config['laggTag%d' % (x)]), ["PASSED", ""]) else: resultCollection( "Lagg Addition lagg%s Verification from Node" % (config['laggTag%d' % (x)]), ["FAILED", str(routput)])
# (Continuation of an executeCmd('mount -t nfs ...') call begun above this
# chunk.)
    (config['volIPAddress%d' % (x)], config['volMountpoint%d' % (x)],
     config['volMountpoint%d' % (x)]))
output = executeCmd('mount | grep %s' % (config['volMountpoint%d' % (x)]))
endTime = ctime()
resultCollection(
    "Mount of NFS Volume %s" % (config['volDatasetname%d' % (x)]), output,
    startTime, endTime)
print "before mount"
if output[0] == "PASSED":
    print "mount passed"
    #executeCmd('cp -v %s mount1/%s/%s' %(testfile, config['volMountpoint%d' %(x)],testfile))
    time.sleep(3)
    # Pool dedup ratio before copying data (column 6 of 'zpool list').
    dedupRatio1 = getControllerInfo(
        ip, passwd, "zpool list %s | awk '{print $6}' | tail -n1" %
        (PoolName), "output.txt")
    print dedupRatio1
    cmd1 = 'zfs get -Hp all | grep %s | grep -w used | awk \'{print $3}\' ' % (
        config['volDatasetname%d' % (x)])
    print cmd1
    volused1 = getControllerInfo(ip, passwd, cmd1, "output.txt")
    print "used" + volused1
    # [0:-2] strips the trailing 'x' and newline from e.g. '1.00x\n'.
    dr1 = float(dedupRatio1[0:-2])
    print dr1
    for y in range(1, 2):
        executeCmd("cp -v %s mount1/%s/%s" %
                   (testfile, config['volMountpoint%d' % (x)],
                    testfile + '%d' % (y)))
        time.sleep(10)
        # (This call continues in the next chunk.)
        dedupRatio2 = getControllerInfo(
import json
import sys
import time
from time import ctime
from cbrequest import configFile, executeCmd, resultCollection, getoutput, getControllerInfo, createSFTPConnection, putFileToController

# ESX host credentials ("ESXIP"/"ESXPASSWORD" appear to be placeholders
# substituted before the script runs -- NOTE(review): confirm).
IP = "ESXIP"
passwd = "ESXPASSWORD"
# Stage the vml-creation helper plus the tsm list on the ESX host, then
# execute the helper there; every remote step's output is echoed locally.
cmd = getControllerInfo(IP, passwd, "mkdir autofolder", "autofolder.txt")
print cmd
output = putFileToController(IP, passwd, "temp/createvml.sh",
                             "/autofolder/createvml.sh")
print output
output = putFileToController(IP, passwd, "tsmlist", "/autofolder/tsmlist")
print output
cmd = getControllerInfo(IP, passwd, "sh -x /autofolder/createvml.sh",
                        "esx.txt")
print cmd
# Abort the test as blocked unless vdbench I/O is actually running, then
# send the NFS service inside the tenant jail four SIGHUPs, 5s apart.
# iops was fetched above this chunk; element [1] carries the value.
iops = iops[1]
# NOTE(review): in Python 2 a non-empty string also compares > 0, so this
# check passes even if iops is still a string -- confirm it is numeric.
if iops > 0:
    print 'iops are running fine...'
    logging.info('IOPS are running fine after starting of vdbench..., '\
        'iops value is: %s', iops)
else:
    print 'iops are not running...'
    logging.debug('IOPS are not running after starting of vdbench...')
    logging.error('Testcase Multiple_Sighups_During_File_IO is blocked due ' \
        'to IOPS are not running')
    is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)
# getting jail id for issuing sighup command directly
cmd = 'jls | grep %s | awk \'{print $1}\'' % (VSM_IP)
jail_id = getControllerInfo(ctrl_ip, NODE_PSWD, cmd, 'IOPS.txt')
logging.info('jail id: %s', jail_id)
logging.info('Going to execute multiple sighups...')
# Same sighup command repeated four times with 5s gaps.
cmd = 'sh /usr/local/cb/bin/services_bkptenant nfs sighup %s' % (jail_id)
getControllerInfo(ctrl_ip, NODE_PSWD, cmd, 'sighup.txt')
time.sleep(5)
getControllerInfo(ctrl_ip, NODE_PSWD, cmd, 'sighup.txt')
time.sleep(5)
getControllerInfo(ctrl_ip, NODE_PSWD, cmd, 'sighup.txt')
time.sleep(5)
getControllerInfo(ctrl_ip, NODE_PSWD, cmd, 'sighup.txt')
throughputcontrolled = "%s" %(config['volTpcontrol%d' %(x)]) datasetname = "%s" %(config['volDatasetname%d' %(x)]) tsmname = "%s" %(config['volTSMName%d' %(x)]) accountname = "%s" %(config['volAccountName%d' %(x)]) searchname = datasetname + accountname + tsmname print datasetname print tsmname print accountname print searchname print "iopscontrolled = "+iopscontrolled print "throughputcontrolled = "+throughputcontrolled if iopscontrolled == "true" and throughputcontrolled == "false": querycommand='command=updateQosGroup&id=%s&iops=%s' %(id, volIops) resp_updateNFS = sendrequest(stdurl, querycommand) cmd1 ='reng list | grep -A 6 %s | grep \'Renegade IO limit\' | awk \'{print $4}\' ' %(searchname) iops= getControllerInfo(ip, passwd,cmd1,"output.txt") print "volIops="+ volIops print "rengiops="+ iops endTime=ctime() if int(volIops) == int(iops): print "Changing IOPS updated in Reng list on "+ datasetname +": PASSED" resultCollection("Changing IOPS updated in Reng list on\'%s\'" %(datasetname),["PASSED", ' '],startTime, endTime) elif int(volIops) != int(rengiops): print "Changing IOPS updated in Reng list on "+ datasetname +": FAILED" resultCollection("Changing IOPS updated in Reng list on\'%s\'" %(datasetname),["FAILED", ' '],startTime, endTime) filesave("logs/resp_updateNFS.txt", "w", resp_listFileSystem) print ">>>> NFS >>>>updated %s" %(filesystem_name) elif iopscontrolled == "false" and throughputcontrolled == "true": querycommand='command=updateQosGroup&id=%s&throughput=%s' %(id, tpvalue) resp_updateNFS = sendrequest(stdurl, querycommand) cmd2 = 'reng list | grep -A 6 %s | grep \'Renegade throughput limit\' | awk \'{print $4}\' ' %(searchname)
fcFlag = 1 else: print "Argument is not correct.. Correct way as below" print "python CompressionExecution.py compressConfig.txt cifs/nfs/iscsi/fc/all on/off IP Password filename" exit() compvalue = sys.argv[3] IP = sys.argv[4] password = sys.argv[5] if len(sys.argv) == 7: testfile == sys.argv[6] else: testfile = "textfile" if sys.argv[3] == 'on': cmd = 'sysctl vfs.zfs.disable_zvol_compression=0' comressionOn = getControllerInfo(IP, password, cmd, 'CompressionOn.txt') print cmd elif sys.argv[3] == 'off': cmd = 'sysctl vfs.zfs.disable_zvol_compression=1' comressionOff = getControllerInfo(IP, password, cmd, 'CompressionOff.txt') print cmd else: print 'Fourth parameter is wrong please provide as \"on/off\" only' exit() ############### NFS EXECUTION STARTS ############### if nfsFlag == 1 or allFlag == 1: for x in range(1, int(config['Number_of_NFSVolumes']) + 1): startTime = ctime() r1 = executeCmd(' rm -rf mount/%s/* ' % (config['volMountpoint%d' % (x)]))
def all_same(items): return all(x == "ONLINE\n" for x in items) if len(sys.argv)<6: print "Argument is not correct.. Correct way as below" print "python rebootNode.py config.txt reboot node1IP node1passwd node2IP node2passwd" exit() mode = sys.argv[2] IP1 = sys.argv[3] passwd1 = sys.argv[4] IP2 = sys.argv[5] passwd2 = sys.argv[6] getControllerInfo(IP1,passwd1,"reboot","r.txt") pingvalue = executeCmd('ping -c 1 %s'%IP1) while pingvalue[0] == "PASSED": pingvalue = executeCmd('ping -c 1 %s'%IP1) time.sleep(1) print pingvalue[0] startTime = ctime() time.sleep(120) endTime = ctime() querycommand = 'command=listController' resp_listcontroller = sendrequest(stdurl, querycommand) filesave("logs/ListController.txt", "w", resp_listcontroller) data = json.loads(resp_listcontroller.text) #print data
# iSCSI ungraceful-HA scenario: format and mount the lun, start vdbench
# I/O, confirm IOPS, hard-reboot node1 and verify pool import plus I/O on
# the peer node.
result = getDiskAllocatedToISCSI(VSM_IP, mnt_point)
iscsi_device = verify_getDiskAllocatedToISCSI(result, mnt_point)
result = execute_mkfs(iscsi_device, 'ext3')
verify_execute_mkfs(result)
mount_result = mount_iscsi(iscsi_device, volumeDict['name'])
verify_mount(mount_result)
#mount_dir = 'mount/%s' %(volumeDict['name'])
mount_dir = {'name': volumeDict['name'], 'mountPoint': volumeDict['name']}
logging.info('...executing vdbench....')
executeVdbenchFile(mount_dir, 'filesystem_iscsi')
time.sleep(20)
logging.info('verifying the IOPS before Node goes to reset...')
# zfs path of the dataset under test: pool/acc+tsm/volume.
iops_datapath = poolName + '/' + accName + tsm_name + '/' + volumeDict['name']
verify_IOPS(NODE1_IP, PASSWD, iops_datapath)
logging.debug('going to move node to reset...')
getControllerInfo(NODE1_IP, PASSWD, 'reboot', 'reboot.txt')
logging.debug('verifying pool import at peer Node: %s', NODE2_IP)
verify_pool_import(poolName, NODE2_IP, PASSWD)
logging.debug(
    'before verifying IOPS after reset the Node, sleeping for 10secs')
time.sleep(10)
logging.debug('verifying IOPS at peer Node IP: %s', NODE2_IP)
verify_IOPS_afterHA(NODE2_IP, PASSWD, iops_datapath)
#Wait till Node come up and move it to available state
logging.debug('Waiting for 3 minutes to come up the Node:%s, It may take 5 to '\
    '15 minutes', NODE1_IP)
time.sleep(180)
node_online = True
# (Ping/recovery handling continues in the next chunk.)
ping_result = ping_machine(NODE1_IP)
# Pool-QoS regression fragment: record the pool's remaining reng IOPS,
# read the volume's own IOPS limit, then delete the dataset so the
# pool-level remainder can be re-checked afterwards.
accountname = "%s" % (config['volAccountName%d' % (x)])
# reng identifies the dataset by the concatenated triple.
searchname = datasetname + accountname + tsmname
print datasetname
print tsmname
print accountname
print searchname
print "iopscontrolled = " + iopscontrolled
print "throughputcontrolled = " + throughputcontrolled
if iopscontrolled == "true" and throughputcontrolled == "false":
    print "inside iops loop"
    # Records pool-level reng remainders into module globals
    # (poolrengiops / poolrengthroughput).
    poolfunction(fullpoolname)
    poolrengiops1 = poolrengiops
    print "poolrengiops1= " + poolrengiops1
    volcmd1 = 'reng list | grep -A 6 %s | grep \'Renegade IO limit\' | awk \'{print $4}\' ' % (
        searchname)
    voliops = getControllerInfo(nodeip, nodepasswd, volcmd1, "output.txt")
    volIops = int(voliops)
    print "volumeusediops="
    print volIops
    querycommand = 'command=deleteFileSystem&id=%s' % (id)
    resp_deleteNFS = sendrequest(stdurl, querycommand)
    filesave("logs/updateQosGroup.txt", "w", resp_deleteNFS)
    response = json.loads(resp_deleteNFS.text)
    if 'errortext' in str(response):
        errorstatus = str(
            response['deleteFileSystemResponse']['errortext'])
        print errorstatus
        resultCollection(
            "Deletion of Dataset \'%s\' failed not able to test Pool QoS change Post Dataset Deletion "
            % (datasetname), ["FAILED", ' '], startTime, endTime)
# Mount the NFS volume, copy a test file onto it and compare sizes
# between the client view (df) and the controller view (zfs list).
startTime = ctime()
executeCmd('mkdir -p mount/%s' % (config['volMountpoint%d' % (x)]))
######Mount
executeCmd('mount -t nfs %s:/%s mount/%s' %
           (config['volIPAddress%d' % (x)], config['volMountpoint%d' % (x)],
            config['volMountpoint%d' % (x)]))
output = executeCmd('mount | grep %s' % (config['volMountpoint%d' % (x)]))
#resultCollection("Mount of NFS Volume %s" %(config['volDatasetname%d' %(x)]), output)
# Client-side size of the mounted volume with the unit letter stripped.
q = getoutput(
    'df -h | grep %s | awk \'{print $2}\' | sed s/\[A-Z\]//' %
    (config['volMountpoint%d' % (x)]))
print " used space of volume on client = %s after mount" % q[1].strip()
size = getControllerInfo(
    IP, passwd,
    " zfs list | grep %s | awk '{print $2}' | sed s/\[A-Z\]//" %
    (config['volDatasetname%d' % (x)]), "size.txt")
print "size=%s" % size.strip()
#######Copy
if output[0] == "PASSED":
    executeCmd('cp testfile mount/%s' % (config['volMountpoint%d' % (x)]))
    time.sleep(5)
    output = executeCmd('diff testfile mount/%s' %
                        (config['volMountpoint%d' % (x)]))
    # NOTE(review): executeCmd returns a list elsewhere in this suite, so
    # 'output == "FAILED"' can never be true -- should this be
    # output[0] == "FAILED"?  Confirm before changing.
    if output == "FAILED":
        endTime = ctime()
        # (This resultCollection call continues in the next chunk.)
        resultCollection(
            "Creation of File on NFS Volume %s" %
            (config['volDatasetname%d' % (x)]), output, startTime,
# Partial-failover scenario: verify I/O, take the VSM interface on node1
# down to force a pool export, verify I/O on the peer, then bring the
# interface back up for the give-back.
verify_execute_mkfs(result)
mount_result = mount_iscsi(iscsi_device, volumeDict['name'])
verify_mount(mount_result)
#mount_dir = 'mount/%s' %(volumeDict['name'])
mount_dir = {'name': volumeDict['name'], 'mountPoint': volumeDict['name']}
logging.info('...executing vdbench....')
executeVdbenchFile(mount_dir, 'filesystem_iscsi')
time.sleep(20)
logging.info('verifying the IOPS before Partial fail over...')
# zfs path of the dataset under test: pool/acc+tsm/volume.
iops_datapath = poolName + '/' + accName + tsm_name + '/' + volumeDict['name']
# BUG FIX: verify_IOPS takes (ip, passwd, iops_datapath); the datapath
# argument was missing here, which raised TypeError.
verify_IOPS(NODE1_IP, PASSWD, iops_datapath)
logging.debug('Making down the VSMs interface for partial fail over...')
interface1 = conf['interfaceVSM1']
cmd = 'ifconfig %s down' % (interface1)
logging.debug('executing <ifconfig %s down> at controller', interface1)
interface_down = getControllerInfo(NODE1_IP, PASSWD, cmd, 'interfacedown.txt')
logging.debug('sleeping for 5 seconds for pool export...')
time.sleep(5)
# have to write code for verofication of pool export
logging.debug('verifying IOPS after partial fail over...')
logging.debug('before verifying IOPS sleeping for 2 seconds...')
time.sleep(2)
logging.debug('verifying IOPS at peer Node IP: %s', NODE2_IP)
# BUG FIX: verify_IOPS_afterHA also requires the datapath argument.
verify_IOPS_afterHA(NODE2_IP, PASSWD, iops_datapath)
logging.debug('Making interface up for partial give back')
cmd = 'ifconfig %s up' % (interface1)
logging.debug('executing <ifconfig %s up> at controller', interface1)
interface_up = getControllerInfo(NODE1_IP, PASSWD, cmd, 'interfaceup.txt')
logging.debug('sleeping for 5 seconds for pool import...')
time.sleep(5)
# have to write code for verofication of pool export
Name = sys.argv[4] IP = sys.argv[5] passwd = sys.argv[6] if sys.argv[3].lower() == 'create': createSnp = 1 elif sys.argv[3].lower() == 'delete': deleteSnp = 1 else: print bcolors.FAIL + 'PARAMETER ERROR: '+ bcolors.ENDC + bcolors.WARNING + 'Fourth parameter should be \"create or delete\"...' + bcolors.ENDC exit() # exit() stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response']) result = getControllerInfo('20.10.48.140', 'test', 'zfs list -t snapshot', "tsm_snapshot_result.txt") print result exit() querycommand = 'command=listFileSystem' resp_listFileSystem = sendrequest(stdurl, querycommand) filesave("logs/ListFileSystem.txt", "w", resp_listFileSystem) data = json.loads(resp_listFileSystem.text) filesystems = data["listFilesystemResponse"]["filesystem"] if tsmFlag == 1: #print tsmFlag querycommand = 'command=listTsm' resp_listTsm = sendrequest(stdurl, querycommand) filesave("logs/ListTsm.txt", "w", resp_listTsm)
# Abort the test as blocked unless vdbench I/O is actually running, then
# refresh the iSCSI target inside the jail four times, 3s apart.
# iops was fetched above this chunk; element [1] carries the value.
iops = iops[1]
# NOTE(review): in Python 2 a non-empty string also compares > 0, so this
# check passes even if iops is still a string -- confirm it is numeric.
if iops > 0:
    print 'iops are running fine...'
    logging.info('IOPS are running fine after starting of vdbench..., '\
        'iops value is: %s', iops)
else:
    print 'iops are not running...'
    logging.debug('IOPS are not running after starting of vdbench...')
    logging.error('Testcase AMSx-WLAx-NSWx-ISCSI-IORx-TC_CONTINUOUS_ISTGTCONTROL_REFRESH is blocked due ' \
        'to IOPS are not running')
    is_blocked(startTime, FOOTER_MSG, BLOCKED_MSG)
# getting jail id for issuing istgtcontrol refresh command directly
cmd = 'jls | grep %s | awk \'{print $1}\'' % (VSM_IP)
jailId = getControllerInfo(controllerIP, NODE_PSWD, cmd, 'IOPS.txt')
logging.info('jail id: %s', jailId)
logging.info('Going to execute multiple istgtcontrol refresh...')
# Same refresh command repeated four times with 3s gaps.
cmd = 'jexec %s istgtcontrol refresh' % (jailId)
getControllerInfo(controllerIP, NODE_PSWD, cmd, 'istgtcontrolRefresh.txt')
time.sleep(3)
getControllerInfo(controllerIP, NODE_PSWD, cmd, 'istgtcontrolRefresh.txt')
time.sleep(3)
getControllerInfo(controllerIP, NODE_PSWD, cmd, 'istgtcontrolRefresh.txt')
time.sleep(3)
getControllerInfo(controllerIP, NODE_PSWD, cmd, 'istgtcontrolRefresh.txt')
endTime = ctime() resultCollection( "No snapshot is available for volume \"%s\" with provided name:" % (filesystem_name), "FAILED", startTime, endTime) print 'No snapshot is available' # create clone dataset else: querycommand = 'command=cloneDatasetSnapshot&id=%s&path=%s&clonename=%s&mountpoint=%s' % ( filesystem_id, path, cloneName, cloneName) out = sendrequest(stdurl, querycommand) filesave("logs/ListSnapshot.txt", "w", out) data3 = json.loads(out.text) fsAvlSpace = getControllerInfo( IP, passwd, "zfs list | grep %s | awk \'{print $3}\'" % (filesystem_name), "dataset_result.txt") cloneAvlSpace = getControllerInfo( IP, passwd, "zfs list | grep %s | awk \'{print $3}\'" % (cloneName), "clone_result.txt") if not "errortext" in str(data3): #endTime = ctime() #resultCollection("Result for clone \"%s\" creation on volume %s is: " %(cloneName, filesystem_name), ["PASSED", ""], startTime, endTime) #if fsAvlSpace == cloneAvlSpace: endTime = ctime() #resultCollection("Availavle space of clone dataset \"%s\" is same as parent dataset \"%s\": " %(cloneName, filesystem_name), ["PASSED", ""], startTime, endTime) resultCollection( "Creation of Clone \"%s\" is: " % (cloneName), ["PASSED", ""], startTime, endTime)
# TSM-level snapshot handling: create the snapshot through the API and
# then verify, per CIFS/NFS volume in the TSM, that zfs shows
# dataset@Name on the controller.
tsm_dataset_id = listTsm['datasetid']
if tsm_dataset_id is not None:
    ### Create TSM level snapshot
    if createSnp:
        querycommand = 'command=createStorageSnapshot&id=%s&name=%s' %(tsm_dataset_id, Name)
        resp_tsm_create_snp = sendrequest(stdurl, querycommand)
        filesave("logs/create_tsm_snapshot.txt", "w",resp_tsm_create_snp)
        tsmSnpResp = json.loads(resp_tsm_create_snp.text)
        # The API reports failures via an 'errortext' field.
        if 'errortext' in str(tsmSnpResp):
            endTime = ctime()
            errorstatus = str(tsmSnpResp['createStorageSnapshotResponse']['errortext'])
            resultCollection("Result for snapshot \"%s\" creation on TSM %s is: " %(Name, config['tsmName%d' %(x)]), ['FAILED', errorstatus], startTime, endTime)
        else:
            # Snapshot listing fetched once, then grepped per volume.
            getControllerInfo(IP, passwd, 'zfs list -t snapshot | grep %s' %(Name), "tsm_snapshot_result.txt")
            ## verifying snapshot for cifs volumes
            for p in range(1, int(config['Number_of_CIFSVolumes'])+1):
                cifsresult = executeCmd('cat tsm_snapshot_result.txt | grep %s@%s > tsm_snapshot_result2.txt' %(config['volCifsDatasetname%d' %(p)], Name))
                if cifsresult[0] == 'FAILED':
                    tsmFlag = 0
                    endTime = ctime()
                    resultCollection("Result for snapshot \"%s\" creation on volume %s is: " %(Name, config['volCifsDatasetname%d' %(p)]), cifsresult, startTime, endTime)
            ## verifying snapshot for nfs volumes
            for q in range(1, int(config['Number_of_NFSVolumes'])+1):
                nfsresult = executeCmd('cat tsm_snapshot_result.txt | grep %s@%s > tsm_snapshot_result2.txt' %(config['volDatasetname%d' %(q)], Name))
                if nfsresult[0] == 'FAILED':
                    tsmFlag = 0
                    # (Failure reporting continues in the next chunk.)
                    endTime = ctime()
    # Devman accepted the deletion (branch of an if begun above).
    endTime = ctime()
    # NOTE(review): 'deleteed'/'deleteition' are typos in user-visible
    # messages; left untouched since this edit only adds comments.
    print "VLAN deleteed successfully"
    resultCollection(
        "VLAN deletion %s Verification from Devman" %
        (config['vlanTag%d' % (x)]), ["PASSED", ""], startTime, endTime)
else:
    print "VLAN deleteition Failed "
    errorstatus = str(data['deleteVLANResponse']['errortext'])
    endTime = ctime()
    resultCollection(
        "VLAN deletion %s Verification from Devman" %
        (config['vlanTag%d' % (x)]), ["FAILED", errorstatus], startTime,
        endTime)
# Node-side check: if ifconfig still shows the vlan with its parent
# interface, the deletion did not take effect on the node.
routput = getControllerInfo(
    IP, Password, "ifconfig vlan%s | grep parent" %
    (config['vlanTag%d' % (x)]), "logs/test")
if (("%s" % (config['vlanTag%d' % (x)]) in str(routput)) and
        ("%s" % (config['vlanInterface%d' % (x)]) in str(routput))):
    endTime = ctime()
    resultCollection(
        "VLAN deletion %s Verification from Node" %
        (config['vlanTag%d' % (x)]), ["FAILED", str(routput)], startTime,
        endTime)
else:
    endTime = ctime()
    resultCollection(
        "VLAN deletion %s Verification from Node" %
        (config['vlanTag%d' % (x)]), ["PASSED", ""], startTime, endTime)
import json import requests from hashlib import md5 import fileinput import subprocess import time import sys from cbrequest import sendrequest, filesave, timetrack, queryAsyncJobResult, configFile, resultCollection, getControllerInfo, executeCmd config = configFile(sys.argv) if len(sys.argv) < 4: print "Argument is not correct.. Correct way as below" print "python createDisableSCSI.py config.txt nodeIP nodePassword outputfile" exit() IP = sys.argv[2] passwd = sys.argv[3] outputfile = sys.argv[4] routput = getControllerInfo(IP, passwd, "touch /etc/disablescsi", outputfile)