def testValidDeleg(t, env):
    """REBOOT with read delegation and reclaim it

    FLAGS: reboot delegations
    DEPEND: MKFILE
    CODE: REBT8
    """
    from st_delegation import _get_deleg
    c = env.c1
    id = "pynfs%i_%s" % (os.getpid(), t.code)
    c.init_connection(id, cb_ident=0)
    deleg_info, fh, stateid = _get_deleg(t, c, c.homedir + [t.code],
                                         None, NFS4_OK)
    sleeptime = _waitForReboot(c, env)
    try:
        res = c.open_file(t.code, fh, claim_type=CLAIM_PREVIOUS,
                          deleg_type=OPEN_DELEGATE_NONE)
        check(res, NFS4ERR_STALE_CLIENTID, "Reclaim using old clientid")
        # res = c.compound([c.renew_op(c.clientid)])
        # check(res, NFS4ERR_STALE_CLIENTID, "RENEW after reboot")
        c.init_connection(id, cb_ident=0)
        res = c.open_file(t.code, fh, claim_type=CLAIM_PREVIOUS,
                          deleg_type=OPEN_DELEGATE_READ)
        check(res, msg="Reclaim using newly created clientid")
        deleg_info = res.resarray[-2].switch.switch.delegation
        if deleg_info.delegation_type != OPEN_DELEGATE_READ:
            t.fail("Could not reclaim read delegation")
    finally:
        env.sleep(sleeptime, "Waiting for grace period to end")

def _get_deleg(t, c, path, funct=None, response=NFS4_OK, write=False,
               deny=OPEN4_SHARE_DENY_NONE):
    time.sleep(0.5) # Give server time to check callback path
    if write:
        access = OPEN4_SHARE_ACCESS_WRITE
        deleg = OPEN_DELEGATE_WRITE
        name = "write delegation"
    else:
        access = OPEN4_SHARE_ACCESS_READ
        deleg = OPEN_DELEGATE_READ
        name = "read delegation"
    # Create the file
    res = c.create_file(t.code, path, access=access, deny=deny,
                        set_recall=True,
                        recall_funct=funct, recall_return=response)
    check(res)
    fh, stateid = c.confirm(t.code, res)
    # Check for delegation
    deleg_info = res.resarray[-2].switch.switch.delegation
    if deleg_info.delegation_type == deleg:
        return deleg_info, fh, stateid

    # Try opening the file again
    res = c.open_file(t.code, path, access=access, deny=deny,
                      set_recall=True,
                      recall_funct=funct, recall_return=response)
    check(res)
    fh, stateid = c.confirm(t.code, res)
    deleg_info = res.resarray[-2].switch.switch.delegation
    if deleg_info.delegation_type != deleg:
        t.pass_warn("Could not get %s" % name)
    return deleg_info, fh, stateid

def _try_empty(t, c, path):
    ops = c.use_obj(path) + [c.getattr([])]
    res = c.compound(ops)
    check(res, msg="GETATTR with empty attr list")
    attrs = res.resarray[-1].obj_attributes
    if attrs:
        t.fail("GETATTR with empty attr list returned %s" % str(attrs))

def testDeadlock(t, env):
    """Trigger deadlock bug

    FLAGS: debug all
    CODE: DEBUG1
    """
    c1 = env.c1.new_client(env.testname(t))
    sess1 = c1.create_session()
    owner = open_owner4(0, "My Open Owner")
    how = openflag4(OPEN4_CREATE, createhow4(GUARDED4, {FATTR4_SIZE: 0}))
    claim = open_claim4(CLAIM_NULL, env.testname(t))
    open_op = op.open(0, OPEN4_SHARE_ACCESS_BOTH, OPEN4_SHARE_DENY_NONE,
                      owner, how, claim)
    res = sess1.compound(env.home + [open_op, op.getfh()]) # OPEN
    fh = res.resarray[-1].object
    stateid = res.resarray[-2].stateid
    ####
    def ops(i):
        return [op.putfh(fh),
                op.write(stateid, i*1000, UNSTABLE4, chr(97+i)*100),
                op.getattr(42950721818L)
                ]
    xids = [sess1.compound_async(ops(i), slot=i) for i in range(4)]
    for xid in xids:
        res = sess1.listen(xid)
        check(res)
        print res
    res = close_file(sess1, fh, stateid=stateid)
    check(res)

def testDots(t, env):
    """LOOKUP on . and ..

    OK or _NOENT - WARN
    _BADNAME - PASS

    FLAGS: lookup dots all
    DEPEND: MKDIR
    CODE: LOOK8
    """
    # Create dir/foo
    c = env.c1
    dir = c.homedir + [t.code]
    res = c.create_obj(dir)
    check(res)
    res = c.create_obj(dir + ['foo'])
    check(res)
    # Run tests
    res1 = c.compound(c.use_obj(dir + ['.']))
    checklist(res1, [NFS4ERR_NOENT, NFS4ERR_BADNAME],
              "LOOKUP a nonexistent '.'")
    res2 = c.compound(c.use_obj(dir + ['..']))
    checklist(res2, [NFS4ERR_NOENT, NFS4ERR_BADNAME],
              "LOOKUP a nonexistent '..'")
    res1 = c.compound(c.use_obj(dir + ['.', 'foo']))
    checklist(res1, [NFS4ERR_NOENT, NFS4ERR_BADNAME],
              "LOOKUP a nonexistent '.'")
    res2 = c.compound(c.use_obj(dir + ['..', t.code]))
    checklist(res2, [NFS4ERR_NOENT, NFS4ERR_BADNAME],
              "LOOKUP a nonexistent '..'")

def testUnaccessibleDirAttrs(t, env):
    """READDIR with (cfh) in unaccessible directory requesting attrs

    FLAGS: readdir all mode000
    DEPEND: MKDIR MODE
    CODE: RDDR12
    """
    c = env.c1
    path = c.homedir + [t.code]
    c.maketree([t.code, ['hidden']])
    ops = c.use_obj(path) + [c.setattr({FATTR4_MODE: 0})]
    res = c.compound(ops)
    check(res, msg="Setting mode=0 on directory %s" % t.code)
    ops = c.use_obj(path) + \
          [c.readdir(attr_request=[FATTR4_RDATTR_ERROR, FATTR4_TYPE])]
    res = c.compound(ops)
    if env.opts.uid == 0:
        checklist(res, [NFS4_OK, NFS4ERR_ACCESS],
                  "READDIR of directory with mode=000")
    else:
        check(res, NFS4ERR_ACCESS, "READDIR of directory with mode=000")

###########################################

# Legacy "Extra test" carried over from the old class-based suite; it still
# uses self/ncl and is not runnable under the (t, env) framework.
def testStrangeNames(t, env):
    """READDIR should obey OPEN naming policy

    Extra test

    Comments: Verifying that readdir obeys the same naming policy
    as OPEN.
    """
    self.init_connection()
    try:
        (accepted_names, rejected_names) = self.try_file_names(remove_files=0)
    except SkipException, e:
        self.skip(e)
    fh = self.do_rpc(self.ncl.do_getfh, self.tmp_dir)
    entries = self.do_rpc(self.ncl.do_readdir, fh)
    readdir_names = [entry.name for entry in entries]

    # Verify that READDIR returned all accepted_names
    missing_names = []
    for name in accepted_names:
        if name not in readdir_names:
            missing_names.append(name)
    self.failIf(missing_names,
                "Missing names in READDIR results: %s" % missing_names)

    # ... and nothing more
    extra_names = []
    for name in readdir_names:
        if not name in accepted_names:
            extra_names.append(name)
    self.failIf(extra_names,
                "Extra names in READDIR results: %s" % extra_names)

def testLeasePeriod(t, env):
    """Any unconfirmed record that is not confirmed within a lease
    period SHOULD be removed.

    FLAGS: exchange_id all
    CODE: EID9
    """
    c1 = env.c1.new_client("%s_1" % env.testname(t))
    c2 = env.c1.new_client("%s_2" % env.testname(t))

    # Get server's lease time
    c3 = env.c1.new_client("%s_3" % env.testname(t))
    sess = c3.create_session()
    lease = _getleasetime(sess)

    # CREATE_SESSION
    chan_attrs = channel_attrs4(0, 8192, 8192, 8192, 128, 8, [])

    # Inside the lease period, CREATE_SESSION should succeed.
    time.sleep(min(lease - 10, 1))
    res1 = c1.c.compound([op.create_session(c1.clientid, c1.seqid, 0,
                                            chan_attrs, chan_attrs,
                                            123, [])], None)
    check(res1)

    # After the lease period, CREATE_SESSION should fail with
    # NFS4ERR_STALE_CLIENTID.
    time.sleep(lease + 10)
    res2 = c2.c.compound([op.create_session(c2.clientid, c2.seqid, 0,
                                            chan_attrs, chan_attrs,
                                            123, [])], None)
    check(res2, NFS4ERR_STALE_CLIENTID)

def testOPENClaimFH(t, env):
    """OPEN file with claim_type CLAIM_FH

    FLAGS: open all
    CODE: OPEN7
    """
    sess1 = env.c1.new_client_session(env.testname(t))
    res = create_file(sess1, env.testname(t))
    check(res)

    fh = res.resarray[-1].object
    stateid = res.resarray[-2].stateid
    res = close_file(sess1, fh, stateid=stateid)
    check(res)

    claim = open_claim4(CLAIM_FH)
    how = openflag4(OPEN4_NOCREATE)
    oowner = open_owner4(0, "My Open Owner 2")
    open_op = op.open(0, OPEN4_SHARE_ACCESS_BOTH, OPEN4_SHARE_DENY_NONE,
                      oowner, how, claim)
    res = sess1.compound([op.putfh(fh), open_op])
    check(res)

    stateid = res.resarray[-1].stateid
    stateid.seqid = 0
    data = "write test data"
    res = sess1.compound([op.putfh(fh), op.write(stateid, 5, FILE_SYNC4, data)])
    check(res)
    res = sess1.compound([op.putfh(fh), op.read(stateid, 0, 1000)])
    check(res)
    if not res.resarray[-1].eof:
        fail("EOF not set on read")
    desired = "\0"*5 + data
    if res.resarray[-1].data != desired:
        fail("Expected %r, got %r" % (desired, res.resarray[-1].data))

def testChangeDeleg(t, env, funct=_recall):
    """Get a read delegation, change to a different callback server,
    then recall the delegation

    FLAGS: delegations
    CODE: DELEG9
    """
    from nfs4lib import CBServer
    c = env.c1
    id = 'pynfs%i_%s' % (os.getpid(), t.code)
    c.init_connection(id, cb_ident=0)
    deleg_info, fh, stateid = _get_deleg(t, c, c.homedir + [t.code],
                                         funct, NFS4_OK)
    # Create new callback server
    new_server = CBServer(c)
    new_server.set_cb_recall(c.cbid, funct, NFS4_OK)
    cb_thread = threading.Thread(target=new_server.run)
    cb_thread.setDaemon(1)
    cb_thread.start()
    c.cb_server = new_server
    env.sleep(3)
    # Switch to using new server
    res = c.compound([_set_clientid(c, id, new_server)])
    check(res, msg="Switch to new callback server")
    c.clientid = res.resarray[0].switch.switch.clientid
    confirm = res.resarray[0].switch.switch.setclientid_confirm
    confirmop = c.setclientid_confirm_op(c.clientid, confirm)
    res = c.compound([confirmop])
    checklist(res, [NFS4_OK, NFS4ERR_RESOURCE])
    if res.status == NFS4ERR_RESOURCE:
        # ibm workaround
        res = c.compound([confirmop])
        check(res)
    count = new_server.opcounts[OP_CB_RECALL]
    fh2, stateid2 = _cause_recall(t, env)
    _verify_cb_occurred(t, c, count)

def _try_changed_size(env, path):
    c = env.c1
    dict = c.do_getattrdict(path, [FATTR4_SIZE])
    dict[FATTR4_SIZE] += 1
    ops = c.use_obj(path) + [c.nverify_op(dict)] + c.use_obj(path)
    res = c.compound(ops)
    check(res, msg="NVerifying incorrect size")

def testLargeReadWrite(t, env):
    """Compound with large READ and large WRITE

    FLAGS: write ganesha
    DEPEND: MKFILE
    CODE: WRT16
    """
    c = env.c1
    c.init_connection()
    maxread, maxwrite = _get_iosize(t, c, c.homedir)
    # linux server really should be able to handle (maxread, maxwrite)
    # but can't:
    size = min(maxread/4, maxwrite/4)
    writedata = 'A'*size
    attrs = {FATTR4_SIZE: size}
    fh, stateid = c.create_confirm(t.code, attrs=attrs,
                                   deny=OPEN4_SHARE_DENY_NONE)
    ops = c.use_obj(fh)
    ops += [c.read_op(stateid, 0, size)]
    ops += [c.write_op(stateid, 0, UNSTABLE4, writedata)]
    res = c.compound(ops)
    check(res)
    data = res.resarray[-2].switch.switch.data
    if len(data) != len(writedata):
        t.fail("READ returned %d bytes, expected %d"
               % (len(data), len(writedata)))
    if (data != '\0'*size):
        t.fail("READ returned unexpected data")
    res = c.read_file(fh, 0, size)
    _compare(t, res, writedata, True)

def testCallbackInfoUpdate(t, env):
    """A probable callback information update: the server records an
    unconfirmed { v, x, c, k, t } and leaves the confirmed
    { v, x, c, l, s } in place, such that t != s.

    FLAGS: setclientid all
    DEPEND: INIT
    CODE: CID4a
    """
    # Record notation (RFC 3530, SETCLIENTID): { v, x, c, k/l, s/t } =
    # { client verifier, client id string, server-assigned clientid,
    #   callback info, setclientid_confirm verifier }
    c1 = env.c1
    clid = "Clid_for_%s_pid=%i" % (t.code, os.getpid())

    # confirmed { v, x, c, l, s }
    (cclientid, cconfirm) = c1.init_connection(clid, verifier=c1.verifier)

    # request { v, x, c, k, s } --> unconfirmed { v, x, c, k, t }
    ops = [c1.setclientid(clid, verifier=c1.verifier)]
    res = c1.compound(ops)
    check(res)

    tclientid = res.resarray[0].switch.switch.clientid
    tconfirm = res.resarray[0].switch.switch.setclientid_confirm

    # (t != s)
    if tconfirm == '\x00\x00\x00\x00\x00\x00\x00\x00':
        t.fail("Got a clientid confirm verifier of all zeros!")
    if cclientid != tclientid:
        t.fail("Returned a different clientid for a callback information update!")
    if tconfirm == cconfirm:
        t.fail("Returned the same confirm verifier for a callback information update!")

def _try_mand(env, path):
    c = env.c1
    mand_bits = [attr.bitnum for attr in env.attr_info
                 if attr.mandatory and attr.name != "rdattr_error"]
    dict = c.do_getattrdict(path, mand_bits)
    ops = c.use_obj(path) + [c.nverify_op(dict)] + c.use_obj(path)
    res = c.compound(ops)
    check(res, NFS4ERR_SAME,
          "NVerifying mandatory attributes against getattr")

def testConfirmedDiffVerifier(t, env):
    """The server has previously recorded a confirmed { u, x, c, l, s }
    record such that v != u, l may or may not equal k, and has not
    recorded any unconfirmed { *, x, *, *, * } record for x. The server
    records an unconfirmed { v, x, d, k, t } (d != c, t != s).

    FLAGS: setclientid all
    DEPEND: INIT
    CODE: CID4b
    """
    c1 = env.c1
    clid = "Clid_for_%s_pid=%i" % (t.code, os.getpid())

    # confirmed { u, x, c, l, s }
    (cclientid, cconfirm) = c1.init_connection(clid, verifier=c1.verifier)

    # request { v, x, c, k, s } --> unconfirmed { v, x, d, k, t }
    ops = [c1.setclientid(clid, verifier="diff")]
    res = c1.compound(ops)
    check(res)

    tclientid = res.resarray[0].switch.switch.clientid
    tconfirm = res.resarray[0].switch.switch.setclientid_confirm

    # (d != c, t != s)
    if tconfirm == '\x00\x00\x00\x00\x00\x00\x00\x00':
        t.fail("Got a clientid confirm verifier of all zeros!")
    if cclientid == tclientid:
        t.fail("Returned the same clientid for a different verifier!")
    if tconfirm == cconfirm:
        t.fail("Returned the same confirm verifier for a different verifier!")

def testClaimCur(t, env):
    """DELEGATION test

    Get read delegation, then have it recalled. In the process of
    returning, send some OPENs with CLAIM_DELEGATE_CUR

    FLAGS: delegations
    CODE: DELEG14
    """
    c = env.c1
    c.init_connection('pynfs%i_%s' % (os.getpid(), t.code), cb_ident=0)
    deleg_info, fh, stateid = _get_deleg(t, c, c.homedir + [t.code],
                                         None, NFS4_OK)
    # Cause it to be recalled, and wait for cb_recall to finish
    # FRED - this is problematic if server doesn't reply until
    # it gets the DELEGRETURN
    res = c.open_file('newowner', c.homedir + [t.code],
                      access=OPEN4_SHARE_ACCESS_WRITE,
                      deny=OPEN4_SHARE_DENY_NONE)
    checklist(res, [NFS4_OK, NFS4ERR_DELAY], "Open which causes recall")
    env.sleep(2, "Waiting for recall")
    # Now send some opens
    path = c.homedir + [t.code]
    res = c.open_file('owner1', path, access=OPEN4_SHARE_ACCESS_READ,
                      claim_type=CLAIM_DELEGATE_CUR,
                      deleg_stateid=deleg_info.read.stateid)
    check(res)
    ops = c.use_obj(path) + [c.delegreturn_op(deleg_info.read.stateid)]
    res = c.compound(ops)
    check(res)

def _verify_cb_occurred(t, c, count):
    newcount = c.cb_server.opcounts[OP_CB_RECALL]
    if newcount <= count:
        t.fail("Recall for callback_ident=%i never occurred" % c.cbid)
    res = c.cb_server.get_recall_res(c.cbid)
    if res is not None:
        check(res, msg="DELEGRETURN")

def _try_long(env, path):
    c = env.c1
    all = [attr.bitnum for attr in env.attr_info if not attr.writeonly]
    ops = c.use_obj(path) + [c.getattr(all)]
    res = c.compound(ops)
    # Note attrs are unpacked which helps check for corruption
    check(res, msg="Asking for all legal attributes")

def testLargeData(t, env):
    """WRITE with a large amount of data

    FLAGS: write read all
    DEPEND: MKFILE
    CODE: WRT5
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    data = "abcdefghijklmnopq" * 0x10000
    # Write the data
    pos = 0
    while pos < len(data):
        res = c.write_file(fh, data[pos:], pos, stateid)
        check(res, msg="WRITE with a large amount of data")
        pos += res.count
        if res.count == 0:
            t.fail("WRITE with a large amount of data returned count=0")
    # Read the data back in
    eof = False
    newdata = ''
    while not eof:
        res = c.read_file(fh, len(newdata), len(data) - len(newdata), stateid)
        check(res, msg="READ with large amount of data")
        newdata += res.data
        eof = res.eof
    if data != newdata:
        t.fail("READ did not correspond to WRITE with large dataset")

def testSecinfoExportToExport(t, env):
    """SECINFO across exports should return NFS4_OK

    FLAGS: exports
    DEPEND: EXP1
    CODE: EXP3
    """
    c = env.c1
    # Check /ifs export
    ops = [c.putrootfh_op()]
    ops += [c.secinfo_op("ifs")]
    res = c.compound(ops)
    check(res)
    if len(res.resarray[-1].switch.switch) != 1:
        t.fail("SECINFO on /ifs returned incorrect number of flavors")
    # Check /ifs/data export
    ops = [c.putrootfh_op()]
    ops += [c.lookup_op("ifs")]
    ops += [c.secinfo_op("data")]
    res = c.compound(ops)
    check(res)
    if len(res.resarray[-1].switch.switch) != 4:
        t.fail("SECINFO on /ifs/data returned incorrect number of flavors")

def testEmptyCommit(t, env):
    """Check for proper handling of empty LAYOUTCOMMIT.

    FLAGS: block
    CODE: BLOCK3
    """
    sess = env.c1.new_pnfs_client_session(env.testname(t))
    # Create the file
    res = create_file(sess, env.testname(t))
    check(res)
    # Get layout 1
    fh = res.resarray[-1].object
    open_stateid = res.resarray[-2].stateid
    print open_stateid
    ops = [op.putfh(fh),
           op.layoutget(False, LAYOUT4_BLOCK_VOLUME, LAYOUTIOMODE4_RW,
                        0, 8192, 8192, open_stateid, 0xffff)]
    res = sess.compound(ops)
    check(res)
    # Get layout 2
    lo_stateid1 = res.resarray[-1].logr_stateid
    print lo_stateid1
    ops = [op.putfh(fh),
           op.layoutget(False, LAYOUT4_BLOCK_VOLUME, LAYOUTIOMODE4_RW,
                        8192, 8192, 8192, lo_stateid1, 0xffff)]
    res = sess.compound(ops)
    check(res)
    lo_stateid2 = res.resarray[-1].logr_stateid
    print lo_stateid2
    # Parse opaque to get info for commit
    # STUB not very general
    layout = res.resarray[-1].logr_layout[-1]
    p = BlockUnpacker(layout.loc_body)
    opaque = p.unpack_pnfs_block_layout4()
    p.done()
    extent = opaque.blo_extents[-1]
    extent.bex_state = PNFS_BLOCK_READWRITE_DATA
    p = BlockPacker()
    p.pack_pnfs_block_layoutupdate4(pnfs_block_layoutupdate4([extent]))
    time = newtime4(True, get_nfstime())
    ops = [op.putfh(fh),
           op.layoutcommit(extent.bex_file_offset, extent.bex_length, False,
                           lo_stateid2, newoffset4(True, 2 * 8192 - 1), time,
                           layoutupdate4(LAYOUT4_BLOCK_VOLUME, p.get_buffer()))]
    res = sess.compound(ops)
    check(res)
    # Send another LAYOUTCOMMIT, with an empty opaque
    time = newtime4(True, get_nfstime())
    ops = [op.putfh(fh),
           op.layoutcommit(extent.bex_file_offset, extent.bex_length, False,
                           lo_stateid2, newoffset4(True, 2 * 8192 - 1), time,
                           layoutupdate4(LAYOUT4_BLOCK_VOLUME, ""))]
    res = sess.compound(ops)
    check(res)

def testLongName(t, env):
    """CREATE should fail with NFS4ERR_NAMETOOLONG with long filenames

    FLAGS: create longname all
    CODE: CR15
    """
    c = env.c1
    res = c.create_obj(c.homedir + [env.longname])
    check(res, NFS4ERR_NAMETOOLONG, "CREATE with very long component")

##############################################

#FRED - need utf8 check

# FIXME
def testNamingPolicy(self):
    """CREATE should obey OPEN file name creation policy

    Extra test
    """
    self.init_connection()
    try:
        (x, rejected_names_open) = self.try_file_names(creator=self.create_via_open)
        (x, rejected_names_create) = self.try_file_names(creator=self.create_via_create)
        self.failIf(rejected_names_open != rejected_names_create,
                    "CREATE does not obey OPEN naming policy")
    except SkipException, e:
        self.skip(e)

def testCreatExclusiveFile(t, env):
    """OPEN normal file with create and exclusive flags

    FLAGS: open all
    DEPEND: INIT
    CODE: OPEN4
    """
    c = env.c1
    c.init_connection()
    # Create the file
    res = c.create_file(t.code, mode=EXCLUSIVE4, verifier='12345678',
                        deny=OPEN4_SHARE_DENY_NONE)
    checklist(res, [NFS4_OK, NFS4ERR_NOTSUPP],
              "Trying to do exclusive create of file %s" % t.code)
    if res.status == NFS4ERR_NOTSUPP:
        t.fail_support("Exclusive OPEN not supported")
    fh, stateid = c.confirm(t.code, res)
    # Create the file again, should return an error
    res = c.create_file(t.code, mode=EXCLUSIVE4, verifier='87654321',
                        deny=OPEN4_SHARE_DENY_NONE)
    check(res, NFS4ERR_EXIST,
          "Trying to do exclusive recreate of file %s" % t.code)
    # Create with same verifier should return same object
    res = c.create_file(t.code, mode=EXCLUSIVE4, verifier='12345678',
                        deny=OPEN4_SHARE_DENY_NONE)
    check(res, msg="Trying to do exclusive recreate of file %s" % t.code)
    newfh, stateid = c.confirm(t.code, res)
    if fh != newfh:
        t.fail("Filehandle changed on duplicate exclusive create")

def testTimedoutGrabLock(t, env):
    """LOCK: server should release locks of timed out client

    FLAGS: lock timed all
    DEPEND: MKFILE
    CODE: LOCK13
    """
    c1 = env.c1
    c1.init_connection()
    # Client 1: create a file and get its fh
    fh1, stateid1 = c1.create_confirm(t.code)
    c2 = env.c2
    c2.init_connection()
    # Client 2: open the file
    fh2, stateid2 = c2.open_confirm(t.code, deny=OPEN4_SHARE_DENY_NONE)
    # Client 1: lock file
    res1 = c1.lock_file(t.code, fh1, stateid1)
    check(res1)
    # Now wait, let client1 expire while client2 sends RENEWs
    sleeptime = c2.getLeaseTime() // 2
    for i in range(3):
        env.sleep(sleeptime)
        res = c2.compound([c2.renew_op(c2.clientid)])
        checklist(res, [NFS4_OK, NFS4ERR_CB_PATH_DOWN])
    # Client 2: Lock file, should work since Client 1's lock has expired
    res2 = c2.lock_file(t.code, fh2, stateid2, type=READ_LT)
    check(res2, msg="Locking file after another client's lock expires")

def testUpgrades(t, env):
    """OPEN read, write, and read-write, then close

    Inspired by a linux nfsd regression: the final close closes all the
    opens, and nfsd did that right, but some misaccounting somewhere
    leaked a file reference with the result that the filesystem would
    be unmountable after running this test.

    FLAGS: open all
    DEPEND: MKFILE
    CODE: OPEN29
    """
    c = env.c1
    c.init_connection()
    file = c.homedir + [t.code]
    owner = t.code
    c.create_confirm(owner, file, access=OPEN4_SHARE_ACCESS_READ,
                     deny=OPEN4_SHARE_DENY_NONE)
    c.open_file(owner, file, access=OPEN4_SHARE_ACCESS_WRITE,
                deny=OPEN4_SHARE_DENY_NONE)
    res = c.open_file(owner, file, access=OPEN4_SHARE_ACCESS_BOTH,
                      deny=OPEN4_SHARE_DENY_NONE)
    check(res)
    fh = res.resarray[-1].switch.switch.object
    stateid = res.resarray[-2].switch.switch.stateid
    c.close_file(owner, fh, stateid)

def testUndefined(t, env):
    """COMPOUND with ops 0, 1, 2 and 200 should return NFS4ERR_OP_ILLEGAL

    Comments: The server should return NFS4ERR_OP_ILLEGAL for the
    undefined operations 0, 1 and 2. Although operation 2 may be
    introduced in later minor versions, the server should always
    return NFS4ERR_NOTSUPP if the minorversion is 0.

    FLAGS: compound all
    CODE: COMP5
    """
    # pack_nfs_argop4 does not allow packing invalid operations.
    opnum = OP_ILLEGAL
    class custom_packer(NFS4Packer):
        def pack_nfs_argop4(self, data):
            self.pack_int(data.argop)
    c = env.c1
    origpacker = c.nfs4packer
    try:
        c.nfs4packer = custom_packer()
        for opnum in [OP_ILLEGAL, 0, 1, 2, 200]:
            try:
                res = c.compound([nfs_argop4(argop=opnum)])
                check(res, NFS4ERR_OP_ILLEGAL, "Sent illegal op=%i" % opnum)
            except RPCError, e:
                t.fail("COMPOUND with illegal op=%i got %s, "
                       "expected NFS4ERR_OP_ILLEGAL" % (opnum, e))
    finally:
        c.nfs4packer = origpacker

def testDenyRead3(t, env):
    """READ on a read-denied file

    NFS4ERR_LOCKED return is specified in 8.1.4:
        seems to apply to conflicts due to an OPEN(deny=x)
    NFS4ERR_ACCESS return is specified in 14.2.16:
        seems to apply to the principal not having access to the file
    NFS4ERR_OPENMODE return is specified in 8.1.4:
        (does not apply to special stateids) Why is this again?
        seems to apply to doing WRITE on OPEN(allow=read)

    FLAGS: open read all
    DEPEND: MKFILE
    CODE: OPEN23
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code,
                                   access=OPEN4_SHARE_ACCESS_BOTH,
                                   deny=OPEN4_SHARE_DENY_READ)
    res = c.write_file(fh, 'data', 0, stateid)
    check(res)
    # Try to read file w/o opening
    res = c.read_file(fh)
    check(res, NFS4ERR_LOCKED, "Trying to READ a read-denied file")

def _testDeleg(t, env, openaccess, want, breakaccess, sec=None, sec2=None):
    recall = threading.Event()
    def pre_hook(arg, env):
        recall.stateid = arg.stateid # NOTE this must be done before set()
        recall.cred = env.cred.raw_cred
        env.notify = recall.set # This is called after compound sent to queue
    def post_hook(arg, env, res):
        return res
    sess1 = env.c1.new_client_session("%s_1" % env.testname(t), sec=sec)
    sess1.client.cb_pre_hook(OP_CB_RECALL, pre_hook)
    sess1.client.cb_post_hook(OP_CB_RECALL, post_hook)
    if sec2:
        sess1.compound([op.backchannel_ctl(env.c1.prog, sec2)])
    fh = _create_file_with_deleg(sess1, env.testname(t), openaccess | want)
    sess2 = env.c1.new_client_session("%s_2" % env.testname(t))
    claim = open_claim4(CLAIM_NULL, env.testname(t))
    owner = open_owner4(0, "My Open Owner 2")
    how = openflag4(OPEN4_NOCREATE)
    open_op = op.open(0, breakaccess, OPEN4_SHARE_DENY_NONE,
                      owner, how, claim)
    slot = sess2.compound_async(env.home + [open_op])
    # Wait for recall, and return delegation
    recall.wait() # STUB - deal with timeout
    # Getting here means CB_RECALL reply is in the send queue.
    # Give it a moment to actually be sent
    env.sleep(.1)
    res = sess1.compound([op.putfh(fh), op.delegreturn(recall.stateid)])
    check(res)
    # Now get OPEN reply
    res = sess2.listen(slot)
    checklist(res, [NFS4_OK, NFS4ERR_DELAY])
    return recall

def _try_unknown(t, c, path):
    ops = c.use_obj(path) + [c.getattr([1000])]
    res = c.compound(ops)
    check(res, msg="GETATTR with unknown attr")
    attrs = res.resarray[-1].obj_attributes
    if attrs:
        t.fail("GETATTR with unknown attr returned %s" % str(attrs))

def testInodeLocking(t, env):
    """SETATTR: This causes printk message due to inode locking bug

    log shows - nfsd: inode locked twice during operation.
    Sporadic system crashes can occur after running this test

    FLAGS: setattr all
    DEPEND: MODE MKDIR MKFILE
    CODE: SATT13
    """
    #t.fail("Test set to fail without running. Currently causes "
    #       "inode corruption leading to sporadic system crashes.")
    c = env.c1
    c.init_connection()
    basedir = c.homedir + [t.code]
    res = c.create_obj(basedir)
    check(res)
    fh, stateid = c.create_confirm(t.code, basedir + ['file'])
    # In a single compound statement, setattr on dir and then
    # do a state operation on a file in dir (like write or remove)
    ops = c.use_obj(basedir) + [c.setattr({FATTR4_MODE: 0754})]
    ops += [c.lookup_op('file'), c.write_op(stateid, 0, 0, 'blahblah')]
    res = c.compound(ops)
    check(res, msg="SETATTR on dir and state operation on file in dir")

def testManyClaims(t, env):
    """REBOOT test

    FLAGS: reboot
    DEPEND: MKDIR MKFILE
    CODE: REBT2
    """
    c = env.c1
    clientcount = 5
    pid = str(os.getpid())
    basedir = c.homedir + [t.code]
    res = c.create_obj(basedir)
    check(res, msg="Creating test directory %s" % t.code)
    # Make lots of client ids
    fhdict = {}
    idlist = ["pynfs%s%06i" % (pid, x) for x in range(clientcount)]
    badids = ["badpynfs%s%06i" % (pid, x) for x in range(clientcount)]
    for id in idlist:
        c.init_connection(id)
        fh, stateid = c.create_confirm(t.code, basedir + [id])
        fhdict[id] = fh
    sleeptime = _waitForReboot(c, env)
    try:
        # Lots of reclaims
        badfh = fhdict[idlist[-1]]
        for goodid, badid in zip(idlist, badids):
            c.init_connection(goodid)
            res = c.open_file(t.code, fhdict[goodid],
                              claim_type=CLAIM_PREVIOUS,
                              deleg_type=OPEN_DELEGATE_NONE)
            check(res, msg="Reclaim with valid clientid %s" % goodid)
            c.init_connection(badid)
            res = c.open_file(t.code, badfh, claim_type=CLAIM_PREVIOUS,
                              deleg_type=OPEN_DELEGATE_NONE)
            checklist(res, [NFS4ERR_NO_GRACE, NFS4ERR_RECLAIM_BAD],
                      "Reclaim with bad clientid %s" % badid)
    finally:
        env.sleep(sleeptime, "Waiting for grace period to end")

def testOpenRemoveUnconfirmed(t, env):
    """OPEN and REMOVE without confirming

    FLAGS: open remove all
    DEPEND: MKFILE
    CODE: OPENREMOVE
    """
    c = env.c1
    c.init_connection()
    res = c.create_file(t.code, deny=OPEN4_SHARE_DENY_BOTH, mode=GUARDED4)
    check(res, msg="creating")
    res = c.remove_obj(c.homedir, t.code)
    check(res, msg="removing")
    res = c.create_file(t.code, deny=OPEN4_SHARE_DENY_BOTH, mode=GUARDED4)
    check(res, msg="creating2")

def testSecondServer(t, env):
    """SECONDSERVER1 - Tests the "--secondserver" parameter

    FLAGS: clustered
    DEPEND:
    CODE: SECONDSERV1
    """
    if not env.opts.secondserver:
        t.fail("SECONDSERV1 test being skipped: Second server not defined!")
    c1 = env.c1
    c1.init_connection()
    # XXX Using c1node2 here can cause problems when passing in the same
    # server for secondserver as the first server.
    c3node2 = env.c3node2
    c3node2.init_connection()
    # Use the second server: create, close and remove the file.
    print "creating/removing on second server"
    fh1, stateid1 = c3node2.create_confirm(t.code, c3node2.homedir + [t.code])
    res = c3node2.close_file(t.code, fh1, stateid1)
    check(res)
    res = c3node2.remove_obj(c3node2.homedir, t.code)
    check(res)
    # Create/close a file from connection 1
    print "creating on first server"
    fh1, stateid1 = c1.create_confirm(t.code, c1.homedir + [t.code])
    res = c1.close_file(t.code, fh1, stateid1)
    check(res)
    # Remove the file from connection 2
    print "removing on second server"
    res = c3node2.remove_obj(c3node2.homedir, t.code)
    check(res)

def testSplitCommit(t, env):
    """Check for proper handling of disjoint LAYOUTCOMMIT.opaque

    FLAGS: block
    CODE: BLOCK4
    """
    sess = env.c1.new_client_session(env.testname(t),
                                     flags=EXCHGID4_FLAG_USE_PNFS_MDS)
    # Create the file
    res = create_file(sess, env.testname(t))
    check(res)
    # Get layout 1
    fh = res.resarray[-1].object
    open_stateid = res.resarray[-2].stateid
    print open_stateid
    ops = [op.putfh(fh),
           op.layoutget(False, LAYOUT4_BLOCK_VOLUME, LAYOUTIOMODE4_RW,
                        0, 2 * 8192, 2 * 8192, open_stateid, 0xffff)]
    res = sess.compound(ops)
    check(res)
    lo_stateid1 = res.resarray[-1].logr_stateid
    print lo_stateid1
    # Parse opaque to get info for commit
    # STUB not very general
    layout = res.resarray[-1].logr_layout[-1]
    p = BlockUnpacker(layout.loc_body)
    opaque = p.unpack_pnfs_block_layout4()
    p.done()
    dev = opaque.blo_extents[-1].bex_vol_id
    extent1 = pnfs_block_extent4(dev, 0, 8192, 0, PNFS_BLOCK_READWRITE_DATA)
    extent2 = pnfs_block_extent4(dev, 8192, 8192, 0, PNFS_BLOCK_READWRITE_DATA)
    p = BlockPacker()
    p.pack_pnfs_block_layoutupdate4(
        pnfs_block_layoutupdate4([extent1, extent2]))
    time = newtime4(True, get_nfstime())
    ops = [op.putfh(fh),
           op.layoutcommit(0, 2 * 8192, False, lo_stateid1,
                           newoffset4(True, 2 * 8192 - 1), time,
                           layoutupdate4(LAYOUT4_BLOCK_VOLUME, p.get_buffer()))]
    res = sess.compound(ops)
    check(res)

def testBadLockSeqid3(t, env):
    """LOCKU with a bad lockseqid should return NFS4ERR_BAD_SEQID

    # FLAGS: locku seqid all
    FLAGS: ganesha
    DEPEND: MKFILE
    CODE: LKU6c
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    res1 = c.lock_file(t.code, fh, stateid, 0, 50)
    check(res1)
    res2 = c.relock_file(1, fh, res1.lockid, 100, 50)
    check(res2)
    res3 = c.unlock_file(1, fh, res2.lockid)
    check(res3, NFS4ERR_BAD_SEQID, "LOCKU with a bad lockseqid=1")

def testStateid1(t, env):
    """Check for proper sequence handling in layout stateids.

    FLAGS: block
    CODE: BLOCK1
    """
    sess = env.c1.new_client_session(env.testname(t),
                                     flags=EXCHGID4_FLAG_USE_PNFS_MDS)
    # Create the file
    res = create_file(sess, env.testname(t))
    check(res)
    # Get layout 1
    fh = res.resarray[-1].object
    open_stateid = res.resarray[-2].stateid
    print open_stateid
    ops = [op.putfh(fh),
           op.layoutget(False, LAYOUT4_BLOCK_VOLUME, LAYOUTIOMODE4_RW,
                        0, 8192, 8192, open_stateid, 0xffff)]
    res = sess.compound(ops)
    check(res)
    lo_stateid = res.resarray[-1].logr_stateid
    print lo_stateid
    if lo_stateid.seqid != 1:
        # From draft23 12.5.2 "The first successful LAYOUTGET processed by
        # the server using a non-layout stateid as an argument MUST have the
        # "seqid" field of the layout stateid in the response set to one."
        fail("Expected stateid.seqid==1, got %i" % lo_stateid.seqid)
    for i in range(6):
        # Get subsequent layouts
        ops = [op.putfh(fh),
               op.layoutget(False, LAYOUT4_BLOCK_VOLUME, LAYOUTIOMODE4_RW,
                            (i + 1) * 8192, 8192, 8192, lo_stateid, 0xffff)]
        res = sess.compound(ops)
        check(res)
        lo_stateid = res.resarray[-1].logr_stateid
        print lo_stateid
        if lo_stateid.seqid != i + 2:
            # From draft23 12.5.3 "After the layout stateid is established,
            # the server increments by one the value of the "seqid" in each
            # subsequent LAYOUTGET and LAYOUTRETURN response,
            fail("Expected stateid.seqid==%i, got %i"
                 % (i + 2, lo_stateid.seqid))

def testOldLockStateid(t, env):
    """LOCK with old lock stateid should return NFS4ERR_OLD_STATEID

    FLAGS: lock oldid all
    DEPEND: MKFILE
    CODE: LOCK9a
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    res1 = c.lock_file(t.code, fh, stateid, 0, 25)
    check(res1)
    res2 = c.relock_file(1, fh, res1.lockid, 50, 25)
    check(res2)
    res3 = c.relock_file(2, fh, res1.lockid, 100, 25)
    check(res3, NFS4ERR_OLD_STATEID, "LOCK with old lockstateid",
          [NFS4ERR_BAD_STATEID])

def testGrabLock2(t, env):
    """MULTIPLE clients trying to get lock

    FLAGS: lock all
    DEPEND: MKFILE
    CODE: LOCK15
    """
    c1 = env.c1
    c1.init_connection()
    c2 = env.c2
    c2.init_connection()
    file = c1.homedir + [t.code]
    # Client1 creates a file
    fh1, stateid1 = c1.create_confirm('owner1', file,
                                      access=OPEN4_SHARE_ACCESS_BOTH,
                                      deny=OPEN4_SHARE_DENY_WRITE)
    # Client2 opens the file
    fh2, stateid2 = c2.open_confirm('owner2', file,
                                    access=OPEN4_SHARE_ACCESS_READ,
                                    deny=OPEN4_SHARE_DENY_NONE)
    # Client1 locks the file
    res1 = c1.lock_file('owner1', fh1, stateid1, type=WRITE_LT)
    check(res1)
    # Client2 tries to lock the file, should fail
    res2 = c2.lock_file('owner2', fh2, stateid2, type=READ_LT)
    check(res2, NFS4ERR_DENIED,
          "Getting read lock when another owner has write lock")
    # Client1 unlocks the file
    res1 = c1.unlock_file(1, fh1, res1.lockid)
    check(res1)
    # Client2 tries to lock the file, should work now
    res2 = c2.lock_file('owner2', fh2, stateid2, type=READ_LT)
    check(res2,
          msg="Getting read lock after another owner has released write lock")

def testGraceSeqid(t, env):
    """Make sure NFS4ERR_GRACE bumps seqid

    FLAGS: reboot
    DEPEND:
    CODE: REBT11
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    sleeptime = _waitForReboot(c)
    try:
        c.init_connection()
        res = c.open_file(t.code, fh, claim_type=CLAIM_PREVIOUS,
                          deleg_type=OPEN_DELEGATE_NONE)
        check(res, msg="Reclaim using newly created clientid")
        res = c.open_file(t.code)
        check(res, NFS4ERR_GRACE, "First OPEN during grace period")
        res = c.open_file(t.code)
        check(res, NFS4ERR_GRACE, "Second OPEN during grace period")
    finally:
        env.sleep(sleeptime, "Waiting for grace period to end")
    res = c.open_file(t.code)
    check(res, NFS4_OK, "OPEN after grace period")

def testDots(t, env):
    """REMOVE on . or .. should return NFS4ERR_NOENT or NFS4ERR_BADNAME

    FLAGS: remove dots all
    DEPEND: MKDIR
    CODE: RM7
    """
    c = env.c1
    basedir = c.homedir + [t.code]
    res = c.create_obj(basedir)
    check(res)
    ops = c.use_obj(basedir) + [c.remove_op('.')]
    res = c.compound(ops)
    check(res, NFS4ERR_BADNAME, "REMOVE nonexistent '.'", [NFS4ERR_NOENT])
    ops = c.use_obj(basedir) + [c.remove_op('..')]
    res = c.compound(ops)
    check(res, NFS4ERR_BADNAME, "REMOVE nonexistent '..'", [NFS4ERR_NOENT])

def testFlexLayoutReturnFile(t, env):
    """Return a file's layout

    FLAGS: flex
    DEPEND: FFGLO1
    CODE: FFLOR1
    """
    sess = env.c1.new_pnfs_client_session(env.testname(t))
    # Create the file
    res = create_file(sess, env.testname(t))
    check(res)
    # Get layout
    fh = res.resarray[-1].object
    open_stateid = res.resarray[-2].stateid
    ops = [op.putfh(fh),
           op.layoutget(False, LAYOUT4_FLEX_FILES, LAYOUTIOMODE4_READ,
                        0, 0xffffffffffffffff, 4196, open_stateid, 0xffff)]
    res = sess.compound(ops)
    check(res)
    # Return layout
    layout_stateid = res.resarray[-1].logr_stateid
    ops = [op.putfh(fh),
           op.layoutreturn(False, LAYOUT4_FLEX_FILES, LAYOUTIOMODE4_ANY,
                           layoutreturn4(LAYOUTRETURN4_FILE,
                                         layoutreturn_file4(0, 0xffffffffffffffff,
                                                            layout_stateid, "")))]
    res = sess.compound(ops)
    check(res)
    res = close_file(sess, fh, stateid=open_stateid)
    check(res)

def testDisconnect3(t, env):
    """DISCONNECT - Very similar to testDisconnect2, but with multiple
    confirmed clientids. Register several clientids and open a file with
    the middle one. Then disconnect and be able to open the file again,
    if disconnect causes lease expiration.

    FLAGS: multiconn disconnect timed all
    DEPEND: INIT DISCONN2 MULTICONN
    CODE: DISCONN3
    """
    cid1 = 'pynfs%i_%s_1' % (os.getpid(), t.code)
    cid2 = 'pynfs%i_%s_2' % (os.getpid(), t.code)
    cid3 = 'pynfs%i_%s_3' % (os.getpid(), t.code)
    cid4 = 'pynfs%i_%s_4' % (os.getpid(), t.code)
    cid5 = 'pynfs%i_%s_5' % (os.getpid(), t.code)
    cid6 = 'pynfs%i_%s_6' % (os.getpid(), t.code)
    c = env.c1
    c2 = env.get_and_init_secondconn(c)
    c.init_connection(id=cid1)
    c.init_connection(id=cid2)
    c.init_connection(id=cid3)
    # Open file once, make sure it can't be opened twice
    fh, stateid = c.create_confirm(t.code, deny=OPEN4_SHARE_DENY_BOTH)
    res = c2.open_file(t.code, access=OPEN4_SHARE_ACCESS_WRITE)
    check(res, NFS4ERR_SHARE_DENIED, "Second OPEN should be denied")
    # Register a few more clientids
    c.init_connection(id=cid4)
    c.init_connection(id=cid5)
    # Check that the open is still denied
    res = c2.open_file(t.code, access=OPEN4_SHARE_ACCESS_WRITE)
    check(res, NFS4ERR_SHARE_DENIED, "Second OPEN should be denied")
    # Disconnect from connection 1
    c.reconnect()
    sleeptime = c.getLeaseTime() * 3 / 2
    env.sleep(sleeptime)
    # Open file again - should work
    c2.init_connection(id=cid6)
    res = c2.open_file(t.code, access=OPEN4_SHARE_ACCESS_WRITE)
    check(res, msg="Reconnect did not expire leased data.")

def testUnConfReplaced(t, env):
    """The server has no confirmed { *, x, *, *, * } for x. It may or
    may not have recorded an unconfirmed { u, x, c, l, s }, where l may
    or may not equal k, and u may or may not equal v. Any unconfirmed
    record { u, x, c, l, * }, regardless of whether u == v or l == k,
    is replaced with an unconfirmed record { v, x, d, k, t } where
    d != c, t != s.

    FLAGS: setclientid all
    DEPEND: INIT
    CODE: CID4e
    """
    c1 = env.c1
    clid = "Clid_for_%s_pid=%i" % (t.code, os.getpid())

    # unconfirmed { w, x, d, m, t }
    ops = [c1.setclientid(clid, verifier="unconf")]
    res = c1.compound(ops)
    check(res)
    uclientid = res.resarray[0].switch.switch.clientid
    uconfirm = res.resarray[0].switch.switch.setclientid_confirm

    # request { v, x, c, k, s } --> unconfirmed { v, x, d, k, t }
    ops = [c1.setclientid(clid, verifier="diff")]
    res = c1.compound(ops)
    check(res)
    tclientid = res.resarray[0].switch.switch.clientid
    tconfirm = res.resarray[0].switch.switch.setclientid_confirm

    # removes the unconfirmed { w, x, d, m, t }
    ops = [c1.setclientid_confirm_op(uclientid, uconfirm)]
    res = c1.compound(ops)
    check(res, NFS4ERR_STALE_CLIENTID)

    # (d != c, t != s)
    if tconfirm == '\x00\x00\x00\x00\x00\x00\x00\x00':
        t.fail("Got a clientid confirm verifier of all zeros!")
    if uclientid == tclientid:
        t.fail("Returned the same clientid for a different verifier!")
    if tconfirm == uconfirm:
        t.fail("Returned the same confirm verifier for a different verifier!")

def testNotEmpty(t, env):
    """REMOVE called on nonempty directory should return NFS4ERR_NOTEMPTY

    FLAGS: remove all
    DEPEND: MKDIR
    CODE: RM8
    """
    c = env.c1
    # Create non-empty dir
    basedir = c.homedir + [t.code]
    res = c.create_obj(basedir)
    check(res)
    res = c.create_obj(basedir + [t.code])
    check(res)
    # Now try to remove it
    ops = c.use_obj(c.homedir) + [c.remove_op(t.code)]
    res = c.compound(ops)
    check(res, NFS4ERR_NOTEMPTY, "REMOVE called on nonempty directory")

#######################################

# Legacy "Extra test" carried over from the old class-based suite; it still
# uses self/ncl and is not runnable under the (t, env) framework.
def testValidNames(t, env):
    """REMOVE should succeed on all legal names

    Extra test

    Comments: This test tries REMOVE on all names returned from
    try_file_names()
    """
    # This test tests the lookup part of REMOVE
    self.init_connection()

    # Save files for REMOVE
    try:
        (accepted_names, rejected_names) = self.try_file_names(remove_files=0)
    except SkipException, e:
        self.skip(e)

    # Ok, lets try REMOVE on all accepted names
    lookup_dir_ops = self.ncl.lookup_path(self.tmp_dir)
    for filename in accepted_names:
        ops = [self.ncl.putrootfh_op()] + lookup_dir_ops
        ops.append(self.ncl.remove_op(filename))
        res = self.ncl.do_ops(ops)
        self.assert_OK(res)

def testEMCGetLayout(t, env):
    """Verify layout handling

    Debugging test that looks for pre-existing file (server2fs1/dump.eth)
    so we don't have to worry about creating a file.

    FLAGS:
    CODE: GETLAYOUT100
    """
    # Make sure E_ID returns MDS capabilities
    c1 = env.c1.new_client(env.testname(t), flags=EXCHGID4_FLAG_USE_PNFS_MDS)
    if not c1.flags & EXCHGID4_FLAG_USE_PNFS_MDS:
        fail("Server can not be used as pnfs metadata server")
    sess = c1.create_session()
    # Test that fs handles block layouts
    ops = use_obj(env.opts.path) + [op.getattr(1 << FATTR4_FS_LAYOUT_TYPE)]
    res = sess.compound(ops)
    check(res)
    if FATTR4_FS_LAYOUT_TYPE not in res.resarray[-1].obj_attributes:
        fail("fs_layout_type not available")
    if LAYOUT4_BLOCK_VOLUME not in \
       res.resarray[-1].obj_attributes[FATTR4_FS_LAYOUT_TYPE]:
        fail("layout_type does not contain BLOCK")
    # Open the pre-existing file
    file = ["server2fs1", "dump.eth"]
    res = open_file(sess, env.testname(t), file)
    check(res)
    # Get layout
    fh = res.resarray[-1].object
    stateid = res.resarray[-2].stateid
    stateid.seqid = 0
    ops = [op.putfh(fh),
           op.layoutget(False, LAYOUT4_BLOCK_VOLUME, LAYOUTIOMODE4_READ,
                        0, 0xffffffffffffffff, 0, stateid, 0xffff)]
    res = sess.compound(ops)
    check(res)
    # Parse opaque
    for layout in res.resarray[-1].logr_layout:
        if layout.loc_type == LAYOUT4_BLOCK_VOLUME:
            p = BlockUnpacker(layout.loc_body)
            opaque = p.unpack_pnfs_block_layout4()
            p.done()
            print opaque

def testReplayCache002(t, env):
    """Send two successful non-idempotent compounds with same seqid

    FLAGS: sequence all
    CODE: SEQ9b
    """
    sess1 = env.c1.new_client_session(env.testname(t))
    res = create_file(sess1, "%s_1" % env.testname(t))
    check(res)
    ops = env.home + [op.savefh(),
                      op.rename("%s_1" % env.testname(t),
                                "%s_2" % env.testname(t))]
    res1 = sess1.compound(ops, cache_this=True)
    check(res1)
    res2 = sess1.compound(ops, seq_delta=0)
    check(res2)
    res1.tag = res2.tag = ""
    if not nfs4lib.test_equal(res1, res2):
        fail("Replay results not equal")

def testOldLockStateid(t, env):
    """LOCKU with old lock stateid should return NFS4ERR_OLD_STATEID

    FLAGS: locku oldid all
    DEPEND: MKFILE
    CODE: LKU7
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    res1 = c.lock_file(t.code, fh, stateid)
    check(res1)
    res2 = c.unlock_file(1, fh, res1.lockid)
    check(res2)
    # Try to unlock again with old stateid
    res3 = c.unlock_file(2, fh, res1.lockid)
    check(res3, NFS4ERR_OLD_STATEID, "LOCKU with old lockstateid",
          [NFS4ERR_BAD_STATEID])

def testModeChange(t, env):
    """OPEN conflicting with mode bits

    FLAGS: open all
    DEPEND: MODE MKFILE
    CODE: OPEN17
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    res = c.close_file(t.code, fh, stateid)
    check(res)
    ops = c.use_obj(fh) + [c.setattr({FATTR4_MODE: 0})]
    res = c.compound(ops)
    check(res, msg="Setting mode of file %s to 000" % t.code)
    res = c.open_file(t.code, access=OPEN4_SHARE_ACCESS_BOTH,
                      deny=OPEN4_SHARE_DENY_NONE)
    check(res, NFS4ERR_ACCESS, "Opening file %s with mode=000" % t.code)

def testConfUnConfDiffVerifier2(t, env):
    """Whether w == v or w != v makes no difference.

    FLAGS: setclientid all
    DEPEND: INIT
    CODE: CID4d
    """
    c1 = env.c1
    clid = "Clid_for_%s_pid=%i" % (t.code, os.getpid())

    # confirmed { u, x, c, l, s }
    (cclientid, cconfirm) = c1.init_connection(clid, verifier=c1.verifier)

    # unconfirmed { w, x, d, m, t }
    ops = [c1.setclientid(clid, verifier="unconf")]
    res = c1.compound(ops)
    check(res)
    uclientid = res.resarray[0].switch.switch.clientid
    uconfirm = res.resarray[0].switch.switch.setclientid_confirm

    # request { v, x, c, k, s } --> unconfirmed { v, x, e, k, r }
    # (v != w)
    ops = [c1.setclientid(clid, verifier="testconf")]
    res = c1.compound(ops)
    check(res)
    tclientid = res.resarray[0].switch.switch.clientid
    tconfirm = res.resarray[0].switch.switch.setclientid_confirm

    # removes the unconfirmed { w, x, d, m, t }
    ops = [c1.setclientid_confirm_op(uclientid, uconfirm)]
    res = c1.compound(ops)
    check(res, NFS4ERR_STALE_CLIENTID)

    # (e != d, e != c, r != t, r != s)
    if tconfirm == '\x00\x00\x00\x00\x00\x00\x00\x00':
        t.fail("Got a clientid confirm verifier of all zeros!")
    if cclientid == tclientid or uclientid == tclientid:
        t.fail("Returned the same clientid for a different verifier!")
    if tconfirm == cconfirm or tconfirm == uconfirm:
        t.fail("Returned the same confirm verifier for a different verifier!")

def testFile(t, env):
    """RELEASE_LOCKOWNER - basic test

    FLAGS: releaselockowner all
    DEPEND:
    CODE: RLOWN1
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    res = c.lock_file(t.code, fh, stateid, lockowner="lockowner_RLOWN1")
    check(res)
    res = c.unlock_file(1, fh, res.lockid)
    check(res)
    # Release lockowner
    owner = lock_owner4(c.clientid, "lockowner_RLOWN1")
    res = c.compound([c.release_lockowner_op(owner)])
    check(res)

def testUnaccessibleDir(t, env):
    """READDIR with (cfh) in unaccessible directory

    FLAGS: readdir all mode000
    DEPEND: MKDIR MODE
    CODE: RDDR11
    """
    c = env.c1
    path = c.homedir + [t.code]
    c.maketree([t.code, ['hidden']])
    ops = c.use_obj(path) + [c.setattr({FATTR4_MODE: 0})]
    res = c.compound(ops)
    check(res, msg="Setting mode=0 on directory %s" % t.code)
    ops = c.use_obj(path) + [c.readdir()]
    res = c.compound(ops)
    if env.opts.uid == 0:
        check(res, [NFS4_OK, NFS4ERR_ACCESS],
              "READDIR of directory with mode=000")
    else:
        check(res, NFS4ERR_ACCESS, "READDIR of directory with mode=000")

def testStaleHandle(t, env):
    """PUTFH with a filehandle that no longer exists should return NFS4ERR_STALE

    FLAGS: putfh all
    DEPEND: MKFILE
    CODE: PUTFH3
    """
    c = env.c1
    c.init_connection()
    # Create a stale fh
    stale_fh, stateid = c.create_confirm(t.code)
    res = c.close_file(t.code, stale_fh, stateid)
    check(res)
    ops = c.use_obj(c.homedir) + [c.remove_op(t.code)]
    res = c.compound(ops)
    check(res)
    # Now try to use it
    res = c.compound([c.putfh_op(stale_fh)])
    check(res, NFS4ERR_STALE, "Using a stale fh")

def testUnaccessibleDir(t, env):
    """LOOKUP with (cfh) in unaccessible directory

    FLAGS: lookup all mode000
    DEPEND: MKDIR MODE
    CODE: LOOK9
    """
    c = env.c1
    path = c.homedir + [t.code]
    c.maketree([t.code, ['hidden']])
    ops = c.use_obj(path) + [c.setattr({FATTR4_MODE: 0})]
    res = c.compound(ops)
    check(res, msg="Setting mode=0 on directory %s" % t.code)
    res = c.compound(c.use_obj(path + ['hidden']))
    if env.opts.uid == 0:
        check(res, [NFS4_OK, NFS4ERR_ACCESS],
              "LOOKUP off of dir with mode=000")
    else:
        check(res, NFS4ERR_ACCESS, "LOOKUP off of dir with mode=000")

def testNoConfirm(t, env):
    """SETCLIENTID - create a stale clientid, and use it.

    FLAGS: setclientid all
    DEPEND: INIT
    CODE: CID6
    """
    c = env.c1
    id = "Clientid_for_%s_pid=%i" % (t.code, os.getpid())
    res = c.compound([c.setclientid(id)])
    check(res)
    res = c.compound([c.setclientid(id, '')])
    check(res)
    c.clientid = res.resarray[0].switch.switch.clientid
    ops = c.use_obj(c.homedir)
    ops += [c.open(t.code, t.code, OPEN4_CREATE)]
    res = c.compound(ops)
    check(res, NFS4ERR_STALE_CLIENTID,
          "OPEN using clientid that was never confirmed")

def testNfs3Create_UidRoot(t, env):
    """Create a file with uid_set=1 as root

    FLAGS: nfsv3 create all
    DEPEND:
    CODE: CREATE6
    """
    ### Setup Phase ###
    c = env.rootclient
    test_file = t.name + "_file_1"
    test_dir = t.name + "_dir_1"
    mnt_fh = homedir_fh(env.mc, env.c1)
    res = env.c1.mkdir(mnt_fh, test_dir, dir_mode_set=1, dir_mode_val=0777)
    check(res, msg="MKDIR - test dir %s" % test_dir)
    test_dir_fh = res.resok.obj.handle.data

    ### Execution Phase ###
    res = env.c1.create(test_dir_fh, test_file,
                        file_mode_set=1, file_mode_val=0777,
                        uid_set=1, uid_val=1234,
                        gid_set=1, gid_val=5678)
    test_file_fh = res.resok.obj.handle.data
    #print "###DEBUG - CREATE_FILEMODESET RESULTS:", res, "\n"

    ### Verification Phase ###
    check(res, msg="CREATE - file %s" % test_file)
    res = env.c1.lookup(test_dir_fh, test_file)
    check(res, msg="LOOKUP - file %s" % test_file)
    res = env.c1.getattr(test_file_fh)
    # Allow maproot=nobody
    checkvalid(res.attributes.uid == 1234 or
               res.attributes.uid == 65534,
               "CREATE - file %s (uid=%d expected %d)"
               % (test_file, res.attributes.uid, 1234))
    checkvalid(res.attributes.gid == 5678 or
               res.attributes.gid == 0,
               "CREATE - file %s (gid=%d expected %d)"
               % (test_file, res.attributes.gid, 5678))

def testOverlap(t, env):
    """LOCKT against your own lock should return NFS4_OK or NFS4ERR_LOCK_RANGE

    FLAGS: lockt all
    DEPEND: MKFILE
    CODE: LKTOVER
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    lockowner = "lockowner_LKTOVER"
    res = c.lock_file(t.code, fh, stateid, 100, 50, lockowner=lockowner)
    check(res, msg="Locking file %s" % t.code)
    res = c.lock_test(fh, 100, 50, tester=lockowner)
    check(res, msg="LOCKT against own exactly matching lock")
    res = c.lock_test(fh, 75, 50, tester=lockowner)
    check(res, [NFS4_OK, NFS4ERR_LOCK_RANGE],
          "LOCKT against own overlapping lock")
    if res.status == NFS4ERR_LOCK_RANGE:
        t.fail_support("Server does not support lock consolidation")

def testCloseNoStateid(t, env):
    """Test current stateid processing by sending CLOSE without a prior
    operation which provides a stateid

    FLAGS: currentstateid all
    CODE: CSID6
    """
    sess1 = env.c1.new_client_session(env.testname(t))
    res = create_file(sess1, env.testname(t))
    check(res)
    fh = res.resarray[-1].object
    stateid = res.resarray[-2].stateid

    res = sess1.compound([op.putfh(fh), op.close(0, current_stateid)])
    check(res, [NFS4ERR_STALE_STATEID, NFS4ERR_BAD_STATEID])

    # Test passed, now cleanup!
    res = sess1.compound([op.putfh(fh), op.close(0, stateid)])
    check(res)

def testStaleHandle(t, env):
    """PUTFH with a filehandle that no longer exists should return NFS4ERR_STALE

    FLAGS: putfh ganesha
    DEPEND: MKFILE
    CODE: PUTFH3
    """
    c = env.c1
    c.init_connection()
    # Create a stale fh
    stale_fh, stateid = c.create_confirm(t.code)
    res = c.close_file(t.code, stale_fh, stateid)
    check(res)
    ops = c.use_obj(c.homedir) + [c.remove_op(t.code)]
    res = c.compound(ops)
    check(res)
    # Now try to use it; but note a server may still allow use and
    # that's not necessarily a protocol violation; disabling this test
    # by default until we think of something better.
    res = c.compound([c.putfh_op(stale_fh)])
    check(res, NFS4ERR_STALE, "Using a stale fh")

def testReplay(t, env):
    """Send the same OPEN twice

    FLAGS: open seqid all
    DEPEND: MKFILE
    CODE: OPEN30
    """
    c = env.c1
    c.init_connection()
    file = c.homedir + [t.code]
    owner = t.code
    fh, stateid = c.create_confirm(owner, file, deny=OPEN4_SHARE_DENY_NONE)
    res = c.close_file(owner, fh, stateid)
    seqid = c.get_seqid(owner)
    res = c.open_file(owner, file, deny=OPEN4_SHARE_DENY_BOTH)
    check(res)
    c.seqid[owner] -= 1
    res = c.open_file(owner, file, deny=OPEN4_SHARE_DENY_BOTH)
    check(res, msg="replayed open should succeed again")
    res = c.open_file(owner, file, deny=OPEN4_SHARE_DENY_BOTH)
    check(res, NFS4ERR_SHARE_DENIED, msg="non-replayed open should fail")

def testExistingFile(t, env):
    """LOCK a regular file that was opened w/o create

    Note several servers return _ERR_OPENMODE, but this is not a
    legit option. (FRED - why not?)

    FLAGS: lock all
    DEPEND: MKFILE LOCK1
    CODE: LOCK3
    """
    c = env.c1
    c.init_connection()
    fh, stateid = c.create_confirm(t.code)
    res = c.close_file(t.code, fh, stateid)
    check(res)
    fh, stateid = c.open_confirm(t.code,
                                 access=OPEN4_SHARE_ACCESS_BOTH,
                                 deny=OPEN4_SHARE_DENY_NONE)
    res = c.lock_file(t.code, fh, stateid)
    check(res, msg="Locking file %s" % t.code)
    res = c.lock_test(fh)
    check(res, NFS4ERR_DENIED, "Testing file %s is locked" % t.code)

def testDots(t, env):
    """LINK with . or .. should succeed or return NFS4ERR_BADNAME

    FLAGS: link dots all
    DEPEND: LINKS LOOKFILE MKDIR
    CODE: LINK9
    """
    c = env.c1
    dir = c.homedir + [t.code]
    res = c.create_obj(dir)
    check(res)
    res1 = c.link(env.opts.usefile, dir + ['.'])
    check(res1, [NFS4_OK, NFS4ERR_BADNAME],
          "Trying to make a hardlink named '.'")
    res2 = c.link(env.opts.usefile, dir + ['..'])
    check(res2, [NFS4_OK, NFS4ERR_BADNAME],
          "Trying to make a hardlink named '..'")
    if res1.status == NFS4_OK or res2.status == NFS4_OK:
        t.pass_warn("Allowed creation of hardlink named '.' or '..'")

###################################################

# FRED - make test to invoke _MLINK
def testNamingPolicy(self):
    """LINK should obey OPEN file name creation policy

    Extra test
    """
    self.init_connection()
    try:
        (x, rejected_names_open) = self.try_file_names(
            creator=self.create_via_open)
        (x, rejected_names_link) = self.try_file_names(
            creator=self.create_via_link)
        self.failIf(rejected_names_open != rejected_names_link,
                    "LINK does not obey OPEN naming policy")
    except SkipException, e:
        self.skip(e)