def test_rule_engine_2309(self): corefile = lib.get_core_re_dir() + "/core.re" coredvm = lib.get_core_re_dir() + "/core.dvm" with lib.file_backed_up(coredvm): lib.prepend_string_to_file('oprType||rei->doinp->oprType\n', coredvm) with lib.file_backed_up(corefile): initial_size_of_server_log = lib.get_log_size('server') rules_to_prepend = ''' acSetNumThreads() { writeLine("serverLog","test_rule_engine_2309: put: acSetNumThreads oprType [$oprType]"); } ''' time.sleep(1) # remove once file hash fix is commited #2279 lib.prepend_string_to_file(rules_to_prepend, corefile) time.sleep(1) # remove once file hash fix is commited #2279 trigger_file = 'file_to_trigger_acSetNumThreads' lib.make_file(trigger_file, 4 * pow(10, 7)) self.admin.assert_icommand('iput {0}'.format(trigger_file)) assert 1 == lib.count_occurrences_of_string_in_log( 'server', 'writeLine: inString = test_rule_engine_2309: put: acSetNumThreads oprType [1]', start_index=initial_size_of_server_log) assert 0 == lib.count_occurrences_of_string_in_log( 'server', 'RE_UNABLE_TO_READ_SESSION_VAR', start_index=initial_size_of_server_log) os.unlink(trigger_file) with lib.file_backed_up(corefile): initial_size_of_server_log = lib.get_log_size('server') rules_to_prepend = ''' acSetNumThreads() { writeLine("serverLog","test_rule_engine_2309: get: acSetNumThreads oprType [$oprType]"); } ''' time.sleep(1) # remove once file hash fix is commited #2279 lib.prepend_string_to_file(rules_to_prepend, corefile) time.sleep(1) # remove once file hash fix is commited #2279 self.admin.assert_icommand('iget {0}'.format(trigger_file), use_unsafe_shell=True) assert 1 == lib.count_occurrences_of_string_in_log( 'server', 'writeLine: inString = test_rule_engine_2309: get: acSetNumThreads oprType [2]', start_index=initial_size_of_server_log) assert 0 == lib.count_occurrences_of_string_in_log( 'server', 'RE_UNABLE_TO_READ_SESSION_VAR', start_index=initial_size_of_server_log) os.unlink(trigger_file)
def test_acPostProcForPut_replicate_to_multiple_resources(self): # create new resources hostname = socket.gethostname() self.admin.assert_icommand( "iadmin mkresc r1 unixfilesystem " + hostname + ":/tmp/irods/r1", 'STDOUT_SINGLELINE', "Creating") self.admin.assert_icommand( "iadmin mkresc r2 unixfilesystem " + hostname + ":/tmp/irods/r2", 'STDOUT_SINGLELINE', "Creating") corefile = os.path.join(lib.get_core_re_dir(), 'core.re') with lib.file_backed_up(corefile): time.sleep(2) # remove once file hash fix is commited #2279 lib.prepend_string_to_file( '\nacPostProcForPut { replicateMultiple( \"r1,r2\" ); }\n', corefile) time.sleep(2) # remove once file hash fix is commited #2279 # add new rule to end of core.re newrule = """ # multiple replication rule replicateMultiple(*destRgStr) { *destRgList = split(*destRgStr, ','); writeLine("serverLog", " acPostProcForPut multiple replicate $objPath $filePath -> *destRgStr"); foreach (*destRg in *destRgList) { writeLine("serverLog", " acPostProcForPut replicate $objPath $filePath -> *destRg"); *e = errorcode(msiSysReplDataObj(*destRg,"null")); if (*e != 0) { if(*e == -808000) { writeLine("serverLog", "$objPath cannot be found"); $status = 0; succeed; } else { fail(*e); } } } } """ time.sleep(2) # remove once file hash fix is commited #2279 lib.prepend_string_to_file(newrule, corefile) time.sleep(2) # remove once file hash fix is commited #2279 # put data tfile = "rulebasetestfile" lib.touch(tfile) self.admin.assert_icommand(['iput', tfile]) # check replicas self.admin.assert_icommand(['ils', '-L', tfile], 'STDOUT_MULTILINE', [' demoResc ', ' r1 ', ' r2 ']) # clean up and remove new resources self.admin.assert_icommand("irm -rf " + tfile) self.admin.assert_icommand("iadmin rmresc r1") self.admin.assert_icommand("iadmin rmresc r2") time.sleep(2) # remove once file hash fix is commited #2279
def test_auth_pep(self): rules_to_prepend = """ pep_resource_resolve_hierarchy_pre(*A,*B,*OUT,*E,*F,*G,*H){ *OUT = "THIS IS AN OUT VARIABLE" } pep_resource_resolve_hierarchy_post(*A,*B,*OUT,*E,*F,*G,*H){ writeLine( 'serverLog', '*OUT') } """ corefile = lib.get_core_re_dir() + "/core.re" with lib.file_backed_up(corefile): time.sleep(1) # remove once file hash fix is commited #2279 lib.prepend_string_to_file(rules_to_prepend, corefile) time.sleep(1) # remove once file hash fix is commited #2279 initial_size_of_server_log = lib.get_log_size('server') filename = "test_re_serialization.txt" lib.make_file(filename, 1000) self.admin.assert_icommand("iput -f --metadata ATTR;VALUE;UNIT " + filename) out_count = lib.count_occurrences_of_string_in_log( 'server', 'THIS IS AN OUT VARIABLE', start_index=initial_size_of_server_log) output = commands.getstatusoutput('rm ' + filename) print("counts: " + str(out_count)) assert 1 == out_count
def test_api_plugin(self): rules_to_prepend = """ pep_rs_hello_world_pre(*INST,*OUT,*COMM,*HELLO_IN,*HELLO_OUT) { writeLine("serverLog", "pep_rs_hello_world_pre - *INST *OUT *HELLO_IN, *HELLO_OUT"); } pep_rs_hello_world_post(*INST,*OUT,*COMM,*HELLO_IN,*HELLO_OUT) { writeLine("serverLog", "pep_rs_hello_world_post - *INST *OUT *HELLO_IN, *HELLO_OUT"); } """ corefile = lib.get_core_re_dir() + "/core.re" with lib.file_backed_up(corefile): time.sleep(1) # remove once file hash fix is commited #2279 lib.prepend_string_to_file(rules_to_prepend, corefile) time.sleep(1) # remove once file hash fix is commited #2279 initial_size_of_server_log = lib.get_log_size('server') self.admin.assert_icommand("iapitest", 'STDOUT_SINGLELINE', 'this') pre_count = lib.count_occurrences_of_string_in_log( 'server', 'pep_rs_hello_world_pre - api_instance <unconvertible> that=hello, world.++++this=42, null_value', start_index=initial_size_of_server_log) hello_count = lib.count_occurrences_of_string_in_log( 'server', 'HELLO WORLD', start_index=initial_size_of_server_log) post_count = lib.count_occurrences_of_string_in_log( 'server', 'pep_rs_hello_world_post - api_instance <unconvertible> that=hello, world.++++this=42, that=hello, world.++++this=42++++value=128', start_index=initial_size_of_server_log) assert 1 == pre_count assert 1 == hello_count assert 1 == post_count
def test_iquota__3044(self): myfile = 'quotafile' corefile = lib.get_core_re_dir() + "/core.re" with lib.file_backed_up(corefile): rules_to_prepend = 'acRescQuotaPolicy {msiSetRescQuotaPolicy("on"); }\n' time.sleep(2) # remove once file hash fix is commited #2279 lib.prepend_string_to_file(rules_to_prepend, corefile) time.sleep(2) # remove once file hash fix is commited #2279 for quotatype in [['suq',self.admin.username], ['sgq','public']]: # user and group for quotaresc in [self.testresc, 'total']: # resc and total cmd = 'iadmin {0} {1} {2} 8000'.format(quotatype[0], quotatype[1], quotaresc) # set high quota self.admin.assert_icommand(cmd.split()) cmd = 'irepl -R {0} {1}'.format(self.testresc, self.testfile) self.admin.assert_icommand(cmd.split()) cmd = 'iadmin cu' # calculate, update db self.admin.assert_icommand(cmd.split()) cmd = 'iquota' self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'Nearing quota') # not over yet cmd = 'iadmin {0} {1} {2} 40'.format(quotatype[0], quotatype[1], quotaresc) # set low quota self.admin.assert_icommand(cmd.split()) cmd = 'iquota' self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'OVER QUOTA') # confirm it's over lib.make_file(myfile, 30, contents='arbitrary') cmd = 'iput -R {0} {1}'.format(self.testresc, myfile) # should fail self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'SYS_RESC_QUOTA_EXCEEDED') cmd = 'iadmin {0} {1} {2} 0'.format(quotatype[0], quotatype[1], quotaresc) # remove quota self.admin.assert_icommand(cmd.split()) cmd = 'iadmin cu' # update db self.admin.assert_icommand(cmd.split()) cmd = 'iput -R {0} {1}'.format(self.testresc, myfile) # should succeed again self.admin.assert_icommand(cmd.split()) cmd = 'irm -rf {0}'.format(myfile) # clean up self.admin.assert_icommand(cmd.split()) time.sleep(2) # remove once file hash fix is commited #2279
def test_dynamic_pep_with_rscomm_usage(self): # save original core.re corefile = os.path.join(lib.get_core_re_dir(), "core.re") origcorefile = os.path.join(lib.get_core_re_dir(), "core.re.orig") os.system("cp " + corefile + " " + origcorefile) # add dynamic PEP with rscomm usage time.sleep(1) # remove once file hash fix is commited #2279 os.system('''echo "pep_resource_open_pre(*OUT) { msiGetSystemTime( *junk, '' ); }" >> ''' + corefile) time.sleep(1) # remove once file hash fix is commited #2279 # check rei functioning self.admin.assert_icommand("iget " + self.testfile + " - ", 'STDOUT_SINGLELINE', self.testfile) # restore core.re time.sleep(1) # remove once file hash fix is commited #2279 os.system("cp " + origcorefile + " " + corefile) time.sleep(1) # remove once file hash fix is commited #2279
def test_dynamic_pep_with_rscomm_usage(self): # save original core.re corefile = os.path.join(lib.get_core_re_dir(), "core.re") origcorefile = os.path.join(lib.get_core_re_dir(), "core.re.orig") os.system("cp " + corefile + " " + origcorefile) # add dynamic PEP with rscomm usage time.sleep(1) # remove once file hash fix is commited #2279 os.system( '''echo "pep_resource_open_pre(*OUT) { msiGetSystemTime( *junk, '' ); }" >> ''' + corefile) time.sleep(1) # remove once file hash fix is commited #2279 # check rei functioning self.admin.assert_icommand("iget " + self.testfile + " - ", 'STDOUT_SINGLELINE', self.testfile) # restore core.re time.sleep(1) # remove once file hash fix is commited #2279 os.system("cp " + origcorefile + " " + corefile) time.sleep(1) # remove once file hash fix is commited #2279
def test_acPostProcForPut_replicate_to_multiple_resources(self): # create new resources hostname = socket.gethostname() self.admin.assert_icommand("iadmin mkresc r1 unixfilesystem " + hostname + ":/tmp/irods/r1", 'STDOUT_SINGLELINE', "Creating") self.admin.assert_icommand("iadmin mkresc r2 unixfilesystem " + hostname + ":/tmp/irods/r2", 'STDOUT_SINGLELINE', "Creating") corefile = os.path.join(lib.get_core_re_dir(), 'core.re') with lib.file_backed_up(corefile): time.sleep(2) # remove once file hash fix is commited #2279 lib.prepend_string_to_file('\nacPostProcForPut { replicateMultiple( \"r1,r2\" ); }\n', corefile) time.sleep(2) # remove once file hash fix is commited #2279 # add new rule to end of core.re newrule = """ # multiple replication rule replicateMultiple(*destRgStr) { *destRgList = split(*destRgStr, ','); writeLine("serverLog", " acPostProcForPut multiple replicate $objPath $filePath -> *destRgStr"); foreach (*destRg in *destRgList) { writeLine("serverLog", " acPostProcForPut replicate $objPath $filePath -> *destRg"); *e = errorcode(msiSysReplDataObj(*destRg,"null")); if (*e != 0) { if(*e == -808000) { writeLine("serverLog", "$objPath cannot be found"); $status = 0; succeed; } else { fail(*e); } } } } """ time.sleep(2) # remove once file hash fix is commited #2279 lib.prepend_string_to_file(newrule, corefile) time.sleep(2) # remove once file hash fix is commited #2279 # put data tfile = "rulebasetestfile" lib.touch(tfile) self.admin.assert_icommand(['iput', tfile]) # check replicas self.admin.assert_icommand(['ils', '-L', tfile], 'STDOUT_MULTILINE', [' demoResc ', ' r1 ', ' r2 ']) # clean up and remove new resources self.admin.assert_icommand("irm -rf " + tfile) self.admin.assert_icommand("iadmin rmresc r1") self.admin.assert_icommand("iadmin rmresc r2") time.sleep(2) # remove once file hash fix is commited #2279
def test_rulebase_update__2585(self): rule_file = 'my_rule.r' test_re = os.path.join(lib.get_core_re_dir(), 'test.re') my_rule = """ my_rule { delay("<PLUSET>1s</PLUSET>") { do_some_stuff(); } } INPUT null OUTPUT ruleExecOut """ with open(rule_file, 'w') as f: f.write(my_rule) server_config_filename = lib.get_irods_config_dir() + '/server_config.json' with lib.file_backed_up(server_config_filename): # write new rule file to config dir test_rule = 'do_some_stuff() { writeLine( "serverLog", "TEST_STRING_TO_FIND_1_2585" ); }' with open(test_re, 'w') as f: f.write(test_rule) # update server config with additional rule file server_config_update = { "re_rulebase_set": [{"filename": "test"}, {"filename": "core"}] } lib.update_json_file_from_dict(server_config_filename, server_config_update) time.sleep(35) # wait for delay rule engine to wake # checkpoint log to know where to look for the string initial_log_size = lib.get_log_size('re') self.admin.assert_icommand('irule -F ' + rule_file) time.sleep(35) # wait for test to fire assert lib.count_occurrences_of_string_in_log('re', 'TEST_STRING_TO_FIND_1_2585', start_index=initial_log_size) # repave rule with new string test_rule = 'do_some_stuff() { writeLine( "serverLog", "TEST_STRING_TO_FIND_2_2585" ); }' os.unlink(test_re) with open(test_re, 'w') as f: f.write(test_rule) time.sleep(35) # wait for delay rule engine to wake # checkpoint log to know where to look for the string initial_log_size = lib.get_log_size('re') self.admin.assert_icommand('irule -F ' + rule_file) time.sleep(35) # wait for test to fire assert lib.count_occurrences_of_string_in_log('re', 'TEST_STRING_TO_FIND_2_2585', start_index=initial_log_size) # cleanup os.unlink(test_re) os.unlink(rule_file)
def test_client_server_negotiation__2564(self): corefile = lib.get_core_re_dir() + "/core.re" with lib.file_backed_up(corefile): client_update = { 'irods_client_server_policy': 'CS_NEG_REFUSE' } session_env_backup = copy.deepcopy(self.admin.environment_file_contents) self.admin.environment_file_contents.update(client_update) time.sleep(2) # remove once file hash fix is commited #2279 lib.prepend_string_to_file('\nacPreConnect(*OUT) { *OUT="CS_NEG_REQUIRE"; }\n', corefile) time.sleep(2) # remove once file hash fix is commited #2279 self.admin.assert_icommand( 'ils','STDERR_SINGLELINE','CLIENT_NEGOTIATION_ERROR') self.admin.environment_file_contents = session_env_backup
def test_re_serialization(self): rules_to_prepend = """ pep_resource_resolve_hierarchy_pre(*A,*B,*OUT,*E,*F,*G,*H){ writeLine("serverLog", "pep_resource_resolve_hierarchy_pre - [*A] [*B] [*OUT] [*E] [*F] [*G] [*H]"); } """ corefile = lib.get_core_re_dir() + "/core.re" with lib.file_backed_up(corefile): time.sleep(1) # remove once file hash fix is commited #2279 lib.prepend_string_to_file(rules_to_prepend, corefile) time.sleep(1) # remove once file hash fix is commited #2279 initial_size_of_server_log = lib.get_log_size('server') filename = "test_re_serialization.txt" lib.make_file(filename, 1000) self.admin.assert_icommand("iput -f --metadata ATTR;VALUE;UNIT " + filename) auth_count = lib.count_occurrences_of_string_in_log( 'server', 'user_auth_info_auth_flag=5', start_index=initial_size_of_server_log) zone_count = lib.count_occurrences_of_string_in_log( 'server', 'user_rods_zone=tempZone', start_index=initial_size_of_server_log) user_count = lib.count_occurrences_of_string_in_log( 'server', 'user_user_name=otherrods', start_index=initial_size_of_server_log) mdata_count = lib.count_occurrences_of_string_in_log( 'server', 'ATTR;VALUE;UNIT', start_index=initial_size_of_server_log) output = commands.getstatusoutput('rm ' + filename) print("counts: " + str(auth_count) + " " + str(zone_count) + " " + str(user_count) + " " + str(mdata_count)) assert 1 == auth_count assert 1 == zone_count assert 1 == user_count assert 1 == mdata_count
def test_iput_bulk_check_acpostprocforput__2841(self): # prepare test directory number_of_files = 5 dirname = self.admin.local_session_dir + '/files' corefile = lib.get_core_re_dir() + "/core.re" # files less than 4200000 were failing to trigger the writeLine for filesize in range(5000, 6000000, 500000): files = lib.make_large_local_tmp_dir(dirname, number_of_files, filesize) # manipulate core.re and check the server log with lib.file_backed_up(corefile): initial_size_of_server_log = lib.get_log_size('server') rules_to_prepend = ''' acBulkPutPostProcPolicy { msiSetBulkPutPostProcPolicy("on"); } acPostProcForPut { writeLine("serverLog", "acPostProcForPut called for $objPath"); } ''' time.sleep(1) # remove once file hash fix is committed #2279 lib.prepend_string_to_file(rules_to_prepend, corefile) time.sleep(1) # remove once file hash fix is committed #2279 self.admin.assert_icommand(['iput', '-frb', dirname]) assert number_of_files == lib.count_occurrences_of_string_in_log( 'server', 'writeLine: inString = acPostProcForPut called for', start_index=initial_size_of_server_log) shutil.rmtree(dirname)
class Test_AllRules(resource_suite.ResourceBase, unittest.TestCase):
    """Run every shippable rules3.0 example rule file plus a few targeted tests.

    Test methods for the rule files are generated at class-creation time by
    the metaclass from generate_tests_allrules().
    """

    __metaclass__ = metaclass_unittest_test_case_generator.MetaclassUnittestTestCaseGenerator

    global rules30dir
    currentdir = os.path.dirname(os.path.realpath(__file__))
    rules30dir = currentdir + "/../../iRODS/clients/icommands/test/rules3.0/"
    conf_dir = lib.get_core_re_dir()

    def setUp(self):
        """Create the users, resources, collections and objects the rule files expect."""
        super(Test_AllRules, self).setUp()

        self.rods_session = lib.make_session_for_existing_admin()  # some rules hardcode 'rods' and 'tempZone'

        hostname = socket.gethostname()
        hostuser = getpass.getuser()
        progname = __file__
        dir_w = rules30dir + ".."
        self.rods_session.assert_icommand('icd')  # to get into the home directory (for testallrules assumption)
        self.rods_session.assert_icommand('iadmin mkuser devtestuser rodsuser')
        self.rods_session.assert_icommand('iadmin mkresc testallrulesResc unixfilesystem ' + hostname + ':/tmp/' +
                                          hostuser + '/pydevtest_testallrulesResc', 'STDOUT_SINGLELINE', 'unixfilesystem')
        self.rods_session.assert_icommand('imkdir sub1')
        self.rods_session.assert_icommand('imkdir sub3')
        self.rods_session.assert_icommand('imkdir forphymv')
        self.rods_session.assert_icommand('imkdir ruletest')
        self.rods_session.assert_icommand('imkdir test')
        self.rods_session.assert_icommand('imkdir test/phypathreg')
        self.rods_session.assert_icommand('imkdir ruletest/subforrmcoll')
        self.rods_session.assert_icommand('iput ' + progname + ' test/foo1')
        self.rods_session.assert_icommand('icp test/foo1 sub1/dcmetadatatarget')
        self.rods_session.assert_icommand('icp test/foo1 sub1/mdcopysource')
        self.rods_session.assert_icommand('icp test/foo1 sub1/mdcopydest')
        self.rods_session.assert_icommand('icp test/foo1 sub1/foo1')
        self.rods_session.assert_icommand('icp test/foo1 sub1/foo2')
        self.rods_session.assert_icommand('icp test/foo1 sub1/foo3')
        self.rods_session.assert_icommand('icp test/foo1 forphymv/phymvfile')
        self.rods_session.assert_icommand('icp test/foo1 sub1/objunlink1')
        self.rods_session.assert_icommand('irm sub1/objunlink1')  # put it in the trash
        self.rods_session.assert_icommand('icp test/foo1 sub1/objunlink2')
        self.rods_session.assert_icommand('irepl -R testallrulesResc sub1/objunlink2')
        self.rods_session.assert_icommand('icp test/foo1 sub1/freebuffer')
        self.rods_session.assert_icommand('icp test/foo1 sub1/automove')
        self.rods_session.assert_icommand('icp test/foo1 test/versiontest.txt')
        self.rods_session.assert_icommand('icp test/foo1 test/metadata-target.txt')
        self.rods_session.assert_icommand('icp test/foo1 test/ERAtestfile.txt')
        self.rods_session.assert_icommand('ichmod read devtestuser test/ERAtestfile.txt')
        self.rods_session.assert_icommand('imeta add -d test/ERAtestfile.txt Fun 99 Balloons')
        self.rods_session.assert_icommand('icp test/foo1 sub1/for_versioning.txt')
        self.rods_session.assert_icommand('imkdir sub1/SaveVersions')
        self.rods_session.assert_icommand('iput ' + dir_w + '/misc/devtestuser-account-ACL.txt test')
        self.rods_session.assert_icommand('iput ' + dir_w + '/misc/load-metadata.txt test')
        self.rods_session.assert_icommand('iput ' + dir_w + '/misc/load-usermods.txt test')
        self.rods_session.assert_icommand('iput ' + dir_w + '/misc/sample.email test')
        self.rods_session.assert_icommand('iput ' + dir_w + '/misc/email.tag test')
        self.rods_session.assert_icommand('iput ' + dir_w + '/misc/sample.email test/sample2.email')
        self.rods_session.assert_icommand('iput ' + dir_w + '/misc/email.tag test/email2.tag')

        # setup for rulemsiAdmChangeCoreRE and the likes
        empty_core_file_name = 'empty.test.re'
        new_core_file_name = 'new.test.re'
        with open(self.conf_dir + '/' + empty_core_file_name, 'w'):  # create empty file
            pass
        shutil.copy(self.conf_dir + "/core.re", self.conf_dir + "/core.re.bckp")  # back up core.re
        shutil.copy(self.conf_dir + "/core.re", self.conf_dir + "/" + new_core_file_name)  # copy core.re

    def tearDown(self):
        """Undo everything setUp created, including the core.re backup."""
        self.rods_session.assert_icommand('icd')  # for home directory assumption
        self.rods_session.assert_icommand(['ichmod', '-r', 'own', self.rods_session.username, '.'])
        self.rods_session.run_icommand(['imcoll', '-U', self.rods_session.home_collection + '/test/phypathreg'])
        self.rods_session.run_icommand('irm -rf test ruletest forphymv sub1 sub2 sub3 bagit rules bagit.tar /' +
                                       self.rods_session.zone_name + '/bundle/home/' + self.rods_session.username)
        self.rods_session.assert_icommand('iadmin rmresc testallrulesResc')
        self.rods_session.assert_icommand('iadmin rmuser devtestuser')
        self.rods_session.assert_icommand('iqdel -a')  # remove all/any queued rules

        # cleanup mods in iRODS config dir
        # (the original passed self.conf_dir twice; format() only uses {0})
        lib.run_command('mv -f {0}/core.re.bckp {0}/core.re'.format(self.conf_dir))
        lib.run_command('rm -f %s/*.test.re' % self.conf_dir)

        self.rods_session.__exit__()
        super(Test_AllRules, self).tearDown()

    def generate_tests_allrules():
        """Yield (test_name, test_function) pairs, one per runnable rules3.0 file.

        Consumed by MetaclassUnittestTestCaseGenerator at class creation;
        note this is deliberately not an instance method.
        """

        def filter_rulefiles(rulefile):
            # skip rules that handle .irb files
            names_to_skip = [
                "rulemsiAdmAppendToTopOfCoreIRB",
                "rulemsiAdmChangeCoreIRB",
                "rulemsiGetRulesFromDBIntoStruct",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- RE"
                    return False

            # skip rules that fail by design
            # (a second identical loop over this list was removed — it was
            # dead duplication and could never change the result)
            names_to_skip = ["GoodFailure"]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- failbydesign"
                    return False

            # skip if an action (run in the core.re), not enough input/output for irule
            names_to_skip = [
                "rulemsiAclPolicy",
                "rulemsiAddUserToGroup",
                "rulemsiCheckHostAccessControl",
                "rulemsiCheckOwner",
                "rulemsiCheckPermission",
                "rulemsiCommit",
                "rulemsiCreateCollByAdmin",
                "rulemsiCreateUser",
                "rulemsiDeleteCollByAdmin",
                "rulemsiDeleteDisallowed",
                "rulemsiDeleteUser",
                "rulemsiExtractNaraMetadata",
                "rulemsiOprDisallowed",
                "rulemsiRegisterData",
                "rulemsiRenameCollection",
                "rulemsiRenameLocalZone",
                "rulemsiRollback",
                "rulemsiSetBulkPutPostProcPolicy",
                "rulemsiSetDataObjAvoidResc",
                "rulemsiSetDataObjPreferredResc",
                "rulemsiSetDataTypeFromExt",
                "rulemsiSetDefaultResc",
                "rulemsiSetGraftPathScheme",
                "rulemsiSetMultiReplPerResc",
                "rulemsiSetNoDirectRescInp",
                "rulemsiSetNumThreads",
                "rulemsiSetPublicUserOpr",
                "rulemsiSetRandomScheme",
                "rulemsiSetRescQuotaPolicy",
                "rulemsiSetRescSortScheme",
                "rulemsiSetReServerNumProc",
                "rulemsiSetResource",
                "rulemsiSortDataObj",
                "rulemsiStageDataObj",
                "rulemsiSysChksumDataObj",
                "rulemsiSysMetaModify",
                "rulemsiSysReplDataObj",
                "rulemsiNoChkFilePathPerm",
                "rulemsiNoTrashCan",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- input/output"
                    return False

            # skip rules we are not yet supporting
            names_to_skip = [
                "rulemsiobj",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- msiobj"
                    return False

            # ERA
            names_to_skip = [
                "rulemsiFlagInfectedObjs",
                "rulemsiGetAuditTrailInfoByActionID",
                "rulemsiGetAuditTrailInfoByKeywords",
                "rulemsiGetAuditTrailInfoByObjectID",
                "rulemsiGetAuditTrailInfoByTimeStamp",
                "rulemsiGetAuditTrailInfoByUserID",
                "rulemsiMergeDataCopies",
                "rulemsiGetCollectionPSmeta-null"  # marked for removal - iquest now handles this natively
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- ERA"
                    return False

            # XMSG
            names_to_skip = [
                "rulemsiCreateXmsgInp",
                "rulemsiRcvXmsg",
                "rulemsiSendXmsg",
                "rulemsiXmsgCreateStream",
                "rulemsiXmsgServerConnect",
                "rulemsiXmsgServerDisConnect",
                "rulereadXMsg",
                "rulewriteXMsg",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- XMSG"
                    return False

            # FTP
            names_to_skip = [
                "rulemsiFtpGet",
                "rulemsiTwitterPost",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- FTP"
                    return False

            # webservices
            names_to_skip = [
                "rulemsiConvertCurrency",
                "rulemsiGetQuote",
                "rulemsiIp2location",
                "rulemsiObjByName",
                "rulemsiSdssImgCutout_GetJpeg",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- webservices"
                    return False

            # XML
            names_to_skip = [
                "rulemsiLoadMetadataFromXml",
                "rulemsiXmlDocSchemaValidate",
                "rulemsiXsltApply",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- XML"
                    return False

            # transition to core microservices only
            names_to_skip = [
                "rulemsiAddKeyVal.r",
                "rulemsiApplyDCMetadataTemplate.r",
                "rulemsiAssociateKeyValuePairsToObj.r",
                "rulemsiCollectionSpider.r",
                "rulemsiCopyAVUMetadata.r",
                "rulemsiExportRecursiveCollMeta.r",
                "rulemsiFlagDataObjwithAVU.r",
                "rulemsiGetCollectionACL.r",
                "rulemsiGetCollectionContentsReport.r",
                "rulemsiGetCollectionPSmeta.r",
                "rulemsiGetCollectionSize.r",
                "rulemsiGetDataObjACL.r",
                "rulemsiGetDataObjAIP.r",
                "rulemsiGetDataObjAVUs.r",
                "rulemsiGetDataObjPSmeta.r",
                "rulemsiGetObjectPath.r",
                "rulemsiGetUserACL.r",
                "rulemsiGetUserInfo.r",
                "rulemsiGuessDataType.r",
                "rulemsiIsColl.r",
                "rulemsiIsData.r",
                "rulemsiLoadACLFromDataObj.r",
                "rulemsiLoadMetadataFromDataObj.r",
                "rulemsiLoadUserModsFromDataObj.r",
                "rulemsiPropertiesAdd.r",
                "rulemsiPropertiesClear.r",
                "rulemsiPropertiesClone.r",
                "rulemsiPropertiesExists.r",
                "rulemsiPropertiesFromString.r",
                "rulemsiPropertiesGet.r",
                "rulemsiPropertiesNew.r",
                "rulemsiPropertiesRemove.r",
                "rulemsiPropertiesSet.r",
                "rulemsiRecursiveCollCopy.r",
                "rulemsiRemoveKeyValuePairsFromObj.r",
                "rulemsiSetDataType.r",
                "rulemsiString2KeyValPair.r",
                "rulemsiStripAVUs.r",
                "rulemsiStructFileBundle.r",
                "rulewriteKeyValPairs.r",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- transition to core"
                    return False

            # skipping rules requiring additional .re files in community code
            names_to_skip = [
                "rulemsiAdmAddAppRuleStruct.r",
                "rulemsiAdmClearAppRuleStruct.r",
                "rulemsiAdmInsertRulesFromStructIntoDB.r",
                "rulemsiAdmReadRulesFromFileIntoStruct.r",
                "rulemsiAdmRetrieveRulesFromDBIntoStruct.r",
                "rulemsiAdmWriteRulesFromStructIntoFile.r",
            ]
            for n in names_to_skip:
                if n in rulefile:
                    # print "skipping " + rulefile + " ----- community"
                    return False

            # skipping for now, not sure why it's throwing a stacktrace at the moment
            if "rulemsiPropertiesToString" in rulefile:
                # print "skipping " + rulefile + " ----- b/c of stacktrace"
                return False

            # misc / other
            if "ruleintegrity" in rulefile:
                # print "skipping " + rulefile + " ----- integrityChecks"
                return False
            if "z3950" in rulefile:
                # print "skipping " + rulefile + " ----- z3950"
                return False
            if "rulemsiImage" in rulefile:
                # print "skipping " + rulefile + " ----- image"
                return False
            if "rulemsiRda" in rulefile:
                # print "skipping " + rulefile + " ----- RDA"
                return False
            if "rulemsiCollRepl" in rulefile:
                # print "skipping " + rulefile + " ----- deprecated"
                return False
            if "rulemsiTarFileExtract" in rulefile:
                # print "skipping " + rulefile + " ----- CAT_NO_ROWS_FOUND - failed in
                # call to getDataObjInfoIncSpecColl"
                return False
            if "rulemsiDataObjRsync" in rulefile:
                # print "skipping " + rulefile + " ----- tested separately"
                return False

            return True

        for rulefile in filter(filter_rulefiles, sorted(os.listdir(rules30dir))):
            def make_test(rulefile):
                # bind rulefile as an argument so each generated test
                # captures its own file (avoids late-binding closure bug)
                def test(self):
                    self.rods_session.assert_icommand("icd")
                    self.rods_session.assert_icommand("irule -vF " + rules30dir + rulefile,
                                                      'STDOUT_SINGLELINE', "completed successfully")
                return test
            yield 'test_' + rulefile.replace('.', '_'), make_test(rulefile)

    def test_rulemsiDataObjRsync(self):
        """Run the msiDataObjRsync example repeatedly against a changing source file."""
        rulefile = 'rulemsiDataObjRsync.r'

        src_filename = 'source.txt'
        dest_filename = 'dest.txt'
        test_dir = '/tmp'
        test_coll = self.rods_session.home_collection + '/synctest'
        src_file = os.path.join(test_dir, src_filename)
        # (unused src_obj/dest_obj locals removed; dest_filename kept to
        # document the object names the rule file operates on)

        # create test collection
        self.rods_session.run_icommand(['imkdir', test_coll])

        # create source test file
        with open(src_file, 'a') as f:
            f.write('blah\n')

        # upload source test file
        self.rods_session.run_icommand(['iput', src_file, test_coll])

        # first rsync rule test
        self.rods_session.assert_icommand("irule -F " + rules30dir + rulefile,
                                          'STDOUT_SINGLELINE', "status = 99999992")

        # modify the source and try again
        for i in range(1, 5):
            with open(src_file, 'a') as f:
                f.write('blah_' + str(i) + '\n')

            # force upload source
            self.rods_session.run_icommand(['iput', '-f', src_file, test_coll])

            # sync test
            self.rods_session.assert_icommand("irule -F " + rules30dir + rulefile,
                                              'STDOUT_SINGLELINE', "status = 99999992")

        # cleanup
        self.rods_session.run_icommand(['irm', '-rf', test_coll])
        os.remove(src_file)

    def test_rulemsiPhyBundleColl(self):
        """Bundle a collection as admin, verify the tar exists, and deny non-admins."""
        rulefile = 'rulemsiPhyBundleColl.r'

        # rule test
        self.rods_session.assert_icommand("irule -F " + rules30dir + rulefile, 'STDOUT_SINGLELINE',
                                          "Create tar file of collection /tempZone/home/rods/test on resource testallrulesResc")

        # look for the bundle
        bundle_path = '/tempZone/bundle/home/' + self.rods_session.username
        output = self.rods_session.run_icommand(['ils', '-L', bundle_path])

        # last token in stdout should be the bundle file's full physical path
        bundlefile = output[1].split()[-1]

        # check on the bundle file's name
        assert bundlefile.find('test.') >= 0

        # check physical path on resource
        assert os.path.isfile(bundlefile)

        # now try as a normal user (expect err msg)
        self.user0.assert_icommand("irule -F " + rules30dir + rulefile,
                                   'STDERR_SINGLELINE', "SYS_NO_API_PRIV")

        # cleanup
        self.rods_session.run_icommand(['irm', '-rf', bundle_path])

    def test_str_2528(self):
        """str() on a key-value pair structure must render as a=A++++b=B (#2528)."""
        self.rods_session.assert_icommand(
            '''irule "*a.a = 'A'; *a.b = 'B'; writeLine('stdout', str(*a))" null ruleExecOut''',
            'STDOUT_SINGLELINE', "a=A++++b=B")

    def test_return_data_structure_non_null_2604(self):
        """msiGetStderrInExecCmdOut must work even when msiExecCmd fails (#2604)."""
        self.rods_session.assert_icommand(
            '''irule "*Err = errorcode(msiExecCmd('cmd', '', '', '', '', *Out)); msiGetStderrInExecCmdOut(*Out, *Stderr); writeLine('stdout', 'stderr: *Err*Stderr')" null ruleExecOut''',
            'STDOUT_SINGLELINE', "stderr")
def test_rulebase_update__2585(self): rule_file = 'my_rule.r' test_re = os.path.join(lib.get_core_re_dir(), 'test.re') my_rule = """ my_rule { delay("<PLUSET>1s</PLUSET>") { do_some_stuff(); } } INPUT null OUTPUT ruleExecOut """ with open(rule_file, 'w') as f: f.write(my_rule) server_config_filename = lib.get_irods_config_dir( ) + '/server_config.json' with lib.file_backed_up(server_config_filename): # write new rule file to config dir test_rule = 'do_some_stuff() { writeLine( "serverLog", "TEST_STRING_TO_FIND_1_2585" ); }' with open(test_re, 'w') as f: f.write(test_rule) # update server config with additional rule file server_config_update = { "re_rulebase_set": [{ "filename": "test" }, { "filename": "core" }] } lib.update_json_file_from_dict(server_config_filename, server_config_update) time.sleep(35) # wait for delay rule engine to wake # checkpoint log to know where to look for the string initial_log_size = lib.get_log_size('re') self.admin.assert_icommand('irule -F ' + rule_file) time.sleep(35) # wait for test to fire assert lib.count_occurrences_of_string_in_log( 're', 'TEST_STRING_TO_FIND_1_2585', start_index=initial_log_size) # repave rule with new string test_rule = 'do_some_stuff() { writeLine( "serverLog", "TEST_STRING_TO_FIND_2_2585" ); }' os.unlink(test_re) with open(test_re, 'w') as f: f.write(test_rule) time.sleep(35) # wait for delay rule engine to wake # checkpoint log to know where to look for the string initial_log_size = lib.get_log_size('re') self.admin.assert_icommand('irule -F ' + rule_file) time.sleep(35) # wait for test to fire assert lib.count_occurrences_of_string_in_log( 're', 'TEST_STRING_TO_FIND_2_2585', start_index=initial_log_size) # cleanup os.unlink(test_re) os.unlink(rule_file)
def test_acPostProcForOpen__3024(self): test_re = os.path.join(lib.get_core_re_dir(), 'test.re') server_config_filename = lib.get_irods_config_dir() + '/server_config.json' # get PEP name from function name pep_name = inspect.stack()[0][3].split('_')[1] # user session sesh = self.user0 testfile = self.testfile target_obj = os.path.join(sesh.session_collection, testfile) # prepare rule file rule_file = "test_rule_file.r" rule_string = ''' test_acPostProcForCreate__3024 {{ msiDataObjOpen("{target_obj}",*FD); msiDataObjClose(*FD,*Status); }} INPUT null OUTPUT ruleExecOut '''.format(**locals()) with open(rule_file, 'w') as f: f.write(rule_string) # query for resource properties columns = ('RESC_ZONE_NAME, ' 'RESC_FREE_SPACE, ' 'RESC_STATUS, ' 'RESC_ID, ' 'RESC_NAME, ' 'RESC_TYPE_NAME, ' 'RESC_LOC, ' 'RESC_CLASS_NAME, ' 'RESC_VAULT_PATH, ' 'RESC_INFO, ' 'RESC_COMMENT, ' 'RESC_CREATE_TIME, ' 'RESC_MODIFY_TIME') resource = sesh.default_resource query = '''iquest "SELECT {columns} WHERE RESC_NAME ='{resource}'"'''.format(**locals()) result = sesh.run_icommand(query)[1] # last line is iquest default formatting separator resource_property_list = result.splitlines()[:-1] with lib.file_backed_up(server_config_filename): # prepare rule # rule will write PEP name as well as # resource related rule session vars to server log rule_body = 'writeLine("serverLog", "{pep_name}");'.format(**locals()) rule_body += ('writeLine("serverLog", $KVPairs.zoneName);' 'writeLine("serverLog", $KVPairs.freeSpace);' 'writeLine("serverLog", $KVPairs.quotaLimit);' 'writeLine("serverLog", $KVPairs.rescStatus);' 'writeLine("serverLog", $KVPairs.rescId);' 'writeLine("serverLog", $KVPairs.rescName);' 'writeLine("serverLog", $KVPairs.rescType);' 'writeLine("serverLog", $KVPairs.rescLoc);' 'writeLine("serverLog", $KVPairs.rescClass);' 'writeLine("serverLog", $KVPairs.rescVaultPath);' 'writeLine("serverLog", $KVPairs.rescInfo);' 'writeLine("serverLog", $KVPairs.rescComments);' 'writeLine("serverLog", 
$KVPairs.rescCreate);' 'writeLine("serverLog", $KVPairs.rescModify);') test_rule = '{pep_name} {{ {rule_body} }}'.format(**locals()) # write new rule file with open(test_re, 'w') as f: f.write(test_rule) # update server config with additional rule file server_config_update = { "re_rulebase_set": [{"filename": "test"}, {"filename": "core"}] } lib.update_json_file_from_dict(server_config_filename, server_config_update) # iput test file sesh.assert_icommand('iput -f {testfile}'.format(**locals())) # checkpoint log to know where to look for the string initial_log_size = lib.get_log_size('server') # invoke irule to trigger PEP sesh.assert_icommand('irule -F {rule_file}'.format(**locals())) # confirm that PEP was hit by looking for pep name in server log assert lib.count_occurrences_of_string_in_log('server', pep_name, start_index=initial_log_size) # check that resource session vars were written to the server log for line in resource_property_list: column = line.rsplit('=', 1)[0].strip() property = line.rsplit('=', 1)[1].strip() if property: if column != 'RESC_MODIFY_TIME': assert lib.count_occurrences_of_string_in_log('server', property, start_index=initial_log_size) else: # for resource modify time skip last 2 second digits assert lib.count_occurrences_of_string_in_log('server', property[:-2], start_index=initial_log_size) # cleanup sesh.run_icommand('irm -f {target_obj}'.format(**locals())) os.unlink(rule_file) os.unlink(test_re)
def test_acPostProcForFilePathReg__3024(self): test_re = os.path.join(lib.get_core_re_dir(), 'test.re') server_config_filename = lib.get_irods_config_dir() + '/server_config.json' # get PEP name from function name pep_name = inspect.stack()[0][3].split('_')[1] # user session # use admin to be allowed to register stuff sesh = self.admin # test file for ireg username = sesh.username resc_vault_path = lib.get_vault_path(sesh) testfile = '{resc_vault_path}/home/{username}/foo.txt'.format(**locals()) open(testfile, 'a').close() # query for resource properties columns = ('RESC_ZONE_NAME, ' 'RESC_FREE_SPACE, ' 'RESC_STATUS, ' 'RESC_ID, ' 'RESC_NAME, ' 'RESC_TYPE_NAME, ' 'RESC_LOC, ' 'RESC_CLASS_NAME, ' 'RESC_VAULT_PATH, ' 'RESC_INFO, ' 'RESC_COMMENT, ' 'RESC_CREATE_TIME, ' 'RESC_MODIFY_TIME') resource = sesh.default_resource query = '''iquest "SELECT {columns} WHERE RESC_NAME ='{resource}'"'''.format(**locals()) result = sesh.run_icommand(query)[1] # last line is iquest default formatting separator resource_property_list = result.splitlines()[:-1] with lib.file_backed_up(server_config_filename): # prepare rule # rule will write PEP name as well as # resource related rule session vars to server log rule_body = 'writeLine("serverLog", "{pep_name}");'.format(**locals()) rule_body += ('writeLine("serverLog", $KVPairs.zoneName);' 'writeLine("serverLog", $KVPairs.freeSpace);' 'writeLine("serverLog", $KVPairs.quotaLimit);' 'writeLine("serverLog", $KVPairs.rescStatus);' 'writeLine("serverLog", $KVPairs.rescId);' 'writeLine("serverLog", $KVPairs.rescName);' 'writeLine("serverLog", $KVPairs.rescType);' 'writeLine("serverLog", $KVPairs.rescLoc);' 'writeLine("serverLog", $KVPairs.rescClass);' 'writeLine("serverLog", $KVPairs.rescVaultPath);' 'writeLine("serverLog", $KVPairs.rescInfo);' 'writeLine("serverLog", $KVPairs.rescComments);' 'writeLine("serverLog", $KVPairs.rescCreate);' 'writeLine("serverLog", $KVPairs.rescModify);') test_rule = '{pep_name} {{ {rule_body} 
}}'.format(**locals()) # write new rule file with open(test_re, 'w') as f: f.write(test_rule) # update server config with additional rule file server_config_update = { "re_rulebase_set": [{"filename": "test"}, {"filename": "core"}] } lib.update_json_file_from_dict(server_config_filename, server_config_update) # checkpoint log to know where to look for the string initial_log_size = lib.get_log_size('server') # ireg test file to trigger PEP target_obj = os.path.join(sesh.home_collection, os.path.basename(testfile)) sesh.assert_icommand('ireg {testfile} {target_obj}'.format(**locals())) # confirm that PEP was hit by looking for pep name in server log assert lib.count_occurrences_of_string_in_log('server', pep_name, start_index=initial_log_size) # check that resource session vars were written to the server log for line in resource_property_list: column = line.rsplit('=', 1)[0].strip() property = line.rsplit('=', 1)[1].strip() if property: if column != 'RESC_MODIFY_TIME': assert lib.count_occurrences_of_string_in_log('server', property, start_index=initial_log_size) # cleanup sesh.run_icommand('irm -f {target_obj}'.format(**locals())) os.unlink(test_re)