def test7_bypassParallelProcessing(self):
    """Test 7: Bypass Parallel Processing mode """
    # Raise the default mem-per-engine setting (restored at the end of the test)
    simple_cluster.setDefaults(default_mem_per_engine=33554432)
    # Prepare MMS
    self.setUpFile("Four_ants_3C286.mms",'vis')
    # Create a list file holding an unflag + clip command sequence
    list_file = 'list_flagdata.txt'
    self.create_input("mode='unflag'\n"
                      "mode='clip' clipminmax=[0,0.1]", list_file)
    # step 1: Do unflag+clip
    flagdata(vis=self.vis, mode='list', inpfile=list_file)
    # step 2: Now do summary
    summary = flagdata(vis=self.vis, mode='summary')
    # Check summary: per-spw reference flag counts, in the original check order
    self.assertTrue(summary['name']=='Summary')
    reference = [('15', 96284.0), ('0', 129711.0), ('1', 128551.0),
                 ('2', 125686.0), ('3', 122862.0), ('4', 109317.0),
                 ('5', 24481.0), ('6', 0), ('7', 0), ('8', 0),
                 ('9', 27422.0), ('10', 124638.0), ('11', 137813.0),
                 ('12', 131896.0), ('13', 125074.0), ('14', 118039.0)]
    for spw_id, flagged in reference:
        self.assertTrue(summary['spw'][spw_id]['flagged'] == flagged)
    # Remove MMS
    os.system('rm -rf ' + self.vis)
    # Restore default values
    simple_cluster.setDefaults(default_mem_per_engine=512)
    if not self.bypassParallelProcessing:
        ParallelTaskHelper.bypassParallelProcessing(0)
def test_mpi4casa_flagdata_list_return_multithreading(self):
    """Test flagdata summary in multithreading mode"""
    # Reference: run flagdata sequentially first, restoring the previous bypass mode afterwards
    previous_bypass = ParallelTaskHelper.getBypassParallelProcessing()
    ParallelTaskHelper.bypassParallelProcessing(2)
    reference = flagdata(vis=self.vis, mode='summary')
    ParallelTaskHelper.bypassParallelProcessing(previous_bypass)
    # Make a copy of the input MMS for each flagdata instance
    os.system("cp -r %s %s" % (self.vis,self.vis2))
    os.system("cp -r %s %s" % (self.vis,self.vis3))
    ParallelTaskHelper.setMultithreadingMode(True)
    # Set up one worker per MMS copy
    workers = [ParallelTaskWorker("flagdata(vis='%s', mode='summary')" % (vis_i))
               for vis_i in (self.vis, self.vis2, self.vis3)]
    # Spawn worker threads
    for worker in workers:
        worker.start()
    # Get resulting summary dict from each worker (blocks until each finishes)
    results = [worker.getResult() for worker in workers]
    ParallelTaskHelper.setMultithreadingMode(False)
    # Compare each threaded summary dict with the sequential reference
    for ordinal, result in zip(('first', 'second', 'third'), results):
        self.assertEqual(result, reference,
                         "flagdata dictionary does not match for the %s flagdata run" % (ordinal))
def test_mpi4casa_flagdata_list_return_async(self):
    """Test flagdata summary in async mode"""
    # Reference: run flagdata sequentially first, restoring the previous bypass mode afterwards
    previous_bypass = ParallelTaskHelper.getBypassParallelProcessing()
    ParallelTaskHelper.bypassParallelProcessing(2)
    reference = flagdata(vis=self.vis, mode='summary')
    ParallelTaskHelper.bypassParallelProcessing(previous_bypass)
    # Make a copy of the input MMS for each flagdata instance
    os.system("cp -r %s %s" % (self.vis,self.vis2))
    os.system("cp -r %s %s" % (self.vis,self.vis3))
    # Set async mode in ParallelTaskHelper
    ParallelTaskHelper.setAsyncMode(True)
    # Submit flagdata in MMS mode for each copy; in async mode the task returns request ids
    request_id_1 = flagdata(vis=self.vis, mode='summary')
    request_id_2 = flagdata(vis=self.vis2, mode='summary')
    request_id_3 = flagdata(vis=self.vis3, mode='summary')
    # Get response in block mode for the combined request-id list
    all_request_ids = request_id_1 + request_id_2 + request_id_3
    command_response_list = self.client.get_command_response(all_request_ids,True,True)
    # Get result of each request
    res1 = ParallelTaskHelper.getResult(request_id_1,'flagdata')
    res2 = ParallelTaskHelper.getResult(request_id_2,'flagdata')
    res3 = ParallelTaskHelper.getResult(request_id_3,'flagdata')
    # Unset async mode in ParallelTaskHelper
    ParallelTaskHelper.setAsyncMode(False)
    # Compare each async summary dict with the sequential reference
    self.assertEqual(res1,reference,
                     "flagdata dictionary does not match for the first flagdata run")
    self.assertEqual(res2,reference,
                     "flagdata dictionary does not match for the second flagdata run")
    self.assertEqual(res3,reference,
                     "flagdata dictionary does not match for the third flagdata run")
# Path for data datapath = os.environ.get('CASAPATH').split()[0] + "/data/regression/unittest/flagdata/" # Pick up alternative data directory to run tests on MMSs testmms = False if os.environ.has_key('TEST_DATADIR'): DATADIR = str(os.environ.get('TEST_DATADIR'))+'/flagdata/' if os.path.isdir(DATADIR): testmms = True datapath = DATADIR print 'flagmanager tests will use data from '+datapath # jagonzal (CAS-4287): Add a cluster-less mode to by-pass parallel processing for MMSs as requested if os.environ.has_key('BYPASS_PARALLEL_PROCESSING'): ParallelTaskHelper.bypassParallelProcessing(1) # Local copy of the agentflagger tool aflocal = aftool() # Base class which defines setUp functions for importing different data sets class test_base(unittest.TestCase): def setUp_flagdatatest(self): '''VLA data set, scan=2500~2600 spw=0 1 chan, RR,LL''' self.vis = "flagdatatest.ms" if testmms: self.vis = "flagdatatest.mms" if os.path.exists(self.vis): print "The MS is already around, just unflag"
4. It gets the right answer for a known line + 0th order continuum, even when fitorder = 4. '''

# Path for data
datapath = os.environ.get('CASAPATH').split()[0] + '/data/regression/unittest'
uvcdatadir = 'uvcontsub'

# Pick up alternative data directory to run tests on MMSs
testmms = False
if os.environ.has_key('TEST_DATADIR'):
    testmms = True
    DATADIR = str(os.environ.get('TEST_DATADIR'))
    if os.path.isdir(DATADIR):
        datapath = DATADIR

# Cluster-less mode to by-pass parallel processing for MMSs
if os.environ.has_key('BYPASS_PARALLEL_PROCESSING'):
    ParallelTaskHelper.bypassParallelProcessing(1)

#Commented out for refactoring (eliminated test_split dependence)
#class UVContChecker(SplitChecker):
#    """
#    Base class for uvcontsub unit testing.
#    """
#    need_to_initialize = True
#    records = {}
#
#    def do_split(self, corrsel):
#        """
#        This is only called do_split because it comes from SplitChecker.
#        run_task (uvcontsub in this case) would have been a better name.
#        """
#        record = {}
class test_simplecluster(unittest.TestCase):
    """Tests for simple_cluster: cluster creation/sizing, resource monitoring,
    and the cluster-less (bypass) parallel-processing mode for MMSs."""

    projectname="test_simplecluster"
    clusterfile="test_simplecluster_config.txt"
    monitorFile="monitoring.log"
    cluster=None

    # Get local host configuration parameters
    host=os.uname()[1]
    cwd=os.getcwd()
    ncpu=multiprocessing.cpu_count()

    # jagonzal (CAS-4287): Add a cluster-less mode to by-pass parallel processing for MMSs as requested
    if os.environ.has_key('BYPASS_SEQUENTIAL_PROCESSING'):
        ParallelTaskHelper.bypassParallelProcessing(1)
        bypassParallelProcessing = True
    else:
        bypassParallelProcessing = False

    # File names for the current test's data products, set by setUpFile()
    vis = ""
    ref = ""
    aux = ""

    def stopCluster(self):
        """Stop the cluster and remove its working files."""
        # Stop thread services and cluster
        self.cluster.stop_cluster()
        # Remove log files, cluster files, and monitoring files
        self.cleanUp()

    def cleanUp(self):
        """Remove engine logs, the cluster config file, and the monitoring file."""
        logfiles=glob.glob("engine-*.log")
        for i in logfiles:
            os.remove(i)
        if os.path.exists(self.clusterfile):
            os.remove(self.clusterfile)
        if os.path.exists(self.monitorFile):
            os.remove(self.monitorFile)

    def initCluster(self,userMonitorFile="",max_engines=2,max_memory=0.,memory_per_engine=512.):
        """Create a cluster config file and initialize a simple_cluster from it.

        A non-empty userMonitorFile selects a custom monitoring log file name;
        otherwise the default 'monitoring.log' is used."""
        # First of all clean up files from previous sessions
        self.cleanUp()
        # Create cluster object
        if (len(userMonitorFile) > 0):
            self.cluster = simple_cluster(userMonitorFile)
            self.monitorFile = userMonitorFile
        else:
            self.cluster = simple_cluster()
            self.monitorFile = "monitoring.log"
        # Create cluster configuration file
        self.createClusterFile(max_engines,max_memory,memory_per_engine)
        # Initialize cluster object
        if (self.cluster.init_cluster(self.clusterfile, self.projectname)):
            self.cluster.check_resource()
            # Wait unit cluster is producing monitoring info
            if (len(userMonitorFile) > 0):
                self.waitForFile(userMonitorFile, 20)
            else:
                self.waitForFile('monitoring.log', 20)

    def createClusterFile(self,max_engines=0.,max_memory=0.,memory_per_engine=512.):
        """Write the cluster configuration file (host, engines, cwd, memory).

        When max_engines>1 the memory figure is derived as 512*max_engines and
        memory_per_engine is omitted from the line; otherwise both memory
        figures are written."""
        if (max_engines>1):
            max_memory = 512*max_engines
            msg=self.host + ', ' + str(max_engines) + ', ' + self.cwd + ', ' + str(max_memory)
        else:
            msg=self.host + ', ' + str(max_engines) + ', ' + self.cwd + ', ' + \
                str(max_memory) + ', ' + str(memory_per_engine)
        f=open(self.clusterfile, 'w')
        f.write(msg)
        f.close()
        # Make sure the file is visible before the cluster reads it
        self.waitForFile(self.clusterfile, 10)

    def waitForFile(self, file, seconds):
        """Poll once per second, up to 'seconds' times, until 'file' exists.
        Returns silently (no error) if the file never appears."""
        for i in range(0,seconds):
            if (os.path.isfile(file)):
                return
            time.sleep(1)

    def setUpFile(self,file,type_file):
        """Copy one file (or each file in a list) into the working area and
        record its name as self.vis/self.ref/self.aux according to type_file."""
        if type(file) is list:
            for file_i in file:
                self.setUpFileCore(file_i,type_file)
        else:
            self.setUpFileCore(file,type_file)
        if type_file=='vis':
            self.vis = file
        elif type_file =='ref':
            self.ref = file
        elif type_file=='aux':
            self.aux = file

    def setUpFileCore(self,file,type_file):
        """Copy a single data file from the repository into the working area,
        removing any stale copy first."""
        if os.path.exists(file):
            print "%s file %s is already in the working area, deleting ..." % (type_file,file)
            os.system('rm -rf ' + file)
        print "Copy %s file %s into the working area..." % (type_file,file)
        os.system('cp -R ' + os.environ.get('CASAPATH').split()[0] + '/data/regression/unittest/simplecluster/' + file + ' ' + file)

    def create_input(self,str_text, filename):
        """Save the string in a text file"""
        inp = filename
        cmd = str_text
        # remove file first
        if os.path.exists(inp):
            os.system('rm -f '+ inp)
        # save to a file
        fid = open(inp, 'w')
        fid.write(cmd)
        # close file
        fid.close()
        # wait until file is visible for the filesystem
        self.waitForFile(filename, 10)
        return

    def test1_defaultCluster(self):
        """Test 1: Create a default cluster"""
        # Create cluster file
        self.initCluster(max_engines=0.)
        cluster_list = self.cluster.get_hosts()
        self.assertTrue(cluster_list[0][0]==self.host)
        self.assertTrue(cluster_list[0][2]==self.cwd)
        self.stopCluster()

    def test2_availableResourcesCluster(self):
        """Test 2: Create a custom cluster to use all the available resources"""
        # Create cluster file
        max_memory_local = self.ncpu*1024
        self.initCluster(max_engines=1.,max_memory=max_memory_local,memory_per_engine=1024.)
        cluster_list = self.cluster.get_hosts()
        self.assertTrue(cluster_list[0][0]==self.host)
        self.assertTrue(cluster_list[0][1]==self.ncpu)
        self.assertTrue(cluster_list[0][2]==self.cwd)
        self.stopCluster()

    def test3_halfCPUCluster(self):
        """Test 3: Create a custom cluster to use half of available CPU capacity"""
        # Create cluster file
        max_memory_local = self.ncpu*512
        self.initCluster(max_engines=0.5,max_memory=max_memory_local,memory_per_engine=512.)
        cluster_list = self.cluster.get_hosts()
        self.assertTrue(cluster_list[0][0]==self.host)
        self.assertTrue(cluster_list[0][2]==self.cwd)
        self.stopCluster()

    # NOTE(review): duplicated test name prefix "test3" with the method above —
    # both still run under unittest since the full names differ.
    def test3_halfMemoryCluster(self):
        """Test 3: Create a custom cluster to use half of available RAM memory"""
        # Create cluster file
        self.initCluster(max_engines=self.ncpu,max_memory=0.5,memory_per_engine=128.)
        cluster_list = self.cluster.get_hosts()
        self.assertTrue(cluster_list[0][0]==self.host)
        self.assertTrue(cluster_list[0][2]==self.cwd)
        self.stopCluster()

    def test4_monitoringDefault(self):
        """Test 4: Check default monitoring file exists"""
        # Create cluster file
        self.initCluster()
        # Verify the monitoring log header contains every expected column
        fid = open('monitoring.log', 'r')
        line = fid.readline()
        self.assertTrue(line.find('Host')>=0)
        self.assertTrue(line.find('Engine')>=0)
        self.assertTrue(line.find('Status')>=0)
        self.assertTrue(line.find('CPU[%]')>=0)
        self.assertTrue(line.find('Memory[%]')>=0)
        self.assertTrue(line.find('Time[s]')>=0)
        self.assertTrue(line.find('Read[MB]')>=0)
        self.assertTrue(line.find('Write[MB]')>=0)
        self.assertTrue(line.find('Read[MB/s]')>=0)
        self.assertTrue(line.find('Write[MB/s]')>=0)
        self.assertTrue(line.find('Job')>=0)
        self.assertTrue(line.find('Sub-MS')>=0)
        self.stopCluster()

    def test5_monitoringUser(self):
        """Test 5: Check custom monitoring file exists"""
        # Create cluster file
        self.initCluster('userMonitorFile.log')
        # Verify the custom monitoring log header contains every expected column
        fid = open('userMonitorFile.log', 'r')
        line = fid.readline()
        self.assertTrue(line.find('Host')>=0)
        self.assertTrue(line.find('Engine')>=0)
        self.assertTrue(line.find('Status')>=0)
        self.assertTrue(line.find('CPU[%]')>=0)
        self.assertTrue(line.find('Memory[%]')>=0)
        self.assertTrue(line.find('Time[s]')>=0)
        self.assertTrue(line.find('Read[MB]')>=0)
        self.assertTrue(line.find('Write[MB]')>=0)
        self.assertTrue(line.find('Read[MB/s]')>=0)
        self.assertTrue(line.find('Write[MB/s]')>=0)
        self.assertTrue(line.find('Job')>=0)
        self.assertTrue(line.find('Sub-MS')>=0)
        self.stopCluster()

    def test6_monitoringStandAlone(self):
        """Test 6: Check the dict structure of the stand-alone method """
        # Create cluster file
        self.initCluster('userMonitorFile.log')
        state = self.cluster.show_resource(True)
        cluster_list = self.cluster.get_hosts()
        # Every engine entry must expose the full set of monitoring keys
        for engine in range(cluster_list[0][1]):
            self.assertTrue(state[self.host][engine].has_key('Status'))
            self.assertTrue(state[self.host][engine].has_key('Sub-MS'))
            self.assertTrue(state[self.host][engine].has_key('Read'))
            self.assertTrue(state[self.host][engine].has_key('Write'))
            self.assertTrue(state[self.host][engine].has_key('Job'))
            self.assertTrue(state[self.host][engine].has_key('Memory'))
            self.assertTrue(state[self.host][engine].has_key('ReadRate'))
            self.assertTrue(state[self.host][engine].has_key('WriteRate'))
        self.stopCluster()

    def test7_bypassParallelProcessing(self):
        """Test 7: Bypass Parallel Processing mode """
        simple_cluster.setDefaults(default_mem_per_engine=33554432)
        # Prepare MMS
        self.setUpFile("Four_ants_3C286.mms",'vis')
        # Create list file
        text = "mode='unflag'\n"\
               "mode='clip' clipminmax=[0,0.1]"
        filename = 'list_flagdata.txt'
        self.create_input(text, filename)
        # step 1: Do unflag+clip
        flagdata(vis=self.vis, mode='list', inpfile=filename)
        # step 2: Now do summary
        ret_dict = flagdata(vis=self.vis, mode='summary')
        # Check summary against per-spw reference flag counts
        self.assertTrue(ret_dict['name']=='Summary')
        self.assertTrue(ret_dict['spw']['15']['flagged'] == 96284.0)
        self.assertTrue(ret_dict['spw']['0']['flagged'] == 129711.0)
        self.assertTrue(ret_dict['spw']['1']['flagged'] == 128551.0)
        self.assertTrue(ret_dict['spw']['2']['flagged'] == 125686.0)
        self.assertTrue(ret_dict['spw']['3']['flagged'] == 122862.0)
        self.assertTrue(ret_dict['spw']['4']['flagged'] == 109317.0)
        self.assertTrue(ret_dict['spw']['5']['flagged'] == 24481.0)
        self.assertTrue(ret_dict['spw']['6']['flagged'] == 0)
        self.assertTrue(ret_dict['spw']['7']['flagged'] == 0)
        self.assertTrue(ret_dict['spw']['8']['flagged'] == 0)
        self.assertTrue(ret_dict['spw']['9']['flagged'] == 27422.0)
        self.assertTrue(ret_dict['spw']['10']['flagged'] == 124638.0)
        self.assertTrue(ret_dict['spw']['11']['flagged'] == 137813.0)
        self.assertTrue(ret_dict['spw']['12']['flagged'] == 131896.0)
        self.assertTrue(ret_dict['spw']['13']['flagged'] == 125074.0)
        self.assertTrue(ret_dict['spw']['14']['flagged'] == 118039.0)
        # Remove MMS
        os.system('rm -rf ' + self.vis)
        # Restore default values
        simple_cluster.setDefaults(default_mem_per_engine=512)
        if not self.bypassParallelProcessing:
            ParallelTaskHelper.bypassParallelProcessing(0)

    def test8_IgnoreNullSelectionError(self):
        """Test 8: Check that NullSelection errors happening for some sub-MSs are ignored """
        """Note: In this test we also check simple_cluster initialization via ParallelTaskHelper """
        # Prepare MMS
        self.setUpFile("Four_ants_3C286.mms",'vis')
        # Unflag entire MMS
        flagdata(vis=self.vis, mode='unflag')
        # Manually flag scan 30
        flagdata(vis=self.vis, mode='manual', scan='30')
        # step 2: Now do summary
        ret_dict = flagdata(vis=self.vis, mode='summary')
        # Check summary: only scan 30 is flagged; scan 31 must be untouched
        self.assertTrue(ret_dict['scan']['30']['flagged'] == 2187264.0)
        self.assertTrue(ret_dict['scan']['31']['flagged'] == 0)
        # Stop cluster if it was started
        self.cluster = simple_cluster.getCluster()
        if (self.cluster != None):
            self.stopCluster()
        # Remove MMS
        os.system('rm -rf ' + self.vis)