Code example #1
 def setUp(self):
     
     self.vis = "Four_ants_3C286.mms"
     setUpFile(self.vis,'vis')
     
     # Tmp files
     self.vis2 = self.vis + '.2'
     self.vis3 = self.vis + '.3'    
     
     # Set up cluster
     self.client = MPICommandClient()
     self.client.set_log_mode('redirect')
     self.client.start_services()  
Code example #2
 def setUp(self):
     
     self.vis = 'Four_ants_3C286.mms'
     setUpFile(self.vis,'vis')
     
     self.client = MPICommandClient()
     self.client.set_log_mode('redirect')
     self.client.start_services()       
     
     # Prepare list of servers
     self.server_list = []
     server_list = self.client.get_server_status()
     for server in server_list:
         if not server_list[server]['timeout']:
             self.server_list.append(server_list[server]['rank'])          
Code example #3
    def getResult(command_request_id_list, taskname):

        # Access the MPICommandClient singleton instance
        client = MPICommandClient()

        # Get response list
        command_response_list = client.get_command_response(command_request_id_list, True, True)

        # Format list in the form of vis dict
        ret_list = {}
        for command_response in command_response_list:
            vis = command_response["parameters"]["vis"]
            ret_list[vis] = command_response["ret"]

        # Consolidate results and return
        ret = ParallelTaskHelper.consolidateResults(ret_list, taskname)

        return ret
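
A minimal usage sketch of the helper above, assuming flagdata has been run with ParallelTaskHelper in async mode so that it returns command request ids (the same pattern appears in the flagdata async test further below); the MMS name is illustrative:

    # Illustrative only: run a task in async mode, then consolidate the
    # per-sub-MS return values into a single result dictionary.
    ParallelTaskHelper.setAsyncMode(True)
    request_ids = flagdata(vis='Four_ants_3C286.mms', mode='summary')
    summary = ParallelTaskHelper.getResult(request_ids, 'flagdata')
    ParallelTaskHelper.setAsyncMode(False)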
Code example #4
 def getResult(command_request_id_list,taskname):
     
     # Access the MPICommandClient singleton instance
     client = MPICommandClient()
     
     # Get response list
     command_response_list =  client.get_command_response(command_request_id_list,True,True)
             
     # Format list in the form of vis dict
     ret_list = {}
     for command_response in command_response_list:
         vis = command_response['parameters']['vis']
         ret_list[vis] = command_response['ret']
         
     # Consolidate results and return
     ret = ParallelTaskHelper.consolidateResults(ret_list,taskname)
     
     return ret                    
Code example #5
 def test_singleton_behaviour(self):
     
     # Delete current MPICommandClient singleton instance reference
     client_ref = self.client
     del client_ref
     
     # Create a new MPICommandClient singleton instance reference
     new_client_ref = MPICommandClient()
     
     # Execute some command
     command_response_list = new_client_ref.push_command_request("a+b",True,[self.server_list[0]],{'a':1,'b':1})
     
     # Analyze command response list contents
     self.assertEqual(len(command_response_list), 1, "Command response list should contain one element")
     self.assertEqual(command_response_list[0]['successful'], True, "Command execution was not successful")
     self.assertEqual(command_response_list[0]['traceback'], None, "Command execution trace-back should be None")
     self.assertEqual(command_response_list[0]['status'], 'response received', "Command status should be 'response received'")
     self.assertEqual(command_response_list[0]['ret'], 2, "Command return variable should be 2")       
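
For reference, the fields checked by this and the other tests in this section suggest the following shape for each command response entry (reconstructed from usage; the actual dictionary may contain additional keys):

    # Reconstructed from the assertions in these tests (illustrative, not exhaustive)
    example_response = {
        'successful': True,              # command executed without raising an exception
        'traceback': None,               # traceback string when the command failed
        'status': 'response received',   # life-cycle status of the request
        'ret': 2,                        # return value of the evaluated command
        'parameters': {'a': 1, 'b': 1},  # parameters the request was pushed with
    }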
Code example #6
class test_mpi4casa_plotms(unittest.TestCase):

    def setUp(self):
        
        self.vis = 'Four_ants_3C286.mms'
        setUpFile(self.vis,'vis')
        
        self.client = MPICommandClient()
        self.client.set_log_mode('redirect')
        self.client.start_services()       
        
        # Prepare list of servers
        self.server_list = []
        server_list = self.client.get_server_status()
        for server in server_list:
            if not server_list[server]['timeout']:
                self.server_list.append(server_list[server]['rank'])          

    def tearDown(self):

        os.system('rm -rf ' + self.vis)
    
    def test_mpi4casa_plotms_concurrent(self):
        """Run plotms on the same MS from each server simulateneously"""
        
        # Change current working directory
        self.client.push_command_request("os.chdir('%s')" % os.getcwd(),True,self.server_list)
        
        # Farm plotms jobs
        command_request_id_list = []
        for server in self.server_list:
            plotfile = 'test_mpi4casa_plotms_concurrent-%s.png' % str(server)
            cmd = "plotms('%s', avgchannel='8',avgtime='60',plotfile='%s',showgui=False)" % (self.vis,plotfile)
            command_request_id = self.client.push_command_request(cmd,False,server)
            command_request_id_list.append(command_request_id[0])
            
        # Get response in block mode
        command_response_list = self.client.get_command_response(command_request_id_list,True,True)
        
        # Compare files
        for server_idx in range(0,len(self.server_list)):
            for server_idy in range(server_idx+1,len(self.server_list)):
                server_x = self.server_list[server_idx]
                server_y = self.server_list[server_idy]
                plotfile_server_idx = 'test_mpi4casa_plotms_concurrent-%s.png' % str(server_x)
                plotfile_server_idy = 'test_mpi4casa_plotms_concurrent-%s.png' % str(server_y)
                areEqual = filecmp.cmp(plotfile_server_idx,plotfile_server_idy)
                self.assertTrue(areEqual,"Plotfile generated by server %s is different from plotfile generated by server %s" 
                                % (str(server_x),str(server_y)))
Code example #7
class test_MPICommandServer(unittest.TestCase):            
            
    def setUp(self):
        
        self.client = MPICommandClient()
        self.client.set_log_mode('unified')
        self.server_list = MPIEnvironment.mpi_server_rank_list()
        self.client.start_services()
                        
    def test_server_cannot_be_instantiated(self):
        
        instantiated = False
        try:
            server = MPICommandServer()
            instantiated = True
        except Exception, instance:
            instantiated = False
            
        self.assertEqual(instantiated, False, "It should not be possible to instantiate MPICommandServer in the client")
Code example #8
 def setUp(self):
     
     # Set-up MMS
     self.vis = "ngc5921.applycal.mms"
     self.vis_sorted = "ngc5921.applycal.sorted.mms"
     setUpFile(self.vis,'vis')
     
     # Set-up reference MMS
     self.ref = "ngc5921.applycal.ms"
     self.ref_sorted = "ngc5921.applycal.sorted.ms"
     setUpFile(self.ref,'ref')
     
     # Set-up auxiliary files
     self.aux = ["ngc5921.fluxscale", "ngc5921.gcal", "ngc5921.bcal"]
     setUpFile(self.aux ,'aux')
     
     # Repository caltables are pre-v4.1, and we
     # must update them _before_ applycal to avoid contention
     casalog.post("Updating pre-v4.1 caltables: %s" % str(self.aux),"WARN","test1_applycal_fluxscale_gcal_bcal")
     cblocal = cbtool()
     for oldct in self.aux:
         cblocal.updatecaltable(oldct)
     casalog.post("Pre-v4.1 caltables updated","INFO","test_mpi4casa_applycal")        
     
     # Tmp files
     self.vis2 = self.vis + '.2'
     self.vis3 = self.vis + '.3'
     self.vis_sorted2 = self.vis_sorted + '.2'
     self.vis_sorted3 = self.vis_sorted + '.3'
     
     # Tmp aux files
     self.aux2 = []
     self.aux3 = []
     for file in self.aux:
         self.aux2.append(file + '.2')
         self.aux3.append(file + '.3')
     
     # Set up cluster
     self.client = MPICommandClient()
     self.client.set_log_mode('redirect')
     self.client.start_services()          
Code example #9
 def __init__(self, task_name, args={}):
     self._arg = dict(args)
     self._arguser = {}
     self._taskName = task_name
     self._executionList = []
     self._jobQueue = None
     # Cache the initial inputs
     self.__originalParams = args
     # jagonzal: Add reference to cluster object
     self._cluster = None
     self._mpi_cluster = False
     self._command_request_id_list = None
     if not MPIEnvironment.is_mpi_enabled:
         self.__bypass_parallel_processing = 1
     if (self.__bypass_parallel_processing == 0):
         self._mpi_cluster = True
         self._command_request_id_list = []
         self._cluster = MPICommandClient()
     # jagonzal: To inhibit return values consolidation
     self._consolidateOutput = True
     # jagonzal (CAS-4287): Add a cluster-less mode to by-pass parallel processing for MMSs as requested
     self._sequential_return_list = {}
Code example #10
 def __init__(self, task_name, args={}):
     self._arg = dict(args)
     self._arguser = {}
     self._taskName = task_name
     self._executionList = []
     self._jobQueue = None
     # Cache the initial inputs
     self.__originalParams = args
     # jagonzal: Add reference to cluster object
     self._cluster = None
     self._mpi_cluster = False
     self._command_request_id_list = None
     if not MPIEnvironment.is_mpi_enabled:
         self.__bypass_parallel_processing = 1
     if self.__bypass_parallel_processing == 0:
         self._mpi_cluster = True
         self._command_request_id_list = []
         self._cluster = MPICommandClient()
     # jagonzal: To inhibit return values consolidation
     self._consolidateOutput = True
     # jagonzal (CAS-4287): Add a cluster-less mode to by-pass parallel processing for MMSs as requested
     self._sequential_return_list = {}
Code example #11
File: task_pieflag.py    Project: chrishales/pieflag
def pieflag(vis,
            field,          # data selection parameters
            refchanfile,
            fitorder_RR_LL,
            fitorder_RL_LR,
            scalethresh,
            SEFDfile,       # scalethresh parameter
            plotSEFD,
            dynamicflag,
            chunktime,      # dynamicflag parameters
            stdmax,
            maxoffset,
            staticflag,
            madmax,         # staticflag parameter
            binsamples,
            extendflag,
            boxtime,        # extendflag parameters
            boxthresh):

    #
    # Task pieflag
    #    Flags bad data by comparing with clean channels in bandpass-calibrated data.
    #
    #    Original reference: E. Middelberg, 2006, PASA, 23, 64
    #    Rewritten for use in CASA and updated to account for wideband
    #    and SEFD effects by Christopher A. Hales 2014.
    #
    #    Thanks to Kumar Golap, Justo Gonzalez, Jeff Kern, James Robnett,
    #    Urvashi Rau, Sanjay Bhatnagar, and of course Enno Middelberg
    #    for expert advice. Thanks to Emmanuel Momjian for providing
    #    Jansky VLA SEFD data for L and X bands (EVLA Memos 152 and 166)
    #    and to Bryan Butler for providing access to all other bands
    #    from the Jansky VLA Exposure Calculator.
    #
    #    Version 4.4 released 26 October 2016
    #    Tested with CASA 4.7.0 using Jansky VLA data
    #    Available at: http://github.com/chrishales/pieflag
    #
    #    Reference for this version:
    #    C. A. Hales, E. Middelberg, 2014, Astrophysics Source Code Library, 1408.014
    #    http://adsabs.harvard.edu/abs/2014ascl.soft08014H
    #
    
    startTime = time.time()
    casalog.origin('pieflag')
    casalog.post('--> pieflag version 4.4')
    
    if (not staticflag) and (not dynamicflag):
        casalog.post('*** ERROR: You need to select static or dynamic flagging.', 'ERROR')
        casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
        return
    
    ms.open(vis)
    vis=ms.name()
    ms.close()
    
    useMPI = MPIEnvironment.is_mpi_enabled
    if useMPI:
        if vis.lower().endswith('.ms'):
            useMPI=False
            casalog.post('--> MS will be processed in serial mode.')
        elif ph.axisType(vis) == 'baseline':
            # client is ID 0 and will not perform parallel processing, servers start from ID 1
            nthreads = MPIEnvironment.rank
            subms_path = vis+'/SUBMSS/'
            subms = filter(lambda x: os.path.isdir(os.path.join(subms_path, x)), os.listdir(subms_path))
            if len(subms) != nthreads:
                casalog.post('*** ERROR: Mismatch, MMS tailored for '+str(len(subms))+' engines but '+\
                                         'CASA session tailored for '+str(nthreads)+' engines.', 'ERROR')
                casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
                return
            
            server_list = MPIEnvironment.mpi_server_rank_list()
            casalog.post('--> Initializing MPI parallel cluster with '+str(nthreads)+' engines.')
            client = MPICommandClient()
            client.start_services()
            # do some detective work to find appropriate path to push to clients
            syspaths = sys.path
            n = 0
            for k in range(len(syspaths)):
                if os.path.isfile(syspaths[k]+'/mytasks.py'):
                    for line in open(syspaths[k]+'/mytasks.py','r'):
                        if re.search("task_location\['pieflag'\]",line):
                            if n==0:
                                n += 1
                                addpath = syspaths[k]
                            elif syspaths[k] != addpath:
                                n += 1
            
            if n == 1:
                casalog.filter('WARN')
                #client.set_log_level('WARN')
                client.push_command_request("casalog.filter('WARN')",True,server_list)
                client.push_command_request("sys.path.append('"+addpath+"')",True,server_list)
                client.push_command_request('from task_pieflag import pieflag_getflagstats',True,server_list)
                client.push_command_request('from task_pieflag import pieflag_flag',True,server_list)
                casalog.filter('INFO')
            else:
                if n == 0:
                    casalog.post('*** ERROR: pieflag mytasks.py installation not found in sys.path', 'ERROR')
                else:
                    casalog.post('*** ERROR: Ambiguity, sys.path contains more than 1 pieflag installation', 'ERROR')
                    casalog.post('***        (pieflag referenced in '+str(n)+' unique path/mytasks.py)', 'ERROR')
                
                casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
                return
            
            fcall1 = 'pieflag_getflagstats(vis,field,spw,npol,feedbasis)'
            fcall2 = 'pieflag_flag(vis,datacol,nthreads,field,vtbleLIST,inttime,nant,ddid,spw,refchan,nchan,npol,'+\
                     'feedbasis,fitorderLIST,sefdLIST,staticflag,madmax,binsamples,dynamicflag,chunktime,stdmax,'+\
                     'maxoffset,extendflag,boxtime,boxthresh)'
        else:
            casalog.post('*** ERROR: MMS is not partitioned by baseline. Cannot process.', 'ERROR')
            casalog.post('***        Use partition() to revert to MS then create baseline MMS.', 'ERROR')
            casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
            return
    else:
        if vis.lower().endswith('.mms'):
            casalog.post('*** ERROR: pieflag cannot handle MMS in non-MPI-enabled CASA session.', 'ERROR')
            casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
            return
        else:
            casalog.post('--> MS will be processed in serial mode.')
    
    tb.open(vis)
    if any('CORRECTED_DATA' in colnames for colnames in tb.colnames()):
        datacol='CORRECTED_DATA'
    else:
        datacol='DATA'
    
    tb.close()
    
    # load in reference channel details
    # OK, there are probably more elegant ways
    # of implementing the following code...meh
    refchandict=json.load(open(refchanfile))
    spw=[]
    for i in refchandict.keys():
        spw.append(int(i))
    
    nspw=len(spw)
    # json doesn't seem to load in the spw order properly
    # The user might not have entered spw's in order either
    # so perform sort just in case
    # note: no need to perform sort on the string versions
    spw.sort()
    # now get reference channels in corresponding sorted order
    refchan=[]
    for i in range(nspw):
        refchan.append(refchandict[str(spw[i])])
    
    # open MS and select relevant data
    ms.open(vis)
    ms.msselect({'field':str(field)})
    
    # get integration time
    scan_summary = ms.getscansummary()
    ms.close()
    scan_list = []
    for scan in scan_summary:
        if scan_summary[scan]['0']['FieldId'] == field:
            scan_list.append(int(scan))
    
    inttime=scan_summary[str(scan_list[0])]['0']['IntegrationTime']
    # get around potential floating point issues by rounding to nearest 1e-5 seconds
    if inttime != round(inttime,5):
        casalog.post('*** WARNING: It seems your integration time is specified to finer than 1e-5 seconds.','WARN')
        casalog.post('***          pieflag will assume this is a rounding error and carry on.','WARN')
    
    for i in range(len(scan_list)):
        if round(inttime,5) != round(scan_summary[str(scan_list[i])]['0']['IntegrationTime'],5):
            casalog.post('*** ERROR: Bummer, pieflag is not set up to handle '+\
                              'changing integration times throughout your MS.', 'ERROR')
            casalog.post('*** ERROR: Exiting pieflag.','ERROR')
            return
    
    # get number of baselines
    tb.open(vis+'/ANTENNA')
    atble=tb.getcol('NAME')
    tb.close()
    nant=atble.shape[0]
    nbaselines=nant*(nant-1)/2
    
    # channel to frequency (Hz) conversion
    tb.open(vis+'/SPECTRAL_WINDOW')
    vtble=tb.getcol('CHAN_FREQ')
    tb.close()
    # vtble format is vtble[channel][spw]
    # assume each spw has the same number of channels
    nchan=vtble.shape[0]
    
    # check that spw frequencies increase monotonically
    spwcheck=vtble[0,0]
    for s in range(1,len(vtble[0,:])):
        if vtble[0,s]<spwcheck:
            casalog.post("*** ERROR: Your spw's are not ordered with increasing frequency.",'ERROR')
            casalog.post('*** ERROR: Consider splitting your data and restarting pieflag. Exiting','ERROR')
            return
        
        spwcheck=vtble[0,s]
    
    # get number of polarizations, assume they don't change throughout observation
    # get details from the first user-selected spw within the first scan on target field
    # note: I won't assume that spw specifies data_desc_id in the main table, even
    #       though in most cases it probably does. Probably overkill given the lack
    #       of checks done elsewhere in this code...
    tb.open(vis+'/DATA_DESCRIPTION')
    temptb=tb.query('SPECTRAL_WINDOW_ID='+str(spw[0]))
    # while here, get the data_desc_id values that pair with spw number
    tempddid=tb.getcol('SPECTRAL_WINDOW_ID').tolist()
    ddid=[]
    for s in range(nspw):
        ddid.append(tempddid.index(spw[s]))
    
    tb.close()
    polid=temptb.getcell('POLARIZATION_ID')
    tb.open(vis+'/POLARIZATION')
    npol=tb.getcell('NUM_CORR',polid)
    poltype=tb.getcell('CORR_TYPE',polid)
    tb.close()
    
    if not (npol == 2 or npol == 4):
        casalog.post('*** ERROR: Your data contains '+str(npol)+' polarization products.','ERROR')
        casalog.post('*** ERROR: pieflag can only handle 2 (eg RR/LL) or 4 (eg RR/RL/LR/LL). Exiting.','ERROR')
        return
    
    # see stokes.h for details
    if poltype[0] == 5:
        # circular
        feedbasis = 1
    elif poltype[0] == 9:
        #linear
        feedbasis = 0
    else:
        casalog.post('*** ERROR: Your data uses an unsupported feed basis. Exiting','ERROR')
        return
    
    casalog.post('--> Some details about your data:')
    casalog.post('    data column to process = '+datacol)
    casalog.post('    integration time = '+str(inttime)+' sec')
    casalog.post('    number of baselines = '+str(nbaselines))
    casalog.post('    spectral windows to process = '+str(spw))
    casalog.post('    number of channels per spectral window = '+str(nchan))
    if feedbasis:
        casalog.post('    feed basis = circular')
    else:
        casalog.post('    feed basis = linear')
    
    casalog.post('    number of polarization products to process = '+str(npol))
    casalog.post('--> Statistics of pre-existing flags:')
    flag0 = np.zeros((nspw,2*npol+2))
    for i in range(nspw):
        casalog.filter('WARN')
        if useMPI:
            for k in range(nthreads):
                param = {'vis':vis+'/SUBMSS/'+subms[k],'field':field,\
                         'spw':spw[i],'npol':npol,'feedbasis':feedbasis}
                if k == 0:
                    pid = client.push_command_request(fcall1,False,None,param)
                else:
                    pid.append((client.push_command_request(fcall1,False,None,param))[0])
            
            presults = client.get_command_response(pid,True)
            for k in range(nthreads):
                flag0[i] += presults[k]['ret']
            
        else:
            flag0[i] = pieflag_getflagstats(vis,field,spw[i],npol,feedbasis)
        
        casalog.filter('INFO')
        RRs="{:.1f}".format(flag0[i][0]/flag0[i][1]*100.)
        LLs="{:.1f}".format(flag0[i][2]/flag0[i][3]*100.)
        TOTs="{:.1f}".format(flag0[i][4]/flag0[i][5]*100.)
        if npol == 2:
            if feedbasis:
                outstr='    flagged data in spw='+str(spw[i])+':  RR='+RRs+'%  LL='+LLs+'%  total='+TOTs+'%'
            else:
                outstr='    flagged data in spw='+str(spw[i])+':  XX='+RRs+'%  YY='+LLs+'%  total='+TOTs+'%'
        else:
            RLs="{:.1f}".format(flag0[i][6]/flag0[i][7]*100.)
            LRs="{:.1f}".format(flag0[i][8]/flag0[i][9]*100.)
            if feedbasis:
                outstr='    flagged data in spw='+str(spw[i])+':  RR='+RRs+'%  RL='+RLs+'%  LR='+LRs+'%  LL='+LLs+'%  total='+TOTs+'%'
            else:
                outstr='    flagged data in spw='+str(spw[i])+':  XX='+RRs+'%  XY='+RLs+'%  YX='+LRs+'%  YY='+LLs+'%  total='+TOTs+'%'
        
        casalog.post(outstr)
    
    # Check there are enough spectral windows to perform the fitting later on. If not, lower the order.
    if fitorder_RR_LL > nspw-1:
        if fitorder_RR_LL == 2:
            if feedbasis:
                casalog.post('*** WARNING: pieflag needs at least 3 spectral windows to fit for RR or LL spectral curvature.','WARN')
            else:
                casalog.post('*** WARNING: pieflag needs at least 3 spectral windows to fit for XX or YY spectral curvature.','WARN')
        else:
            if feedbasis:
                casalog.post('*** WARNING: pieflag needs at least 2 spectral windows to fit for RR or LL spectral index.','WARN')
            else:
                casalog.post('*** WARNING: pieflag needs at least 2 spectral windows to fit for XX or YY spectral index.','WARN')
        
        if nspw == 2:
            fitorder_RR_LL=1
        else:
            fitorder_RR_LL=0
        
        casalog.post('*** WARNING: fitorder_RR_LL has been reduced to '+str(int(fitorder_RR_LL))+ ' and','WARN')
        casalog.post('***          may be reduced further for some baselines if the','WARN')
        casalog.post('***          reference channel isn\'t available in all selected spw\'s.','WARN')
    
    if npol == 2:
        fitorder    = np.zeros(2)
        fitorder[0] = fitorder_RR_LL
        fitorder[1] = fitorder_RR_LL
    elif npol == 4:
        if fitorder_RL_LR > nspw-1:
            if fitorder_RL_LR == 2:
                casalog.post('*** WARNING: pieflag needs at least 3 spectral windows to fit for RL or LR spectral curvature.','WARN')
            else:
                casalog.post('*** WARNING: pieflag needs at least 2 spectral windows to fit for RL or LR spectral index.','WARN')
            
            if nspw == 2:
                fitorder_RL_LR=1
            else:
                fitorder_RL_LR=0
            
            casalog.post('*** WARNING: fitorder_RL_LR has been reduced to '+str(int(fitorder_RL_LR))+' and','WARN')
            casalog.post('***          may be reduced further for some baselines if the','WARN')
            casalog.post('***          reference channel isn\'t available in all selected spw\'s.','WARN')
        
        fitorder    = np.zeros(4)
        fitorder[0] = fitorder_RR_LL
        fitorder[1] = fitorder_RL_LR
        fitorder[2] = fitorder_RL_LR
        fitorder[3] = fitorder_RR_LL
    
    if scalethresh:
        # read in SEFD data and interpolate to get values at our channel frequencies
        casalog.post('--> Reading in SEFD and interpolating at channel frequencies...')
        sefdRAW=np.loadtxt(SEFDfile)
        sefd=np.zeros((nspw,nchan))
        if not np.all(np.diff(sefdRAW[:,0]) >= 0):
            casalog.post('*** ERROR: Your SEFD file must be in order of increasing frequency.','ERROR')
            casalog.post('*** ERROR: Exiting pieflag.','ERROR')
            return
        
        for i in range(nspw):
            if (vtble[:,spw[i]].min() < sefdRAW[:,0].min()) or (vtble[:,spw[i]].max() > sefdRAW[:,0].max()):
                casalog.post('*** ERROR: pieflag cannot extrapolate your SEFD.','ERROR')
                casalog.post('*** ERROR: Provide new SEFD covering your entire frequency range.','ERROR')
                casalog.post('*** ERROR: Exiting pieflag.','ERROR')
                return
        
        sefdINTERP = interp1d(sefdRAW[:,0],sefdRAW[:,1])
        for i in range(nspw):
            sefdREFCHAN = sefdINTERP(vtble[refchan[i]][spw[i]])
            for j in range(nchan):
                # values in each spectral window will be relative to the reference channel value
                sefd[i][j] = sefdINTERP(vtble[j][spw[i]]) / sefdREFCHAN
        
        if plotSEFD:
            # clunky, but works, meh...
            sefdPLOT=np.zeros((nspw*nchan,3))
            k=0
            for i in range(nspw):
                sefdREFCHAN = sefdINTERP(vtble[refchan[i]][spw[i]])
                for j in range(nchan):
                    sefdPLOT[k][0] = vtble[j][spw[i]]/1.0e9
                    sefdPLOT[k][1] = sefd[i][j] * sefdREFCHAN
                    sefdPLOT[k][2] = sefd[i][j]
                    k += 1
            
            f, (ax1, ax2) = plt.subplots(2,sharex=True)
            ax1.plot(sefdRAW[:,0]/1.0e9,sefdRAW[:,1],'b-',sefdPLOT[:,0],sefdPLOT[:,1],'r.',markersize=10)
            ax2.plot([sefdRAW[0,0]/1.0e9,sefdRAW[len(sefdRAW[:,0])-1,0]/1.0e9],[1.,1.],'c-',sefdPLOT[:,0],sefdPLOT[:,2],'r.',markersize=10)
            f.subplots_adjust(hspace=0)
            plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
            ax1.set_title('relative sensitivity assumed across your band,\nnormalized to the reference channel in each spw')
            ax1.legend(['raw input','interpolated'])
            ax1.set_ylabel('SEFD (arbitrary units)')
            ax2.set_xlabel('frequency (GHz)')
            ax2.set_ylabel('SEFD (normalized units per spw)')
    else:
        sefd=np.ones((nspw,nchan))
    
    if not staticflag:
        madmax = 0
        binsamples = 0
    
    if not dynamicflag:
        chunktime = 0
        stdmax = 0
        maxoffset = 0
    
    if not extendflag:
        boxtime = 0
        boxthresh = 0
    
    # forcibly remove all lock files
    #os.system('find '+vis+' -name "*lock" -print | xargs rm')
    
    if useMPI:
        casalog.post('--> pieflag will now flag your data using '+str(nthreads)+' parallel threads.')
        casalog.filter('WARN')
        for k in range(nthreads):
            param = {'vis':vis+'/SUBMSS/'+subms[k],'datacol':datacol,'nthreads':nthreads,'field':field,
                     'vtbleLIST':vtble.tolist(),'inttime':inttime,'nant':nant,
                     'ddid':ddid,'spw':spw,'refchan':refchan,'nchan':nchan,'npol':npol,'feedbasis':feedbasis,
                     'fitorderLIST':fitorder.tolist(),'sefdLIST':sefd.tolist(),
                     'staticflag':staticflag,'madmax':madmax,'binsamples':binsamples,
                     'dynamicflag':dynamicflag,'chunktime':chunktime,'stdmax':stdmax,'maxoffset':maxoffset,
                     'extendflag':extendflag,'boxtime':boxtime,'boxthresh':boxthresh}
            if k == 0:
                pid = client.push_command_request(fcall2,False,None,param)
            else:
                pid.append((client.push_command_request(fcall2,False,None,param))[0])
        
        presults = client.get_command_response(pid,True)
        casalog.filter('INFO')
    else:
        casalog.post('--> pieflag will now flag your data in serial mode.')
        pieflag_flag(vis,datacol,1,field,
                     vtble.tolist(),inttime,nant,
                     ddid,spw,refchan,nchan,npol,feedbasis,
                     fitorder.tolist(),sefd.tolist(),
                     staticflag,madmax,binsamples,
                     dynamicflag,chunktime,stdmax,maxoffset,
                     extendflag,boxtime,boxthresh)
    
    # show updated flagging statistics
    casalog.post('--> Statistics of final flags (including pre-existing):')
    flag1 = np.zeros((nspw,2*npol+2))
    for i in range(nspw):
        casalog.filter('WARN')
        if useMPI:
            for k in range(nthreads):
                param = {'vis':vis+'/SUBMSS/'+subms[k],'field':field,\
                         'spw':spw[i],'npol':npol,'feedbasis':feedbasis}
                if k == 0:
                    pid = client.push_command_request(fcall1,False,None,param)
                else:
                    pid.append((client.push_command_request(fcall1,False,None,param))[0])
            
            presults = client.get_command_response(pid,True)
            for k in range(nthreads):
                flag1[i] += presults[k]['ret']
            
        else:
            flag1[i] = pieflag_getflagstats(vis,field,spw[i],npol,feedbasis)
        
        casalog.filter('INFO')
        RRs="{:.1f}".format(flag1[i][0]/flag1[i][1]*100.)
        LLs="{:.1f}".format(flag1[i][2]/flag1[i][3]*100.)
        TOTs="{:.1f}".format(flag1[i][4]/flag1[i][5]*100.)
        if npol == 2:
            if feedbasis:
                outstr='    flagged data in spw='+str(spw[i])+':  RR='+RRs+'%  LL='+LLs+'%  total='+TOTs+'%'
            else:
                outstr='    flagged data in spw='+str(spw[i])+':  XX='+RRs+'%  YY='+LLs+'%  total='+TOTs+'%'
        else:
            RLs="{:.1f}".format(flag1[i][6]/flag1[i][7]*100.)
            LRs="{:.1f}".format(flag1[i][8]/flag1[i][9]*100.)
            if feedbasis:
                outstr='    flagged data in spw='+str(spw[i])+':  RR='+RRs+'%  RL='+RLs+'%  LR='+LRs+'%  LL='+LLs+'%  total='+TOTs+'%'
            else:
                outstr='    flagged data in spw='+str(spw[i])+':  XX='+RRs+'%  XY='+RLs+'%  YX='+LRs+'%  YY='+LLs+'%  total='+TOTs+'%'
        
        casalog.post(outstr)
    
    casalog.post('--> Statistics of pieflag flags (excluding pre-existing):')
    for i in range(nspw):
        RRs="{:.1f}".format((flag1[i][0]-flag0[i][0])/flag0[i][1]*100.)
        LLs="{:.1f}".format((flag1[i][2]-flag0[i][2])/flag0[i][3]*100.)
        TOTs="{:.1f}".format((flag1[i][4]-flag0[i][4])/flag0[i][5]*100.)
        if npol == 2:
            if feedbasis:
                outstr='    data flagged in spw='+str(spw[i])+':  RR='+RRs+'%  LL='+LLs+'%  total='+TOTs+'%'
            else:
                outstr='    data flagged in spw='+str(spw[i])+':  XX='+RRs+'%  YY='+LLs+'%  total='+TOTs+'%'
        else:
            RLs="{:.1f}".format((flag1[i][6]-flag0[i][6])/flag0[i][7]*100.)
            LRs="{:.1f}".format((flag1[i][8]-flag0[i][8])/flag0[i][9]*100.)
            if feedbasis:
                outstr='    data flagged in spw='+str(spw[i])+':  RR='+RRs+'%  RL='+RLs+'%  LR='+LRs+'%  LL='+LLs+'%  total='+TOTs+'%'
            else:
                outstr='    data flagged in spw='+str(spw[i])+':  XX='+RRs+'%  XY='+RLs+'%  YX='+LRs+'%  YY='+LLs+'%  total='+TOTs+'%'
        
        casalog.post(outstr)
    
    # forcibly remove all lock files
    #os.system('find '+vis+' -name "*lock" -print | xargs rm')
    
    if useMPI:
        #client.set_log_level('INFO')
        client.push_command_request("casalog.filter('INFO')",True,server_list)
    
    t=time.time()-startTime
    casalog.post('--> pieflag run time:  '+str(int(t//3600))+' hours  '+\
                 str(int(t%3600//60))+' minutes  '+str(int(t%60))+' seconds')
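
For orientation only, a hedged invocation sketch of the task defined above; every file name and threshold value below is a placeholder, and the groupings follow the inline comments in the task signature:

    # Illustrative call only; adjust the selection and thresholds to your data.
    pieflag(vis='mydata.mms',             # baseline-partitioned MMS (a plain MS runs in serial mode)
            field=0,                      # data selection
            refchanfile='refchans.json',  # JSON dict mapping spw -> reference channel
            fitorder_RR_LL=1,
            fitorder_RL_LR=1,
            scalethresh=True,
            SEFDfile='sefd.txt',          # two columns: frequency (Hz), SEFD; increasing frequency
            plotSEFD=False,
            dynamicflag=True,
            chunktime=300,                # dynamicflag parameters
            stdmax=8,
            maxoffset=8,
            staticflag=True,
            madmax=6,                     # staticflag parameters
            binsamples=5,
            extendflag=True,
            boxtime=300,                  # extendflag parameters
            boxthresh=8)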
Code example #12
class ParallelTaskHelper:
    """
    This is the extension of the TaskHelper to allow for parallel
    operation.  For simple tasks all that should be required to make
    a task parallel is to use this rather than the TaskHelper method
    above
    """

    __bypass_parallel_processing = 0
    __async_mode = False
    __multithreading = False

    def __init__(self, task_name, args={}):
        self._arg = dict(args)
        self._arguser = {}
        self._taskName = task_name
        self._executionList = []
        self._jobQueue = None
        # Cache the initial inputs
        self.__originalParams = args
        # jagonzal: Add reference to cluster object
        self._cluster = None
        self._mpi_cluster = False
        self._command_request_id_list = None
        if not MPIEnvironment.is_mpi_enabled:
            self.__bypass_parallel_processing = 1
        if (self.__bypass_parallel_processing == 0):
            self._mpi_cluster = True
            self._command_request_id_list = []
            self._cluster = MPICommandClient()
        # jagonzal: To inhibit return values consolidation
        self._consolidateOutput = True
        # jagonzal (CAS-4287): Add a cluster-less mode to by-pass parallel processing for MMSs as requested
        self._sequential_return_list = {}

    def override_arg(self, arg, value):
        self._arguser[arg] = value

    def initialize(self):
        """
        This is the setup portion.
        Currently it:
           * Finds the full path for the input vis.
           * Initializes the MPICommandClient
        """
        self._arg['vis'] = os.path.abspath(self._arg['vis'])

        # jagonzal (Migration to MPI)
        if self._mpi_cluster:
            self._cluster.start_services()

    def getNumberOfServers(self):
        """
        Return the number of engines (iPython cluster) or the number of servers (MPI cluster)
        """
        if (self.__bypass_parallel_processing == 0):
            return len(MPIEnvironment.mpi_server_rank_list())
        else:
            return None

    def generateJobs(self):
        """
        This is the method which generates all of the actual jobs to be
        done.  The default is to assume the input vis is a reference ms and
        build one job for each referenced ms.
        """

        casalog.origin("ParallelTaskHelper")

        try:
            msTool = mstool()
            if not msTool.open(self._arg['vis']):
                raise ValueError, "Unable to open MS %s," % self._arg['vis']
            if not msTool.ismultims():
                raise ValueError, \
                      "MS is not a MultiMS, simple parallelization failed"

            subMs_idx = 0
            for subMS in msTool.getreferencedtables():
                localArgs = copy.deepcopy(self._arg)
                localArgs['vis'] = subMS

                for key in self._arguser:
                    localArgs[key] = self._arguser[key][subMs_idx]
                subMs_idx += 1

                if self._mpi_cluster:
                    self._executionList.append(
                        [self._taskName + '()', localArgs])
                else:
                    self._executionList.append(
                        JobData(self._taskName, localArgs))

            msTool.close()
            return True
        except Exception, instance:
            casalog.post(
                "Error handling MMS %s: %s" % (self._arg['vis'], instance),
                "WARN", "generateJobs")
            msTool.close()
            return False
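
A minimal sketch of how the methods shown above might be driven by a task wrapper (the surrounding CASA machinery also executes and consolidates the generated jobs, which is not reproduced here; the task name and arguments are placeholders):

    # Illustrative driver sequence using only the methods shown above.
    helper = ParallelTaskHelper('flagdata', {'vis': 'mydata.mms', 'mode': 'summary'})
    helper.initialize()                    # resolve the vis path and start MPI services
    if helper.generateJobs():              # one job per referenced sub-MS of the input MMS
        casalog.post('Prepared %d jobs for %s servers'
                     % (len(helper._executionList), str(helper.getNumberOfServers())))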
Code example #13
class test_mpi4casa_flagdata(unittest.TestCase):

    def setUp(self):
        
        self.vis = "Four_ants_3C286.mms"
        setUpFile(self.vis,'vis')
        
        # Tmp files
        self.vis2 = self.vis + '.2'
        self.vis3 = self.vis + '.3'    
        
        # Set up cluster
        self.client = MPICommandClient()
        self.client.set_log_mode('redirect')
        self.client.start_services()  

    def tearDown(self):

        os.system('rm -rf ' + self.vis)
        
        # Remove tmp files
        os.system('rm -rf ' + self.vis2)
        os.system('rm -rf ' + self.vis3)        
    
    def test_mpi4casa_flagdata_list_return(self):
        """Test support for MMS using flagdata in unflag+clip mode"""

        # Create list file
        text = "mode='unflag'\n"\
               "mode='clip' clipminmax=[0,0.1]"
        filename = 'list_flagdata.txt'
        create_input(text, filename)

        # step 1: Do unflag+clip
        flagdata(vis=self.vis, mode='list', inpfile=filename)

        # step 2: Now do summary
        ret_dict = flagdata(vis=self.vis, mode='summary')

        # Check summary
        self.assertTrue(ret_dict['name']=='Summary')
        self.assertTrue(ret_dict['spw']['15']['flagged'] == 96284.0)
        self.assertTrue(ret_dict['spw']['0']['flagged'] == 129711.0)
        self.assertTrue(ret_dict['spw']['1']['flagged'] == 128551.0)
        self.assertTrue(ret_dict['spw']['2']['flagged'] == 125686.0)
        self.assertTrue(ret_dict['spw']['3']['flagged'] == 122862.0)
        self.assertTrue(ret_dict['spw']['4']['flagged'] == 109317.0)
        self.assertTrue(ret_dict['spw']['5']['flagged'] == 24481.0)
        self.assertTrue(ret_dict['spw']['6']['flagged'] == 0)
        self.assertTrue(ret_dict['spw']['7']['flagged'] == 0)
        self.assertTrue(ret_dict['spw']['8']['flagged'] == 0)
        self.assertTrue(ret_dict['spw']['9']['flagged'] == 27422.0)
        self.assertTrue(ret_dict['spw']['10']['flagged'] == 124638.0)
        self.assertTrue(ret_dict['spw']['11']['flagged'] == 137813.0)
        self.assertTrue(ret_dict['spw']['12']['flagged'] == 131896.0)
        self.assertTrue(ret_dict['spw']['13']['flagged'] == 125074.0)
        self.assertTrue(ret_dict['spw']['14']['flagged'] == 118039.0)
        
    def test_mpi4casa_flagdata_list_return_async(self):
        """Test flagdata summary in async mode"""
        
        # First run flagdata sequentially
        bypassParallelProcessing = ParallelTaskHelper.getBypassParallelProcessing()
        ParallelTaskHelper.bypassParallelProcessing(2)
        res = flagdata(vis=self.vis, mode='summary')
        ParallelTaskHelper.bypassParallelProcessing(bypassParallelProcessing)
        
        # Make a copy of the input MMS for each flagdata instance
        os.system("cp -r %s %s" % (self.vis,self.vis2))
        os.system("cp -r %s %s" % (self.vis,self.vis3))
        
        # Set async mode in ParallelTaskHelper
        ParallelTaskHelper.setAsyncMode(True)
        
        # Run flagdata in MMS mode with the first set
        request_id_1 = flagdata(vis=self.vis, mode='summary')    
        
        # Run flagdata in MMS mode with the second set
        request_id_2 = flagdata(vis=self.vis2, mode='summary')
        
        # Run flagdata in MMS mode with the third set
        request_id_3 = flagdata(vis=self.vis3, mode='summary')
        
        # Get response in block mode
        request_id_list = request_id_1 + request_id_2 + request_id_3
        command_response_list = self.client.get_command_response(request_id_list,True,True)
        
        # Get result
        res1 = ParallelTaskHelper.getResult(request_id_1,'flagdata')
        res2 = ParallelTaskHelper.getResult(request_id_2,'flagdata')
        res3 = ParallelTaskHelper.getResult(request_id_3,'flagdata')   
        
        # Unset async mode in ParallelTaskHelper
        ParallelTaskHelper.setAsyncMode(False)         
        
        self.assertEqual(res1,res, "flagdata dictionary does not match for the first flagdata run")
        self.assertEqual(res2,res, "flagdata dictionary does not match for the second flagdata run")
        self.assertEqual(res3,res, "flagdata dictionary does not match for the third flagdata run")       
        
        
    def test_mpi4casa_flagdata_list_return_multithreading(self):
        """Test flagdata summary in multithreading mode"""
        
        # First run flagdata sequentially
        bypassParallelProcessing = ParallelTaskHelper.getBypassParallelProcessing()
        ParallelTaskHelper.bypassParallelProcessing(2)
        res = flagdata(vis=self.vis, mode='summary')
        ParallelTaskHelper.bypassParallelProcessing(bypassParallelProcessing)
        
        # Make a copy of the input MMS for each flagdata instance
        os.system("cp -r %s %s" % (self.vis,self.vis2))
        os.system("cp -r %s %s" % (self.vis,self.vis3))
        
        ParallelTaskHelper.setMultithreadingMode(True)        
        
        # Set up workers
        cmd1 = "flagdata(vis='%s', mode='summary')" % (self.vis)
        worker1 = ParallelTaskWorker(cmd1)
        
        cmd2 = "flagdata(vis='%s', mode='summary')" % (self.vis2)
        worker2 = ParallelTaskWorker(cmd2)        
        
        cmd3 = "flagdata(vis='%s', mode='summary')" % (self.vis3)
        worker3 = ParallelTaskWorker(cmd3)          
        
        # Spawn worker threads
        worker1.start()
        worker2.start()
        worker3.start()
        
        # Get the resulting summary dict from each worker
        res1 = worker1.getResult()
        res2 = worker2.getResult()
        res3 = worker3.getResult()
        
        ParallelTaskHelper.setMultithreadingMode(False) 
        
        # Compare return summary dicts with the one generated with a sequential run
        self.assertEqual(res1,res, "flagdata dictionary does not match for the first flagdata run")
        self.assertEqual(res2,res, "flagdata dictionary does not match for the second flagdata run")
        self.assertEqual(res3,res, "flagdata dictionary does not match for the third flagdata run")    
Code example #14
class test_mpi4casa_applycal(unittest.TestCase):

    def setUp(self):
        
        # Set-up MMS
        self.vis = "ngc5921.applycal.mms"
        self.vis_sorted = "ngc5921.applycal.sorted.mms"
        setUpFile(self.vis,'vis')
        
        # Set-up reference MMS
        self.ref = "ngc5921.applycal.ms"
        self.ref_sorted = "ngc5921.applycal.sorted.ms"
        setUpFile(self.ref,'ref')
        
        # Set-up auxiliary files
        self.aux = ["ngc5921.fluxscale", "ngc5921.gcal", "ngc5921.bcal"]
        setUpFile(self.aux ,'aux')
        
        # Repository caltables are pre-v4.1, and we
        # must update them _before_ applycal to avoid contention
        casalog.post("Updating pre-v4.1 caltables: %s" % str(self.aux),"WARN","test1_applycal_fluxscale_gcal_bcal")
        cblocal = cbtool()
        for oldct in self.aux:
            cblocal.updatecaltable(oldct)
        casalog.post("Pre-v4.1 caltables updated","INFO","test_mpi4casa_applycal")        
        
        # Tmp files
        self.vis2 = self.vis + '.2'
        self.vis3 = self.vis + '.3'
        self.vis_sorted2 = self.vis_sorted + '.2'
        self.vis_sorted3 = self.vis_sorted + '.3'
        
        # Tmp aux files
        self.aux2 = []
        self.aux3 = []
        for file in self.aux:
            self.aux2.append(file + '.2')
            self.aux3.append(file + '.3')
        
        # Set up cluster
        self.client = MPICommandClient()
        self.client.set_log_mode('redirect')
        self.client.start_services()          

    def tearDown(self):
        
        # Remove MMS
        os.system('rm -rf ' + self.vis) 
        os.system('rm -rf ' + self.vis_sorted) 
        
        # Remove ref MMS
        os.system('rm -rf ' + self.ref) 
        os.system('rm -rf ' + self.ref_sorted) 
        
        # Remove tmp files
        os.system('rm -rf ' + self.vis2)
        os.system('rm -rf ' + self.vis3)
        os.system('rm -rf ' + self.vis_sorted2)
        os.system('rm -rf ' + self.vis_sorted3)        
        
        # Remove aux files
        for file in self.aux: os.system('rm -rf ' + file)
            
        # Remove tmp aux files
        for file in self.aux2: os.system('rm -rf ' + file)            
        for file in self.aux3: os.system('rm -rf ' + file)                  
                     
        
    def test1_applycal_fluxscale_gcal_bcal(self):
        """Test 1: Apply calibration using fluxscal gcal and bcal tables"""
        
        # Run applycal in MS mode
        applycal(vis=self.ref,field='',spw='',selectdata=False,gaintable=self.aux,
                 gainfield=['nearest','nearest','0'],
                 interp=['linear', 'linear','nearest'],spwmap=[])
        
        # Run applycal in MMS mode
        applycal(vis=self.vis,field='',spw='',selectdata=False,gaintable=self.aux,
                 gainfield=['nearest','nearest','0'],
                 interp=['linear', 'linear','nearest'],spwmap=[])
        
        # Sort file to properly match rows for comparison
        casalog.post("Sorting vis file: %s" % str(self.vis),"INFO","test1_applycal_fluxscale_gcal_bcal")
        sortFile(self.vis,self.vis_sorted)  
        casalog.post("Sorting ref file: %s" % str(self.ref),"INFO","test1_applycal_fluxscale_gcal_bcal")    
        sortFile(self.ref,self.ref_sorted)        
        
        # Compare files
        compare = testhelper.compTables(self.ref_sorted,self.vis_sorted,['FLAG_CATEGORY'])
        self.assertTrue(compare)      
Code example #15
class ParallelTaskHelper:
    """
    This is the extension of the TaskHelper to allow for parallel
    operation.  For simple tasks all that should be required to make
    a task parallel is to use this rather than the TaskHelper method
    above
    """

    __bypass_parallel_processing = 0
    __async_mode = False
    __multithreading = False

    def __init__(self, task_name, args={}):
        self._arg = dict(args)
        self._arguser = {}
        self._taskName = task_name
        self._executionList = []
        self._jobQueue = None
        # Cache the initial inputs
        self.__originalParams = args
        # jagonzal: Add reference to cluster object
        self._cluster = None
        self._mpi_cluster = False
        self._command_request_id_list = None
        if not MPIEnvironment.is_mpi_enabled:
            self.__bypass_parallel_processing = 1
        if self.__bypass_parallel_processing == 0:
            self._mpi_cluster = True
            self._command_request_id_list = []
            self._cluster = MPICommandClient()
        # jagonzal: To inhibit return values consolidation
        self._consolidateOutput = True
        # jagonzal (CAS-4287): Add a cluster-less mode to by-pass parallel processing for MMSs as requested
        self._sequential_return_list = {}

    def override_arg(self, arg, value):
        self._arguser[arg] = value

    def initialize(self):
        """
        This is the setup portion.
        Currently it:
           * Finds the full path for the input vis.
           * Initializes the MPICommandClient
        """
        self._arg["vis"] = os.path.abspath(self._arg["vis"])

        # jagonzal (Migration to MPI)
        if self._mpi_cluster:
            self._cluster.start_services()

    def getNumberOfServers(self):
        """
        Return the number of engines (iPython cluster) or the number of servers (MPI cluster)
        """
        if self.__bypass_parallel_processing == 0:
            return len(MPIEnvironment.mpi_server_rank_list())
        else:
            return None

    def generateJobs(self):
        """
        This is the method which generates all of the actual jobs to be
        done.  The default is to assume the input vis is a reference ms and
        build one job for each referenced ms.
        """

        casalog.origin("ParallelTaskHelper")

        try:
            msTool = mstool()
            if not msTool.open(self._arg["vis"]):
                raise ValueError, "Unable to open MS %s," % self._arg["vis"]
            if not msTool.ismultims():
                raise ValueError, "MS is not a MultiMS, simple parallelization failed"

            subMs_idx = 0
            for subMS in msTool.getreferencedtables():
                localArgs = copy.deepcopy(self._arg)
                localArgs["vis"] = subMS

                for key in self._arguser:
                    localArgs[key] = self._arguser[key][subMs_idx]
                subMs_idx += 1

                if self._mpi_cluster:
                    self._executionList.append([self._taskName + "()", localArgs])
                else:
                    self._executionList.append(JobData(self._taskName, localArgs))

            msTool.close()
            return True
        except Exception, instance:
            casalog.post("Error handling MMS %s: %s" % (self._arg["vis"], instance), "WARN", "generateJobs")
            msTool.close()
            return False
Code example #16
 def setUp(self):
     
     self.client = MPICommandClient()
     self.client.set_log_mode('redirect')
     self.server_list = MPIEnvironment.mpi_server_rank_list()
     self.client.start_services()
Code example #17
class test_MPICommandClient(unittest.TestCase):
       
    def setUp(self):
        
        self.client = MPICommandClient()
        self.client.set_log_mode('redirect')
        self.server_list = MPIEnvironment.mpi_server_rank_list()
        self.client.start_services()
                            
    def test_exec_multiple_target_non_blocking_mode_str_params_successful(self):
        
        command_request_id_list = self.client.push_command_request("import time; time.sleep(3)",False,[self.server_list[0],self.server_list[1]])
        
        # Try to get responses before completion in non-blocking mode
        command_response_list = self.client.get_command_response(command_request_id_list,False,True)
        
        # Get response in blocking mode
        command_response_list = self.client.get_command_response(command_request_id_list,True,True)
        
        # Analyze command response list contents
        self.assertEqual(len(command_response_list), 2, "Command response list should contain two elements")
        for command_response in command_response_list:
            self.assertEqual(command_response['successful'], True, "Command execution was not successful")
            self.assertEqual(command_response['traceback'], None, "Command execution trace-back should be None")
            self.assertEqual(command_response['status'], 'response received', "Command status should be 'response received'")
            self.assertEqual(command_response['ret'], None, "Command return variable should be None")
                    
    def test_eval_multiple_target_blocking_mode_str_params_successful(self):
        
        command_response_list = self.client.push_command_request("1+1",True,[self.server_list[0],self.server_list[1]])
        self.assertEqual(len(command_response_list), 2, "Command response list should contain two elements")
        for command_response in command_response_list:
            self.assertEqual(command_response['successful'], True, "Command execution was not successful")
            self.assertEqual(command_response['traceback'], None, "Command execution trace-back should be None")
            self.assertEqual(command_response['status'], 'response received', "Command status should be 'response received'")
            self.assertEqual(command_response['ret'], 2, "Command return variable should be 2")
                        
    def test_eval_undefined_target_non_blocking_mode_dict_params_not_successful(self):
        
        command_request_id_list = self.client.push_command_request("pow(a,b)",False,None,{'a':'test','b':2})
        
        # Try to get responses before completion in non-blocking mode
        command_response_list = self.client.get_command_response(command_request_id_list,False,True)
        
        # Get response in blocking mode
        command_response_list = self.client.get_command_response(command_request_id_list,True,True)
        
        # Analyze command response list contents
        self.assertEqual(len(command_response_list), 1, "Command response list should contain one element")
        self.assertEqual(command_response_list[0]['successful'], False, "Command execution should not have been successful")
        self.assertEqual(command_response_list[0]['traceback'].find("TypeError:")>=0, True, "Trace-back should contain TypeError")
        self.assertEqual(command_response_list[0]['status'], 'response received', "Command status should be 'response received'")
        self.assertEqual(command_response_list[0]['ret'], None, "Command return variable should be None")
                    
    def test_singleton_behaviour(self):
        
        # Delete current MPICommandClient singleton instance reference
        client_ref = self.client
        del client_ref
        
        # Create a new MPICommandClient singleton instance reference
        new_client_ref = MPICommandClient()
        
        # Execute some command
        command_response_list = new_client_ref.push_command_request("a+b",True,[self.server_list[0]],{'a':1,'b':1})
        
        # Analyze command response list contents
        self.assertEqual(len(command_response_list), 1, "Command response list should contain one element")
        self.assertEqual(command_response_list[0]['successful'], True, "Command execution was not successful")
        self.assertEqual(command_response_list[0]['traceback'], None, "Command execution trace-back should be None")
        self.assertEqual(command_response_list[0]['status'], 'response received', "Command status should be 'response received'")
        self.assertEqual(command_response_list[0]['ret'], 2, "Command return variable should be 2")       
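
Taken together, the calls in this test class suggest the following argument pattern for the two client methods (reconstructed from usage, so the real signatures may accept further options); client and server_list are assumed to be set up as in the setUp above:

    # Reconstructed call pattern (illustrative):
    #   push_command_request(command, block, target_server, parameters)
    #     command:       string to execute/evaluate on the servers
    #     block=True:    returns the response list directly
    #     block=False:   returns a list of command request ids
    #     target_server: a rank, a list of ranks, or None to let the client choose
    #     parameters:    optional dict of variables made available to the command
    #   get_command_response(command_request_id_list, block, <flag>)
    #     the third flag is passed as True throughout these examples
    request_ids = client.push_command_request('1+1', False, [server_list[0]])
    responses = client.get_command_response(request_ids, True, True)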
Code example #18
class test_mpi4casa_NullSelection(unittest.TestCase):

    def setUp(self):
        
        self.vis = "Four_ants_3C286.mms"
        setUpFile(self.vis,'vis')
        
        self.client = MPICommandClient()
        self.client.set_log_mode('redirect')
        self.client.start_services()       
        
        # Prepare list of servers
        self.server_list = []
        server_list = self.client.get_server_status()
        for server in server_list:
            if not server_list[server]['timeout']:
                self.server_list.append(server_list[server]['rank'])          
                
        self.client.push_command_request("import os",True,self.server_list)

    def tearDown(self):

        os.system('rm -rf ' + self.vis)
        
        # Restore log file and filter
        self.client.push_command_request("casalog.setlogfile(casa['files']['logfile'])",True,self.server_list)        
    
    def test_mpi4casa_NullSelection_entire_mms(self):
        """Test filter out NullSelection exceptions"""
        
        # First clear the list of filtered-out messages and make sure that MSSelectionNullSelection shows up
        for server in self.server_list:
            logfile = 'MSSelectionNullSelection-Not-Filtered.log-server-%s' % str(server)
            self.client.push_command_request("casalog.setlogfile('%s'); casalog.clearFilterMsgList()" % (logfile),True,server)
            
        # Run flagdata selecting a non-existing scan
        flagdata(vis=self.vis, scan='99')  
        
        # Iterate through log files to see if we find the exception
        for server in self.server_list:
            # Get current working directory (we might be in the 'nosedir' subdirectory)
            cwd = self.client.push_command_request("os.getcwd()",True,server)[0]['ret']
            logfile = '%s/MSSelectionNullSelection-Not-Filtered.log-server-%s' % (cwd,str(server))
            content = open(logfile, 'r').read()
            if content.find('flagdata')>0: # Check only servers which processed a flagdata sub-job
                self.assertEqual(content.find("MSSelectionNullSelection")>0, True, "MSSelectionNullSelection should not be filtered out")

        # Now populate the list of messages to be filtered out, including MSSelectionNullSelection
        text = ['MSSelectionNullSelection','NeverHappens']
        for server in self.server_list:
            logfile = 'MSSelectionNullSelection-Filtered.log-server-%s' % str(server)
            self.client.push_command_request("casalog.setlogfile('%s'); casalog.filterMsg(%s)" % (logfile,str(text)),True,server) 
        
        # Run flagdata selecting a non-existing scan
        flagdata(vis=self.vis, scan='99')  
        
        # Iterate through log files to see if we find the exception
        for server in self.server_list:
            # Get current working directory (we might be in the 'nosedir' subdirectory)
            cwd = self.client.push_command_request("os.getcwd()",True,server)[0]['ret']
            logfile = '%s/MSSelectionNullSelection-Filtered.log-server-%s' % (cwd,str(server))
            content = open(logfile, 'r').read()
            if content.find('flagdata')>0: # Check only servers which processed a flagdata sub-job
                self.assertEqual(content.find("MSSelectionNullSelection")<0, True, "MSSelectionNullSelection should be filtered out")       
Code example #19
class test_mpi4casa_log_level(unittest.TestCase):

    def setUp(self):
        
        self.vis = "Four_ants_3C286.mms"
        setUpFile(self.vis,'vis')
        
        self.client = MPICommandClient()
        self.client.set_log_mode('redirect')
        self.client.start_services()       
        
        # Prepare list of servers
        self.server_list = []
        server_list = self.client.get_server_status()
        for server in server_list:
            if not server_list[server]['timeout']:
                self.server_list.append(server_list[server]['rank'])          
                
        self.client.push_command_request("import os",True,self.server_list)

    def tearDown(self):

        os.system('rm -rf ' + self.vis)
        
        # Restore log file and level
        self.client.push_command_request("casalog.setlogfile(casa['files']['logfile'])",True,self.server_list)
        self.client.set_log_level("INFO")
    
    def test_mpi4casa_log_level_default_to_debug(self):
        """Test changing globally log level from default to debug """
            
        # Change log level globally (test via MPIInterface as it internally uses MPICommandClient so both are tested)
        mpi_interface = MPIInterface()
        mpi_interface.set_log_level("DEBUG")    
                
        # Use a separate log file per server to facilitate analysis
        for server in self.server_list:
            logfile = 'test_mpi4casa_log_level_debug-server-%s.log' % str(server)
            self.client.push_command_request("casalog.setlogfile('%s')" % (logfile),True,server)        
            
        # Run flagdata 
        flagdata(vis=self.vis, mode='summary')  
        
        # Iterate through log files to see if we find command handling msgs
        for server in self.server_list:
            # Get current working directory (we might be in the 'nosedir' subdirectory)
            cwd = self.client.push_command_request("os.getcwd()",True,server)[0]['ret']
            logfile = '%s/test_mpi4casa_log_level_debug-server-%s.log' % (cwd,str(server))
            content = open(logfile, 'r').read()
            if content.find('flagdata')>0: # Check only servers which processed a flagdata sub-job
                self.assertEqual(content.find("MPICommandServer")<0, True, "MPICommandServer msgs should be filtered out")