def _compare( self, name, ref ):
    """Compare the SPECTRA column of table 'name' against reference table 'ref'.

    Verifies that both tables have the same number of rows, that each row
    has the same number of channels, and that the spectral values agree
    within numpy.allclose tolerance.
    """
    self._checkfile( name )
    # reference data
    rsp = []
    with tbmanager(ref) as tb:
        nrow0 = tb.nrows()
        for irow in xrange(nrow0):
            rsp.append(tb.getcell('SPECTRA', irow))
    # check shape
    sp = []
    with tbmanager(name) as tb:
        nrow = tb.nrows()
        self.assertEqual(nrow, nrow0, msg='number of rows mismatch')
        for irow in xrange(nrow):
            sp.append(tb.getcell('SPECTRA', irow))
            self.assertEqual(len(sp[irow]), len(rsp[irow]),
                             msg='SPECTRA: number of channel mismatch in row%s'%(irow))
    # check data row by row so a failure reports the offending row
    for irow in xrange(nrow):
        ret = numpy.allclose(rsp[irow], sp[irow])
        self.assertEqual(ret, True,
                         msg='SPECTRA: data differ in row%s'%(irow))
def _verify_saved_flag(self, infile, flagdatafile):
    """Verify FLAG/FLAG_ROW in flagdatafile against those derived from infile.

    infile is exported to a temporary MS via scantable to obtain the
    expected FLAG and FLAG_ROW, which are then compared row by row with
    the values actually stored in flagdatafile.
    """
    # export infile to MS to obtain expected FLAG and FLAG_ROW
    with temporary_file() as name:
        s = sd.scantable(infile, average=False)
        s.save(name, format='MS2')
        with tbmanager(name) as tb:
            expected_flag_row = tb.getcol('FLAG_ROW')
            expected_flag = tb.getcol('FLAG')
    # actual FLAG and FLAG_ROW
    with tbmanager(flagdatafile) as tb:
        flag_row = tb.getcol('FLAG_ROW')
        flag = tb.getcol('FLAG')
    # compare
    self.assertEqual(len(flag_row), len(expected_flag_row), msg='length of FLAG_ROW differ')
    self.assertEqual(flag.shape, expected_flag.shape, msg='shape of FLAG differ')
    nrow = len(flag_row)
    for irow in xrange(nrow):
        self.assertEqual(flag_row[irow], expected_flag_row[irow],
                         msg='Row %s: FLAG_ROW differ' % (irow))
        # FLAG is indexed [corr, chan, row]; compare the whole plane of this row
        self.assertTrue(
            all(flag[:, :, irow].flatten() == expected_flag[:, :, irow].flatten()),
            msg='Row %s: FLAG differ' % (irow))
def _getStats(self, filename=None, spw=None, pol=None, colname=None, mask=None): """ Returns a list of statistics dictionary of selected rows in an MS. filename : the name of MS spw : spw ID selection (default: all spws in MS) pol : pol ID selection (default: all pols in MS) colname : the name of data column (default: 'FLOAT_DATA') mask : a mask list of the channel ranges to use. The format is [[start_idx0, end_idx0], [start_idx1, end_idx1], ...] The order of output list is in the ascending order of selected row IDs. The dictionary in output list has keys: 'row' (row ID in MS), 'pol' (pol ID), 'rms', 'min', 'max', 'median', and 'stddev' """ # Get selected row and pol IDs in MS. Also get spectrumn in the MS if not spw: spw = '' select_spw = (spw not in ['', '*']) if select_spw: spw_sel = self._getListSelection(spw) if not pol: pol = '' select_pol = (pol not in ['', '*']) if select_pol: pol_sel = self._getListSelection(pol) if not colname: colname = 'FLOAT_DATA' self._checkfile(filename) with tbmanager(filename) as tb: data = tb.getcol(colname) ddid = tb.getcol('DATA_DESC_ID') with tbmanager(filename + '/DATA_DESCRIPTION') as tb: spwid = tb.getcol('SPECTRAL_WINDOW_ID').tolist() if not select_spw: spw_sel = spwid # get the selected DD IDs from selected SPW IDs. dd_sel = self._getListSelectedRowID(spwid, spw_sel) # get the selected row IDs from selected DD IDs row_sel = self._getListSelectedRowID(ddid, dd_sel) if not select_spw: row_sel = range(len(ddid)) if not select_pol: pol_sel = range(len(data)) res = [] for irow in row_sel: for ipol in pol_sel: spec = data[ipol, :, irow] res_elem = self._calc_stats_of_array(spec, mask=mask) res_elem['row'] = irow res_elem['pol'] = ipol res.append(res_elem) return res
def _getStats(self, filename=None, spw=None, pol=None, colname=None, mask=None): """ Returns a list of statistics dictionary of selected rows in an MS. filename : the name of MS spw : spw ID selection (default: all spws in MS) pol : pol ID selection (default: all pols in MS) colname : the name of data column (default: 'FLOAT_DATA') mask : a mask list of the channel ranges to use. The format is [[start_idx0, end_idx0], [start_idx1, end_idx1], ...] The order of output list is in the ascending order of selected row IDs. The dictionary in output list has keys: 'row' (row ID in MS), 'pol' (pol ID), 'rms', 'min', 'max', 'median', and 'stddev' """ # Get selected row and pol IDs in MS. Also get spectrumn in the MS if not spw: spw = '' select_spw = (spw not in ['', '*']) if select_spw: spw_sel = self._getListSelection(spw) if not pol: pol = '' select_pol = (pol not in ['', '*']) if select_pol: pol_sel = self._getListSelection(pol) if not colname: colname='FLOAT_DATA' self._checkfile(filename) with tbmanager(filename) as tb: data = tb.getcol(colname) ddid = tb.getcol('DATA_DESC_ID') with tbmanager(filename+'/DATA_DESCRIPTION') as tb: spwid = tb.getcol('SPECTRAL_WINDOW_ID').tolist() if not select_spw: spw_sel = spwid # get the selected DD IDs from selected SPW IDs. dd_sel = self._getListSelectedRowID(spwid, spw_sel) # get the selected row IDs from selected DD IDs row_sel = self._getListSelectedRowID(ddid, dd_sel) if not select_spw: row_sel = range(len(ddid)) if not select_pol: pol_sel = range(len(data)) res = [] for irow in row_sel: for ipol in pol_sel: spec = data[ipol,:,irow] res_elem = self._calc_stats_of_array(spec, mask=mask) res_elem['row'] = irow res_elem['pol'] = ipol res.append(res_elem) return res
def _getspectra_selected( self, name, tbsel=None ):
    """
    Returns an array of spectra in rows selected in table.

    name  : the name of scantable
    tbsel : a dictionary of table selection information. The key should
            be column name and the value should be a list of column
            values to select. An empty/None value selects all rows.
    """
    # tbsel=None replaces the old mutable-default {}; passing {} still works
    if tbsel is None:
        tbsel = {}
    isthere = os.path.exists(name)
    self.assertEqual(isthere, True,
                     msg='file %s does not exist'%(name))
    with tbmanager(name) as tb:
        sp = []
        if len(tbsel) == 0:
            for i in range(tb.nrows()):
                sp.append(tb.getcell('SPECTRA', i).tolist())
        else:
            # build a TaQL-style condition ANDing all requested columns
            command = ''
            for key, val in tbsel.items():
                if len(command) > 0:
                    command += ' AND '
                command += ('%s in %s' % (key, str(val)))
            # query() is issued outside the try so a failed query does not
            # leave 'newtb' undefined when the finally clause runs
            newtb = tb.query(command)
            try:
                for i in range(newtb.nrows()):
                    sp.append(newtb.getcell('SPECTRA', i).tolist())
            finally:
                newtb.close()
    return sp
def _getinfo(self, infile):
    # Cache flag-related reference information of the input table on self.
    with tbmanager(infile) as tb:
        # number of channels, taken from the first row's FLAGTRA cell
        self.nchan_orig = len(tb.getcell("FLAGTRA", 0))
        # per-row row flags
        self.rowid_rflag_orig = tb.getcol("FLAGROW")
        # per-row channel-flag sums; getcol presumably returns
        # [chan, row] so axis=0 sums over channels — TODO confirm
        cfraw = tb.getcol("FLAGTRA").sum(axis=0)
        # True for rows that have at least one flagged channel
        self.rowid_cflag_orig = [cfraw[i] > 0 for i in xrange(len(cfraw))]
        # channel-flag pattern of the first channel-flagged row
        self.cflag_orig = tb.getcell("FLAGTRA", numpy.where(self.rowid_cflag_orig)[0][0])
def _get_flag_from_scantable(self, infile):
    """Return (FLAGROW, FLAGTRA) columns of a scantable, sorted by
    SCANNO, CYCLENO, IFNO, POLNO so results are comparable across tables.
    """
    with tbmanager(infile) as tb:
        tsort = tb.query('', sortlist='SCANNO,CYCLENO,IFNO,POLNO')
        # close the sorted reference table even if getcol raises;
        # the original leaked tsort on error
        try:
            flagrow = tsort.getcol('FLAGROW')
            flagtra = tsort.getcol('FLAGTRA')
        finally:
            tsort.close()
    return flagrow, flagtra
def _getinfo(self, infile):
    """Collect reference flag information from infile and store it on self."""
    with tbmanager(infile) as tb:
        first_row_chanflag = tb.getcell('FLAGTRA', 0)
        # number of spectral channels
        self.nchan_orig = len(first_row_chanflag)
        # row flags for every row
        self.rowid_rflag_orig = tb.getcol('FLAGROW')
        # sum of channel flags per row: nonzero means "some channel flagged"
        chanflag_sum = tb.getcol('FLAGTRA').sum(axis=0)
        self.rowid_cflag_orig = [chanflag_sum[i] > 0 for i in xrange(len(chanflag_sum))]
        # channel-flag pattern taken from the first row that has any
        # flagged channel
        first_flagged_row = numpy.where(self.rowid_cflag_orig)[0][0]
        self.cflag_orig = tb.getcell('FLAGTRA', first_flagged_row)
def restore_sorted_table_keyword(infile, sorttab_info):
    """Restore the sorted-table keyword removed by remove_sorted_table_keyword.

    infile       : table whose keyword is to be restored
    sorttab_info : dict with keys 'is_sorttab', 'sorttab_keywd' and
                   'sorttab_name' describing the keyword to put back
    """
    # the original wrapped putkeyword in "except Exception, e: raise
    # Exception, e", which converted every error to a bare Exception and
    # discarded the traceback; letting the error propagate unchanged is
    # strictly more informative for callers
    if sorttab_info['is_sorttab'] and (sorttab_info['sorttab_name'] != ''):
        with sdutil.tbmanager(infile, nomodify=False) as tb:
            tb.putkeyword(sorttab_info['sorttab_keywd'],
                          sorttab_info['sorttab_name'])
def _check_flags_no_change(self):
    """check if no changes applied on flag values"""
    with tbmanager(self.outfile) as tb:
        # row flags must match the cached originals
        self.assertTrue(all(tb.getcol('FLAGROW')==self.rowid_rflag_orig))
        for i in xrange(tb.nrows()):
            chanflag = tb.getcell('FLAGTRA', i)
            # rows that were channel-flagged keep the original pattern;
            # all other rows must be entirely unflagged (zeros)
            chanflag_ref = self.cflag_orig if self.rowid_cflag_orig[i] else numpy.zeros(self.nchan_orig, numpy.int32)
            self.assertTrue(all(chanflag==chanflag_ref))
def wrapper(self):
    # Decorator body: 'func' is the wrapped test method captured from the
    # enclosing decorator scope (not visible in this chunk).
    # Precondition: every row of the input must have WEIGHT_SPECTRUM defined.
    with sdutil.tbmanager(self.infile) as tb:
        for irow in xrange(tb.nrows()):
            self.assertTrue(tb.iscelldefined('WEIGHT_SPECTRUM', irow))
    # weight mode flag
    self.weight_propagation = True
    func(self)
def _resetweight(vis):
    """Reset every WEIGHT and SIGMA value of vis to 1.0 (fallback path)."""
    # work with private tb tool
    casalog.post('fill WEIGHT and SIGMA failed. reset all WEIGHT and SIGMA to 1.0...', priority='WARN')
    with sdutil.tbmanager(vis, nomodify=False) as tb:
        for colname in ('WEIGHT', 'SIGMA'):
            # read the whole column, overwrite every cell in place, write back
            cells = tb.getvarcol(colname)
            for cell in cells.values():
                cell[:] = 1.0
            tb.putvarcol(colname, cells)
def _fillweight(vis): if not os.path.exists(vis): return casalog.post('fill WEIGHT and SIGMA columns for %s ...' % (vis)) # work with private cb tool with sdutil.cbmanager(vis, compress=False, addcorr=False, addmodel=False) as cb: status = cb.initweights() if status: # cb.initweights() succeeded so try to apply Tsys to # weight column # procedure: # 1. generate temporary Tsys caltable # 2. apply temporary Tsys caltable to vis # 3. remove temporary Tsys caltable import time from gencal import gencal from applycal import applycal caltable = 'temporary_caltable%s.tsys' % (str(time.time()).replace( '.', '')) casalog.post('tempolary caltable name: %s' % (caltable)) try: gencal(vis=vis, caltable=caltable, caltype='tsys') # add 0.5*INTERVAL to the TIME values in caltable to make them time of # integration midpoint, because gencal currently sets (TIME_VIS - # INTERVAL_VIS/2), namely start time, into TIME in its output based on # ALMA's conventions...(2014/6/17 WK) with sdutil.tbmanager(caltable, nomodify=False) as tbcal: with sdutil.tbmanager(vis) as tbvis: interval = tbvis.getcol('INTERVAL')[0] tbcal.putcol('TIME', tbcal.getcol('TIME') + 0.5 * interval) applycal(vis=vis, docallib=False, gaintable=[caltable], applymode='calonly') except Exception, e: # Tsys application failed so that reset WEIGHT and SIGMA to 1.0 _resetweight(vis) raise e finally:
def _check_values(self, scaletsys):
    """check spectra and tsys values"""
    with tbmanager(self.outfile) as tb:
        for irow, col in itertools.product(xrange(tb.nrows()), ["SPECTRA", "TSYS"]):
            data = tb.getcell(col, irow)
            # reference: unit spectrum, scaled by self.factor for
            # unflagged rows; TSYS is scaled only when scaletsys is True
            data_ref = numpy.ones(self.nchan_orig, numpy.float)
            if self.rowid_rflag_orig[irow] == 0:
                if scaletsys or col == "SPECTRA":
                    data_ref *= self.factor
            self.assertTrue(all(data == data_ref))
def _check_values(self, scaletsys):
    """check spectra and tsys values"""
    with tbmanager(self.outfile) as tb:
        for irow, col in itertools.product(xrange(tb.nrows()), ['SPECTRA', 'TSYS']):
            data = tb.getcell(col, irow)
            # reference: unit spectrum, scaled by self.factor for
            # unflagged rows; TSYS is scaled only when scaletsys is True
            data_ref = numpy.ones(self.nchan_orig, numpy.float)
            if self.rowid_rflag_orig[irow] == 0:
                if scaletsys or col=='SPECTRA':
                    data_ref *= self.factor
            self.assertTrue(all(data==data_ref))
def remove_sorted_table_keyword(infile):
    """Remove the SORTED_TABLE keyword from infile, if present.

    Returns a dict with keys 'is_sorttab', 'sorttab_keywd' and
    'sorttab_name' suitable for restore_sorted_table_keyword().
    """
    res = {'is_sorttab': False, 'sorttab_keywd': '', 'sorttab_name': ''}
    with sdutil.tbmanager(infile, nomodify=False) as tb:
        # the "except Exception, e: raise Exception, e" wrapper of the
        # original is dropped: it only erased the exception type/traceback
        sorttab_keywd = 'SORTED_TABLE'
        if sorttab_keywd in tb.keywordnames():
            res['is_sorttab'] = True
            res['sorttab_keywd'] = sorttab_keywd
            res['sorttab_name'] = tb.getkeyword(sorttab_keywd)
            tb.removekeyword(sorttab_keywd)
    # BUG FIX: the original never returned res, so callers that do
    # "sorttab_info = remove_sorted_table_keyword(infile)" received None
    return res
def test000(self):
    # testing kwidth from 1 to 5.
    for kwidth in range(1,6):
        result = tsdsmooth(infile=self.infile, outfile=self.outfile,
                           datacolumn=self.datacolumn, overwrite=True,
                           kernel='boxcar', kwidth = kwidth)
        # verify every row/pol of the smoothed output against the
        # expected boxcar response around the known line center
        with sdutil.tbmanager(self.outfile) as tb:
            for irow in range(tb.nrows()):
                spec = tb.getcell(self.datacolumn.upper(), irow)
                for ipol in range(len(spec)):
                    # reference centers are keyed by row+pol, e.g. '00', '01'
                    center = self.centers[str(irow)+str(ipol)]
                    self._checkResult(spec[ipol], kwidth, center)
def _verify_saved_flag(self, infile, flagdatafile):
    """Verify FLAG/FLAG_ROW in flagdatafile against those derived from infile.

    infile is exported to a temporary MS via scantable to obtain the
    expected FLAG and FLAG_ROW, which are then compared row by row with
    the values actually stored in flagdatafile.
    """
    # export infile to MS to obtain expected FLAG and FLAG_ROW
    with temporary_file() as name:
        s = sd.scantable(infile, average=False)
        s.save(name, format='MS2')
        with tbmanager(name) as tb:
            expected_flag_row = tb.getcol('FLAG_ROW')
            expected_flag = tb.getcol('FLAG')
    # actual FLAG and FLAG_ROW
    with tbmanager(flagdatafile) as tb:
        flag_row = tb.getcol('FLAG_ROW')
        flag = tb.getcol('FLAG')
    # compare
    self.assertEqual(len(flag_row), len(expected_flag_row), msg='length of FLAG_ROW differ')
    self.assertEqual(flag.shape, expected_flag.shape, msg='shape of FLAG differ')
    nrow = len(flag_row)
    for irow in xrange(nrow):
        self.assertEqual(flag_row[irow], expected_flag_row[irow],
                         msg='Row %s: FLAG_ROW differ'%(irow))
        # FLAG is indexed [corr, chan, row]; compare the whole plane of this row
        self.assertTrue(all(flag[:,:,irow].flatten() == expected_flag[:,:,irow].flatten()),
                        msg='Row %s: FLAG differ'%(irow))
def _fillweight(vis): if not os.path.exists(vis): return casalog.post('fill WEIGHT and SIGMA columns for %s ...'%(vis)) # work with private cb tool with sdutil.cbmanager(vis, compress=False, addcorr=False, addmodel=False) as cb: status = cb.initweights() if status: # cb.initweights() succeeded so try to apply Tsys to # weight column # procedure: # 1. generate temporary Tsys caltable # 2. apply temporary Tsys caltable to vis # 3. remove temporary Tsys caltable import time from gencal import gencal from applycal import applycal caltable = 'temporary_caltable%s.tsys'%(str(time.time()).replace('.','')) casalog.post('tempolary caltable name: %s'%(caltable)) try: gencal(vis=vis, caltable=caltable, caltype='tsys') # add 0.5*INTERVAL to the TIME values in caltable to make them time of # integration midpoint, because gencal currently sets (TIME_VIS - # INTERVAL_VIS/2), namely start time, into TIME in its output based on # ALMA's conventions...(2014/6/17 WK) with sdutil.tbmanager(caltable, nomodify=False) as tbcal: with sdutil.tbmanager(vis) as tbvis: interval = tbvis.getcol('INTERVAL')[0] tbcal.putcol('TIME', tbcal.getcol('TIME') + 0.5*interval) applycal(vis=vis, docallib=False, gaintable=[caltable], applymode='calonly') except Exception, e: # Tsys application failed so that reset WEIGHT and SIGMA to 1.0 _resetweight(vis) raise e finally:
def test000(self):
    # testing kwidth from 1 to 5.
    for kwidth in range(1, 6):
        result = sdsmooth(infile=self.infile, outfile=self.outfile,
                          datacolumn=self.datacolumn, overwrite=True,
                          kernel='boxcar', kwidth=kwidth)
        # verify every row/pol of the smoothed output against the
        # expected boxcar response around the known line center
        with sdutil.tbmanager(self.outfile) as tb:
            for irow in range(tb.nrows()):
                spec = tb.getcell(self.datacolumn.upper(), irow)
                for ipol in range(len(spec)):
                    # reference centers are keyed by row+pol, e.g. '00', '01'
                    center = self.centers[str(irow) + str(ipol)]
                    self._checkResult(spec[ipol], kwidth, center)
def test000(self):
    """Basic Test 000: default values for all parameters (nfit=[0] : no fitting)"""
    tid = '000'
    for infile in self.infiles:
        datacolumn = 'float_data'
        result = tsdfit(infile=infile, datacolumn=datacolumn)
        # each of the two polarizations yields one result entry per row
        npol = 2
        with tbmanager(infile) as tb:
            nrow = tb.nrows()
        for key in result.keys():
            self.assertEqual(len(result[key]), nrow*npol,
                             msg="The result data has wrong data length")
            for i in range(len(result[key])):
                # with nfit=[0] no fitting happens: 'nfit' entries are 0,
                # every other key holds empty lists
                if (key == "nfit"):
                    self.assertEqual(result[key][i], 0,
                                     msg="%s has wrong value."%(key))
                else:
                    self.assertEqual(result[key][i], [],
                                     msg="%s has wrong value."%(key))
def test_single_pol(self): """test_single_pol: test single-polarization calibration (YY)""" # generate single-polarization MS from mstransform_cli import mstransform_cli as mstransform mstransform(vis=self.infile, outputvis=self.infile_YY, correlation='YY', datacolumn='float_data') self.assertTrue(os.path.exists(self.infile_YY)) with sdutil.tbmanager(self.infile_YY) as tb: try: for irow in xrange(tb.nrows()): flag = tb.getcell('FLAG', irow) self.assertEqual(flag.shape[0], 1) finally: tb.close() params = self.generate_params(radius='65arcsec') params['infile'] = self.infile_YY self.run_task(**params) self._verify_caltable(self._generic_verify, **params)
def test000(self):
    """Basic Test 000: default values for all parameters (nfit=[0] : no fitting)"""
    tid = '000'
    infile = self.infile
    # NOTE(review): outfile is assembled but not passed to tsdfit below —
    # looks unused in this test; confirm
    outfile = self.outroot + tid + '.ms'
    datacolumn = 'float_data'
    result = tsdfit(infile=infile, datacolumn=datacolumn)
    # each of the two polarizations yields one result entry per row
    npol = 2
    with tbmanager(infile) as tb:
        nrow = tb.nrows()
    for key in result.keys():
        self.assertEqual(len(result[key]), nrow * npol,
                         msg="The result data has wrong data length")
        for i in range(len(result[key])):
            # with nfit=[0] no fitting happens: 'nfit' entries are 0,
            # every other key holds empty lists
            if (key == "nfit"):
                self.assertEqual(result[key][i], 0,
                                 msg="%s has wrong value." % (key))
            else:
                self.assertEqual(result[key][i], [],
                                 msg="%s has wrong value." % (key))
def run_test(self, *args, **kwargs):
    """Run tsdsmooth with a gaussian kernel and verify the output.

    Verifies row count, smoothed data values against a reference
    numpy.convolve result, and (in weight-propagation mode) the
    WEIGHT_SPECTRUM column against an analytically propagated weight.
    kwargs may carry 'kwidth' (default 5) and 'spw' (default '': no
    selection, expecting 2 output rows).
    """
    datacol_name = self.datacolumn.upper()
    # weight-propagation mode is enabled by a decorator that sets
    # self.weight_propagation = True
    weight_mode = hasattr(self, 'weight_propagation') and getattr(self, 'weight_propagation') is True

    if kwargs.has_key('kwidth'):
        kwidth = kwargs['kwidth']
    else:
        kwidth = 5

    self.result = tsdsmooth(infile=self.infile, outfile=self.outfile,
                            kernel='gaussian', datacolumn=self.datacolumn,
                            **kwargs)

    # sanity check
    self.assertIsNone(self.result, msg='The task must complete without error')
    self.assertTrue(os.path.exists(self.outfile),
                    msg='Output file is not properly created.')

    if kwargs.has_key('spw'):
        spw = kwargs['spw']
    else:
        spw = ''
    dd_selection = None
    if len(spw) == 0:
        # no selection: the fixture is expected to contain 2 rows
        expected_nrow = 2
        with sdutil.tbmanager(self.infile) as tb:
            data_in = tb.getvarcol(datacol_name)
            flag_in = tb.getvarcol('FLAG')
            if weight_mode is True:
                weight_in = tb.getvarcol('WEIGHT_SPECTRUM')
    else:
        # translate the spw selection into DATA_DESC_IDs and read only
        # the matching rows from the input
        myms = gentools(['ms'])[0]
        a = myms.msseltoindex(self.infile, spw=spw)
        spw_selection = a['spw']
        dd_selection = a['dd']
        expected_nrow = len(spw_selection)
        with sdutil.tbmanager(self.infile) as tb:
            try:
                tsel = tb.query('DATA_DESC_ID IN %s'%(dd_selection.tolist()))
                data_in = tsel.getvarcol(datacol_name)
                flag_in = tsel.getvarcol('FLAG')
                if weight_mode is True:
                    weight_in = tsel.getvarcol('WEIGHT_SPECTRUM')
            finally:
                tsel.close()

    with sdutil.tbmanager(self.outfile) as tb:
        nrow = tb.nrows()
        data_out = tb.getvarcol(datacol_name)
        flag_out = tb.getvarcol('FLAG')
        if weight_mode is True:
            weight_out = tb.getvarcol('WEIGHT_SPECTRUM')

    # verify nrow
    self.assertEqual(nrow, expected_nrow,
                     msg='Number of rows mismatch (expected %s actual %s)'%(expected_nrow, nrow))

    # verify data
    eps = 1.0e-6
    for key in data_out.keys():
        row_in = data_in[key]
        flg_in = flag_in[key]
        # flagged channels are zeroed before building the reference
        row_in[numpy.where(flg_in == True)] = 0.0
        row_out = data_out[key]
        self.assertEqual(row_in.shape, row_out.shape,
                         msg='Shape mismatch in row %s'%(key))
        npol, nchan, _ = row_out.shape
        # reference: convolution of the input with the gaussian kernel
        kernel_array = gaussian_kernel(nchan, kwidth)
        expected = numpy.convolve(row_in[0,:,0], kernel_array, mode='same')
        output = row_out[0,:,0]
        # channels whose expectation is ~zero must stay ~zero
        zero_index = numpy.where(numpy.abs(expected) <= eps)
        self.assertTrue(all(numpy.abs(output[zero_index]) < eps),
                        msg='Failed to verify zero values: row %s'%(key))
        # remaining channels: relative difference w.r.t. the peak value
        nonzero_index= numpy.where(numpy.abs(expected) > eps)
        diff = numpy.abs((output[nonzero_index] - expected[nonzero_index]) / expected[nonzero_index].max())
        self.assertTrue(all(diff < eps),
                        msg='Failed to verify nonzero values: row %s'%(key))

        # weight check if this is weight test
        if weight_mode is True:
            wgt_in = weight_in[key]
            wgt_out = weight_out[key]
            # truncate the gaussian kernel to an odd-width window around
            # its peak and renormalize; propagated weight is
            # 1 / sum_j(k_j^2 / w_j) over that window
            wkwidth = int(kwidth + 0.5)
            wkwidth += (1 if wkwidth % 2 == 0 else 0)
            half_width = wkwidth / 2
            peak_chan = kernel_array.argmax()
            start_chan = peak_chan - half_width
            wkernel = kernel_array[start_chan:start_chan+wkwidth].copy()
            wkernel /= sum(wkernel)
            weight_expected = wgt_in.copy()
            # edge channels keep the input weight; only interior channels
            # get the propagated value
            for ichan in xrange(half_width, nchan-half_width):
                s = numpy.zeros(npol, dtype=float)
                for jchan in xrange(wkwidth):
                    s += wkernel[jchan] * wkernel[jchan] / wgt_in[:,ichan-half_width+jchan,0]
                weight_expected[:,ichan,0] = 1.0 / s
            diff = numpy.abs((wgt_out - weight_expected) / weight_expected)
            self.assertTrue(all(diff.flatten() < eps),
                            msg='Failed to verify spectral weight: row %s'%(key))
def run_test(self, *args, **kwargs):
    """Run sdsmooth with a gaussian kernel and verify the output.

    Verifies row count, smoothed data values against a reference
    numpy.convolve result, and (in weight-propagation mode) the
    WEIGHT_SPECTRUM column against an analytically propagated weight.
    kwargs may carry 'kwidth' (default 5) and 'spw' (default '': no
    selection, expecting 2 output rows).
    """
    datacol_name = self.datacolumn.upper()
    # weight-propagation mode is enabled by a decorator that sets
    # self.weight_propagation = True
    weight_mode = hasattr(self, 'weight_propagation') and getattr(
        self, 'weight_propagation') is True

    if kwargs.has_key('kwidth'):
        kwidth = kwargs['kwidth']
    else:
        kwidth = 5

    self.result = sdsmooth(infile=self.infile, outfile=self.outfile,
                           kernel='gaussian', datacolumn=self.datacolumn,
                           **kwargs)

    # sanity check
    self.assertIsNone(self.result, msg='The task must complete without error')
    self.assertTrue(os.path.exists(self.outfile),
                    msg='Output file is not properly created.')

    if kwargs.has_key('spw'):
        spw = kwargs['spw']
    else:
        spw = ''
    dd_selection = None
    if len(spw) == 0:
        # no selection: the fixture is expected to contain 2 rows
        expected_nrow = 2
        with sdutil.tbmanager(self.infile) as tb:
            data_in = tb.getvarcol(datacol_name)
            flag_in = tb.getvarcol('FLAG')
            if weight_mode is True:
                weight_in = tb.getvarcol('WEIGHT_SPECTRUM')
    else:
        # translate the spw selection into DATA_DESC_IDs and read only
        # the matching rows from the input
        myms = gentools(['ms'])[0]
        a = myms.msseltoindex(self.infile, spw=spw)
        spw_selection = a['spw']
        dd_selection = a['dd']
        expected_nrow = len(spw_selection)
        with sdutil.tbmanager(self.infile) as tb:
            try:
                tsel = tb.query('DATA_DESC_ID IN %s' % (dd_selection.tolist()))
                data_in = tsel.getvarcol(datacol_name)
                flag_in = tsel.getvarcol('FLAG')
                if weight_mode is True:
                    weight_in = tsel.getvarcol('WEIGHT_SPECTRUM')
            finally:
                tsel.close()

    with sdutil.tbmanager(self.outfile) as tb:
        nrow = tb.nrows()
        data_out = tb.getvarcol(datacol_name)
        flag_out = tb.getvarcol('FLAG')
        if weight_mode is True:
            weight_out = tb.getvarcol('WEIGHT_SPECTRUM')

    # verify nrow
    self.assertEqual(
        nrow, expected_nrow,
        msg='Number of rows mismatch (expected %s actual %s)' %
        (expected_nrow, nrow))

    # verify data
    eps = 1.0e-6
    for key in data_out.keys():
        row_in = data_in[key]
        flg_in = flag_in[key]
        # flagged channels are zeroed before building the reference
        row_in[numpy.where(flg_in == True)] = 0.0
        row_out = data_out[key]
        self.assertEqual(row_in.shape, row_out.shape,
                         msg='Shape mismatch in row %s' % (key))
        npol, nchan, _ = row_out.shape
        # reference: convolution of the input with the gaussian kernel
        kernel_array = gaussian_kernel(nchan, kwidth)
        expected = numpy.convolve(row_in[0, :, 0], kernel_array,
                                  mode='same')
        output = row_out[0, :, 0]
        # channels whose expectation is ~zero must stay ~zero
        zero_index = numpy.where(numpy.abs(expected) <= eps)
        self.assertTrue(all(numpy.abs(output[zero_index]) < eps),
                        msg='Failed to verify zero values: row %s' % (key))
        # remaining channels: relative difference w.r.t. the peak value
        nonzero_index = numpy.where(numpy.abs(expected) > eps)
        diff = numpy.abs(
            (output[nonzero_index] - expected[nonzero_index]) /
            expected[nonzero_index].max())
        self.assertTrue(all(diff < eps),
                        msg='Failed to verify nonzero values: row %s' % (key))

        # weight check if this is weight test
        if weight_mode is True:
            wgt_in = weight_in[key]
            wgt_out = weight_out[key]
            # truncate the gaussian kernel to an odd-width window around
            # its peak and renormalize; propagated weight is
            # 1 / sum_j(k_j^2 / w_j) over that window
            wkwidth = int(kwidth + 0.5)
            wkwidth += (1 if wkwidth % 2 == 0 else 0)
            half_width = wkwidth / 2
            peak_chan = kernel_array.argmax()
            start_chan = peak_chan - half_width
            wkernel = kernel_array[start_chan:start_chan + wkwidth].copy()
            wkernel /= sum(wkernel)
            weight_expected = wgt_in.copy()
            # edge channels keep the input weight; only interior channels
            # get the propagated value
            for ichan in xrange(half_width, nchan - half_width):
                s = numpy.zeros(npol, dtype=float)
                for jchan in xrange(wkwidth):
                    s += wkernel[jchan] * wkernel[
                        jchan] / wgt_in[:, ichan - half_width + jchan, 0]
                weight_expected[:, ichan, 0] = 1.0 / s
            diff = numpy.abs((wgt_out - weight_expected) / weight_expected)
            self.assertTrue(
                all(diff.flatten() < eps),
                msg='Failed to verify spectral weight: row %s' % (key))
def getdata(self):
    """Read the whole SPECTRA column of the output table into self.data."""
    with tbmanager(self.outfile) as table:
        self.data = table.getcol('SPECTRA')
def run_test(self, *args, **kwargs):
    """Run tsdsmooth with a gaussian kernel and verify the output data.

    Verifies row count and smoothed data values against a reference
    numpy.convolve result. kwargs may carry 'kwidth' (default 5) and
    'spw' (default '': no selection, expecting 2 output rows).
    """
    datacol_name = self.datacolumn.upper()
    if kwargs.has_key('kwidth'):
        kwidth = kwargs['kwidth']
    else:
        kwidth = 5

    self.result = tsdsmooth(infile=self.infile, outfile=self.outfile,
                            kernel='gaussian', datacolumn=self.datacolumn,
                            **kwargs)

    # sanity check
    self.assertIsNone(self.result, msg='The task must complete without error')
    self.assertTrue(os.path.exists(self.outfile),
                    msg='Output file is not properly created.')

    if kwargs.has_key('spw'):
        spw = kwargs['spw']
    else:
        spw = ''
    dd_selection = None
    if len(spw) == 0:
        # no selection: the fixture is expected to contain 2 rows
        expected_nrow = 2
        with sdutil.tbmanager(self.infile) as tb:
            data_in = tb.getvarcol(datacol_name)
            flag_in = tb.getvarcol('FLAG')
    else:
        # translate the spw selection into DATA_DESC_IDs and read only
        # the matching rows from the input
        myms = gentools(['ms'])[0]
        a = myms.msseltoindex(self.infile, spw=spw)
        spw_selection = a['spw']
        dd_selection = a['dd']
        expected_nrow = len(spw_selection)
        with sdutil.tbmanager(self.infile) as tb:
            try:
                tsel = tb.query('DATA_DESC_ID IN %s' % (dd_selection.tolist()))
                data_in = tsel.getvarcol(datacol_name)
                flag_in = tsel.getvarcol('FLAG')
            finally:
                tsel.close()

    with sdutil.tbmanager(self.outfile) as tb:
        nrow = tb.nrows()
        data_out = tb.getvarcol(datacol_name)
        flag_out = tb.getvarcol('FLAG')

    # verify nrow
    self.assertEqual(
        nrow, expected_nrow,
        msg='Number of rows mismatch (expected %s actual %s)' %
        (expected_nrow, nrow))

    # verify data
    eps = 1.0e-6
    for key in data_out.keys():
        row_in = data_in[key]
        flg_in = flag_in[key]
        # flagged channels are zeroed before building the reference
        row_in[numpy.where(flg_in == True)] = 0.0
        row_out = data_out[key]
        self.assertEqual(row_in.shape, row_out.shape,
                         msg='Shape mismatch in row %s' % (key))
        nchan = row_out.shape[1]
        # reference: convolution of the input with the gaussian kernel
        kernel_array = gaussian_kernel(nchan, kwidth)
        expected = numpy.convolve(row_in[0, :, 0], kernel_array, mode='same')
        output = row_out[0, :, 0]
        # channels whose expectation is ~zero must stay ~zero
        zero_index = numpy.where(numpy.abs(expected) <= eps)
        self.assertTrue(all(numpy.abs(output[zero_index]) < eps),
                        msg='Failed to verify zero values: row %s' % (key))
        # remaining channels: relative difference w.r.t. the peak value
        nonzero_index = numpy.where(numpy.abs(expected) > eps)
        diff = numpy.abs(
            (output[nonzero_index] -
             expected[nonzero_index]) / expected[nonzero_index].max())
        self.assertTrue(all(diff < eps),
                        msg='Failed to verify nonzero values: row %s' % (key))
def inspect_caltype(table):
    """Return the value of the 'VisCal' table keyword, or 'UNKNOWN' if absent."""
    with sdutil.tbmanager(table) as tb:
        if 'VisCal' in tb.keywordnames():
            return tb.getkeyword('VisCal')
    return 'UNKNOWN'
def run_test(self, *args, **kwargs):
    """Run tsdsmooth with a gaussian kernel and verify the output data.

    Verifies row count and smoothed data values against a reference
    numpy.convolve result. kwargs may carry 'kwidth' (default 5) and
    'spw' (default '': no selection, expecting 2 output rows).
    """
    datacol_name = self.datacolumn.upper()
    if kwargs.has_key('kwidth'):
        kwidth = kwargs['kwidth']
    else:
        kwidth = 5

    self.result = tsdsmooth(infile=self.infile, outfile=self.outfile,
                            kernel='gaussian', datacolumn=self.datacolumn,
                            **kwargs)

    # sanity check
    self.assertIsNone(self.result, msg='The task must complete without error')
    self.assertTrue(os.path.exists(self.outfile),
                    msg='Output file is not properly created.')

    if kwargs.has_key('spw'):
        spw = kwargs['spw']
    else:
        spw = ''
    dd_selection = None
    if len(spw) == 0:
        # no selection: the fixture is expected to contain 2 rows
        expected_nrow = 2
        with sdutil.tbmanager(self.infile) as tb:
            data_in = tb.getvarcol(datacol_name)
            flag_in = tb.getvarcol('FLAG')
    else:
        # translate the spw selection into DATA_DESC_IDs and read only
        # the matching rows from the input
        myms = gentools(['ms'])[0]
        a = myms.msseltoindex(self.infile, spw=spw)
        spw_selection = a['spw']
        dd_selection = a['dd']
        expected_nrow = len(spw_selection)
        with sdutil.tbmanager(self.infile) as tb:
            try:
                tsel = tb.query('DATA_DESC_ID IN %s'%(dd_selection.tolist()))
                data_in = tsel.getvarcol(datacol_name)
                flag_in = tsel.getvarcol('FLAG')
            finally:
                tsel.close()

    with sdutil.tbmanager(self.outfile) as tb:
        nrow = tb.nrows()
        data_out = tb.getvarcol(datacol_name)
        flag_out = tb.getvarcol('FLAG')

    # verify nrow
    self.assertEqual(nrow, expected_nrow,
                     msg='Number of rows mismatch (expected %s actual %s)'%(expected_nrow, nrow))

    # verify data
    eps = 1.0e-6
    for key in data_out.keys():
        row_in = data_in[key]
        flg_in = flag_in[key]
        # flagged channels are zeroed before building the reference
        row_in[numpy.where(flg_in == True)] = 0.0
        row_out = data_out[key]
        self.assertEqual(row_in.shape, row_out.shape,
                         msg='Shape mismatch in row %s'%(key))
        nchan = row_out.shape[1]
        # reference: convolution of the input with the gaussian kernel
        kernel_array = gaussian_kernel(nchan, kwidth)
        expected = numpy.convolve(row_in[0,:,0], kernel_array, mode='same')
        output = row_out[0,:,0]
        # channels whose expectation is ~zero must stay ~zero
        zero_index = numpy.where(numpy.abs(expected) <= eps)
        self.assertTrue(all(numpy.abs(output[zero_index]) < eps),
                        msg='Failed to verify zero values: row %s'%(key))
        # remaining channels: relative difference w.r.t. the peak value
        nonzero_index= numpy.where(numpy.abs(expected) > eps)
        diff = numpy.abs((output[nonzero_index] - expected[nonzero_index]) /
                         expected[nonzero_index].max())
        self.assertTrue(all(diff < eps),
                        msg='Failed to verify nonzero values: row %s'%(key))
def _write_blparam_text_header(fname, infile, outfile, maskmode):
    """Write the '#'-framed summary header of a baseline-parameter text file."""
    # header reports the input table, the effective output file and the
    # mask mode; when no outfile is given the input is edited in place,
    # so report infile as the output
    outfile2 = infile if outfile == '' else outfile
    info = [['Source Table', infile],
            ['Output File', outfile2],
            ['Mask mode', maskmode]]
    separator = '#' * 60 + '\n'
    f = open(fname, 'w')
    f.write(separator)
    for item in info:
        f.write('%12s: %s\n' % tuple(item))
    f.write(separator)
    f.close()


def tsdbaseline(infile=None, datacolumn=None, antenna=None, field=None,
                spw=None, timerange=None, scan=None, pol=None, maskmode=None,
                thresh=None, avg_limit=None, minwidth=None, edge=None,
                blmode=None, dosubtract=None, blformat=None, bloutput=None,
                bltable=None, blfunc=None, order=None, npiece=None,
                applyfft=None, fftmethod=None, fftthresh=None, addwn=None,
                rejwn=None, clipthresh=None, clipniter=None, blparam=None,
                verify=None, verbose=None, showprogress=None, minnrow=None,
                outfile=None, overwrite=None):
    """Fit and/or subtract baselines from single-dish spectra in an MS.

    blmode='apply' applies an existing baseline table (bltable) to the
    selected data of infile and writes the result to outfile.
    blmode='fit' fits baselines (blfunc/order/npiece/...) and optionally
    subtracts them (dosubtract); fit results are exported in the formats
    listed in blformat with file names taken from bloutput ('csv',
    'text', 'table'; '' selects a default name derived from infile).

    Raises ValueError or Exception on invalid arguments or when an
    output file already exists and overwrite is False.
    """
    casalog.origin('tsdbaseline')
    try:
        # --- argument validation -------------------------------------
        if (outfile == '') or not isinstance(outfile, str):
            print("type=%s, value=%s" % (type(outfile), str(outfile)))
            raise ValueError("outfile name is empty.")
        if os.path.exists(outfile) and not overwrite:
            raise Exception(outfile + ' exists.')
        if maskmode == 'interact':
            raise ValueError("maskmode='%s' is not supported yet" % maskmode)
        if blfunc == 'variable' and not os.path.exists(blparam):
            raise ValueError("input file '%s' does not exists" % blparam)
        if spw == '':
            spw = '*'

        if blmode == 'apply':
            if not os.path.exists(bltable):
                raise ValueError("file specified in bltable '%s' does not exist." % bltable)

            # read subtable info; values are not used below — presumably
            # kept as a sanity check that the subtables are readable
            # (TODO confirm)
            with sdutil.tbmanager(infile + '/DATA_DESCRIPTION') as tb:
                spw_ids = tb.getcol('SPECTRAL_WINDOW_ID')
            with sdutil.tbmanager(infile + '/ANTENNA') as tb:
                ant_ids = range(tb.nrows())
            with sdutil.tbmanager(infile + '/FEED') as tb:
                feed_ids = numpy.unique(tb.getcol('FEED_ID'))

            sorttab_info = remove_sorted_table_keyword(infile)

            if overwrite and os.path.exists(outfile):
                os.system('rm -rf %s' % outfile)

            selection = ms.msseltoindex(vis=infile, spw=spw, field=field,
                                        baseline=str(antenna),
                                        time=timerange, scan=scan)
            sdms.open(infile)
            sdms.set_selection(spw=sdutil.get_spwids(selection),
                               field=field, antenna=str(antenna),
                               timerange=timerange, scan=scan)
            sdms.apply_baseline_table(bltable=bltable,
                                      datacolumn=datacolumn,
                                      spw=spw, outfile=outfile)
            sdms.close()
            restore_sorted_table_keyword(infile, sorttab_info)

        elif blmode == 'fit':
            # refuse to clobber existing bloutput file(s) unless overwrite
            blout_exists = False
            if isinstance(bloutput, str) and os.path.exists(bloutput):
                blout_exists = True
            elif isinstance(bloutput, list):
                for blout in bloutput:
                    if os.path.exists(blout):
                        blout_exists = True
                        break
            if blout_exists and not overwrite:
                raise ValueError("file(s) specified in bloutput exists.")

            selection = ms.msseltoindex(vis=infile, spw=spw, field=field,
                                        baseline=str(antenna),
                                        time=timerange, scan=scan)

            if blfunc == 'variable':
                sorttab_info = remove_sorted_table_keyword(infile)

            sdms.open(infile)
            sdms.set_selection(spw=sdutil.get_spwids(selection),
                               field=field, antenna=str(antenna),
                               timerange=timerange, scan=scan)

            # normalize blformat/bloutput to parallel lists and resolve
            # the concrete output file name for each requested format
            new_bloutput = []
            if isinstance(blformat, str):
                blformat = [blformat]
            if isinstance(bloutput, str):
                bloutput = [bloutput]
            if isinstance(blformat, list):
                if '' in blformat:
                    # an empty element disables formatted output entirely
                    blformat = ',,'
                elif len(blformat) != len(bloutput):
                    raise ValueError("The size of blfomat must be the same as that of bloutput.")
                elif [key for key, val in Counter(blformat).items() if val > 1]:
                    raise ValueError("Same multiple blformats are not allowed.")
                else:
                    if 'csv' in blformat:
                        fname = bloutput[blformat.index('csv')]
                        if fname == '':
                            fname = infile + '_blparam.csv'
                        if not overwrite and os.path.exists(fname):
                            raise Exception(fname + ' exists.')
                        new_bloutput.append(fname)
                    else:
                        new_bloutput.append('')

                    if 'text' in blformat:
                        fname = bloutput[blformat.index('text')]
                        if fname == '':
                            fname = infile + '_blparam.txt'
                        if not overwrite and os.path.exists(fname):
                            raise Exception(fname + ' exists.')
                        new_bloutput.append(fname)
                        # NOTE: the original default-name branch wrote
                        # ['Output File', outfile] while computing but
                        # never using outfile2; both branches now report
                        # the effective output file consistently
                        _write_blparam_text_header(fname, infile,
                                                   outfile, maskmode)
                    else:
                        new_bloutput.append('')

                    if 'table' in blformat:
                        fname = bloutput[blformat.index('table')]
                        if fname == '':
                            fname = infile + '_blparam.bltable'
                        if not overwrite and os.path.exists(fname):
                            raise Exception(fname + ' exists.')
                        new_bloutput.append(fname)
                    else:
                        new_bloutput.append('')

            # NOTE(review): when blformat contained '', blformat is the
            # string ',,' here and the join below turns it into ',,,',
            # so the blformat != ',,' guard further down still calls
            # func — confirm this is the intended behavior
            blformat = ",".join(blformat)
            bloutput = ",".join(new_bloutput)

            params, func = prepare_for_baselining(
                blfunc=blfunc,
                datacolumn=datacolumn,
                outfile=outfile,
                bltable=bloutput,  # remove this line once text/csv output becomes available (2015/7/1 WK)
                blformat=blformat,
                bloutput=bloutput,
                dosubtract=dosubtract,
                spw=spw,
                pol=pol,
                order=order,
                npiece=npiece,
                blparam=blparam,
                clip_threshold_sigma=clipthresh,
                num_fitting_max=clipniter + 1,
                linefinding=(maskmode == 'auto'),
                threshold=thresh,
                avg_limit=avg_limit,
                minwidth=minwidth,
                edge=edge)
            if overwrite:
                if os.path.exists(outfile):
                    os.system('rm -rf %s' % outfile)
                for bloutfile in new_bloutput:
                    if os.path.exists(bloutfile):
                        os.system('rm -rf %s' % bloutfile)
            if blformat != ',,':
                func(**params)
            if blfunc == 'variable':
                restore_sorted_table_keyword(infile, sorttab_info)
    except Exception:
        # re-raise unchanged; the original 'raise Exception, instance'
        # discarded both the concrete exception type and the traceback
        raise
tbcal.putcol('TIME', tbcal.getcol('TIME') + 0.5 * interval) applycal(vis=vis, docallib=False, gaintable=[caltable], applymode='calonly') except Exception, e: # Tsys application failed so that reset WEIGHT and SIGMA to 1.0 _resetweight(vis) raise e finally: if os.path.exists(caltable): casalog.post('remove %s...' % (caltable)) os.system('rm -rf %s' % (caltable)) # remove CORRECTED_DATA column casalog.post('remove CORRECTED_DATA from %s...' % (vis)) with sdutil.tbmanager(vis, nomodify=False) as tb: if 'CORRECTED_DATA' in tb.colnames(): tb.removecols('CORRECTED_DATA') else: # initweights failed so reset WEIGHT and SIGMA to 1.0 _resetweight(vis) def _resetweight(vis): # work with private tb tool casalog.post( 'fill WEIGHT and SIGMA failed. reset all WEIGHT and SIGMA to 1.0...', priority='WARN') with sdutil.tbmanager(vis, nomodify=False) as tb: for column in ['WEIGHT', 'SIGMA']:
with sdutil.tbmanager(caltable, nomodify=False) as tbcal: with sdutil.tbmanager(vis) as tbvis: interval = tbvis.getcol('INTERVAL')[0] tbcal.putcol('TIME', tbcal.getcol('TIME') + 0.5*interval) applycal(vis=vis, docallib=False, gaintable=[caltable], applymode='calonly') except Exception, e: # Tsys application failed so that reset WEIGHT and SIGMA to 1.0 _resetweight(vis) raise e finally: if os.path.exists(caltable): casalog.post('remove %s...'%(caltable)) os.system('rm -rf %s'%(caltable)) # remove CORRECTED_DATA column casalog.post('remove CORRECTED_DATA from %s...'%(vis)) with sdutil.tbmanager(vis, nomodify=False) as tb: if 'CORRECTED_DATA' in tb.colnames(): tb.removecols('CORRECTED_DATA') else: # initweights failed so reset WEIGHT and SIGMA to 1.0 _resetweight(vis) def _resetweight(vis): # work with private tb tool casalog.post('fill WEIGHT and SIGMA failed. reset all WEIGHT and SIGMA to 1.0...', priority='WARN') with sdutil.tbmanager(vis, nomodify=False) as tb: for column in ['WEIGHT', 'SIGMA']: values = tb.getvarcol(column) for v in values.values(): v[:] = 1.0
def _write_blparam_text_header(fname, infile, outfile, maskmode):
    """Write the '#'-framed summary header of a baseline-parameter text file."""
    # header reports the input table, the effective output file and the
    # mask mode; when no outfile is given the input is edited in place,
    # so report infile as the output
    outfile2 = infile if outfile == '' else outfile
    info = [['Source Table', infile],
            ['Output File', outfile2],
            ['Mask mode', maskmode]]
    separator = '#' * 60 + '\n'
    f = open(fname, 'w')
    f.write(separator)
    for item in info:
        f.write('%12s: %s\n' % tuple(item))
    f.write(separator)
    f.close()


def tsdbaseline(infile=None, datacolumn=None, antenna=None, field=None,
                spw=None, timerange=None, scan=None, pol=None, maskmode=None,
                thresh=None, avg_limit=None, minwidth=None, edge=None,
                blmode=None, dosubtract=None, blformat=None, bloutput=None,
                bltable=None, blfunc=None, order=None, npiece=None,
                applyfft=None, fftmethod=None, fftthresh=None, addwn=None,
                rejwn=None, clipthresh=None, clipniter=None, blparam=None,
                verify=None, verbose=None, showprogress=None, minnrow=None,
                outfile=None, overwrite=None):
    """Fit and/or subtract baselines from single-dish spectra in an MS.

    blmode='apply' applies an existing baseline table (bltable) to the
    selected data of infile and writes the result to outfile.
    blmode='fit' fits baselines (blfunc/order/npiece/...) and optionally
    subtracts them (dosubtract); fit results are exported in the formats
    listed in blformat with file names taken from bloutput ('csv',
    'text', 'table'; '' selects a default name derived from infile).

    Raises ValueError or Exception on invalid arguments or when an
    output file already exists and overwrite is False.
    """
    casalog.origin('tsdbaseline')
    try:
        # --- argument validation -------------------------------------
        if (outfile == '') or not isinstance(outfile, str):
            print("type=%s, value=%s" % (type(outfile), str(outfile)))
            raise ValueError("outfile name is empty.")
        if os.path.exists(outfile) and not overwrite:
            raise Exception(outfile + ' exists.')
        if maskmode == 'interact':
            raise ValueError("maskmode='%s' is not supported yet" % maskmode)
        if blfunc == 'variable' and not os.path.exists(blparam):
            raise ValueError("input file '%s' does not exists" % blparam)
        if spw == '':
            spw = '*'

        if blmode == 'apply':
            if not os.path.exists(bltable):
                raise ValueError("file specified in bltable '%s' does not exist." % bltable)

            # read subtable info; values are not used below — presumably
            # kept as a sanity check that the subtables are readable
            # (TODO confirm)
            with sdutil.tbmanager(infile + '/DATA_DESCRIPTION') as tb:
                spw_ids = tb.getcol('SPECTRAL_WINDOW_ID')
            with sdutil.tbmanager(infile + '/ANTENNA') as tb:
                ant_ids = range(tb.nrows())
            with sdutil.tbmanager(infile + '/FEED') as tb:
                feed_ids = numpy.unique(tb.getcol('FEED_ID'))

            sorttab_info = remove_sorted_table_keyword(infile)

            if overwrite and os.path.exists(outfile):
                os.system('rm -rf %s' % outfile)

            selection = ms.msseltoindex(vis=infile, spw=spw, field=field,
                                        baseline=str(antenna),
                                        time=timerange, scan=scan)
            sdms.open(infile)
            sdms.set_selection(spw=sdutil.get_spwids(selection),
                               field=field, antenna=str(antenna),
                               timerange=timerange, scan=scan)
            sdms.apply_baseline_table(bltable=bltable,
                                      datacolumn=datacolumn,
                                      spw=spw, outfile=outfile)
            sdms.close()
            restore_sorted_table_keyword(infile, sorttab_info)

        elif blmode == 'fit':
            # refuse to clobber existing bloutput file(s) unless overwrite
            blout_exists = False
            if isinstance(bloutput, str) and os.path.exists(bloutput):
                blout_exists = True
            elif isinstance(bloutput, list):
                for blout in bloutput:
                    if os.path.exists(blout):
                        blout_exists = True
                        break
            if blout_exists and not overwrite:
                raise ValueError("file(s) specified in bloutput exists.")

            selection = ms.msseltoindex(vis=infile, spw=spw, field=field,
                                        baseline=str(antenna),
                                        time=timerange, scan=scan)

            if blfunc == 'variable':
                sorttab_info = remove_sorted_table_keyword(infile)

            sdms.open(infile)
            sdms.set_selection(spw=sdutil.get_spwids(selection),
                               field=field, antenna=str(antenna),
                               timerange=timerange, scan=scan)

            # normalize blformat/bloutput to parallel lists and resolve
            # the concrete output file name for each requested format
            new_bloutput = []
            if isinstance(blformat, str):
                blformat = [blformat]
            if isinstance(bloutput, str):
                bloutput = [bloutput]
            if isinstance(blformat, list):
                if '' in blformat:
                    # an empty element disables formatted output entirely
                    blformat = ',,'
                elif len(blformat) != len(bloutput):
                    raise ValueError("The size of blfomat must be the same as that of bloutput.")
                elif [key for key, val in Counter(blformat).items() if val > 1]:
                    raise ValueError("Same multiple blformats are not allowed.")
                else:
                    if 'csv' in blformat:
                        fname = bloutput[blformat.index('csv')]
                        if fname == '':
                            fname = infile + '_blparam.csv'
                        if not overwrite and os.path.exists(fname):
                            raise Exception(fname + ' exists.')
                        new_bloutput.append(fname)
                    else:
                        new_bloutput.append('')

                    if 'text' in blformat:
                        fname = bloutput[blformat.index('text')]
                        if fname == '':
                            fname = infile + '_blparam.txt'
                        if not overwrite and os.path.exists(fname):
                            raise Exception(fname + ' exists.')
                        new_bloutput.append(fname)
                        # NOTE: the original default-name branch wrote
                        # ['Output File', outfile] while computing but
                        # never using outfile2; both branches now report
                        # the effective output file consistently
                        _write_blparam_text_header(fname, infile,
                                                   outfile, maskmode)
                    else:
                        new_bloutput.append('')

                    if 'table' in blformat:
                        fname = bloutput[blformat.index('table')]
                        if fname == '':
                            fname = infile + '_blparam.bltable'
                        if not overwrite and os.path.exists(fname):
                            raise Exception(fname + ' exists.')
                        new_bloutput.append(fname)
                    else:
                        new_bloutput.append('')

            # NOTE(review): when blformat contained '', blformat is the
            # string ',,' here and the join below turns it into ',,,',
            # so the blformat != ',,' guard further down still calls
            # func — confirm this is the intended behavior
            blformat = ",".join(blformat)
            bloutput = ",".join(new_bloutput)

            params, func = prepare_for_baselining(
                blfunc=blfunc,
                datacolumn=datacolumn,
                outfile=outfile,
                bltable=bloutput,  # remove this line once text/csv output becomes available (2015/7/1 WK)
                blformat=blformat,
                bloutput=bloutput,
                dosubtract=dosubtract,
                spw=spw,
                pol=pol,
                order=order,
                npiece=npiece,
                blparam=blparam,
                clip_threshold_sigma=clipthresh,
                num_fitting_max=clipniter + 1,
                linefinding=(maskmode == 'auto'),
                threshold=thresh,
                avg_limit=avg_limit,
                minwidth=minwidth,
                edge=edge)
            if overwrite:
                if os.path.exists(outfile):
                    os.system('rm -rf %s' % outfile)
                for bloutfile in new_bloutput:
                    if os.path.exists(bloutfile):
                        os.system('rm -rf %s' % bloutfile)
            if blformat != ',,':
                func(**params)
            if blfunc == 'variable':
                restore_sorted_table_keyword(infile, sorttab_info)
    except Exception:
        # re-raise unchanged; the original 'raise Exception, instance'
        # discarded both the concrete exception type and the traceback
        raise