def test_hypercolumn(self):
    """Create a table description with a tiled hypercolumn and build a table."""
    # Scalar and array column descriptions of assorted value types.
    descs = [
        makescacoldesc("col2", "aa"),
        makescacoldesc("col1", 1, "IncrementalStMan"),
        makescacoldesc("colrec1", {}),
        makearrcoldesc("arr1", 1, 0, [2, 3, 4]),
        makearrcoldesc("arr2", 0. + 0j),
    ]
    tabdesc = maketabdesc(descs)
    # Define a 4-dim tiled hypercolumn over the 'arr1' data column.
    tabledefinehypercolumn(tabdesc, "TiledArray", 4, ["arr1"])
    tab = table("mytable", tabledesc=tabdesc, nrow=100)
    tab.done()
    tabledelete("mytable")
def test_adddmcolumns(self):
    """Add columns bound to various storage managers."""
    t = table("ttable.py_tmp.tab1",
              maketabdesc((makescacoldesc("coli", 0),
                           makescacoldesc("cold", 0.),
                           makescacoldesc("cols", ""),
                           makescacoldesc("colb", True),
                           makescacoldesc("colc", 0. + 0j),
                           makearrcoldesc("colarr", 0.))),
              ack=False)
    # A scalar column managed by the IncrementalStMan.
    t.addcols(maketabdesc(makescacoldesc("coli2", 0)),
              dminfo={'TYPE': "IncrementalStMan", 'NAME': "ism1", 'SPEC': {}})
    self.assertIn("coli2", t.colnames())
    # An array column with the default StandardStMan.
    t.addcols(maketabdesc(makearrcoldesc("colarrssm", "")))
    self.assertIn("colarrssm", t.colnames())
    # An array column managed by the TiledShapeStMan.
    t.addcols(maketabdesc(makearrcoldesc("colarrtsm", 0. + 0j, ndim=2)),
              dminfo={'TYPE': "TiledShapeStMan", 'NAME': "tsm1", 'SPEC': {}})
    self.assertIn("colarrtsm", t.colnames())
    print(t.getdminfo())
    # Reuse the dminfo of an existing column under a new group name.
    coldmi = t.getdminfo('colarrtsm')
    print(t.getcoldesc('colarrtsm'))
    coldmi["NAME"] = 'tsm2'
    t.addcols(maketabdesc(makearrcoldesc("colarrtsm2", 0., ndim=2)), coldmi)
    self.assertEqual(t.getdminfo('colarrtsm2')["NAME"], 'tsm2')
    t.removecols('colarrtsm2')
    # Write some data into the tiled column and read it back.
    t.addrows(22)
    t.putcell('colarrtsm', 0, np.array([[1, 2, 3], [4, 5, 6]]))
    t.putcell('colarrtsm', 1, t.getcell('colarrtsm', 0) + 10)
    self.assertEqual(t.getcell('colarrtsm', 0)[1, 2], 6)
    print(t.getvarcol('colarrtsm'))
    np.testing.assert_array_equal(
        t.getcellslice('colarrtsm', 0, [1, 1], [1, 2]),
        np.array([[5. + 0.j, 6. + 0.j]]))
    print(t.getvarcol('colarrtsm'))
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_tableinfo(self):
    """Exercise table info, readme and endian-format queries."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    self.assertTrue(tableexists("ttable.py_tmp.tab1"))
    self.assertTrue(tableiswritable("ttable.py_tmp.tab1"))
    self.assertEqual(t.nrows(), 0)
    self.assertEqual(t.ncols(), 6)
    self.assertTrue(compare(t.colnames(),
                            ['cols', 'colc', 'coli', 'cold', 'colb',
                             'colarr']))
    # A fresh table starts with empty info fields.
    self.assertEqual(tableinfo("ttable.py_tmp.tab1"),
                     {'readme': '', 'subType': '', 'type': ''})
    # Fill the readme and info fields and read them back.
    t.addreadmeline("test table run")
    t.putinfo({'type': 'test', 'subType': 'test1'})
    self.assertEqual(t.info()['readme'], 'test table run\n')
    self.assertEqual(t.info()['subType'], 'test1')
    self.assertEqual(t.info()['type'], 'test')
    self.assertEqual(len(t), 0)
    print(str(t))
    self.assertEqual(t.endianformat(), 'little')
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_tableinfo(self):
    """Test table info."""
    t = table("ttable.py_tmp.tab1",
              maketabdesc((makescacoldesc("coli", 0),
                           makescacoldesc("cold", 0.),
                           makescacoldesc("cols", ""),
                           makescacoldesc("colb", True),
                           makescacoldesc("colc", 0. + 0j),
                           makearrcoldesc("colarr", 0.))),
              ack=False)
    self.assertTrue(tableexists("ttable.py_tmp.tab1"))
    self.assertTrue(tableiswritable("ttable.py_tmp.tab1"))
    self.assertEqual(t.nrows(), 0)
    self.assertEqual(t.ncols(), 6)
    expected_cols = ['cols', 'colc', 'coli', 'cold', 'colb', 'colarr']
    self.assertTrue(compare(t.colnames(), expected_cols))
    # Info fields of a newly created table are empty.
    self.assertEqual(tableinfo("ttable.py_tmp.tab1"),
                     {'readme': '', 'subType': '', 'type': ''})
    # Populate readme and info, then verify the round trip.
    t.addreadmeline("test table run")
    t.putinfo({'type': 'test', 'subType': 'test1'})
    self.assertEqual(t.info()['readme'], 'test table run\n')
    self.assertEqual(t.info()['subType'], 'test1')
    self.assertEqual(t.info()['type'], 'test')
    self.assertEqual(len(t), 0)
    print(str(t))
    self.assertEqual(t.endianformat(), 'little')
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_adddmcolumns(self):
    """Add some columns, each bound to a specific data manager."""
    coldescs = (makescacoldesc("coli", 0),
                makescacoldesc("cold", 0.),
                makescacoldesc("cols", ""),
                makescacoldesc("colb", True),
                makescacoldesc("colc", 0. + 0j),
                makearrcoldesc("colarr", 0.))
    t = table("ttable.py_tmp.tab1", maketabdesc(coldescs), ack=False)
    # Scalar column stored with the IncrementalStMan.
    ism_dm = {'TYPE': "IncrementalStMan", 'NAME': "ism1", 'SPEC': {}}
    t.addcols(maketabdesc(makescacoldesc("coli2", 0)), dminfo=ism_dm)
    self.assertIn("coli2", t.colnames())
    # Array column with the (default) StandardStMan.
    t.addcols(maketabdesc(makearrcoldesc("colarrssm", "")))
    self.assertIn("colarrssm", t.colnames())
    # Array column with the TiledShapeStMan.
    tsm_dm = {'TYPE': "TiledShapeStMan", 'NAME': "tsm1", 'SPEC': {}}
    t.addcols(maketabdesc(makearrcoldesc("colarrtsm", 0. + 0j, ndim=2)),
              dminfo=tsm_dm)
    self.assertIn("colarrtsm", t.colnames())
    print(t.getdminfo())
    # Clone the dminfo of an existing column under a new group name.
    coldmi = t.getdminfo('colarrtsm')
    print(t.getcoldesc('colarrtsm'))
    coldmi["NAME"] = 'tsm2'
    t.addcols(maketabdesc(makearrcoldesc("colarrtsm2", 0., ndim=2)), coldmi)
    self.assertEqual(t.getdminfo('colarrtsm2')["NAME"], 'tsm2')
    t.removecols('colarrtsm2')
    # Write and read back some cells of the tiled column.
    t.addrows(22)
    t.putcell('colarrtsm', 0, np.array([[1, 2, 3], [4, 5, 6]]))
    t.putcell('colarrtsm', 1, t.getcell('colarrtsm', 0) + 10)
    self.assertEqual(t.getcell('colarrtsm', 0)[1, 2], 6)
    print(t.getvarcol('colarrtsm'))
    np.testing.assert_array_equal(
        t.getcellslice('colarrtsm', 0, [1, 1], [1, 2]),
        np.array([[5. + 0.j, 6. + 0.j]]))
    print(t.getvarcol('colarrtsm'))
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def add_col(tbl, colnme):
    """Add a complex array column `colnme` to the MS, cloning the element
    shape and data manager of the existing DATA column."""
    dminfo = tbl.getdminfo("DATA")
    dminfo["NAME"] = colnme
    data_shape = tbl.getcell("DATA", 0).shape
    coldesc = makearrcoldesc(colnme, 0.0 + 0.0j,
                             valuetype="complex", shape=data_shape)
    tbl.addcols(maketabdesc(coldesc), dminfo, addtoparent=True)
def add_phased_array_table(oskar_ms_name: str):
    """Add a PHASED_ARRAY subtable to the measurement set.

    The subtable gets one row per antenna and the columns POSITION,
    COORDINATE_AXES, ELEMENT_OFFSET and ELEMENT_FLAG.

    Parameters
    ----------
    oskar_ms_name : str
        Path of the measurement set to extend.
    """
    anttable = pt.table(f"{oskar_ms_name}::ANTENNA", ack=False)
    phasedarraytable = pt.table(f"{oskar_ms_name}/PHASED_ARRAY",
                                pt.maketabdesc([]),
                                nrow=anttable.nrows())
    # Register the new subtable in the main table's keywords.
    oskar_ms = pt.table(f"{oskar_ms_name}", readonly=False, ack=False)
    oskar_ms.putkeyword("PHASED_ARRAY", phasedarraytable, makesubrecord=True)
    oskar_ms.close()
    # POSITION: clone the ANTENNA POSITION column description.
    position_coldesc = anttable.getcoldesc("POSITION")
    position_coldesc['comment'] = 'Position of antenna field'
    position_coldesc['name'] = 'POSITION'
    phasedarraytable.addcols(position_coldesc)
    # BUG FIX: the ANTENNA table was never closed.
    anttable.close()
    # COORDINATE_AXES: fixed 3x3 local coordinate system per station.
    coordinate_system_coldesc = pt.makearrcoldesc(
        "COORDINATE_AXES", 0., shape=[3, 3],
        comment="Local coordinate system",
        valuetype='double',
        keywords={'QuantumUnits': ['m', 'm', 'm'],
                  'MEASINFO': {'Ref': 'ITRF', 'type': 'direction'}})
    phasedarraytable.addcols(coordinate_system_coldesc)
    # Initialise the fixed-shape column so its cells are defined.
    # (taql resolves $phasedarraytable from the local variable.)
    pt.taql("UPDATE $phasedarraytable SET COORDINATE_AXES=0.")
    # ELEMENT_OFFSET: per-element position offsets.
    element_offset_coldesc = pt.makearrcoldesc(
        "ELEMENT_OFFSET", 0., ndim=2,
        comment="Offset per element",
        valuetype='double',
        keywords={'QuantumUnits': ['m', 'm', 'm'],
                  "MEASINFO": {"type": "position", "Ref": "ITRF"}})
    phasedarraytable.addcols(element_offset_coldesc)
    # ELEMENT_FLAG: per-element flags.
    # BUG FIX: the comment string was a copy-paste of ELEMENT_OFFSET's.
    element_flag_coldesc = pt.makearrcoldesc(
        "ELEMENT_FLAG", 0., ndim=2,
        comment="Flag per element",
        valuetype='bool')
    phasedarraytable.addcols(element_flag_coldesc)
    phasedarraytable.close()
def test_subtables(self):
    """Store a table as a subtable keyword and list the subtables."""
    scalars = [makescacoldesc("coli", 0),
               makescacoldesc("cold", 0.),
               makescacoldesc("cols", "")]
    extras = [makescacoldesc("colb", True),
              makescacoldesc("colc", 0. + 0j),
              makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(scalars + extras), ack=False)
    # A second table becomes a subtable by storing it in a keyword.
    sub = table("sub", maketabdesc(scalars))
    t.putkeyword("subtablename", sub, makesubrecord=True)
    print(t.getsubtables())
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_deletecols(self):
    """Delete some columns and verify each one is gone."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    removed = ['colarr', 'cols', 'colb', 'colc']
    t.removecols(removed)
    # BUG FIX: the original asserted `assertNotIn(<list>, colnames())`,
    # which is trivially true because a list is never a column name;
    # check every removed column individually instead.
    for col in removed:
        self.assertNotIn(col, t.colnames())
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_iter(self):
    """Iterate over unique values of a column with tableiter."""
    cds = [makescacoldesc("coli", 0), makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""), makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j), makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    t.addrows(2)
    # Each iteration step yields a reference table for one 'coli' value.
    for subtab in t.iter('coli', sort=False):
        print(subtab.getcol('coli'), subtab.rownumbers(t))
        subtab.close()
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_adddata(self):
    """Add rows and fill cells of an integer column."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    t.addrows(22)
    # BUG FIX: `i / 2` produces a float under Python 3 for this int
    # column; the Python-2-era code relied on integer division.
    for i in range(2, 22):
        t.putcell('coli', i, i // 2)
    print(t[10])
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_taqlcalc(self):
    """Evaluate TaQL calc expressions against the table."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    t.addrows(2)
    # Unit conversion: 1 km expressed in cm.
    np.testing.assert_array_almost_equal(t.calc("(1 km)cm"),
                                         np.array([100000.]))
    # Per-row expression on a column.
    np.testing.assert_array_equal(t.calc("coli+1"), np.array([1, 1]))
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_taqlcalc(self):
    """Some TaQL calculations."""
    coldescs = (makescacoldesc("coli", 0),
                makescacoldesc("cold", 0.),
                makescacoldesc("cols", ""),
                makescacoldesc("colb", True),
                makescacoldesc("colc", 0. + 0j),
                makearrcoldesc("colarr", 0.))
    t = table("ttable.py_tmp.tab1", maketabdesc(coldescs), ack=False)
    t.addrows(2)
    # A unit-conversion expression evaluates per table, not per row.
    km_in_cm = t.calc("(1 km)cm")
    np.testing.assert_array_almost_equal(km_in_cm, np.array([100000.]))
    # A column expression evaluates per row.
    np.testing.assert_array_equal(t.calc("coli+1"), np.array([1, 1]))
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_subset(self):
    """Select a subset via query() and via the equivalent TaQL statement."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    # Same selection expressed through query() and through raw TaQL;
    # both must expose the same columns.
    t1 = t.query('coli >0', sortlist='coli desc', columns='coli,cold')
    querycols = t1.colnames()
    t1 = taql('select coli,cold from $t where coli>0 order by coli desc')
    taqlcol = t1.colnames()
    self.assertEqual(querycols, taqlcol)
    t1.close()
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_check_datatypes(self):
    """Verify the data type reported for each column."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    # Column name -> expected casacore data type.
    expected = {'coli': 'int',
                'cold': 'double',
                'cols': 'string',
                'colb': 'boolean',
                'colc': 'dcomplex',
                'colarr': 'double'}
    for col, dtype in expected.items():
        self.assertEqual(t.coldatatype(col), dtype)
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_tablecolumn(self):
    """Exercise the tablecolumn accessor object."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    t.addrows(20)
    with tablecolumn(t, 'coli') as tc:
        # Cell access and slicing.
        tc[6] += 20
        self.assertEqual(tc[6], 20)
        self.assertIn(20, tc[18:4:-2])
        self.assertEqual(tc[0:][6], 20)
        # Column properties.
        self.assertEqual(tc.datatype(), 'int')
        self.assertEqual(tc.name(), 'coli')
        print(tc.table())
        self.assertTrue(tc.isscalar())
        self.assertFalse(tc.isvar())
        self.assertEqual(tc.nrows(), 20)
        self.assertTrue(tc.iscelldefined(2))
        self.assertEqual(tc.getcell(3), 0)
        print(tc._)
        self.assertEqual(tc.getcol(2, 15)[4], 20)
        # Keyword handling on the column.
        tc.putkeyword('key1', "keyval")
        self.assertIn('key1', tc.keywordnames())
        self.assertIn('key1', tc.fieldnames())
        # Data manager bookkeeping.
        self.assertEqual(tc.getdesc()['dataManagerType'], 'StandardStMan')
        self.assertEqual(tc.getdminfo()['TYPE'], 'StandardStMan')
        for iter_ in tc.iter(sort=False):
            print(iter_[0]['coli'])
        self.assertEqual(len(tc), 20)
        self.assertEqual(tc.getkeywords()['key1'], 'keyval')
        np.testing.assert_equal(tc.getvarcol()['r1'], 0)
        tc.putcell(2, 55)
        self.assertEqual(tc[2], 55)
        iter_.close()
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_addcolumns(self):
    """Add a column cloned from an existing one and rename it."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    t.addrows(2)
    # Clone the description of 'coli' under the new name 'col2'.
    newdesc = makecoldesc("col2", t.getcoldesc('coli'))
    t.addcols(newdesc)
    self.assertEqual(t.ncols(), 7)
    self.assertIn('col2', t.colnames())
    # Renaming removes the old name and introduces the new one.
    t.renamecol("col2", "ncol2")
    self.assertNotIn('col2', t.colnames())
    self.assertIn('ncol2', t.colnames())
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_keywords(self):
    """Do keyword handling on the table and on a column."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    t.addrows(2)
    t.putkeyword('key1', "keyval")
    t.putkeyword('keyrec', {'skey1': 1, 'skey2': 3.})
    # BUG FIX: the original used assertTrue(x, 1), where 1 is only the
    # failure *message*; assertEqual actually compares the values.
    self.assertEqual(t._["keyrec"]['skey1'], 1)
    self.assertEqual(t.getkeyword('key1'), 'keyval')
    self.assertIn('key1', t.keywordnames())
    self.assertIn('keyrec', t.keywordnames())
    # Table keyword names and field names are the same set.
    self.assertEqual(t.keywordnames(), t.fieldnames())
    self.assertIn('skey1', t.fieldnames('keyrec'))
    self.assertIn('skey2', t.fieldnames('keyrec'))
    # Column keywords.
    t.putcolkeyword('coli', 'keycoli', {'colskey': 1, 'colskey2': 3.})
    self.assertEqual(t.getcolkeywords('coli')['keycoli']['colskey2'], 3)
    # Attribute-style access to columns and keywords.
    tc = t.coli
    self.assertEqual(tc[0], 0)
    self.assertEqual(t.key1, 'keyval')
    self.assertRaises(AttributeError, lambda: t.key2)
    t.removekeyword('key1')
    # BUG FIX: 'key1' was a *table* keyword; the original checked the
    # column keywords of 'coli', which trivially never contained it.
    self.assertNotIn('key1', t.keywordnames())
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def create_ms(filename, table_desc=None, dm_info=None):
    """Create an empty MS with the default expected sub-tables and columns."""
    with tables.default_ms(filename, table_desc, dm_info) as main_table:
        # Add the optional SOURCE subtable.
        source_path = os.path.join(os.getcwd(), filename, 'SOURCE')
        with tables.default_ms_subtable('SOURCE', source_path) as source_table:
            # The optional REST_FREQUENCY column appeases exportuvfits
            # (it only seems to need the column keywords).
            rest_freq_keywords = {
                'MEASINFO': {'Ref': 'LSRK', 'type': 'frequency'},
                'QuantumUnits': 'Hz',
            }
            rest_freq_desc = tables.makearrcoldesc(
                'REST_FREQUENCY', 0, valuetype='DOUBLE', ndim=1,
                keywords=rest_freq_keywords)
            source_table.addcols(rest_freq_desc)
        # Register the subtable in the main table keywords.
        main_table.putkeyword('SOURCE', 'Table: ' + source_path)
def test_copyandrename(self):
    """Copy a table, rename the copy, and check its contents."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    t_copy = tablecopy("ttable.py_tmp.tab1", "ttabel.tab1")
    self.assertEqual(t.name().split('/')[-1], 'ttable.py_tmp.tab1')
    self.assertEqual(t_copy.name().split('/')[-1], 'ttabel.tab1')
    # The copy must have the same shape as the original.
    self.assertEqual(t_copy.nrows(), t.nrows())
    self.assertEqual(t_copy.ncols(), t.ncols())
    # Renaming on disk is reflected in the open table object.
    tablerename("ttabel.tab1", "renamedttabel.tab1")
    self.assertEqual(t_copy.name().split('/')[-1], 'renamedttabel.tab1')
    t_copy.done()
    tabledelete("renamedttabel.tab1")
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_check_putdata(self):
    """Add rows, write column data and remove a row."""
    cds = [makescacoldesc("coli", 0),
           makescacoldesc("cold", 0.),
           makescacoldesc("cols", ""),
           makescacoldesc("colb", True),
           makescacoldesc("colc", 0. + 0j),
           makearrcoldesc("colarr", 0.)]
    t = table("ttable.py_tmp.tab1", maketabdesc(cds), ack=False)
    t.addrows(2)
    self.assertEqual(t.nrows(), 2)
    # New rows hold the column default values.
    np.testing.assert_array_equal(t.getcol('coli'), np.array([0, 0]))
    t.putcol("coli", (1, 2))
    np.testing.assert_array_equal(t.getcol('coli'), np.array([1, 2]))
    np.testing.assert_array_equal(t.getcol('cold'), np.array([0., 0.]))
    # A column can be written from an expression over another column.
    t.putcol("cold", t.getcol('coli') + 3)
    np.testing.assert_array_equal(t.getcol('cold'), np.array([4., 5.]))
    t.removerows(1)
    self.assertEqual(t.nrows(), 1)
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_check_putdata(self):
    """Add rows and put data."""
    coldescs = (makescacoldesc("coli", 0),
                makescacoldesc("cold", 0.),
                makescacoldesc("cols", ""),
                makescacoldesc("colb", True),
                makescacoldesc("colc", 0. + 0j),
                makearrcoldesc("colarr", 0.))
    t = table("ttable.py_tmp.tab1", maketabdesc(coldescs), ack=False)
    t.addrows(2)
    self.assertEqual(t.nrows(), 2)
    # Fresh rows contain default values.
    np.testing.assert_array_equal(t.getcol('coli'), np.array([0, 0]))
    # Write a tuple into an int column and read it back.
    t.putcol("coli", (1, 2))
    np.testing.assert_array_equal(t.getcol('coli'), np.array([1, 2]))
    np.testing.assert_array_equal(t.getcol('cold'), np.array([0., 0.]))
    # Derive one column from another.
    t.putcol("cold", t.getcol('coli') + 3)
    np.testing.assert_array_equal(t.getcol('cold'), np.array([4., 5.]))
    # Removing a row shrinks the table.
    t.removerows(1)
    self.assertEqual(t.nrows(), 1)
    t.close()
    tabledelete("ttable.py_tmp.tab1")
def test_msutil(self):
    """Test the msutil helpers on a small measurement set."""
    # Create a tiny MS with a fixed-shape DATA column.
    datacoldesc = makearrcoldesc("DATA", 0., ndim=2, shape=[20, 4])
    ms = default_ms("tabtemp", maketabdesc(datacoldesc))
    ms.close()
    spw = table("tabtemp/SPECTRAL_WINDOW", readonly=False)
    spw.addrows()
    spw.putcell('NUM_CHAN', 0, 20)
    t = table("tabtemp", readonly=False)
    print(t.colnames())
    # Imaging columns can be added and removed.
    imaging_cols = ('MODEL_DATA', 'CORRECTED_DATA', 'IMAGING_WEIGHT')
    addImagingColumns("tabtemp")
    for col in imaging_cols:
        self.assertIn(col, t.colnames())
    removeImagingColumns("tabtemp")
    for col in imaging_cols:
        self.assertNotIn(col, t.colnames())
    # Derived MSCal columns can be added and removed.
    mscal_cols = ('PA1', 'PA2', 'LAST', 'AZEL2', 'AZEL1', 'UVW_J2000',
                  'LAST1', 'LAST2', 'HA1', 'HA2', 'HA')
    addDerivedMSCal("tabtemp")
    for col in mscal_cols:
        self.assertIn(col, t.colnames())
    removeDerivedMSCal("tabtemp")
    # BUG FIX: the original repeated the assertNotIn('HA', ...) check
    # three times; the loop checks every column exactly once.
    for col in mscal_cols:
        self.assertNotIn(col, t.colnames())
    # Split off two 10-timeslot selections and concatenate them in time.
    taql("SELECT FROM tabtemp where TIME in (SELECT DISTINCT TIME" +
         " FROM tabtemp LIMIT 10) GIVING first10.MS AS PLAIN")
    taql("SELECT FROM tabtemp where TIME in (SELECT DISTINCT TIME" +
         " FROM tabtemp LIMIT 10 OFFSET 10) GIVING second10.MS AS PLAIN")
    msconcat(["first10.MS", "second10.MS"], "combined.MS", concatTime=True)
    spw.close()
    t.close()
    tabledelete("tabtemp")
def makeMAIN2(tablename, specdata, time, state_id, texp=0.2, tBW=2.5e9):
    """Create and fill the MAIN table of a two-spectral-window data set.

    Rows alternate between the two spectral windows: even rows hold
    spw 0 and odd rows spw 1, so the table gets ``2 * nrow`` rows for
    ``nrow`` time samples.

    Parameters
    ----------
    tablename : str
        Path of the table to create.
    specdata : ndarray
        Spectral data; assumed shape (nrow, nspw=2, npol, nchan) from the
        indexing below.
    time : ndarray
        Time stamps, one per original row.
    state_id : int or ndarray
        Value(s) written to the STATE_ID column.
    texp : float
        Exposure / integration time in seconds.
    tBW : float
        Total bandwidth in Hz, used for the WEIGHT/SIGMA computation.
    """
    import casacore.tables as tb

    # Dimensions (the unused nspw/npol locals of the original are dropped).
    nrow = specdata.shape[0]
    nchan = specdata.shape[3]
    # Radiometer-style weight and its reciprocal-sqrt sigma per channel.
    weight = tBW / float(nchan) * texp
    sigma = (tBW / float(nchan) * texp) ** -0.5
    # Row parity selects the spectral window (0 for even, 1 for odd rows).
    ind_spw = (np.linspace(0, 2 * nrow - 1, 2 * nrow, dtype='int32') % 2)

    # Column layout of the MAIN table.
    colnames = ['UVW', 'FLAG', 'FLAG_CATEGORY', 'WEIGHT', 'SIGMA',
                'ANTENNA1', 'ANTENNA2', 'ARRAY_ID', 'DATA_DESC_ID',
                'EXPOSURE', 'FEED1', 'FEED2', 'FIELD_ID', 'FLAG_ROW',
                'INTERVAL', 'OBSERVATION_ID', 'PROCESSOR_ID', 'SCAN_NUMBER',
                'STATE_ID', 'TIME', 'TIME_CENTROID', 'FLOAT_DATA']
    colkeywords = [
        {'MEASINFO': {'Ref': 'ITRF', 'type': 'uvw'},
         'QuantumUnits': np.array(['m', 'm', 'm'], dtype='|S2')},
        {},
        {'CATEGORY': np.array([], dtype='|S1')},
        {}, {}, {}, {}, {}, {},
        {'QuantumUnits': np.array(['s'], dtype='|S2')},
        {}, {}, {}, {},
        {'QuantumUnits': np.array(['s'], dtype='|S2')},
        {}, {}, {}, {},
        {'MEASINFO': {'Ref': 'UTC', 'type': 'epoch'},
         'QuantumUnits': np.array(['s'], dtype='|S2')},
        {'MEASINFO': {'Ref': 'UTC', 'type': 'epoch'},
         'QuantumUnits': np.array(['s'], dtype='|S2')},
        {'UNIT': 'K'}]
    ndims = [1, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1,
             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
    isarrays = [True, True, True, True, True, False, False, False, False,
                False, False, False, False, False, False, False, False,
                False, False, False, False, True]
    valuetypes = ['double', 'bool', 'bool', 'float', 'float', 'int', 'int',
                  'int', 'int', 'double', 'int', 'int', 'int', 'bool',
                  'double', 'int', 'int', 'int', 'int', 'double', 'double',
                  'float']

    # Build the column descriptions.
    descs = []
    for colname, colkeyword, ndim, valuetype, isarray in zip(
            colnames, colkeywords, ndims, valuetypes, isarrays):
        if colname == 'UVW':
            # UVW gets a fixed shape of 3 (options=5 as in the original).
            descs.append(tb.makearrcoldesc(
                colname, 0.0, datamanagertype='StandardStMan',
                datamanagergroup='StandardStMan', ndim=ndim,
                keywords=colkeyword, valuetype=valuetype, options=5,
                shape=np.array([3], dtype='int32')))
        elif isarray:
            descs.append(tb.makearrcoldesc(
                colname, 0.0, datamanagertype='StandardStMan',
                datamanagergroup='StandardStMan', ndim=ndim,
                keywords=colkeyword, valuetype=valuetype))
        else:
            descs.append(tb.makescacoldesc(
                colname, 0.0, datamanagertype='StandardStMan',
                datamanagergroup='StandardStMan',
                keywords=colkeyword, valuetype=valuetype))
    td = tb.maketabdesc(descs=descs)
    returned_table = tb.table(tablename, tabledesc=td, nrow=2 * nrow,
                              readonly=False)

    # Fill the columns.
    returned_table.putcol('UVW', np.zeros([2 * nrow, 3], dtype='float64'))
    returned_table.putcol('SIGMA', np.full([2 * nrow, 2], sigma))
    returned_table.putcol('WEIGHT', np.full([2 * nrow, 2], weight))
    value = np.zeros([2 * nrow], dtype='int32')
    for col in ('ANTENNA1', 'ANTENNA2', 'ARRAY_ID', 'FEED1', 'FEED2',
                'FIELD_ID', 'OBSERVATION_ID', 'PROCESSOR_ID'):
        returned_table.putcol(col, value)
    returned_table.putcol('FLAG_ROW', np.zeros([2 * nrow], dtype='bool'))
    value = np.full([2 * nrow], texp, dtype='float64')
    returned_table.putcol('EXPOSURE', value)
    returned_table.putcol('INTERVAL', value)
    # FLAG: all False, reordered to (row, chan, pol) as in the original.
    both_spw = np.concatenate([specdata[:, 0], specdata[:, 1]], axis=0)
    value = np.zeros_like(both_spw, dtype='bool')
    returned_table.putcol('FLAG', value.transpose(0, 2, 1))
    # FLOAT_DATA: interleave the two spectral windows row-wise.
    value = np.zeros_like(both_spw, dtype='float64')
    for i in range(2):
        value[ind_spw == i] = specdata[:, i].copy()
    returned_table.putcol('FLOAT_DATA', value.transpose(0, 2, 1))
    # BUG FIX: a dead block that filled an array with state_id and then
    # immediately overwrote it without a putcol has been removed.
    value = np.zeros(2 * nrow, dtype='int32')
    for i in range(2):
        value[ind_spw == i] = i
    returned_table.putcol('DATA_DESC_ID', value)
    value = np.zeros(2 * nrow, dtype='int32')
    for i in range(2):
        value[ind_spw == i] = state_id
    returned_table.putcol('STATE_ID', value)
    value = np.zeros(2 * nrow, dtype='int32')
    for i in range(2):
        value[ind_spw == i] = np.linspace(0, nrow - 1, nrow, dtype='int32')
    returned_table.putcol('SCAN_NUMBER', value)
    value = np.zeros(2 * nrow, dtype='float64')
    for i in range(2):
        value[ind_spw == i] = time.copy()
    returned_table.putcol('TIME', value)
    returned_table.putcol('TIME_CENTROID', value)
    returned_table.flush()
    returned_table.close()
def test_required_desc(self):
    """Testing required_desc."""
    # =============================================
    # TEST 1: create a default Measurement Set.
    # =============================================
    with default_ms("ttable.py_tmp.ms1") as ms1:
        pass
    # =============================================
    # TEST 2: create an MS with a modified UVW column, an additional
    # MODEL_DATA column, and specs for the column data managers.
    # =============================================
    ms2_desc = required_ms_desc("MAIN")
    # Switch UVW to a Tiled Column Storage Manager.
    ms2_desc["UVW"].update(options=0, shape=[3], ndim=1,
                           dataManagerGroup="UVW",
                           dataManagerType='TiledColumnStMan')
    dmgroup_spec = {"UVW": {"DEFAULTTILESHAPE": [3, 128 * 64]}}
    # MODEL_DATA array column plus its data manager group spec.
    model_data_desc = makearrcoldesc("MODEL_DATA", 0.0, options=4,
                                     valuetype="complex", shape=[16, 4],
                                     ndim=2,
                                     datamanagertype="TiledColumnStMan",
                                     datamanagergroup="DataGroup")
    dmgroup_spec.update({"DataGroup": {"DEFAULTTILESHAPE": [4, 16, 32]}})
    # Incorporate the column into the table description.
    ms2_desc.update(maketabdesc(model_data_desc))
    # Data manager info built from the description and the group spec.
    ms2_dminfo = makedminfo(ms2_desc, dmgroup_spec)
    with default_ms("ttable.py_tmp.ms2", ms2_desc, ms2_dminfo) as ms2:
        # UVW was constructed as requested.
        desc = ms2.getcoldesc("UVW")
        self.assertTrue(desc["dataManagerType"] == "TiledColumnStMan")
        self.assertTrue(desc["dataManagerGroup"] == "UVW")
        self.assertTrue(desc["valueType"] == "double")
        self.assertTrue(desc["ndim"] == 1)
        self.assertTrue(np.all(desc["shape"] == [3]))
        dminfo = ms2.getdminfo("UVW")
        self.assertTrue(dminfo["NAME"] == "UVW")
        self.assertTrue(dminfo["TYPE"] == "TiledColumnStMan")
        self.assertTrue(
            np.all(dminfo["SPEC"]["DEFAULTTILESHAPE"] == [3, 128 * 64]))
        self.assertTrue(
            np.all(dminfo["SPEC"]["HYPERCUBES"]["*1"]["TileShape"]
                   == [3, 128 * 64]))
        self.assertTrue("MODEL_DATA" in ms2.colnames())
        # MODEL_DATA was constructed as requested.
        desc = ms2.getcoldesc("MODEL_DATA")
        self.assertTrue(desc["dataManagerType"] == "TiledColumnStMan")
        self.assertTrue(desc["dataManagerGroup"] == "DataGroup")
        self.assertTrue(desc["valueType"] == "complex")
        self.assertTrue(desc["ndim"] == 2)
        self.assertTrue(np.all(desc["shape"] == [16, 4]))
        dminfo = ms2.getdminfo("MODEL_DATA")
        self.assertTrue(dminfo["NAME"] == "DataGroup")
        self.assertTrue(dminfo["TYPE"] == "TiledColumnStMan")
        self.assertTrue(
            np.all(dminfo["SPEC"]["DEFAULTTILESHAPE"] == [4, 16, 32]))
        self.assertTrue(
            np.all(dminfo["SPEC"]["HYPERCUBES"]["*1"]["TileShape"]
                   == [4, 16, 32]))
    # =============================================
    # TEST 3: subtable creation.
    # =============================================
    for c in subtables:
        # The default description for this subtable must be retrievable.
        def_subt_desc = required_ms_desc(c)
        # Don't use it though (too much to check); build a small one.
        model_data_desc = makearrcoldesc("MODEL_DATA", 0.0, options=4,
                                         valuetype="complex",
                                         shape=[16, 4], ndim=2,
                                         datamanagertype="TiledColumnStMan",
                                         datamanagergroup="DataGroup")
        dmgroup_spec = {"DataGroup": {"DEFAULTTILESHAPE": [4, 16, 32]}}
        tabdesc = maketabdesc(model_data_desc)
        dminfo = makedminfo(tabdesc, dmgroup_spec)
        subtname = "ttable.py_tmp_subt_%s.ms" % c
        with default_ms_subtable(c, subtname, tabdesc, dminfo) as subt:
            self.assertTrue('MODEL_DATA' in subt.colnames())
            dminfo = subt.getdminfo("MODEL_DATA")
            self.assertTrue(dminfo["NAME"] == "DataGroup")
            self.assertTrue(dminfo["TYPE"] == "TiledColumnStMan")
            self.assertTrue(
                np.all(dminfo["SPEC"]["DEFAULTTILESHAPE"] == [4, 16, 32]))
            self.assertTrue(
                np.all(dminfo["SPEC"]["HYPERCUBES"]["*1"]["TileShape"]
                       == [4, 16, 32]))
def test_required_desc(self):
    """Testing required_desc."""
    # =============================================
    # TEST 1: a default Measurement Set can be created.
    # =============================================
    with default_ms("ttable.py_tmp.ms1") as ms1:
        pass
    # =============================================
    # TEST 2: an MS with a modified UVW column, an extra MODEL_DATA
    # column, and explicit data manager specs.
    # =============================================
    ms2_desc = required_ms_desc("MAIN")
    # Move UVW onto a Tiled Column Storage Manager.
    ms2_desc["UVW"].update(options=0, shape=[3], ndim=1,
                           dataManagerGroup="UVW",
                           dataManagerType='TiledColumnStMan')
    dmgroup_spec = {"UVW": {"DEFAULTTILESHAPE": [3, 128 * 64]}}
    # Describe MODEL_DATA and its data manager group.
    model_data_desc = makearrcoldesc("MODEL_DATA", 0.0, options=4,
                                     valuetype="complex", shape=[16, 4],
                                     ndim=2,
                                     datamanagertype="TiledColumnStMan",
                                     datamanagergroup="DataGroup")
    dmgroup_spec.update({"DataGroup": {"DEFAULTTILESHAPE": [4, 16, 32]}})
    ms2_desc.update(maketabdesc(model_data_desc))
    # Derive the data manager info from description plus group spec.
    ms2_dminfo = makedminfo(ms2_desc, dmgroup_spec)
    with default_ms("ttable.py_tmp.ms2", ms2_desc, ms2_dminfo) as ms2:
        # UVW column checks.
        desc = ms2.getcoldesc("UVW")
        self.assertEqual(desc["dataManagerType"], "TiledColumnStMan")
        self.assertEqual(desc["dataManagerGroup"], "UVW")
        self.assertEqual(desc["valueType"], "double")
        self.assertEqual(desc["ndim"], 1)
        self.assertTrue(np.all(desc["shape"] == [3]))
        dminfo = ms2.getdminfo("UVW")
        self.assertEqual(dminfo["NAME"], "UVW")
        self.assertEqual(dminfo["TYPE"], "TiledColumnStMan")
        self.assertTrue(
            np.all(dminfo["SPEC"]["DEFAULTTILESHAPE"] == [3, 128 * 64]))
        self.assertTrue(
            np.all(dminfo["SPEC"]["HYPERCUBES"]["*1"]["TileShape"]
                   == [3, 128 * 64]))
        self.assertIn("MODEL_DATA", ms2.colnames())
        # MODEL_DATA column checks.
        desc = ms2.getcoldesc("MODEL_DATA")
        self.assertEqual(desc["dataManagerType"], "TiledColumnStMan")
        self.assertEqual(desc["dataManagerGroup"], "DataGroup")
        self.assertEqual(desc["valueType"], "complex")
        self.assertEqual(desc["ndim"], 2)
        self.assertTrue(np.all(desc["shape"] == [16, 4]))
        dminfo = ms2.getdminfo("MODEL_DATA")
        self.assertEqual(dminfo["NAME"], "DataGroup")
        self.assertEqual(dminfo["TYPE"], "TiledColumnStMan")
        self.assertTrue(
            np.all(dminfo["SPEC"]["DEFAULTTILESHAPE"] == [4, 16, 32]))
        self.assertTrue(
            np.all(dminfo["SPEC"]["HYPERCUBES"]["*1"]["TileShape"]
                   == [4, 16, 32]))
    # =============================================
    # TEST 3: every standard subtable can be created.
    # =============================================
    subtables = ("ANTENNA", "DATA_DESCRIPTION", "DOPPLER", "FEED", "FIELD",
                 "FLAG_CMD", "FREQ_OFFSET", "HISTORY", "OBSERVATION",
                 "POINTING", "POLARIZATION", "PROCESSOR", "SOURCE",
                 "SPECTRAL_WINDOW", "STATE", "SYSCAL", "WEATHER")
    for c in subtables:
        # The default description for this subtable must be retrievable.
        def_subt_desc = required_ms_desc(c)
        # Don't use it though (too much to check); build a small one.
        model_data_desc = makearrcoldesc("MODEL_DATA", 0.0, options=4,
                                         valuetype="complex",
                                         shape=[16, 4], ndim=2,
                                         datamanagertype="TiledColumnStMan",
                                         datamanagergroup="DataGroup")
        dmgroup_spec = {"DataGroup": {"DEFAULTTILESHAPE": [4, 16, 32]}}
        tabdesc = maketabdesc(model_data_desc)
        dminfo = makedminfo(tabdesc, dmgroup_spec)
        subtname = "ttable.py_tmp_subt_%s.ms" % c
        with default_ms_subtable(c, subtname, tabdesc, dminfo) as subt:
            self.assertIn('MODEL_DATA', subt.colnames())
            dminfo = subt.getdminfo("MODEL_DATA")
            self.assertEqual(dminfo["NAME"], "DataGroup")
            self.assertEqual(dminfo["TYPE"], "TiledColumnStMan")
            self.assertTrue(
                np.all(dminfo["SPEC"]["DEFAULTTILESHAPE"] == [4, 16, 32]))
            self.assertTrue(
                np.all(dminfo["SPEC"]["HYPERCUBES"]["*1"]["TileShape"]
                       == [4, 16, 32]))
def addcol(msname, colname=None, shape=None,
           data_desc_type="array", valuetype=None,
           init_with=None, coldesc=None, coldmi=None,
           clone="DATA", rowchunk=None):
    """
    Add a column to an MS, optionally initializing it with a value.

    Parameters
    ----------
    msname : str
        MS to which to add the column.
    colname : str
        Name of the column to be added.
    shape : sequence of int, optional
        Shape of the array cells of the new column.
    valuetype : str, optional
        Value type of the column (defaults to ``"complex"``).
    data_desc_type : str
        * ``scalar`` - scalar elements
        * ``array`` - array elements
    init_with : numpy scalar or array, optional
        Value to initialize the column with. When ``None`` the column is
        added but not written to.
    coldesc : dict, optional
        Explicit column description; overrides ``shape``/``valuetype``.
    coldmi : dict, optional
        Data-manager info forwarded to ``table.addcols``.
    clone : str
        Existing column whose cell shape and element type are copied when
        neither ``coldesc`` nor ``shape`` is supplied.
    rowchunk : int, optional
        Number of rows initialized per ``putcol`` call
        (default: one tenth of the rows per spectral window, at least 1).

    Returns
    -------
    str or None
        ``"exists"`` if the column is already present, ``"added"`` when the
        column was added without initialization, ``None`` after the column
        has been added and initialized.
    """
    tab = table(msname, readonly=False)
    if colname in tab.colnames():
        print("Column already exists")
        return "exists"
    print("Attempting to add %s column to %s" % (colname, msname))

    valuetype = valuetype or "complex"

    # Build the table description for the new column.
    if coldesc:
        data_desc = coldesc
        shape = coldesc["shape"]
    elif shape:
        data_desc = maketabdesc(
            makearrcoldesc(colname, init_with, shape=shape,
                           valuetype=valuetype))
    elif valuetype == "scalar":
        # NOTE(review): "scalar" is not a casacore value type and this branch
        # still calls makearrcoldesc; looks like it was meant to test
        # data_desc_type and use makescacoldesc — confirm before relying on it.
        data_desc = maketabdesc(
            makearrcoldesc(colname, init_with, valuetype=valuetype))
    elif clone:
        element = tab.getcell(clone, 0)
        try:
            shape = element.shape
            data_desc = maketabdesc(
                makearrcoldesc(colname, element.flatten()[0],
                               shape=shape, valuetype=valuetype))
        except AttributeError:
            # The cloned column holds scalar cells (no .shape attribute).
            shape = []
            data_desc = maketabdesc(
                makearrcoldesc(colname, element, valuetype=valuetype))
    else:
        # BUG FIX: previously data_desc was left unbound here, producing a
        # confusing NameError at addcols time.
        tab.close()
        raise ValueError(
            "Cannot construct a column description: "
            "supply coldesc, shape, or clone")

    colinfo = [data_desc, coldmi] if coldmi else [data_desc]
    tab.addcols(*colinfo)
    print("Column added successfully.")

    if init_with is None:
        tab.close()
        return "added"

    # Initialize the new column one spectral window at a time, so every
    # row subset shares a consistent cell shape.
    spwids = set(tab.getcol("DATA_DESC_ID"))
    for spw in spwids:
        print("Initializing column {0}. DDID is {1}".format(colname, spw))
        tab_spw = tab.query("DATA_DESC_ID=={0:d}".format(spw))
        nrows = tab_spw.nrows()
        # BUG FIX: nrows / 10 is a float in Python 3, which breaks
        # range(..., step); use integer division with a minimum of 1.
        chunk = rowchunk or max(1, nrows // 10)
        dshape = [0] + list(shape)
        for row0 in range(0, nrows, chunk):
            nr = min(chunk, nrows - row0)
            dshape[0] = nr
            print("Writing to column %s (rows %d to %d)"
                  % (colname, row0, row0 + nr - 1))
            dtype = init_with.dtype
            tab_spw.putcol(colname,
                           np.ones(dshape, dtype=dtype) * init_with,
                           row0, nr)
        tab_spw.close()
    tab.close()
def makePOINTING2(tablename, direction, time, nbeam=1, texp=0.2):
    """Create a POINTING table at *tablename* for the given pointings.

    ``direction`` is an (n, 2) array of J2000 directions in degrees,
    ``time`` the per-row UTC epochs in seconds, ``texp`` the per-row
    interval written to INTERVAL. Rows not explicitly written
    (NAME, TARGET) keep their default values.
    """
    import casacore.tables as tb

    direction_rad = direction / 180. * np.pi
    n_direction = direction.shape[0]
    time_end = time.max()  # kept for parity with the original (unused)

    # (column name, keywords, value type, ndim) for every POINTING column.
    col_specs = [
        ('DIRECTION',
         {'MEASINFO': {'Ref': 'J2000', 'type': 'direction'},
          'QuantumUnits': np.array(['rad', 'rad'], dtype='|S4')},
         'double', 2),
        ('ANTENNA_ID', {}, 'int', 1),
        ('INTERVAL',
         {'QuantumUnits': np.array(['s'], dtype='|S2')},
         'double', 1),
        ('NAME', {}, 'string', 1),
        ('NUM_POLY', {}, 'int', 1),
        ('TARGET',
         {'MEASINFO': {'Ref': 'J2000', 'type': 'direction'},
          'QuantumUnits': np.array(['rad', 'rad'], dtype='|S4')},
         'double', -1),
        ('TIME',
         {'MEASINFO': {'Ref': 'UTC', 'type': 'epoch'},
          'QuantumUnits': np.array(['s'], dtype='|S2')},
         'double', 1),
        ('TIME_ORIGIN',
         {'MEASINFO': {'Ref': 'UTC', 'type': 'epoch'},
          'QuantumUnits': np.array(['s'], dtype='|S2')},
         'double', 1),
        ('TRACKING', {}, 'bool', 1),
    ]

    # DIRECTION and TARGET are array columns; everything else is scalar.
    descs = []
    for name, keywords, vtype, ndim in col_specs:
        if name in ('DIRECTION', 'TARGET'):
            descs.append(tb.makearrcoldesc(
                name, 0.0,
                datamanagertype='StandardStMan',
                datamanagergroup='StandardStMan',
                ndim=ndim, keywords=keywords,
                valuetype=vtype, options=0))
        else:
            descs.append(tb.makescacoldesc(
                name, 0.0,
                datamanagertype='StandardStMan',
                datamanagergroup='StandardStMan',
                keywords=keywords, valuetype=vtype))

    pointing = tb.table(tablename,
                        tabledesc=tb.maketabdesc(descs=descs),
                        nrow=n_direction, readonly=False)

    # DIRECTION cells are (npoly=1, 2) arrays.
    dir_col = np.zeros([n_direction, 1, 2], dtype='float64')
    dir_col[:, 0] = direction_rad.copy()
    pointing.putcol('DIRECTION', dir_col)

    zeros_int = np.zeros([n_direction], dtype='int32')
    pointing.putcol('ANTENNA_ID', zeros_int)
    pointing.putcol('NUM_POLY', zeros_int)

    pointing.putcol('TIME_ORIGIN', np.zeros([n_direction], dtype='float64'))
    pointing.putcol('TRACKING', np.zeros([n_direction], dtype='bool'))
    pointing.putcol('INTERVAL', np.full([n_direction], texp, dtype='float64'))
    pointing.putcol('TIME', time)

    pointing.flush()
    pointing.close()
def modify_ms(dst_ms, avg_channel=4, ack=False):
    """Average an MS down in frequency by ``avg_channel`` channels, in place.

    Rebuilds the FLAG, DATA, MODEL_DATA, CORRECTED_DATA and IMAGING_WEIGHT
    columns of the MAIN table with weighted channel averages, and updates the
    SPECTRAL_WINDOW table (CHAN_FREQ, CHAN_WIDTH, EFFECTIVE_BW, RESOLUTION,
    REF_FREQUENCY, NUM_CHAN) to match. DOES NOT flag data.

    :param dst_ms: path of the MS to modify (modified in place).
    :param avg_channel: int, number of input channels averaged per output
        channel; must be a positive int no larger than the channel count.
    :param ack: ``ack=False`` prohibits the printing of a message telling if
        the table was opened or created successfully.
    :return: None.
    """
    assert avg_channel >= 1
    assert isinstance(avg_channel, int)
    print('parameter avg_channel = % d' % avg_channel)
    print('loading data ......', end=' ')
    with table(dst_ms, ack=ack, readonly=False) as tar_tab:
        t1 = time()
        # Capture each column's data-manager info now; it is reused verbatim
        # when the columns are re-created after removecols below.
        flag_dim = tar_tab.getdminfo(columnname="FLAG")
        # flagcategory_dim = res_tab.getdminfo(columnname="FLAG_CATEGORY")
        data_dim = tar_tab.getdminfo(columnname="DATA")
        model_dim = tar_tab.getdminfo(columnname="MODEL_DATA")
        corr_dim = tar_tab.getdminfo(columnname="CORRECTED_DATA")
        imgwg_dim = tar_tab.getdminfo(columnname="IMAGING_WEIGHT")
        # capture needed data (wrapped as dask arrays for lazy evaluation)
        weight = da.asarray(tar_tab.getcol('WEIGHT'))  # (nrow, npol)
        vis = da.asarray(tar_tab.getcol('DATA'))  # (nrow,allchan,npol)
        flag_data = da.asarray(tar_tab.getcol("FLAG"))  # (nrow,allchan,npol)
        # flag_category=da.asarray(res_tab.getcol('FLAG_CATEGORY'))  # Invalid operation, iscelldefined ====> False
        model_data = da.asarray(tar_tab.getcol("MODEL_DATA"))  # (nrow,allchan,npol)
        # corr_data= res_tab.getcol("CORRECTED_DATA")  # the column "CORRECTED_DATA" before calibration is equal to "DATA",
        imgwg_data = da.asarray(tar_tab.getcol("IMAGING_WEIGHT"))  # (nrow,allchan)
        t2 = time()
        print('time consuming %.3fs' % (t2 - t1))

        allchan = vis.shape[1]
        assert avg_channel <= allchan
        count = allchan // avg_channel   # number of full output channels
        last = allchan % avg_channel     # leftover input channels, if any
        start = 0
        i = 0
        # Broadcastable weight: (nrow, 1, npol) so it multiplies per channel.
        exp_weight = weight[:, None, :]
        cat_vis = []
        cat_flag = []
        cat_model = []
        cat_imwg = []
        if last:
            # A short trailing output channel will be produced from the
            # remaining `last` input channels.
            print("\nWARNING: the number of channels(=%d) should be divided by avg_channels(=%d). "
                  "All channels' weight are stored in same " % (allchan, avg_channel))
            count = count + 1
        # loop event: 0.9s
        print('loop event ......', end=' ')
        t3 = time()
        # Per-row weight sum over one full group of avg_channel channels.
        # NOTE(review): assumes WEIGHT applies uniformly to every channel of
        # a row (it is replicated avg_channel times) — confirm for this MS.
        write_weight = da.sum(exp_weight * da.ones(shape=(avg_channel, 1)), axis=-2)  # shape = (nrow,npol)
        while i < count:
            part_flag = flag_data[:, start, :]  # extract from first channel corresponding to each several continual channels
            part_model = model_data[:, start, :]
            part_imwg = imgwg_data[:, start:start + avg_channel]
            part_vis = vis[:, start:start + avg_channel, :]  # shape=(nrow, avg_channel, npol)
            sum_imwg = da.sum(part_imwg, axis=1)
            # Weighted sum over the channel axis, then normalize by the
            # summed weights (+1e-10 guards against division by zero).
            sum_vis = da.sum(part_vis * exp_weight, axis=-2)
            if i == count - 1 and last > 0:
                # Short final group: only `last` channels contributed.
                avg_vis = sum_vis / (weight * last + 1e-10)
            else:
                avg_vis = sum_vis / (write_weight + 1e-10)
            cat_vis.append(avg_vis)
            cat_flag.append(part_flag)
            cat_model.append(part_model)
            cat_imwg.append(sum_imwg)
            i = i + 1
            start = start + avg_channel
        # Re-assemble the averaged channels along a new channel axis.
        write_vis = da.stack(cat_vis, axis=1)
        write_flag = da.stack(cat_flag, axis=1)
        write_model = da.stack(cat_model, axis=1)
        write_imwg = da.stack(cat_imwg, axis=1)
        t4 = time()
        print('time consuming %.3fs' % (t4 - t3))

        print('dask computing ......', end=' ')
        # Materialize all lazy graphs in one pass.
        write_vis, write_flag, write_model, write_weight, write_imwg = da.compute(write_vis, write_flag, write_model, write_weight, write_imwg)
        # CORRECTED_DATA is reset to the averaged DATA (pre-calibration state).
        write_corr = write_vis
        t5 = time()
        print('time consuming %.3fs' % (t5 - t4))

        print('modifying MAIN table......', end=' ')
        # Drop the old full-resolution columns, then re-create them with the
        # averaged shapes using the data-manager info captured above.
        tar_tab.removecols(columnnames=["FLAG", "DATA", "MODEL_DATA", "CORRECTED_DATA", "IMAGING_WEIGHT"])  # "FLAG_CATEGORY",
        tar_tab.addcols(maketabdesc(makearrcoldesc(columnname="FLAG", value=False, ndim=2,
                                                   datamanagertype='TiledShapeStMan',
                                                   datamanagergroup='TiledFlag',
                                                   valuetype='boolean',
                                                   comment='The data flags, array of bools with same shape as data',
                                                   keywords={})), flag_dim)
        tar_tab.addcols(maketabdesc(makearrcoldesc(columnname="DATA", value=0.0j, ndim=2,
                                                   datamanagertype='TiledShapeStMan',
                                                   datamanagergroup='TiledDATA',
                                                   valuetype='complex',
                                                   comment='The data column',
                                                   keywords={})), data_dim)
        tar_tab.addcols(maketabdesc(makearrcoldesc(columnname="MODEL_DATA", value=0.0j, ndim=2,
                                                   datamanagertype='TiledShapeStMan',
                                                   datamanagergroup='TiledMODEL_DATA',
                                                   valuetype='complex',
                                                   comment='The model data column',
                                                   keywords={})), model_dim)
        tar_tab.addcols(maketabdesc(makearrcoldesc(columnname="CORRECTED_DATA", value=0.0j, ndim=2,
                                                   datamanagertype='TiledShapeStMan',
                                                   datamanagergroup='TiledCORRECTED_DATA',
                                                   valuetype='complex',
                                                   comment='The corrected data column',
                                                   keywords={})), corr_dim)
        tar_tab.addcols(maketabdesc(makearrcoldesc(columnname="IMAGING_WEIGHT", value=0.0, ndim=1,
                                                   datamanagertype='TiledShapeStMan',
                                                   datamanagergroup='TiledImagingWeight',
                                                   valuetype='float',
                                                   comment='Weight set by imaging task (e.g. uniform weighting)',
                                                   keywords={})), imgwg_dim)
        # Write the averaged data back into the freshly created columns.
        tar_tab.putcol(columnname="FLAG", value=write_flag)
        tar_tab.putcol(columnname="WEIGHT", value=write_weight)
        tar_tab.putcol(columnname="DATA", value=write_vis)
        tar_tab.putcol(columnname="MODEL_DATA", value=write_model)
        tar_tab.putcol(columnname="CORRECTED_DATA", value=write_corr)
        tar_tab.putcol(columnname="IMAGING_WEIGHT", value=write_imwg)
        t6 = time()
        print('Done, time consuming %.3fs' % (t6 - t5))

        print('Modifying SPECTRAL_WINDOW table......', end=' ')
        with table('%s/SPECTRAL_WINDOW' % dst_ms, ack=False, readonly=False) as tar_spwtab:
            # Capture and rename each column's data-manager info so the
            # re-created columns get distinct storage-manager names.
            chanfreq_dim = tar_spwtab.getdminfo(columnname="CHAN_FREQ")
            chanfreq_dim["NAME"] = 'StandardStManCHAN_FREQ'
            chanwidth_dim = tar_spwtab.getdminfo(columnname="CHAN_WIDTH")
            chanwidth_dim["NAME"] = 'StandardStManCHAN_WIDTH'
            effectivebw_dim = tar_spwtab.getdminfo(columnname="EFFECTIVE_BW")
            effectivebw_dim["NAME"] = 'StandardStManEFFECTIVE_BW'
            resolution_dim = tar_spwtab.getdminfo(columnname="RESOLUTION")
            resolution_dim["NAME"] = 'StandardStManRESOLUTION'
            frequency = tar_spwtab.getcol('CHAN_FREQ')
            channel_bandwidth = tar_spwtab.getcol('CHAN_WIDTH')
            nspw = frequency.shape[0]
            j = 0
            start = 0
            cat_chanfreq = []
            cat_bandwidth = []
            write_ref = numpy.average(frequency[:, 0:avg_channel], axis=1)  # ref_frequency is equal to the array chan_freq[:,0] (the first column)
            # Use Numpy instead of dask.array to process small amounts of data
            while j < count:
                avg_chanfreq = numpy.average(frequency[:, start:start + avg_channel], axis=1)
                sum_chan = numpy.sum(channel_bandwidth[:, start:start + avg_channel], axis=1)
                cat_chanfreq.append(avg_chanfreq)
                cat_bandwidth.append(sum_chan)
                j = j + 1
                start = start + avg_channel
            write_chanfreq = numpy.stack(cat_chanfreq, axis=1)
            write_bandwidth = numpy.stack(cat_bandwidth, axis=1)
            # Re-create the per-channel columns with averaged values.
            tar_spwtab.removecols(["CHAN_FREQ", "CHAN_WIDTH", "EFFECTIVE_BW", "RESOLUTION"])
            tar_spwtab.addcols(maketabdesc(makearrcoldesc(columnname="CHAN_FREQ", value=0.0, ndim=1,
                                                          datamanagertype='StandardStMan',
                                                          datamanagergroup='StandardStMan',
                                                          valuetype='double',
                                                          comment='Center frequencies for each channel in the data matrix',
                                                          keywords={'QuantumUnits': ['Hz'],
                                                                    'MEASINFO': {'type': 'frequency',
                                                                                 'VarRefCol': 'MEAS_FREQ_REF',
                                                                                 'TabRefTypes': ['REST', 'LSRK', 'LSRD', 'BARY', 'GEO', 'TOPO', 'GALACTO', 'LGROUP', 'CMB'],
                                                                                 'TabRefCodes': [0, 1, 2, 3, 4, 5, 6, 7, 8]}
                                                                    })), chanfreq_dim)
            tar_spwtab.addcols(maketabdesc(makearrcoldesc(columnname="CHAN_WIDTH", value=0.0, ndim=1,
                                                          datamanagertype='StandardStMan',
                                                          datamanagergroup='StandardStManCHAN_WIDTH',
                                                          valuetype='double',
                                                          comment='Channel width for each channel',
                                                          keywords={'QuantumUnits': ['Hz']})), chanwidth_dim)
            tar_spwtab.addcols(maketabdesc(makearrcoldesc(columnname="EFFECTIVE_BW", value=0.0, ndim=1,
                                                          datamanagertype='StandardStMan',
                                                          datamanagergroup='StandardStManEFFECTIVE_BW',
                                                          valuetype='double',
                                                          comment='Effective noise bandwidth of each channel',
                                                          keywords={'QuantumUnits': ['Hz']})), effectivebw_dim)
            tar_spwtab.addcols(maketabdesc(makearrcoldesc(columnname="RESOLUTION", value=0.0, ndim=1,
                                                          datamanagertype='StandardStMan',
                                                          datamanagergroup='StandardStManRESOLUTION',
                                                          valuetype='double',
                                                          comment='The effective noise bandwidth for each channel',
                                                          keywords={'QuantumUnits': ['Hz', ]})), resolution_dim)
            tar_spwtab.putcol(columnname="CHAN_FREQ", value=write_chanfreq)
            tar_spwtab.putcol(columnname="REF_FREQUENCY", value=write_ref)
            tar_spwtab.putcol(columnname="CHAN_WIDTH", value=write_bandwidth)
            # EFFECTIVE_BW and RESOLUTION are set equal to the summed channel
            # widths of each averaged group.
            tar_spwtab.putcol(columnname="EFFECTIVE_BW", value=write_bandwidth)
            tar_spwtab.putcol(columnname="RESOLUTION", value=write_bandwidth)
            tar_spwtab.putcol(columnname="NUM_CHAN", value=[count] * nspw)
    t7 = time()
    print('Done, time consuming %.3fs' % (t7 - t6))
    print('Finish operating channel-average on MS')